repo_name
stringlengths
5
100
ref
stringlengths
12
67
path
stringlengths
4
244
copies
stringlengths
1
8
content
stringlengths
0
1.05M
laosiaudi/tensorflow
refs/heads/master
tensorflow/examples/tutorials/monitors/iris_monitors.py
4
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model training for Iris data set using Validation Monitor."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec

tf.logging.set_verbosity(tf.logging.INFO)

# Data sets
IRIS_TRAINING = "iris_training.csv"
IRIS_TEST = "iris_test.csv"

# Load datasets.
# NOTE: np.int and np.float were deprecated aliases for the builtin int and
# float types (removed in NumPy 1.24); the builtins are used directly here.
training_set = tf.contrib.learn.datasets.base.load_csv_with_header(
    filename=IRIS_TRAINING, target_dtype=int, features_dtype=float)
test_set = tf.contrib.learn.datasets.base.load_csv_with_header(
    filename=IRIS_TEST, target_dtype=int, features_dtype=float)

# Metrics computed against the validation (test) set on every monitor run.
# prediction_key selects the "classes" output of the DNNClassifier.
validation_metrics = {
    "accuracy": MetricSpec(
        metric_fn=tf.contrib.metrics.streaming_accuracy,
        prediction_key="classes"),
    "recall": MetricSpec(
        metric_fn=tf.contrib.metrics.streaming_recall,
        prediction_key="classes"),
    "precision": MetricSpec(
        metric_fn=tf.contrib.metrics.streaming_precision,
        prediction_key="classes")
}

# Evaluate every 50 steps; stop early if "loss" has not improved (minimized)
# for 200 rounds.
validation_monitor = tf.contrib.learn.monitors.ValidationMonitor(
    test_set.data,
    test_set.target,
    every_n_steps=50,
    metrics=validation_metrics,
    early_stopping_metric="loss",
    early_stopping_metric_minimize=True,
    early_stopping_rounds=200)

# Specify that all features have real-value data
feature_columns = [tf.contrib.layers.real_valued_column("", dimension=4)]

# Build 3 layer DNN with 10, 20, 10 units respectively.
# save_checkpoints_secs=1 ensures checkpoints exist for the monitor to read.
classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns,
                                            hidden_units=[10, 20, 10],
                                            n_classes=3,
                                            model_dir="/tmp/iris_model",
                                            config=tf.contrib.learn.RunConfig(
                                                save_checkpoints_secs=1))

# Fit model.
classifier.fit(x=training_set.data,
               y=training_set.target,
               steps=2000,
               monitors=[validation_monitor])

# Evaluate accuracy.
accuracy_score = classifier.evaluate(x=test_set.data,
                                     y=test_set.target)["accuracy"]
print("Accuracy: {0:f}".format(accuracy_score))

# Classify two new flower samples.
new_samples = np.array(
    [[6.4, 3.2, 4.5, 1.5], [5.8, 3.1, 5.0, 1.7]], dtype=float)
y = list(classifier.predict(new_samples, as_iterable=True))
print("Predictions: {}".format(str(y)))
s390guy/SATK
refs/heads/master
tools/ipl/fbautil.py
1
#!/usr/bin/python3
# Copyright (C) 2012, 2013, 2016, 2017 Harold Grovesteen
#
# This file is part of SATK.
#
#     SATK is free software: you can redistribute it and/or modify
#     it under the terms of the GNU General Public License as published by
#     the Free Software Foundation, either version 3 of the License, or
#     (at your option) any later version.
#
#     SATK is distributed in the hope that it will be useful,
#     but WITHOUT ANY WARRANTY; without even the implied warranty of
#     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#     GNU General Public License for more details.
#
#     You should have received a copy of the GNU General Public License
#     along with SATK.  If not, see <http://www.gnu.org/licenses/>.

# This module provides utility functions for accessing FBA DASD image files
# from Python.
#
# It is one of a number of modules that support Hercules emulated device
# media:
#
#    media.py      Instances of media records are converted to emulated
#                  media: card images, AWS tape image file, FBA image file
#                  or CKD image file.  All image files are uncompressed.
#    recsutil.py   Python classes for individual records targeted to a device
#                  type: card, tape, fba or ckd
#    rdrpun.py     Handles writing and reading card decks.
#    awsutil.py    Handles writing and reading AWS tape image files.
#    fbautil.py    This module.  Handles writing and reading of FBA image
#                  files.
#    ckdutil.py    Handles writing and reading of CKD image files.
#
# See media.py for usage of FBA image files.
this_module="fbautil.py"

# Python imports:
import os      # Access to the OS functions
import stat    # Access to file stat data
# SATK imports:
import hexdump                # Get the dump function for hex display
from structure import eloc    # Access the error reporting function
from fbadscb import Extent    # Access FBA data set control block Extent class
# ASMA imports: None

#
#  +---------------------+
#  |                     |
#  |   Module Functions  |
#  |                     |
#  +---------------------+
#

# Converts a number of bytes into number of sectors
# Function Argument:
#   byts   a bytes/bytearray sequence (its length is used) or an integer
#          number of bytes
# Returns:
#   the number of whole sectors exactly matched by the number of bytes
# Exception:
#   ValueError if the number of bytes implies a length exceeding whole sectors
def bytes2sectors(byts):
    if isinstance(byts,(bytes,bytearray)):
        l=len(byts)
    else:
        l=byts
    sectors,excess=divmod(l,512)
    if excess != 0:
        # Fixed message typo: "if" -> "is"
        raise ValueError(\
            "%s.bytes2sectors() - %s byts is %s sectors with excess: %s" \
                % (this_module,byts,sectors,excess))
    return sectors

# Dump bytes/bytearray object content as hexadecimal digits with byte positions
# Function Arguments:
#   byts     the bytes/bytearray sequence being dumped
#   hdr      An optional header preceding the information
#   indent   How much each line should be indented.  Defaults to "".
#   string   Whether a string is to be returned (True) or the output
#            printed (False)
def dump(byts,hdr="",indent="",string=False):
    if hdr:
        s="%s%s\n" % (indent,hdr)
    else:
        s=""
    s="%s%s\n" % (s,hexdump.dump(byts,indent=indent))
    if string:
        return s
    print(s)

# Returns the size of the file object file in bytes
# Function Argument:
#   fo   An open file object of the file whose size is returned
# Returns:
#   the size of the file object's file in bytes
def filesize(fo):
    # Determines the file size using Python modules
    s=os.fstat(fo.fileno())   # Get a stat instance from file number
    return s[stat.ST_SIZE]    # Get the file size from the stat data

# This function dumps an FBA image file by physical sector or an extent within
# the image file by both logical and physical sector numbers.
# Function Arguments:
#   path     the path to the image file being dumped
#   extent   An fbadscb.Extent object
def image_dump(path,extent=None):
    fo=open(path,"rb")
    image=fo.read()
    image_size=filesize(fo)
    fo.close()
    if extent is None:
        # Print physical volume sectors
        print("\nImage File %s\n" % path)
        for n,rba in enumerate(range(0,image_size,512)):
            hdr="%s PSEC" % n
            chunk=image[rba:min(rba+512,image_size)]
            dump(chunk,hdr=hdr)
    else:
        # Print extent from the volume
        assert isinstance(extent,Extent),\
            "%s - image_dump() - 'extent' argument must be an Extent object: %s" \
                % (this_module,extent)
        print("\n%s in Image File %s\n" % (extent,path))
        pbeg=extent.lower
        beg=pbeg*512
        end=(extent.upper+1)*512
        for n,rba in enumerate(range(beg,end,512)):
            hdr="%s LSEC %s PSEC" % (n,pbeg+n)
            chunk=image[rba:min(rba+512,image_size)]
            dump(chunk,hdr=hdr)

#
#  +----------------------------------------+
#  |                                        |
#  |   Fixed-Block Architecture Emulation   |
#  |                                        |
#  +----------------------------------------+
#

# This class provides emulation of a FBA DASD device using a file.
#
# To open a new initialized FBA image for reading and writing:
#   fbao=fba.new(filename,dtype,comp=True|False)
# To open an existing FBA image for reading and writing (or just reading if
# ro=True):
#   fbao=fba.attach(filename,ro=True|False)
# To re-initialize an existing image already opened for reading and writing use
# the init class method with the fba object returned by the attach() method
#   fba.init(fbao)
#
# The fba class must be instantiated by means of either the attach() or new()
# methods.  After accesses are complete, the instance method detach()
# terminates access to the image, completes pending writes to the image file
# and closes it.
#
# Following creation of the fba object, the following methods are available
# for single physical sector accesses:
#
#  read()   Read the next or specified physical sector from the image file.
#  seek()   Position to a specific sector of the image file, making it next
#  tell()   Retrieve the next physical sector to be accessed
#  write()  Write to the next or specified physical sector in the image file
#
# The following methods are available for logical sector accesses of multiple
# sectors within an opened data set extent.  Positioning always occurs.
#
#  ds_open()    Provide extent information for data set accesses
#  ds_erase()   Overwrite the content of the logical sectors with a constant
#  ds_extent()  Create an Extent object for use with ds_open() method.
#  ds_read()    Read one or more sectors from within a data set extent
#  ds_tell()    Retrieve the next logical sector to be accessed in the data
#               set extent
#  ds_write()   Write one or more sectors from within a data set extent
#  ds_close()   Close a data set extent
#
# Only one data set extent may be open at any given time.
#
# Refer to the respective method descriptions for details of method usage.

class fba(object):
    # Dictionary mapping device type to sectors. Used to register devices.
    sectors={}
    # Reference table of standard FBA device geometries:
    #                        Sectors      File Size  File Size  K-bytes
    #"3310":124664,    #     124,664     64,851,968   3DD9000    63,332K
    #"3370":558000,    #     558,000    285,696,000  11076000   279,000K
    #"3370-A1":558000, #     558,000    285,696,000  11076000   279,000K
    #"3370-B1":558000, #     558,000    285,696,000  11076000   279,000K
    #"3370-A2":712752, #     712,752    364,929,024  15C06000   356,376K
    #"3370-B2":712752, #     712,752    364,929,024  15C06000   356,376K
    #"9313":246240,    #     246,240    126,074,880   783C000   123,120K
    #"9313-1":246240,  #     246,240    126,074,880   783C000   123,120K
    #"9332":360036,    #     360,036    184,338,432   AFCC800   180,016K
    #"9332-200":360036,#     360,036    184,338,432   AFCC800   180,016K
    #"9332-400":360036,#     360,036    184,338,432   AFCC800   180.016K
    #"9332-600":554800,#     554,800    284,057,600  10EE6000   277,400K
    #"9335":804714,    #     804,714    412,013,568  188ED400   402,357K
    #"9335-1":804714,  #     804,714    412,013,568  188ED400   402,357K
    #"9336":920115,    #     920,115    471,098,880  1C146600   460,057K
    #"9336-10":920115, #     920,115    471,098,880  1C146600   460,057K
    #"9336-20":1672881,#   1,672,881    856,515,072  330D6200   836,440K
    #"9336-25":1672881,#   1,672,881    856,515,072  330D6200   836,440K
    #"0671":574560,    #     574,560    294,174,720  1188C000   287,280K
    #"0671-04":624456, #     624,456    319,721,472  130E9000   312,228K
    #"0671-08":513072} #     513,072    262,692,864   FA86000   256,536K
    record=["fba"]      # recsutil class name of fba records
    pad=512*b"\x00"     # Sector pad

    # Dump bytes/bytearray object content as hexadecimal digits with byte
    # positions
    # Method Arguments:
    #   byts     the bytes/bytearray sequence being dumped
    #   hdr      An optional header preceding the information
    #   indent   How much each line should be indented.  Defaults to "".
    #   string   Whether a string is to be returned (True) or the output
    #            printed (False)
    @staticmethod
    def dump(byts,hdr="",indent="",string=False):
        if hdr:
            s="%s%s\n" % (indent,hdr)
        else:
            s=""
        # BUG FIX: the module-level dump() must be asked for its string form.
        # Without string=True it prints and returns None, which would embed
        # the text "None" here.  Its string form already ends with a newline.
        s="%s%s" % (s,dump(byts,indent=indent,string=True))
        if string:
            return s
        print(s)

    # Returns the size of the file object file in bytes
    # Method Argument:
    #   fo   An open file object of the file whose size is returned
    # Returns:
    #   the size of the file object's file in bytes
    @staticmethod
    def filesize(fo):
        # Determines the file size using Python modules
        s=os.fstat(fo.fileno())   # Get a stat instance from file number
        return s[stat.ST_SIZE]    # Get the file size from the stat data

    # Size a new device in sectors.
    # Method Arguments:
    #   dtype  registered device type string (used only when hwm is None)
    #   hwm    (high water mark) the last used sector number, or None
    #   comp   whether the image is intended for Hercules compression, in
    #          which case the size is rounded up to a 120-sector block group
    # This method is required by media.py to size an emulating media file
    @staticmethod
    def size(dtype,hwm=None,comp=False):
        if hwm is None:
            return fba.sectors[dtype].sectors
        if comp:
            (grps,excess)=divmod(hwm+1,120)
            if excess>0:
                grps+=1
            return grps*120
        return hwm+1

    # Validates whether the size is valid for an FBA image file
    # Method Arguments:
    #   size       The size being validated in bytes
    #   filename   The name of the file if an actual image file is being
    #              tested for reporting purposes.  Defaults to None
    # Returns:
    #   the number of emulated sectors for the given size
    # Exception:
    #   ValueError if the size is not valid.  If filename supplied an
    #   informational message is included with the exception.
    @staticmethod
    def volume_size(size,filename=None):
        sectors,excess=divmod(size,512)
        if excess!=0:
            if filename is None:
                raise ValueError()
            # BUG FIX: the original format string had three placeholders but
            # only two arguments, raising TypeError instead of ValueError.
            raise ValueError(\
                "FBA image %s truncated: last physical sector %s incomplete" \
                    % (filename,sectors))
        return sectors

    # Attach an existing emulated FBA volume image file for access
    # Method Arguments:
    #   filename   The path to the existing FBA volume image file
    #   ro         Whether access is read-only (True) or read-write (False).
    #              Defaults to False.
    @classmethod
    def attach(cls,filename,ro=False):
        # Access an existing FBA emulating media file for reading or writing.
        if ro:
            mode="rb"
        else:
            mode="r+b"
        try:
            fo=open(filename,mode)
        except IOError:
            raise IOError(\
                "Could not open existing FBA image: %s" % filename) from None
        return fba(fo,ro)

    # Initialize all sectors in an FBA image file to binary zeros.
    # Method Arguments:
    #   fo      A fba object created by methods new() or attach().  Or a
    #           Python file object opened for writing.
    #   dtype   The FBA device type being emulated as a string or the
    #           non-standard FBA image size as an integer.
    #   size    Overrides the registered sector count when dtype is a string.
    #   comp    Whether the image file is intended for compression by a
    #           Hercules utility.  Defaults to False.
    @classmethod
    def init(cls,fo,dtype,size=None,comp=False):
        if isinstance(fo,fba):
            # BUG FIX: the original tested fba.ro and used fba.fo - class
            # attributes that do not exist.  The passed fba object's own
            # attributes are intended here.
            if fo.ro:
                raise ValueError(\
                    "%s - %s.init() - can not initialize a read-only FBA image" \
                        % (this_module,cls.__name__))
            f=fo.fo
        else:
            f=fo
        if isinstance(dtype,str):
            # String device type supplied
            try:
                dev=fba.sectors[dtype]
                sectors=dev.sectors
            except KeyError:
                raise ValueError(\
                    "%s - %s.init() - unrecognized FBA device type: %s" \
                        % (this_module,cls.__name__,dtype))
            if size is not None:
                sectors=size
        elif isinstance(dtype,int):
            # Integer number of sectors supplied
            sectors=dtype
        else:
            # Don't know what to do with this
            raise ValueError(\
                "%s - %s.init() - 'dtype' argument must be a string or an integer: %s" \
                    % (this_module,cls.__name__,dtype))
        # Adjust sectors for Hercules compression if the FBA image will be
        # compressed
        if comp:
            blkgrp=120   # A block group is 120 sectors
            (grps,excess)=divmod(sectors,blkgrp)
            if excess!=0:
                sectors=(grps+1)*blkgrp
        # BUG FIX: rewind before truncating.  truncate() without a size uses
        # the current position; a previously accessed image would otherwise
        # be truncated and rewritten from the wrong offset.
        f.seek(0)
        f.truncate()
        for x in range(sectors):
            try:
                f.write(fba.pad)
            except IOError:
                raise IOError(\
                    "%s - %s.init() - error initializing FBA image sector %s: %s" \
                        % (this_module,cls.__name__,x,f.name))
        f.flush()

    # Create a new FBA image file and initialize all sector to binary zeros.
    # Method Arguments:
    #   filename   The file path of the FBA image file being created.  An
    #              existing file will be overwritten.
    #   dtype      The FBA device type being emulated as a string or the
    #              non-standard FBA image size as an integer.
    #   comp       Whether the image file is intended for compression by a
    #              Hercules utility.  Defaults to False.
    # Returns:
    #   the fba object providing access to the emulated FBA image
    # Note: size is retained for media.py compatibility.
    @classmethod
    def new(cls,filename,dtype,size=None,comp=False):
        try:
            fo=open(filename,"w+b")
        except IOError:
            raise IOError(\
                "%s - %s.new() - could not open new FBA image: %s" \
                    % (this_module,cls.__name__,filename)) from None
        fba.init(fo,dtype,size=size,comp=comp)
        return fba(fo,ro=False,pending=True)

    # See the description above for usage.  Use attach() or new(), not this
    # constructor, to create an fba object.
    def __init__(self,fo,ro=True,pending=False):
        # Image file controls and status
        self.filename=fo.name            # Remember the filename of the image
        self.filesize=fba.filesize(fo)   # Image file size in bytes
        # Validate the image file size and determine the number of sectors it
        # emulates.
        sectors=fba.volume_size(self.filesize,filename=self.filename)
        self.fo=fo             # Open file object from new() or attach()
        self.pending=pending   # Whether file object writes may be pending

        # Emulation controls and status
        self.ro=ro               # Read-only (True) or read-write (False)
        self.last=sectors-1      # Last physical sector number
        self.sector=0            # Current physical sector position

        # Logical data set extent controls
        self.extent=None       # Currently open data set extent
        self.lower=None        # The first physical sector of the extent
        self.upper=None        # The last physical sector of the extent
        self.ds_sector=None    # Current logical sector position
        self.ds_sectors=0      # The number of sectors in the extent
        self.ds_last=None      # The last logical sector in the extent

        # I/O tracing control
        self._trace=False      # Trace I/O operations
        self._tdump=False      # Dump sector content while tracing

        # Have to wait until self._trace is defined.
        self.seek(0)           # Position file to sector 0

    def __str__(self):
        if self.fo.closed:
            return "FBA: detached image file: %s" % self.filename
        return "FBA: extent:%s sector:%s self.fo:%s"\
            " ro=%s last:%s file:%s" \
            % (self.extent,self.sector,\
                self.fo.tell(),self.ro,self.last,self.filename)

    # This method validates that a physical extent is within the actual FBA
    # image
    # Exception:
    #   ValueError is raised if either the lower or upper extent boundary is
    #   greater than the last physical sector or the Extent is in the
    #   'not used' state.
    def _ck_extent(self,extent):
        if extent.notused:
            raise ValueError("%s Extent object must be used: %s" \
                % (eloc(self,"_ck_extent",module=this_module),extent))
        lower=extent.lower
        if lower>self.last:
            raise ValueError(\
                "%s extent lower boundary is not within the FBA image (0-%s): %s"\
                    % (eloc(self,"_ck_extent",module=this_module),self.last,lower))
        upper=extent.upper
        if upper>self.last:
            raise ValueError(\
                "%s extent upper boundary is not within the FBA image (0-%s): %s"\
                    % (eloc(self,"_ck_extent",module=this_module),self.last,upper))

    # Perform the actual forcing of possible pending writes to occur
    def _flush(self):
        self.fo.flush()
        self.pending=False

    # Performs a low level read.
    # Method Argument:
    #   size   the number of bytes to read from the image file current
    #          position
    # Returns:
    #   the bytes read
    def _read(self,size):
        # Force writing any pending writes before attempting to read the file
        # otherwise, the image file may have stale sector data.
        if self.pending:
            # By using this method we get to trace the flush operation
            self.flush()
        # Read the requested bytes
        try:
            byts=self.fo.read(size)
        except IOError:
            raise IOError("%s IOError while reading FBA physical sector: %s" \
                % (eloc(self,"_read",module=this_module),self.sector))
        # Ensure we actually read the number of expected bytes.
        if len(byts)!=size:
            raise ValueError(\
                "%s did not read requested bytes (%s) from image file: %s" \
                    % (eloc(self,"_read",module=this_module),size,len(byts)))
        return byts

    # Convert logical sector number to physical and validate logical sectors
    # are within the open extent
    # Method Arguments:
    #   sector    The starting logical sector of the operation
    #   sectors   The number of sectors in the logical operation
    # Returns:
    #   A tuple where:
    #     tuple[0] is the starting physical sector being read
    #     tuple[1] is the last physical sector being read
    #     tuple[2] is the last logical sector being read
    # Exception:
    #   ValueError if the number of sectors in the operation exceeds the open
    #   extent.
    def _to_physical(self,sector,sectors=1):
        assert sector >=0,"%s 'sector' argument must be >= 0: %s" \
            % (eloc(self,"_to_physical",module=this_module),sector)
        assert sectors >=1,"%s 'sectors' argument must be >= 1: %s" \
            % (eloc(self,"_to_physical",module=this_module),sectors)

        l_last=sector+sectors-1
        if l_last > self.upper:
            raise ValueError(\
                "%s end of operation beyond end of extent (%s): %s" \
                    % (eloc(self,"_to_physical",module=this_module),\
                        self.upper,l_last))
        return (self.lower+sector,self.lower+l_last,l_last)

    # Performs the low level write.
    # Method Argument:
    #   data   a bytes sequence being written.  Must be bytes not bytearray
    def _write(self,data):
        # Ensure bytes sequence is being written not bytearray.  Python
        # requires a bytes sequence.  This gives the using software the
        # freedom to use either sequence.
        if isinstance(data,bytearray):
            byts=bytes(data)
        else:
            byts=data
        # BUG FIX: the original assert message swapped the sector number and
        # the data arguments.
        assert isinstance(byts,bytes),\
            "%s 'data' argument must be a bytes/bytearray sequence for sector %s: %s" \
                % (eloc(self,"_write",module=this_module),self.sector,byts)

        # Write the bytes
        try:
            self.fo.write(byts)
        except IOError:
            raise IOError(\
                "%s IOError while writing FBA physical sector: %s" \
                    % (eloc(self,"_write",module=this_module),self.sector))
        self.pending=True    # Indicate the write might be pending

    # Detach and close the image file
    def detach(self):
        if __debug__:
            if self._trace:
                print("%s detaching image file: %s" \
                    % (eloc(self,"detach",module=this_module),self.filename))

        if self.extent:
            self.ds_close()
        try:
            self.fo.flush()
            self.fo.close()
        except IOError:
            # BUG FIX: the original message had three placeholders but only
            # two arguments, masking the IOError with a TypeError.
            raise IOError(\
                "%s IOError detaching FBA image: %s" \
                    % (eloc(self,"detach",module=this_module),self.filename))
        self.pending=False

    # Force pending writes
    def flush(self):
        if __debug__:
            if self._trace:
                print("%s forcing pending image file writes" \
                    % eloc(self,"flush",module=this_module))
        self._flush()   # Use the low-level method to actually force writes

    # Enable/Disable I/O operation tracing.
    # Method Arguments:
    #   state   Specify True to enable tracing.  Specify False to disable.
    #   dump    Specify True to dump sector content while tracing, False
    #           otherwise.
    def trace(self,state,dump=False):
        if state:
            self._trace=True
            self._tdump=dump
            if __debug__:
                print("%s set tracing:%s, dump:%s" \
                    % (eloc(self,"trace",module=this_module),self._trace,self._tdump))
        else:
            if __debug__:
                was_tracing=self._trace
            self._trace=False
            self._tdump=False
            if __debug__:
                if was_tracing:
                    # BUG FIX: the original printed self.dump (a bound
                    # method) instead of the self._tdump flag.
                    print("%s set tracing:%s, dump:%s" \
                        % (eloc(self,"trace",module=this_module),\
                            self._trace,self._tdump))

    #
    # Physical sector accessing methods
    #

    # Read the next sector or a specified sector.  Following the read the
    # image is positioned at the next physical sector.
    # Method Arguments:
    #   sector   Specify a physical sector number to be read.  Specify None
    #            to read from the next sector to which the image is
    #            positioned based upon the previously accessed sector.
    #            Defaults to None.
    #   array    Whether a bytearray (True) or bytes (False) is returned
    # Exception:
    #   IOError if there is a file related problem
    #
    # Programming Note:
    # Use array=True if the user software expects to update the content of
    # the sector.
    def read(self,sector=None,array=False):
        # Position the image file if requested to do so.
        if sector is not None:
            self.seek(sector)

        # Capture position information before the read advances it
        if __debug__:
            if self._trace:
                sec=self.sector
                fpos=self.fo.tell()

        # Read the physical sector using the low-level routine
        data=self._read(512)
        self.sector+=1

        # Trace the read if physical tracing is enabled
        if __debug__:
            if self._trace:
                print("%s READ: sector: %s  file pos: %s" \
                    % (eloc(self,"read",module=this_module),sec,fpos))
                if self._tdump:
                    dump(data,indent="    ")

        # Return the information in the requested sequence type
        if array:
            return bytearray(data)
        return data

    # Position the image file to a specific physical sector.
    # Method Argument:
    #   sector   the physical sector to which the image file is positioned.
    # Exception:
    #   ValueError if the physical sector does not exist in the image file.
    #   IOError if there is a file related problem
    def seek(self,sector):
        # Determine the position in the file for a physical sector access
        if sector>self.last:
            raise ValueError("%s FBA sector %s is beyond last sector %s" \
                % (eloc(self,"seek",module=this_module),sector,self.last))
        sector_loc=sector*512
        if sector_loc>self.filesize:
            raise IOError("%s FBA sector %s file position %s is beyond EOF: %s" \
                % (eloc(self,"seek",module=this_module),sector,sector_loc,\
                    self.filesize))

        # Perform the positioning by physical sector number in this object
        # and position the file object accordingly
        try:
            self.fo.seek(sector_loc)
        except IOError:
            raise IOError("%s IOError while positioning to FBA sector: %s" \
                % (eloc(self,"seek",module=this_module),self.sector))
        self.sector=sector

        # Trace the seek if physical tracing is enabled
        if __debug__:
            if self._trace:
                print("%s SEEK: sector: %s  file pos: %s" \
                    % (eloc(self,"seek",module=this_module),sector,self.fo.tell()))

    # Return the current physical sector position
    def tell(self):
        if __debug__:
            if self._trace:
                print("%s returning: %s" % (eloc(self,"tell",module=this_module),\
                    self.sector))
        return self.sector

    # Write content to the next sector or a specified sector.  Following the
    # write operation the image is positioned at the next physical sector.
    # Method Arguments:
    #   byts     A bytes/bytearray sequence of the content to be written
    #   sector   Specify a physical sector number to which the content is
    #            written.  Specify None to write to the next sector to which
    #            the image is positioned.  Defaults to None.
    #   pad      Whether content is to be padded to a full sector (True) or
    #            the content must be a full sector (False).  Defaults to
    #            False.
    # Exception
    #   NotImplementedError if the image file is read-only.
    def write(self,byts,sector=None,pad=False):
        if self.ro:
            raise NotImplementedError(\
                "%s can not write to read-only FBA image: %s" \
                    % (eloc(self,"write",module=this_module),self.filename))

        # Position the image file if requested to do so.
        if sector is not None:
            self.seek(sector)

        # Pad or detect truncated sector content
        data=byts
        if len(data)!=512:
            if pad:
                data=data+fba.pad
                data=data[:512]
            else:
                raise ValueError("%s FBA image sector must be 512 bytes: %s"\
                    % (eloc(self,"write",module=this_module),len(data)))

        # Trace the write operation if physical sector tracing is enabled
        if __debug__:
            if self._trace:
                if len(data)>len(byts):
                    # BUG FIX: the original lacked parentheses around the
                    # subtraction; % bound tighter, raising TypeError.
                    padded=" pad: %s" % (len(data) - len(byts))
                else:
                    padded=""
                # Report the actual target sector (sector arg may be None)
                print("%s WRITE: sector: %s  file pos: %s%s" \
                    % (eloc(self,"write",module=this_module),\
                        self.sector,self.fo.tell(),padded))
                if self._tdump:
                    dump(data,indent="    ")

        # Write to the sector using the low-level routine
        self._write(data)
        self.sector+=1

    #
    # Logical data set extent accessing methods
    #

    # Closes the open extent.  If no extent is open, resets the extent
    # controls
    def ds_close(self):
        # Trace the data set close if logical sector tracing is enabled
        if __debug__:
            if self._trace:
                print("%s DS_CLOSE: closing extent: %s" \
                    % (eloc(self,"ds_close",module=this_module),self.extent))

        if self.extent:
            self.flush()   # Write any pending sectors to the image file
        self.ds_sector=self.ds_last=self.extent=self.lower=self.upper=None
        self.ds_sectors=0

    # Erase one or more sectors of the extent or the entire extent with a
    # defined value.
    # Method Arguments:
    #   sector    starting logical sector of the erased area.  Defaults to 0.
    #   sectors   Specify the number of sectors whose content is erased.
    #             Specify True to erase all logical sectors following the
    #             first.  Defaults to 1.
    #   fill      The value to fill the erased bytes of each sector.  May be
    #             a character or numeric value between 0-255.  Defaults to 0
    # Exception:
    #   NotImplementedError if the image file is read-only or has no open
    #   extent.
    #
    # Programming Notes:
    # To erase an entire extent to zeros use an opened extent:
    #   fbao.ds_erase(sector=0,sectors=True)
    # If an EBCDIC character is desired for the fill character, fill must be
    # a hex integer, for example:
    #   fill=0x40
    def ds_erase(self,sector=0,sectors=1,fill=0):
        assert isinstance(sector,int) and sector>=0,\
            "%s 'sector' argument must be an integer >= 0: %s"\
                % (eloc(self,"ds_erase",module=this_module),sector)

        if self.ro:
            raise NotImplementedError("%s can not erase a read-only image file: %s"\
                % (eloc(self,"ds_erase",module=this_module),self.filename))
        if not self.extent:
            raise NotImplementedError("%s no open extent for erasing" \
                % (eloc(self,"ds_erase",module=this_module)))

        # Create the content for an erased sector
        if isinstance(fill,str):
            if len(fill)>0:
                f=ord(fill[0])
            else:
                raise ValueError("%s 'fill' argument must not be an empty string"\
                    % eloc(self,"ds_erase",module=this_module))
        elif isinstance(fill,int):
            if fill<0 or fill >255:
                # BUG FIX: the original mis-parenthesized the % arguments,
                # raising TypeError instead of this ValueError.
                raise ValueError("%s 'fill' argument out of range (0-255): %s" \
                    % (eloc(self,"ds_erase",module=this_module),fill))
            f=fill
        else:
            # BUG FIX: the original fell through with f undefined, raising
            # NameError for an unsupported fill type.
            raise ValueError("%s 'fill' argument must be a string or integer: %s" \
                % (eloc(self,"ds_erase",module=this_module),fill))
        byts=bytes([f,]*512)

        # Determine the physical sectors of erased area
        if sectors == True:
            secs=self.ds_sectors
        else:
            assert isinstance(sectors,int) and sectors>=1,\
                "%s 'sectors' argument must be True or an integer >= 1: %s" \
                    % (eloc(self,"ds_erase",module=this_module),sectors)
            secs=sectors
        p_first,p_last,l_last = self._to_physical(sector,sectors=secs)

        # Position at the start of the erased area
        self.seek(p_first)
        # BUG FIX: synchronize the logical position with the starting sector
        # (ds_read and ds_write do this; ds_erase did not).
        self.ds_sector=sector

        if __debug__:
            if self._trace:
                if secs == 1:
                    s="%s DS_ERASING: logical sector:%s physical sector: %s"\
                        " file pos:%s with:%02X" \
                        % (eloc(self,"ds_erase",module=this_module),\
                            sector,p_first,self.fo.tell(),f)
                else:
                    s="%s DS_ERASING: logical sectors:%s-%s physical sectors: %s-%s" \
                        " sectors:%s file pos:%s with:0x%02X" \
                        % (eloc(self,"ds_erase",module=this_module),\
                            sector,l_last,p_first,p_last,secs,self.fo.tell(),\
                            f)
                print(s)

        # Erase the requested sectors' content
        for _ in range(secs):
            self._write(byts)
            self.sector+=1
            self.ds_sector+=1

    # This method returns an Extent object for a specific range of physical
    # sectors
    # Method Arguments:
    #   lower   the first physical sector of the extent
    #   upper   the last physical sector of the extent.  If True is
    #           specified the upper limit of the extent is the last physical
    #           sector of the image file.
    # Programming Note: To access all sectors of the image file as a data set
    # use:
    #   fbao.ds_open(fbao.ds_extent(0,True))
    # Using the entire volume as an extent allows multi-sector operations on
    # the image file at the physical level.
    def ds_extent(self,lower,upper):
        if upper is True:
            up=self.last
        else:
            up=upper
        ext=Extent(lower=lower,upper=up)
        if __debug__:
            if self._trace:
                # Consistency fix: supply module= as every other eloc() call
                # in this class does.
                print("%s returned: %s" \
                    % (eloc(self,"ds_extent",module=this_module),ext))
        return ext

    # Open an extent for logical sector accesses
    # Exception
    #   NotImplementedError if the image file already has an open extent.
    def ds_open(self,extent):
        assert isinstance(extent,Extent),\
            "%s 'extent' argument must be a fbadscb.Extent object: %s" \
                % (eloc(self,"ds_open",module=this_module),extent)

        if self.extent:
            raise NotImplementedError("%s extent already open: %s" \
                % (eloc(self,"ds_open",module=this_module),self.extent))
        self._ck_extent(extent)

        self.extent=extent          # Remember the physical extent
        self.lower=extent.lower     # The first physical sector of the extent
        self.upper=extent.upper     # The last physical sector of the extent
        self.ds_sector=0            # The next logical sector to be accessed
        self.ds_sectors=extent.sectors()   # Calculate the number of sectors
        self.ds_last=self.ds_sectors-1     # The last logical sector

        # Trace the data set open if tracing is enabled
        if __debug__:
            if self._trace:
                print("%s DS_OPEN: extent:%s sectors:%s logical sectors:0-%s" \
                    % (eloc(self,"ds_open",module=this_module),\
                        extent,self.ds_sectors,self.ds_last))

        self.seek(self.lower)   # Position at start of the extent

    # Read one or more sectors from the extent.
    # Method Arguments:
    #   sector    the starting logical sector number within the extent
    #   sectors   the number of sectors to be read including the first
    #             sector.  Defaults to 1.
    #   array     Whether a bytearray is to be returned (True) or a bytes
    #             sequence (False).  Defaults to False.
    # Programming Note: use array=True if the user plans to update the
    # content
    # Returns:
    #   a bytes/bytearray sequence (as requested) of the sector or sectors
    #   content from the image file
    # Exception:
    #   ValueError for various detected errors.  See _to_physical() method.
    #   IOError if an error occurs during reading
    #   NotImplementedError if the image file has no open extent.
    def ds_read(self,sector=None,sectors=1,array=False):
        if not self.extent:
            raise NotImplementedError("%s no open extent for reading" \
                % eloc(self,"ds_read",module=this_module))
        if sector is None:
            sec=self.ds_sector
        else:
            sec=sector
        p_first,p_last,l_last=self._to_physical(sec,sectors=sectors)

        # Position to the starting sector
        self.seek(p_first)
        self.ds_sector=sec

        # Perform tracing if requested
        if __debug__:
            if self._trace:
                if sectors == 1:
                    s="%s DS_READ: logical sector:%s physical sector: %s"\
                        " file pos:%s" % (eloc(self,"ds_read",module=this_module),\
                            sec,p_first,self.fo.tell())
                else:
                    s="%s DS_READ: logical sectors:%s-%s physical sectors: %s-%s" \
                        " sectors:%s file pos: %s"\
                        % (eloc(self,"ds_read",module=this_module),\
                            sec,l_last,p_first,p_last,sectors,self.fo.tell())
                print(s)

        # Read the sector' or sectors' content
        byts=self._read(sectors*512)
        self.sector+=sectors
        self.ds_sector+=sectors

        # Dump the content read from the image if content tracing enabled
        if __debug__:
            if self._tdump:
                dump(byts,indent="    ")

        # Return the content as bytes or bytearray as requested.
        if array:
            return bytearray(byts)
        return byts

    # Returns the next logical sector for access in the extent
    # Exception:
    #   NotImplementedError if the image file has no open extent.
    def ds_tell(self):
        if not self.extent:
            raise NotImplementedError("%s no open extent for logical sector position" \
                % (eloc(self,"ds_tell",module=this_module)))
        if __debug__:
            if self._trace:
                print("%s returning: %s" \
                    % (eloc(self,"ds_tell",module=this_module),self.ds_sector))
        return self.ds_sector

    # Write the content of the byte/bytearray sequence to the sector or
    # sectors implied by the length of the sequence starting with a given
    # logical sector
    # Method Arguments:
    #   byts     the bytes/bytearray sequence being written to the image
    #   sector   Specify the starting logical sector of the write operation.
    #            Omit to write at the next positioned sector.
    # Exceptions:
    #   ValueError for various detected errors.  See bytes2sectors() function
    #   and _to_physical() method for detected errors.
    #   IOError if an error occurs during writing
    #   NotImplementedError if the image file is read-only or no open extent.
    def ds_write(self,byts,sector=None):
        if self.ro:
            raise NotImplementedError("%s can not write to a read-only image file: %s"\
                % (eloc(self,"ds_write",module=this_module),self.filename))
        if not self.extent:
            raise NotImplementedError("%s no open extent for writing" \
                % (eloc(self,"ds_write",module=this_module)))

        # Validate there is enough data to write entire sectors
        try:
            sectors=bytes2sectors(byts)
        except ValueError:
            raise ValueError("%s 'byts' argument not full sectors, length: %s" \
                % (eloc(self,"ds_write",module=this_module),len(byts))) from None

        # Locate where the write operation begins
        if sector is None:
            sec=self.ds_sector
        else:
            sec=sector
        p_first,p_last,l_last=self._to_physical(sec,sectors=sectors)

        # Position to the starting sector
        self.seek(p_first)
        self.ds_sector=sec

        # Perform tracing if requested.  (sec is reported rather than the
        # sector argument, which may be None for sequential writes.)
        if __debug__:
            if self._trace:
                if sectors == 1:
                    s="%s DS_WRITE: logical sector:%s physical sector: %s"\
                        " file pos: %s" % (eloc(self,"ds_write",module=this_module),\
                            sec,p_first,self.fo.tell())
                else:
                    s="%s DS_WRITE: logical sectors:%s-%s physical sectors: %s-%s" \
                        " sectors:%s file pos:%s" \
                        % (eloc(self,"ds_write",module=this_module),sec,\
                            l_last,p_first,p_last,sectors,self.fo.tell())
                print(s)
                if self._tdump:
                    dump(byts,indent="    ")

        # Write the sector' or sectors' content
        self._write(byts)
        self.sector+=sectors
        self.ds_sector+=sectors


class fba_info(object):
    # Unit thresholds, largest first: G, M, K
    # K=1024   M=1024*1024   G=1024*1024*1024
    units=[1024*1024*1024,1024*1024,1024]

    # Format a byte count as a human readable string with one decimal digit
    # and a K/M/G unit suffix, e.g. 1536 -> "1.5KB".  Values below 1K are
    # reported as plain bytes, e.g. "512B".
    @staticmethod
    def KMG(value):
        for n,x in enumerate(fba_info.units):
            if value>=x:
                unit="GMK"[n]
                unit_metric=fba_info.units[n]
                units,excess=divmod(value,unit_metric)
                tenths=excess*10 // unit_metric
                return "%s.%s%sB" % (units,tenths,unit)
        return "%sB" % (value)

# This class gives access to the information managed by the fbadev class.
# Once created, a fba_info instance is read only.
def __init__(self,dtype,blocklen=0): try: self.__dev=fba.sectors[dtype] # Access fba's sector dictionary except KeyError: raise TypeError("unrecognized FBA device type: %s" % dtype) self.__block=blocklen # Size of the block for this instance def __str__(self): string="Volume: TYPE=%s SECTORS=%s LFS=%s" \ % (self.device,self.sectors,self.lfs) string="%s\nHost: FILE=%s (%s)" \ % (string,self.host,fba_info.KMG(self.host)) string="%s\nBlock: LENGTH=%s SECTORS=%s BLOCKS=%s" \ % (string,self.block,self.required,self.capacity) return string @property def block(self): # Returns the specified block length return self.__block @property def capacity(self): # Return the number of blocks that can be stored on the volume if self.__block<=0: return None return self.host//self.__block @property def device(self): # Returns the device type return self.__dev.dtype @property def required(self): # Returns the number of sectors required for the specified block length return (self.__block+511)//512 @property def sectors(self): # Returns the number of 512-bytes sectors in this device type. return self.__dev.sectors # # Provides volume related information @property def host(self): # Returns the host file size of the emulated volume return self.sectors*512 @property def lfs(self): # Returns whether host large file system support is required return False class fbadev(object): def __init__(self,dtype,devtyp,cls,typ,mdl,bpg,bpp,size,blks,cu): # All of the elements of the Hercules FBADEV table are provided in # the instance definition. Only those elements required by fbautil # are set to attributes of the fbadev instance. 
# Hercules FBADEV field self.dtype=dtype # Device type name self.devtyp=devtyp # Device number devt self.model=mdl # Device model mdl self.sectors=blks # Number of sectors blks self.logcyl=bpp # Sectors per logical cylinder bpp self.logtrk=bpg # Sectors per logical track bpg fba.sectors[self.dtype]=self # Build the sectors dictionary fbadev("3310", 0x3310,0x21,0x01,0x01, 32,352,512, 125664,0x4331) fbadev("3310-1", 0x3310,0x21,0x01,0x01, 32,352,512, 125664,0x4331) fbadev("3370", 0x3370,0x21,0x02,0x00, 62,744,512, 558000,0x3880) fbadev("3370-1", 0x3370,0x21,0x02,0x00, 62,744,512, 558000,0x3880) fbadev("3370-A1", 0x3370,0x21,0x02,0x00, 62,744,512, 558000,0x3880) fbadev("3370-B1", 0x3370,0x21,0x02,0x00, 62,744,512, 558000,0x3880) fbadev("3370-2", 0x3370,0x21,0x05,0x04, 62,744,512, 712752,0x3880) fbadev("3370-A2", 0x3370,0x21,0x05,0x04, 62,744,512, 712752,0x3880) fbadev("3370-B2", 0x3370,0x21,0x05,0x04, 62,744,512, 712752,0x3880) fbadev("9332", 0x9332,0x21,0x07,0x00, 73,292,512, 360036,0x6310) fbadev("9332-400", 0x9332,0x21,0x07,0x00, 73,292,512, 360036,0x6310) fbadev("9332-600", 0x9332,0x21,0x07,0x01, 73,292,512, 554800,0x6310) fbadev("9335", 0x9335,0x21,0x06,0x01, 71,426,512, 804714,0x6310) fbadev("9313", 0x9313,0x21,0x08,0x00, 96,480,512, 246240,0x6310) fbadev("9336", 0x9336,0x21,0x11,0x00, 63,315,512, 920115,0x6310) fbadev("9336-10", 0x9336,0x21,0x11,0x00, 63,315,512, 920115,0x6310) fbadev("9336-20", 0x9336,0x21,0x11,0x10,111,777,512,1672881,0x6310) fbadev("9336-25", 0x9336,0x21,0x11,0x10,111,777,512,1672881,0x6310) fbadev("0671-08", 0x0671,0x21,0x12,0x08, 63,504,512, 513072,0x6310) fbadev("0671", 0x0671,0x21,0x12,0x00, 63,504,512, 574560,0x6310) fbadev("0671-04", 0x0671,0x21,0x12,0x04, 63,504,512, 624456,0x6310) # media.py expects this function to be available def register_devices(dtypes): for x in fba.sectors.values(): dtypes.dtype(x.dtype,fba) dtypes.dndex(dtypes.number(x.devtyp,x.model),x.dtype) if __name__=="__main__": raise NotImplementedError("%s is only 
intended for import use" % this_module)
patmcb/odoo
refs/heads/8.0
addons/product_email_template/models/product.py
379
# -*- coding: utf-8 -*- from openerp.osv import fields, osv class product_template(osv.Model): """ Product Template inheritance to add an optional email.template to a product.template. When validating an invoice, an email will be send to the customer based on this template. The customer will receive an email for each product linked to an email template. """ _inherit = "product.template" _columns = { 'email_template_id': fields.many2one( 'email.template', 'Product Email Template', help='When validating an invoice, an email will be sent to the customer' 'based on this template. The customer will receive an email for each' 'product linked to an email template.'), }
thaumos/ansible
refs/heads/devel
lib/ansible/modules/files/archive.py
14
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2016, Ben Doherty <bendohmv@gmail.com> # Sponsored by Oomph, Inc. http://www.oomphinc.com # Copyright: (c) 2017, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = r''' --- module: archive version_added: '2.3' short_description: Creates a compressed archive of one or more files or trees extends_documentation_fragment: files description: - Packs an archive. - It is the opposite of M(unarchive). - By default, it assumes the compression source exists on the target. - It will not copy the source file from the local system to the target before archiving. - Source files can be deleted after archival by specifying I(remove=True). options: path: description: - Remote absolute path, glob, or list of paths or globs for the file or files to compress or archive. type: list required: true format: description: - The type of compression to use. - Support for xz was added in Ansible 2.5. type: str choices: [ bz2, gz, tar, xz, zip ] default: gz dest: description: - The file name of the destination archive. - This is required when C(path) refers to multiple files by either specifying a glob, a directory or multiple paths in a list. type: path exclude_path: description: - Remote absolute path, glob, or list of paths or globs for the file or files to exclude from the archive. type: list version_added: '2.4' force_archive: version_added: '2.8' description: - Allow you to force the module to treat this as an archive even if only a single file is specified. - By default behaviour is maintained. i.e A when a single file is specified it is compressed only (not archived). 
type: bool default: false remove: description: - Remove any added source files and trees after adding to archive. type: bool default: no notes: - Requires tarfile, zipfile, gzip and bzip2 packages on target host. - Requires lzma or backports.lzma if using xz format. - Can produce I(gzip), I(bzip2), I(lzma) and I(zip) compressed files or archives. seealso: - module: unarchive author: - Ben Doherty (@bendoh) ''' EXAMPLES = r''' - name: Compress directory /path/to/foo/ into /path/to/foo.tgz archive: path: /path/to/foo dest: /path/to/foo.tgz - name: Compress regular file /path/to/foo into /path/to/foo.gz and remove it archive: path: /path/to/foo remove: yes - name: Create a zip archive of /path/to/foo archive: path: /path/to/foo format: zip - name: Create a bz2 archive of multiple files, rooted at /path archive: path: - /path/to/foo - /path/wong/foo dest: /path/file.tar.bz2 format: bz2 - name: Create a bz2 archive of a globbed path, while excluding specific dirnames archive: path: - /path/to/foo/* dest: /path/file.tar.bz2 exclude_path: - /path/to/foo/bar - /path/to/foo/baz format: bz2 - name: Create a bz2 archive of a globbed path, while excluding a glob of dirnames archive: path: - /path/to/foo/* dest: /path/file.tar.bz2 exclude_path: - /path/to/foo/ba* format: bz2 - name: Use gzip to compress a single archive (i.e don't archive it first with tar) archive: path: /path/to/foo/single.file dest: /path/file.gz format: gz - name: Create a tar.gz archive of a single file. archive: path: /path/to/foo/single.file dest: /path/file.tar.gz format: gz force_archive: true ''' RETURN = r''' state: description: The current state of the archived file. If 'absent', then no source files were found and the archive does not exist. If 'compress', then the file source file is in the compressed state. If 'archive', then the source file or paths are currently archived. If 'incomplete', then an archive was created, but not all source paths were found. 
type: str returned: always missing: description: Any files that were missing from the source. type: list returned: success archived: description: Any files that were compressed or added to the archive. type: list returned: success arcroot: description: The archive root. type: str returned: always expanded_paths: description: The list of matching paths from paths argument. type: list returned: always expanded_exclude_paths: description: The list of matching exclude paths from the exclude_path argument. type: list returned: always ''' import bz2 import filecmp import glob import gzip import io import os import re import shutil import tarfile import zipfile from traceback import format_exc from ansible.module_utils.basic import AnsibleModule, missing_required_lib from ansible.module_utils._text import to_native from ansible.module_utils.six import PY3 LZMA_IMP_ERR = None if PY3: try: import lzma HAS_LZMA = True except ImportError: LZMA_IMP_ERR = format_exc() HAS_LZMA = False else: try: from backports import lzma HAS_LZMA = True except ImportError: LZMA_IMP_ERR = format_exc() HAS_LZMA = False def main(): module = AnsibleModule( argument_spec=dict( path=dict(type='list', required=True), format=dict(type='str', default='gz', choices=['bz2', 'gz', 'tar', 'xz', 'zip']), dest=dict(type='path'), exclude_path=dict(type='list'), force_archive=dict(type='bool', default=False), remove=dict(type='bool', default=False), ), add_file_common_args=True, supports_check_mode=True, ) params = module.params check_mode = module.check_mode paths = params['path'] dest = params['dest'] exclude_paths = params['exclude_path'] remove = params['remove'] expanded_paths = [] expanded_exclude_paths = [] format = params['format'] force_archive = params['force_archive'] globby = False changed = False state = 'absent' # Simple or archive file compression (inapplicable with 'zip' since it's always an archive) archive = False successes = [] # Fail early if not HAS_LZMA and format == 'xz': 
module.fail_json(msg=missing_required_lib("lzma or backports.lzma", reason="when using xz format"), exception=LZMA_IMP_ERR) module.fail_json(msg="lzma or backports.lzma is required when using xz format.") for path in paths: path = os.path.expanduser(os.path.expandvars(path)) # Expand any glob characters. If found, add the expanded glob to the # list of expanded_paths, which might be empty. if ('*' in path or '?' in path): expanded_paths = expanded_paths + glob.glob(path) globby = True # If there are no glob characters the path is added to the expanded paths # whether the path exists or not else: expanded_paths.append(path) # Only attempt to expand the exclude paths if it exists if exclude_paths: for exclude_path in exclude_paths: exclude_path = os.path.expanduser(os.path.expandvars(exclude_path)) # Expand any glob characters. If found, add the expanded glob to the # list of expanded_paths, which might be empty. if ('*' in exclude_path or '?' in exclude_path): expanded_exclude_paths = expanded_exclude_paths + glob.glob(exclude_path) # If there are no glob character the exclude path is added to the expanded # exclude paths whether the path exists or not. 
else: expanded_exclude_paths.append(exclude_path) if not expanded_paths: return module.fail_json(path=', '.join(paths), expanded_paths=', '.join(expanded_paths), msg='Error, no source paths were found') # Only try to determine if we are working with an archive or not if we haven't set archive to true if not force_archive: # If we actually matched multiple files or TRIED to, then # treat this as a multi-file archive archive = globby or os.path.isdir(expanded_paths[0]) or len(expanded_paths) > 1 else: archive = True # Default created file name (for single-file archives) to # <file>.<format> if not dest and not archive: dest = '%s.%s' % (expanded_paths[0], format) # Force archives to specify 'dest' if archive and not dest: module.fail_json(dest=dest, path=', '.join(paths), msg='Error, must specify "dest" when archiving multiple files or trees') archive_paths = [] missing = [] arcroot = '' for path in expanded_paths: # Use the longest common directory name among all the files # as the archive root path if arcroot == '': arcroot = os.path.dirname(path) + os.sep else: for i in range(len(arcroot)): if path[i] != arcroot[i]: break if i < len(arcroot): arcroot = os.path.dirname(arcroot[0:i + 1]) arcroot += os.sep # Don't allow archives to be created anywhere within paths to be removed if remove and os.path.isdir(path): path_dir = path if path[-1] != '/': path_dir += '/' if dest.startswith(path_dir): module.fail_json(path=', '.join(paths), msg='Error, created archive can not be contained in source paths when remove=True') if os.path.lexists(path) and path not in expanded_exclude_paths: archive_paths.append(path) else: missing.append(path) # No source files were found but the named archive exists: are we 'compress' or 'archive' now? 
if len(missing) == len(expanded_paths) and dest and os.path.exists(dest): # Just check the filename to know if it's an archive or simple compressed file if re.search(r'(\.tar|\.tar\.gz|\.tgz|\.tbz2|\.tar\.bz2|\.tar\.xz|\.zip)$', os.path.basename(dest), re.IGNORECASE): state = 'archive' else: state = 'compress' # Multiple files, or globbiness elif archive: if not archive_paths: # No source files were found, but the archive is there. if os.path.lexists(dest): state = 'archive' elif missing: # SOME source files were found, but not all of them state = 'incomplete' archive = None size = 0 errors = [] if os.path.lexists(dest): size = os.path.getsize(dest) if state != 'archive': if check_mode: changed = True else: try: # Slightly more difficult (and less efficient!) compression using zipfile module if format == 'zip': arcfile = zipfile.ZipFile(dest, 'w', zipfile.ZIP_DEFLATED, True) # Easier compression using tarfile module elif format == 'gz' or format == 'bz2': arcfile = tarfile.open(dest, 'w|' + format) # python3 tarfile module allows xz format but for python2 we have to create the tarfile # in memory and then compress it with lzma. 
elif format == 'xz': arcfileIO = io.BytesIO() arcfile = tarfile.open(fileobj=arcfileIO, mode='w') # Or plain tar archiving elif format == 'tar': arcfile = tarfile.open(dest, 'w') match_root = re.compile('^%s' % re.escape(arcroot)) for path in archive_paths: if os.path.isdir(path): # Recurse into directories for dirpath, dirnames, filenames in os.walk(path, topdown=True): if not dirpath.endswith(os.sep): dirpath += os.sep for dirname in dirnames: fullpath = dirpath + dirname arcname = match_root.sub('', fullpath) try: if format == 'zip': arcfile.write(fullpath, arcname) else: arcfile.add(fullpath, arcname, recursive=False) except Exception as e: errors.append('%s: %s' % (fullpath, to_native(e))) for filename in filenames: fullpath = dirpath + filename arcname = match_root.sub('', fullpath) if not filecmp.cmp(fullpath, dest): try: if format == 'zip': arcfile.write(fullpath, arcname) else: arcfile.add(fullpath, arcname, recursive=False) successes.append(fullpath) except Exception as e: errors.append('Adding %s: %s' % (path, to_native(e))) else: if format == 'zip': arcfile.write(path, match_root.sub('', path)) else: arcfile.add(path, match_root.sub('', path), recursive=False) successes.append(path) except Exception as e: module.fail_json(msg='Error when writing %s archive at %s: %s' % (format == 'zip' and 'zip' or ('tar.' + format), dest, to_native(e)), exception=format_exc()) if arcfile: arcfile.close() state = 'archive' if format == 'xz': with lzma.open(dest, 'wb') as f: f.write(arcfileIO.getvalue()) arcfileIO.close() if errors: module.fail_json(msg='Errors when writing archive at %s: %s' % (dest, '; '.join(errors))) if state in ['archive', 'incomplete'] and remove: for path in successes: try: if os.path.isdir(path): shutil.rmtree(path) elif not check_mode: os.remove(path) except OSError as e: errors.append(path) if errors: module.fail_json(dest=dest, msg='Error deleting some source files: ', files=errors) # Rudimentary check: If size changed then file changed. 
Not perfect, but easy. if not check_mode and os.path.getsize(dest) != size: changed = True if successes and state != 'incomplete': state = 'archive' # Simple, single-file compression else: path = expanded_paths[0] # No source or compressed file if not (os.path.exists(path) or os.path.lexists(dest)): state = 'absent' # if it already exists and the source file isn't there, consider this done elif not os.path.lexists(path) and os.path.lexists(dest): state = 'compress' else: if module.check_mode: if not os.path.exists(dest): changed = True else: size = 0 f_in = f_out = arcfile = None if os.path.lexists(dest): size = os.path.getsize(dest) try: if format == 'zip': arcfile = zipfile.ZipFile(dest, 'w', zipfile.ZIP_DEFLATED, True) arcfile.write(path, path[len(arcroot):]) arcfile.close() state = 'archive' # because all zip files are archives elif format == 'tar': arcfile = tarfile.open(dest, 'w') arcfile.add(path) arcfile.close() else: f_in = open(path, 'rb') if format == 'gz': f_out = gzip.open(dest, 'wb') elif format == 'bz2': f_out = bz2.BZ2File(dest, 'wb') elif format == 'xz': f_out = lzma.LZMAFile(dest, 'wb') else: raise OSError("Invalid format") shutil.copyfileobj(f_in, f_out) successes.append(path) except OSError as e: module.fail_json(path=path, dest=dest, msg='Unable to write to compressed file: %s' % to_native(e), exception=format_exc()) if arcfile: arcfile.close() if f_in: f_in.close() if f_out: f_out.close() # Rudimentary check: If size changed then file changed. Not perfect, but easy. 
if os.path.getsize(dest) != size: changed = True state = 'compress' if remove and not check_mode: try: os.remove(path) except OSError as e: module.fail_json(path=path, msg='Unable to remove source file: %s' % to_native(e), exception=format_exc()) params['path'] = dest file_args = module.load_file_common_arguments(params) if not check_mode: changed = module.set_fs_attributes_if_different(file_args, changed) module.exit_json(archived=successes, dest=dest, changed=changed, state=state, arcroot=arcroot, missing=missing, expanded_paths=expanded_paths, expanded_exclude_paths=expanded_exclude_paths) if __name__ == '__main__': main()
jordsti/hacker-jeopardy
refs/heads/master
webservice/funcs/__init__.py
1
from add_team import add_team from ask_question import ask_question from get_all_categories import get_all_categories from get_all_teams import get_all_teams from answer_question import answer_question from test_key import test_key from remove_team import remove_team from get_points_table import get_points_table from get_categories_rank import get_categories_rank from add_point_to_team import add_point_to_team from start_game import start_game from get_game_state import get_game_state from current_question import current_question
motion2015/edx-platform
refs/heads/master
common/djangoapps/util/password_policy_validators.py
113
# pylint: disable=no-member """ This file exposes a number of password complexity validators which can be optionally added to account creation This file was inspired by the django-passwords project at https://github.com/dstufft/django-passwords authored by dstufft (https://github.com/dstufft) """ from __future__ import division import string # pylint: disable=deprecated-module from django.core.exceptions import ValidationError from django.utils.translation import ugettext_lazy as _ from django.conf import settings import nltk def validate_password_length(value): """ Validator that enforces minimum length of a password """ message = _("Invalid Length ({0})") code = "length" min_length = getattr(settings, 'PASSWORD_MIN_LENGTH', None) max_length = getattr(settings, 'PASSWORD_MAX_LENGTH', None) if min_length and len(value) < min_length: raise ValidationError(message.format(_("must be {0} characters or more").format(min_length)), code=code) elif max_length and len(value) > max_length: raise ValidationError(message.format(_("must be {0} characters or less").format(max_length)), code=code) def validate_password_complexity(value): """ Validator that enforces minimum complexity """ message = _("Must be more complex ({0})") code = "complexity" complexities = getattr(settings, "PASSWORD_COMPLEXITY", None) if complexities is None: return uppercase, lowercase, digits, non_ascii, punctuation = set(), set(), set(), set(), set() for character in value: if character.isupper(): uppercase.add(character) elif character.islower(): lowercase.add(character) elif character.isdigit(): digits.add(character) elif character in string.punctuation: punctuation.add(character) else: non_ascii.add(character) words = set(value.split()) errors = [] if len(uppercase) < complexities.get("UPPER", 0): errors.append(_("must contain {0} or more uppercase characters").format(complexities["UPPER"])) if len(lowercase) < complexities.get("LOWER", 0): errors.append(_("must contain {0} or more lowercase 
characters").format(complexities["LOWER"])) if len(digits) < complexities.get("DIGITS", 0): errors.append(_("must contain {0} or more digits").format(complexities["DIGITS"])) if len(punctuation) < complexities.get("PUNCTUATION", 0): errors.append(_("must contain {0} or more punctuation characters").format(complexities["PUNCTUATION"])) if len(non_ascii) < complexities.get("NON ASCII", 0): errors.append(_("must contain {0} or more non ascii characters").format(complexities["NON ASCII"])) if len(words) < complexities.get("WORDS", 0): errors.append(_("must contain {0} or more unique words").format(complexities["WORDS"])) if errors: raise ValidationError(message.format(u', '.join(errors)), code=code) def validate_password_dictionary(value): """ Insures that the password is not too similar to a defined set of dictionary words """ password_max_edit_distance = getattr(settings, "PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD", None) password_dictionary = getattr(settings, "PASSWORD_DICTIONARY", None) if password_max_edit_distance and password_dictionary: for word in password_dictionary: distance = nltk.metrics.distance.edit_distance(value, word) if distance <= password_max_edit_distance: raise ValidationError(_("Too similar to a restricted dictionary word."), code="dictionary_word")
eHealthAfrica/ureport
refs/heads/develop
ureport/polls/migrations/0010_auto_20140820_1629.py
3
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('polls', '0009_auto_20140820_1559'), ] operations = [ migrations.AlterField( model_name='poll', name='category', field=models.ForeignKey(help_text='The category this Poll belongs to', to='categories.Category'), ), ]
hoosteeno/kuma
refs/heads/master
vendor/packages/translate/convert/po2idml.py
23
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2014 Zuza Software Foundation # # This file is part of translate. # # translate is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # translate is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, see <http://www.gnu.org/licenses/>. """Takes an IDML template file and a PO file containing translations of strings in the IDML template. It creates a new IDML file using the translations of the PO file. """ from cStringIO import StringIO from zipfile import ZIP_DEFLATED, ZipFile import lxml.etree as etree from translate.convert import convert from translate.storage import factory from translate.storage.idml import (NO_TRANSLATE_ELEMENTS, INLINE_ELEMENTS, copy_idml, open_idml) from translate.storage.xml_extract.extract import (ParseState, process_idml_translatable) from translate.storage.xml_extract.generate import (apply_translations, replace_dom_text) from translate.storage.xml_extract.unit_tree import XPathTree, build_unit_tree def translate_idml(template, input_file, translatable_files): def load_dom_trees(template): """Return a dict with translatable files in the template IDML package. The keys are the filenames inside the IDML package, and the values are the etrees for each of those translatable files. 
""" idml_data = open_idml(template) parser = etree.XMLParser(strip_cdata=False) return dict((filename, etree.fromstring(data, parser).getroottree()) for filename, data in idml_data.iteritems()) def load_unit_tree(input_file): """Return a dict with the translations grouped by files IDML package. The keys are the filenames inside the template IDML package, and the values are XPathTree instances for each of those files. """ store = factory.getobject(input_file) def extract_unit_tree(filename, root_dom_element_name): """Find the subtree in 'tree' which corresponds to the data in XML file 'filename' """ tree = build_unit_tree(store, filename) try: file_tree = tree.children[root_dom_element_name, 0] except KeyError: file_tree = XPathTree() return (filename, file_tree) return dict(extract_unit_tree(filename, 'idPkg:Story') for filename in translatable_files) def translate_dom_trees(unit_trees, dom_trees): """Return a dict with the translated files for the IDML package. The keys are the filenames for the translatable files inside the template IDML package, and the values are etree ElementTree instances for each of those files. """ def get_po_doms(unit): """Return a tuple with unit source and target DOM objects. This method is method is meant to provide a way to retrieve the DOM objects for the unit source and target for PO stores. Since POunit doesn't have any source_dom nor target_dom attributes, it is necessary to craft those objects. """ def add_node_content(string, node): """Append the translatable content to the node. The string is going to have XLIFF placeables, so we have to parse it as XML in order to get the right nodes to append to the node. """ # Add a wrapper "whatever" tag to avoid problems when parsing # several sibling tags at the root level. fake_string = "<whatever>" + string + "</whatever>" # Copy the children to the XLIFF unit's source or target node. 
fake_node = etree.fromstring(fake_string) node.extend(fake_node.getchildren()) return node source_dom = etree.Element("source") source_dom = add_node_content(unit.source, source_dom) target_dom = etree.Element("target") if unit.target: target_dom = add_node_content(unit.target, target_dom) else: target_dom = add_node_content(unit.source, target_dom) return (source_dom, target_dom) make_parse_state = lambda: ParseState(NO_TRANSLATE_ELEMENTS, INLINE_ELEMENTS) for filename, dom_tree in dom_trees.iteritems(): file_unit_tree = unit_trees[filename] apply_translations(dom_tree.getroot(), file_unit_tree, replace_dom_text(make_parse_state, dom_retriever=get_po_doms, process_translatable=process_idml_translatable)) return dom_trees dom_trees = load_dom_trees(template) unit_trees = load_unit_tree(input_file) return translate_dom_trees(unit_trees, dom_trees) def write_idml(template_zip, output_file, dom_trees): """Write the translated IDML package.""" output_zip = ZipFile(output_file, 'w', compression=ZIP_DEFLATED) # Copy the IDML package. output_zip = copy_idml(template_zip, output_zip, dom_trees.keys()) # Replace the translated files in the IDML package. for filename, dom_tree in dom_trees.iteritems(): output_zip.writestr(filename, etree.tostring(dom_tree, encoding='UTF-8', xml_declaration=True, standalone='yes')) def convertpo(input_file, output_file, template): """Create a translated IDML using an IDML template and a PO file.""" # Since the convertoptionsparser will give us a open files, we risk that # they could have been opened in non-binary mode on Windows, and then we'll # have problems, so let's make sure we have what we want. template.close() template = file(template.name, mode='rb') output_file.close() output_file = file(output_file.name, mode='wb') # Now proceed with the conversion. 
template_zip = ZipFile(template, 'r') translatable_files = [filename for filename in template_zip.namelist() if filename.startswith('Stories/')] po_data = input_file.read() dom_trees = translate_idml(template, StringIO(po_data), translatable_files) write_idml(template_zip, output_file, dom_trees) output_file.close() return True def main(argv=None): formats = { ('po', 'idml'): ("idml", convertpo), } parser = convert.ConvertOptionParser(formats, usetemplates=True, description=__doc__) parser.run(argv) if __name__ == '__main__': main()
Neozaru/depot_tools
refs/heads/master
third_party/logilab/astng/inspector.py
19
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
# copyright 2003-2010 Sylvain Thenault, all rights reserved.
# contact mailto:thenault@gmail.com
#
# This file is part of logilab-astng.
#
# logilab-astng is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# logilab-astng is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-astng. If not, see <http://www.gnu.org/licenses/>.
"""visitor doing some postprocessing on the astng tree.

Try to resolve definitions (namespace) dictionary, relationship...

This module has been imported from pyreverse
"""

__docformat__ = "restructuredtext en"

from os.path import dirname

from logilab.common.modutils import get_module_part, is_relative, \
     is_standard_module

from logilab import astng
from logilab.astng.exceptions import InferenceError
from logilab.astng.utils import LocalsVisitor


class IdGeneratorMixIn:
    """Mixin adding the ability to generate integer uid.

    The counter starts at `start_value` and `generate_id` returns
    strictly increasing integers (first returned id is start_value + 1).
    """

    def __init__(self, start_value=0):
        self.id_count = start_value

    def init_counter(self, start_value=0):
        """init the id counter"""
        self.id_count = start_value

    def generate_id(self):
        """generate a new identifier"""
        self.id_count += 1
        return self.id_count


class Linker(IdGeneratorMixIn, LocalsVisitor):
    """walk on the project tree and resolve relationships.

    According to options the following attributes may be added to visited nodes:

    * uid,
      a unique identifier for the node (on astng.Project, astng.Module,
      astng.Class and astng.locals_type). Only if the linker has been
      instantiated with tag=True parameter (False by default).

    * Function
      a mapping from locals names to their bounded value, which may be a
      constant like a string or an integer, or an astng node (on astng.Module,
      astng.Class and astng.Function).

    * instance_attrs_type
      as locals_type but for klass member attributes (only on astng.Class)

    * implements,
      list of implemented interface _objects_ (only on astng.Class nodes)
    """

    def __init__(self, project, inherited_interfaces=0, tag=False):
        IdGeneratorMixIn.__init__(self)
        LocalsVisitor.__init__(self)
        # take inherited interface in consideration or not
        self.inherited_interfaces = inherited_interfaces
        # tag nodes or not
        self.tag = tag
        # visited project
        self.project = project

    def visit_project(self, node):
        """visit an astng.Project node

        * optionally tag the node with a unique id
        """
        if self.tag:
            node.uid = self.generate_id()
        for module in node.modules:
            self.visit(module)

    def visit_package(self, node):
        """visit an astng.Package node

        * optionally tag the node with a unique id
        """
        if self.tag:
            node.uid = self.generate_id()
        for subelmt in node.values():
            self.visit(subelmt)

    def visit_module(self, node):
        """visit an astng.Module node

        * set the locals_type mapping
        * set the depends mapping
        * optionally tag the node with a unique id
        """
        # guard: already processed by a previous Linker run
        if hasattr(node, 'locals_type'):
            return
        node.locals_type = {}
        node.depends = []
        if self.tag:
            node.uid = self.generate_id()

    def visit_class(self, node):
        """visit an astng.Class node

        * set the locals_type and instance_attrs_type mappings
        * set the implements list and build it
        * optionally tag the node with a unique id
        """
        # guard: already processed by a previous Linker run
        if hasattr(node, 'locals_type'):
            return
        node.locals_type = {}
        if self.tag:
            node.uid = self.generate_id()
        # resolve ancestors: register this class as a specialization of each
        # of its direct base classes
        for baseobj in node.ancestors(recurs=False):
            specializations = getattr(baseobj, 'specializations', [])
            specializations.append(node)
            baseobj.specializations = specializations
        # resolve instance attributes
        node.instance_attrs_type = {}
        for assattrs in node.instance_attrs.values():
            for assattr in assattrs:
                self.handle_assattr_type(assattr, node)
        # resolve implemented interface
        try:
            node.implements = list(node.interfaces(self.inherited_interfaces))
        except InferenceError:
            node.implements = ()

    def visit_function(self, node):
        """visit an astng.Function node

        * set the locals_type mapping
        * optionally tag the node with a unique id
        """
        # guard: already processed by a previous Linker run
        if hasattr(node, 'locals_type'):
            return
        node.locals_type = {}
        if self.tag:
            node.uid = self.generate_id()

    # LocalsVisitor dispatches on both visit_* and link_* names; the link_*
    # aliases make re-visiting idempotent through the hasattr guards above
    link_project = visit_project
    link_module = visit_module
    link_class = visit_class
    link_function = visit_function

    def visit_assname(self, node):
        """visit an astng.AssName node

        handle locals_type
        """
        # avoid double parsing done by different Linkers.visit
        # running over the same project:
        if hasattr(node, '_handled'):
            return
        node._handled = True
        if node.name in node.frame():
            frame = node.frame()
        else:
            # the name has been defined as 'global' in the frame and belongs
            # there. Btw the frame is not yet visited as the name is in the
            # root locals; the frame hence has no locals_type attribute
            frame = node.root()
        try:
            values = node.infered()
            try:
                already_infered = frame.locals_type[node.name]
                for valnode in values:
                    if valnode not in already_infered:
                        already_infered.append(valnode)
            except KeyError:
                frame.locals_type[node.name] = values
        except astng.InferenceError:
            # inference failed: leave locals_type untouched for this name
            pass

    def handle_assattr_type(self, node, parent):
        """handle an astng.AssAttr node

        handle instance_attrs_type
        """
        try:
            values = list(node.infer())
            try:
                already_infered = parent.instance_attrs_type[node.attrname]
                for valnode in values:
                    if valnode not in already_infered:
                        already_infered.append(valnode)
            except KeyError:
                parent.instance_attrs_type[node.attrname] = values
        except astng.InferenceError:
            # inference failed: leave instance_attrs_type untouched
            pass

    def visit_import(self, node):
        """visit an astng.Import node

        resolve module dependencies
        """
        context_file = node.root().file
        for name in node.names:
            relative = is_relative(name[0], context_file)
            self._imported_module(node, name[0], relative)

    def visit_from(self, node):
        """visit an astng.From node

        resolve module dependencies
        """
        basename = node.modname
        context_file = node.root().file
        if context_file is not None:
            relative = is_relative(basename, context_file)
        else:
            relative = False
        for name in node.names:
            if name[0] == '*':
                continue
            # analyze dependencies
            fullname = '%s.%s' % (basename, name[0])
            if '.' in fullname:
                try:
                    # XXX: don't use get_module_part, missing package precedence
                    fullname = get_module_part(fullname)
                except ImportError:
                    continue
            if fullname != basename:
                self._imported_module(node, fullname, relative)

    def compute_module(self, context_name, mod_path):
        """return true if the module should be added to dependencies"""
        package_dir = dirname(self.project.path)
        if context_name == mod_path:
            # a module does not depend on itself
            return 0
        elif is_standard_module(mod_path, (package_dir,)):
            return 1
        return 0

    # protected methods ########################################################

    def _imported_module(self, node, mod_path, relative):
        """notify an imported module, used to analyze dependencies
        """
        module = node.root()
        context_name = module.name
        if relative:
            # resolve the relative import against the importing package
            mod_path = '%s.%s' % ('.'.join(context_name.split('.')[:-1]),
                                  mod_path)
        if self.compute_module(context_name, mod_path):
            # handle dependencies
            if not hasattr(module, 'depends'):
                module.depends = []
            mod_paths = module.depends
            if mod_path not in mod_paths:
                mod_paths.append(mod_path)
ksambor/pyxos
refs/heads/master
pyxos/tests/test_state_machine.py
1
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import unittest from pyxos.lib import state_machine @mock.patch('pyxos.lib.state_machine.open', create=True) class TestStateMachine(unittest.TestCase): def test_machine_init(self, mopen): mopen.return_value = mock.MagicMock(spec=file) machine = state_machine.StateMachine('x') self.assertDictEqual(machine.state, {}) self.assertListEqual(machine.ops, []) def test_machine_op(self, mopen): mopen.return_value = mock.MagicMock(spec=file) machine = state_machine.StateMachine('x') machine.op('x', 'y') self.assertDictEqual(machine.state, {'x': 'y'}) self.assertListEqual(machine.ops, [('x', 'y')]) machine.op('x', 'z') self.assertDictEqual(machine.state, {'x': 'z'}) self.assertListEqual(machine.ops, [('x', 'y'), ('x', 'z')])
scottferg/web-console
refs/heads/master
django/contrib/gis/tests/geoapp/tests.py
10
import re, os, unittest from django.db import connection from django.contrib.gis import gdal from django.contrib.gis.geos import * from django.contrib.gis.measure import Distance from django.contrib.gis.tests.utils import \ no_mysql, no_oracle, no_postgis, no_spatialite, \ mysql, oracle, postgis, spatialite from django.test import TestCase from models import Country, City, PennsylvaniaCity, State, Track if not spatialite: from models import Feature, MinusOneSRID class GeoModelTest(TestCase): def test01_fixtures(self): "Testing geographic model initialization from fixtures." # Ensuring that data was loaded from initial data fixtures. self.assertEqual(2, Country.objects.count()) self.assertEqual(8, City.objects.count()) self.assertEqual(2, State.objects.count()) def test02_proxy(self): "Testing Lazy-Geometry support (using the GeometryProxy)." ## Testing on a Point pnt = Point(0, 0) nullcity = City(name='NullCity', point=pnt) nullcity.save() # Making sure TypeError is thrown when trying to set with an # incompatible type. for bad in [5, 2.0, LineString((0, 0), (1, 1))]: try: nullcity.point = bad except TypeError: pass else: self.fail('Should throw a TypeError') # Now setting with a compatible GEOS Geometry, saving, and ensuring # the save took, notice no SRID is explicitly set. new = Point(5, 23) nullcity.point = new # Ensuring that the SRID is automatically set to that of the # field after assignment, but before saving. self.assertEqual(4326, nullcity.point.srid) nullcity.save() # Ensuring the point was saved correctly after saving self.assertEqual(new, City.objects.get(name='NullCity').point) # Setting the X and Y of the Point nullcity.point.x = 23 nullcity.point.y = 5 # Checking assignments pre & post-save. 
self.assertNotEqual(Point(23, 5), City.objects.get(name='NullCity').point) nullcity.save() self.assertEqual(Point(23, 5), City.objects.get(name='NullCity').point) nullcity.delete() ## Testing on a Polygon shell = LinearRing((0, 0), (0, 100), (100, 100), (100, 0), (0, 0)) inner = LinearRing((40, 40), (40, 60), (60, 60), (60, 40), (40, 40)) # Creating a State object using a built Polygon ply = Polygon(shell, inner) nullstate = State(name='NullState', poly=ply) self.assertEqual(4326, nullstate.poly.srid) # SRID auto-set from None nullstate.save() ns = State.objects.get(name='NullState') self.assertEqual(ply, ns.poly) # Testing the `ogr` and `srs` lazy-geometry properties. if gdal.HAS_GDAL: self.assertEqual(True, isinstance(ns.poly.ogr, gdal.OGRGeometry)) self.assertEqual(ns.poly.wkb, ns.poly.ogr.wkb) self.assertEqual(True, isinstance(ns.poly.srs, gdal.SpatialReference)) self.assertEqual('WGS 84', ns.poly.srs.name) # Changing the interior ring on the poly attribute. new_inner = LinearRing((30, 30), (30, 70), (70, 70), (70, 30), (30, 30)) ns.poly[1] = new_inner ply[1] = new_inner self.assertEqual(4326, ns.poly.srid) ns.save() self.assertEqual(ply, State.objects.get(name='NullState').poly) ns.delete() def test03a_kml(self): "Testing KML output from the database using GeoQuerySet.kml()." # Only PostGIS supports KML serialization if not postgis: self.assertRaises(NotImplementedError, State.objects.all().kml, field_name='poly') return # Should throw a TypeError when trying to obtain KML from a # non-geometry field. qs = City.objects.all() self.assertRaises(TypeError, qs.kml, 'name') # The reference KML depends on the version of PostGIS used # (the output stopped including altitude in 1.3.3). if connection.ops.spatial_version >= (1, 3, 3): ref_kml = '<Point><coordinates>-104.609252,38.255001</coordinates></Point>' else: ref_kml = '<Point><coordinates>-104.609252,38.255001,0</coordinates></Point>' # Ensuring the KML is as expected. 
ptown1 = City.objects.kml(field_name='point', precision=9).get(name='Pueblo') ptown2 = City.objects.kml(precision=9).get(name='Pueblo') for ptown in [ptown1, ptown2]: self.assertEqual(ref_kml, ptown.kml) def test03b_gml(self): "Testing GML output from the database using GeoQuerySet.gml()." if mysql or spatialite: self.assertRaises(NotImplementedError, Country.objects.all().gml, field_name='mpoly') return # Should throw a TypeError when tyring to obtain GML from a # non-geometry field. qs = City.objects.all() self.assertRaises(TypeError, qs.gml, field_name='name') ptown1 = City.objects.gml(field_name='point', precision=9).get(name='Pueblo') ptown2 = City.objects.gml(precision=9).get(name='Pueblo') if oracle: # No precision parameter for Oracle :-/ gml_regex = re.compile(r'^<gml:Point srsName="SDO:4326" xmlns:gml="http://www.opengis.net/gml"><gml:coordinates decimal="\." cs="," ts=" ">-104.60925\d+,38.25500\d+ </gml:coordinates></gml:Point>') for ptown in [ptown1, ptown2]: self.failUnless(gml_regex.match(ptown.gml)) else: gml_regex = re.compile(r'^<gml:Point srsName="EPSG:4326"><gml:coordinates>-104\.60925\d+,38\.255001</gml:coordinates></gml:Point>') for ptown in [ptown1, ptown2]: self.failUnless(gml_regex.match(ptown.gml)) def test03c_geojson(self): "Testing GeoJSON output from the database using GeoQuerySet.geojson()." # Only PostGIS 1.3.4+ supports GeoJSON. 
if not connection.ops.geojson: self.assertRaises(NotImplementedError, Country.objects.all().geojson, field_name='mpoly') return if connection.ops.spatial_version >= (1, 4, 0): pueblo_json = '{"type":"Point","coordinates":[-104.609252,38.255001]}' houston_json = '{"type":"Point","crs":{"type":"name","properties":{"name":"EPSG:4326"}},"coordinates":[-95.363151,29.763374]}' victoria_json = '{"type":"Point","bbox":[-123.30519600,48.46261100,-123.30519600,48.46261100],"coordinates":[-123.305196,48.462611]}' chicago_json = '{"type":"Point","crs":{"type":"name","properties":{"name":"EPSG:4326"}},"bbox":[-87.65018,41.85039,-87.65018,41.85039],"coordinates":[-87.65018,41.85039]}' else: pueblo_json = '{"type":"Point","coordinates":[-104.60925200,38.25500100]}' houston_json = '{"type":"Point","crs":{"type":"EPSG","properties":{"EPSG":4326}},"coordinates":[-95.36315100,29.76337400]}' victoria_json = '{"type":"Point","bbox":[-123.30519600,48.46261100,-123.30519600,48.46261100],"coordinates":[-123.30519600,48.46261100]}' chicago_json = '{"type":"Point","crs":{"type":"EPSG","properties":{"EPSG":4326}},"bbox":[-87.65018,41.85039,-87.65018,41.85039],"coordinates":[-87.65018,41.85039]}' # Precision argument should only be an integer self.assertRaises(TypeError, City.objects.geojson, precision='foo') # Reference queries and values. # SELECT ST_AsGeoJson("geoapp_city"."point", 8, 0) FROM "geoapp_city" WHERE "geoapp_city"."name" = 'Pueblo'; self.assertEqual(pueblo_json, City.objects.geojson().get(name='Pueblo').geojson) # 1.3.x: SELECT ST_AsGeoJson("geoapp_city"."point", 8, 1) FROM "geoapp_city" WHERE "geoapp_city"."name" = 'Houston'; # 1.4.x: SELECT ST_AsGeoJson("geoapp_city"."point", 8, 2) FROM "geoapp_city" WHERE "geoapp_city"."name" = 'Houston'; # This time we want to include the CRS by using the `crs` keyword. 
self.assertEqual(houston_json, City.objects.geojson(crs=True, model_att='json').get(name='Houston').json) # 1.3.x: SELECT ST_AsGeoJson("geoapp_city"."point", 8, 2) FROM "geoapp_city" WHERE "geoapp_city"."name" = 'Victoria'; # 1.4.x: SELECT ST_AsGeoJson("geoapp_city"."point", 8, 1) FROM "geoapp_city" WHERE "geoapp_city"."name" = 'Houston'; # This time we include the bounding box by using the `bbox` keyword. self.assertEqual(victoria_json, City.objects.geojson(bbox=True).get(name='Victoria').geojson) # 1.(3|4).x: SELECT ST_AsGeoJson("geoapp_city"."point", 5, 3) FROM "geoapp_city" WHERE "geoapp_city"."name" = 'Chicago'; # Finally, we set every available keyword. self.assertEqual(chicago_json, City.objects.geojson(bbox=True, crs=True, precision=5).get(name='Chicago').geojson) def test03d_svg(self): "Testing SVG output using GeoQuerySet.svg()." if mysql or oracle: self.assertRaises(NotImplementedError, City.objects.svg) return self.assertRaises(TypeError, City.objects.svg, precision='foo') # SELECT AsSVG(geoapp_city.point, 0, 8) FROM geoapp_city WHERE name = 'Pueblo'; svg1 = 'cx="-104.609252" cy="-38.255001"' # Even though relative, only one point so it's practically the same except for # the 'c' letter prefix on the x,y values. svg2 = svg1.replace('c', '') self.assertEqual(svg1, City.objects.svg().get(name='Pueblo').svg) self.assertEqual(svg2, City.objects.svg(relative=5).get(name='Pueblo').svg) @no_mysql def test04_transform(self): "Testing the transform() GeoManager method." # Pre-transformed points for Houston and Pueblo. htown = fromstr('POINT(1947516.83115183 6322297.06040572)', srid=3084) ptown = fromstr('POINT(992363.390841912 481455.395105533)', srid=2774) prec = 3 # Precision is low due to version variations in PROJ and GDAL. # Asserting the result of the transform operation with the values in # the pre-transformed points. Oracle does not have the 3084 SRID. 
if not oracle: h = City.objects.transform(htown.srid).get(name='Houston') self.assertEqual(3084, h.point.srid) self.assertAlmostEqual(htown.x, h.point.x, prec) self.assertAlmostEqual(htown.y, h.point.y, prec) p1 = City.objects.transform(ptown.srid, field_name='point').get(name='Pueblo') p2 = City.objects.transform(srid=ptown.srid).get(name='Pueblo') for p in [p1, p2]: self.assertEqual(2774, p.point.srid) self.assertAlmostEqual(ptown.x, p.point.x, prec) self.assertAlmostEqual(ptown.y, p.point.y, prec) @no_mysql @no_spatialite # SpatiaLite does not have an Extent function def test05_extent(self): "Testing the `extent` GeoQuerySet method." # Reference query: # `SELECT ST_extent(point) FROM geoapp_city WHERE (name='Houston' or name='Dallas');` # => BOX(-96.8016128540039 29.7633724212646,-95.3631439208984 32.7820587158203) expected = (-96.8016128540039, 29.7633724212646, -95.3631439208984, 32.782058715820) qs = City.objects.filter(name__in=('Houston', 'Dallas')) extent = qs.extent() for val, exp in zip(extent, expected): self.assertAlmostEqual(exp, val, 4) # Only PostGIS has support for the MakeLine aggregate. @no_mysql @no_oracle @no_spatialite def test06_make_line(self): "Testing the `make_line` GeoQuerySet method." # Ensuring that a `TypeError` is raised on models without PointFields. self.assertRaises(TypeError, State.objects.make_line) self.assertRaises(TypeError, Country.objects.make_line) # Reference query: # SELECT AsText(ST_MakeLine(geoapp_city.point)) FROM geoapp_city; ref_line = GEOSGeometry('LINESTRING(-95.363151 29.763374,-96.801611 32.782057,-97.521157 34.464642,174.783117 -41.315268,-104.609252 38.255001,-95.23506 38.971823,-87.650175 41.850385,-123.305196 48.462611)', srid=4326) self.assertEqual(ref_line, City.objects.make_line()) @no_mysql def test09_disjoint(self): "Testing the `disjoint` lookup type." 
ptown = City.objects.get(name='Pueblo') qs1 = City.objects.filter(point__disjoint=ptown.point) self.assertEqual(7, qs1.count()) qs2 = State.objects.filter(poly__disjoint=ptown.point) self.assertEqual(1, qs2.count()) self.assertEqual('Kansas', qs2[0].name) def test10_contains_contained(self): "Testing the 'contained', 'contains', and 'bbcontains' lookup types." # Getting Texas, yes we were a country -- once ;) texas = Country.objects.get(name='Texas') # Seeing what cities are in Texas, should get Houston and Dallas, # and Oklahoma City because 'contained' only checks on the # _bounding box_ of the Geometries. if not oracle: qs = City.objects.filter(point__contained=texas.mpoly) self.assertEqual(3, qs.count()) cities = ['Houston', 'Dallas', 'Oklahoma City'] for c in qs: self.assertEqual(True, c.name in cities) # Pulling out some cities. houston = City.objects.get(name='Houston') wellington = City.objects.get(name='Wellington') pueblo = City.objects.get(name='Pueblo') okcity = City.objects.get(name='Oklahoma City') lawrence = City.objects.get(name='Lawrence') # Now testing contains on the countries using the points for # Houston and Wellington. tx = Country.objects.get(mpoly__contains=houston.point) # Query w/GEOSGeometry nz = Country.objects.get(mpoly__contains=wellington.point.hex) # Query w/EWKBHEX self.assertEqual('Texas', tx.name) self.assertEqual('New Zealand', nz.name) # Spatialite 2.3 thinks that Lawrence is in Puerto Rico (a NULL geometry). if not spatialite: ks = State.objects.get(poly__contains=lawrence.point) self.assertEqual('Kansas', ks.name) # Pueblo and Oklahoma City (even though OK City is within the bounding box of Texas) # are not contained in Texas or New Zealand. self.assertEqual(0, len(Country.objects.filter(mpoly__contains=pueblo.point))) # Query w/GEOSGeometry object self.assertEqual((mysql and 1) or 0, len(Country.objects.filter(mpoly__contains=okcity.point.wkt))) # Qeury w/WKT # OK City is contained w/in bounding box of Texas. 
if not oracle: qs = Country.objects.filter(mpoly__bbcontains=okcity.point) self.assertEqual(1, len(qs)) self.assertEqual('Texas', qs[0].name) @no_mysql def test11_lookup_insert_transform(self): "Testing automatic transform for lookups and inserts." # San Antonio in 'WGS84' (SRID 4326) sa_4326 = 'POINT (-98.493183 29.424170)' wgs_pnt = fromstr(sa_4326, srid=4326) # Our reference point in WGS84 # Oracle doesn't have SRID 3084, using 41157. if oracle: # San Antonio in 'Texas 4205, Southern Zone (1983, meters)' (SRID 41157) # Used the following Oracle SQL to get this value: # SELECT SDO_UTIL.TO_WKTGEOMETRY(SDO_CS.TRANSFORM(SDO_GEOMETRY('POINT (-98.493183 29.424170)', 4326), 41157)) FROM DUAL; nad_wkt = 'POINT (300662.034646583 5416427.45974934)' nad_srid = 41157 else: # San Antonio in 'NAD83(HARN) / Texas Centric Lambert Conformal' (SRID 3084) nad_wkt = 'POINT (1645978.362408288754523 6276356.025927528738976)' # Used ogr.py in gdal 1.4.1 for this transform nad_srid = 3084 # Constructing & querying with a point from a different SRID. Oracle # `SDO_OVERLAPBDYINTERSECT` operates differently from # `ST_Intersects`, so contains is used instead. nad_pnt = fromstr(nad_wkt, srid=nad_srid) if oracle: tx = Country.objects.get(mpoly__contains=nad_pnt) else: tx = Country.objects.get(mpoly__intersects=nad_pnt) self.assertEqual('Texas', tx.name) # Creating San Antonio. Remember the Alamo. sa = City.objects.create(name='San Antonio', point=nad_pnt) # Now verifying that San Antonio was transformed correctly sa = City.objects.get(name='San Antonio') self.assertAlmostEqual(wgs_pnt.x, sa.point.x, 6) self.assertAlmostEqual(wgs_pnt.y, sa.point.y, 6) # If the GeometryField SRID is -1, then we shouldn't perform any # transformation if the SRID of the input geometry is different. # SpatiaLite does not support missing SRID values. 
if not spatialite: m1 = MinusOneSRID(geom=Point(17, 23, srid=4326)) m1.save() self.assertEqual(-1, m1.geom.srid) @no_mysql def test12_null_geometries(self): "Testing NULL geometry support, and the `isnull` lookup type." # Creating a state with a NULL boundary. State.objects.create(name='Puerto Rico') # Querying for both NULL and Non-NULL values. nullqs = State.objects.filter(poly__isnull=True) validqs = State.objects.filter(poly__isnull=False) # Puerto Rico should be NULL (it's a commonwealth unincorporated territory) self.assertEqual(1, len(nullqs)) self.assertEqual('Puerto Rico', nullqs[0].name) # The valid states should be Colorado & Kansas self.assertEqual(2, len(validqs)) state_names = [s.name for s in validqs] self.assertEqual(True, 'Colorado' in state_names) self.assertEqual(True, 'Kansas' in state_names) # Saving another commonwealth w/a NULL geometry. nmi = State.objects.create(name='Northern Mariana Islands', poly=None) self.assertEqual(nmi.poly, None) # Assigning a geomery and saving -- then UPDATE back to NULL. nmi.poly = 'POLYGON((0 0,1 0,1 1,1 0,0 0))' nmi.save() State.objects.filter(name='Northern Mariana Islands').update(poly=None) self.assertEqual(None, State.objects.get(name='Northern Mariana Islands').poly) # Only PostGIS has `left` and `right` lookup types. @no_mysql @no_oracle @no_spatialite def test13_left_right(self): "Testing the 'left' and 'right' lookup types." # Left: A << B => true if xmax(A) < xmin(B) # Right: A >> B => true if xmin(A) > xmax(B) # See: BOX2D_left() and BOX2D_right() in lwgeom_box2dfloat4.c in PostGIS source. # Getting the borders for Colorado & Kansas co_border = State.objects.get(name='Colorado').poly ks_border = State.objects.get(name='Kansas').poly # Note: Wellington has an 'X' value of 174, so it will not be considered # to the left of CO. # These cities should be strictly to the right of the CO border. 
cities = ['Houston', 'Dallas', 'Oklahoma City', 'Lawrence', 'Chicago', 'Wellington'] qs = City.objects.filter(point__right=co_border) self.assertEqual(6, len(qs)) for c in qs: self.assertEqual(True, c.name in cities) # These cities should be strictly to the right of the KS border. cities = ['Chicago', 'Wellington'] qs = City.objects.filter(point__right=ks_border) self.assertEqual(2, len(qs)) for c in qs: self.assertEqual(True, c.name in cities) # Note: Wellington has an 'X' value of 174, so it will not be considered # to the left of CO. vic = City.objects.get(point__left=co_border) self.assertEqual('Victoria', vic.name) cities = ['Pueblo', 'Victoria'] qs = City.objects.filter(point__left=ks_border) self.assertEqual(2, len(qs)) for c in qs: self.assertEqual(True, c.name in cities) def test14_equals(self): "Testing the 'same_as' and 'equals' lookup types." pnt = fromstr('POINT (-95.363151 29.763374)', srid=4326) c1 = City.objects.get(point=pnt) c2 = City.objects.get(point__same_as=pnt) c3 = City.objects.get(point__equals=pnt) for c in [c1, c2, c3]: self.assertEqual('Houston', c.name) @no_mysql def test15_relate(self): "Testing the 'relate' lookup type." # To make things more interesting, we will have our Texas reference point in # different SRIDs. pnt1 = fromstr('POINT (649287.0363174 4177429.4494686)', srid=2847) pnt2 = fromstr('POINT(-98.4919715741052 29.4333344025053)', srid=4326) # Not passing in a geometry as first param shoud # raise a type error when initializing the GeoQuerySet self.assertRaises(ValueError, Country.objects.filter, mpoly__relate=(23, 'foo')) # Making sure the right exception is raised for the given # bad arguments. for bad_args, e in [((pnt1, 0), ValueError), ((pnt2, 'T*T***FF*', 0), ValueError)]: qs = Country.objects.filter(mpoly__relate=bad_args) self.assertRaises(e, qs.count) # Relate works differently for the different backends. 
if postgis or spatialite: contains_mask = 'T*T***FF*' within_mask = 'T*F**F***' intersects_mask = 'T********' elif oracle: contains_mask = 'contains' within_mask = 'inside' # TODO: This is not quite the same as the PostGIS mask above intersects_mask = 'overlapbdyintersect' # Testing contains relation mask. self.assertEqual('Texas', Country.objects.get(mpoly__relate=(pnt1, contains_mask)).name) self.assertEqual('Texas', Country.objects.get(mpoly__relate=(pnt2, contains_mask)).name) # Testing within relation mask. ks = State.objects.get(name='Kansas') self.assertEqual('Lawrence', City.objects.get(point__relate=(ks.poly, within_mask)).name) # Testing intersection relation mask. if not oracle: self.assertEqual('Texas', Country.objects.get(mpoly__relate=(pnt1, intersects_mask)).name) self.assertEqual('Texas', Country.objects.get(mpoly__relate=(pnt2, intersects_mask)).name) self.assertEqual('Lawrence', City.objects.get(point__relate=(ks.poly, intersects_mask)).name) def test16_createnull(self): "Testing creating a model instance and the geometry being None" c = City() self.assertEqual(c.point, None) @no_mysql def test17_unionagg(self): "Testing the `unionagg` (aggregate union) GeoManager method." tx = Country.objects.get(name='Texas').mpoly # Houston, Dallas -- Oracle has different order. 
union1 = fromstr('MULTIPOINT(-96.801611 32.782057,-95.363151 29.763374)') union2 = fromstr('MULTIPOINT(-96.801611 32.782057,-95.363151 29.763374)') qs = City.objects.filter(point__within=tx) self.assertRaises(TypeError, qs.unionagg, 'name') # Using `field_name` keyword argument in one query and specifying an # order in the other (which should not be used because this is # an aggregate method on a spatial column) u1 = qs.unionagg(field_name='point') u2 = qs.order_by('name').unionagg() tol = 0.00001 if oracle: union = union2 else: union = union1 self.assertEqual(True, union.equals_exact(u1, tol)) self.assertEqual(True, union.equals_exact(u2, tol)) qs = City.objects.filter(name='NotACity') self.assertEqual(None, qs.unionagg(field_name='point')) @no_spatialite # SpatiaLite does not support abstract geometry columns def test18_geometryfield(self): "Testing the general GeometryField." Feature(name='Point', geom=Point(1, 1)).save() Feature(name='LineString', geom=LineString((0, 0), (1, 1), (5, 5))).save() Feature(name='Polygon', geom=Polygon(LinearRing((0, 0), (0, 5), (5, 5), (5, 0), (0, 0)))).save() Feature(name='GeometryCollection', geom=GeometryCollection(Point(2, 2), LineString((0, 0), (2, 2)), Polygon(LinearRing((0, 0), (0, 5), (5, 5), (5, 0), (0, 0))))).save() f_1 = Feature.objects.get(name='Point') self.assertEqual(True, isinstance(f_1.geom, Point)) self.assertEqual((1.0, 1.0), f_1.geom.tuple) f_2 = Feature.objects.get(name='LineString') self.assertEqual(True, isinstance(f_2.geom, LineString)) self.assertEqual(((0.0, 0.0), (1.0, 1.0), (5.0, 5.0)), f_2.geom.tuple) f_3 = Feature.objects.get(name='Polygon') self.assertEqual(True, isinstance(f_3.geom, Polygon)) f_4 = Feature.objects.get(name='GeometryCollection') self.assertEqual(True, isinstance(f_4.geom, GeometryCollection)) self.assertEqual(f_3.geom, f_4.geom[2]) @no_mysql def test19_centroid(self): "Testing the `centroid` GeoQuerySet method." 
qs = State.objects.exclude(poly__isnull=True).centroid() if oracle: tol = 0.1 elif spatialite: tol = 0.000001 else: tol = 0.000000001 for s in qs: self.assertEqual(True, s.poly.centroid.equals_exact(s.centroid, tol)) @no_mysql def test20_pointonsurface(self): "Testing the `point_on_surface` GeoQuerySet method." # Reference values. if oracle: # SELECT SDO_UTIL.TO_WKTGEOMETRY(SDO_GEOM.SDO_POINTONSURFACE(GEOAPP_COUNTRY.MPOLY, 0.05)) FROM GEOAPP_COUNTRY; ref = {'New Zealand' : fromstr('POINT (174.616364 -36.100861)', srid=4326), 'Texas' : fromstr('POINT (-103.002434 36.500397)', srid=4326), } elif postgis or spatialite: # Using GEOSGeometry to compute the reference point on surface values # -- since PostGIS also uses GEOS these should be the same. ref = {'New Zealand' : Country.objects.get(name='New Zealand').mpoly.point_on_surface, 'Texas' : Country.objects.get(name='Texas').mpoly.point_on_surface } for c in Country.objects.point_on_surface(): if spatialite: # XXX This seems to be a WKT-translation-related precision issue? tol = 0.00001 else: tol = 0.000000001 self.assertEqual(True, ref[c.name].equals_exact(c.point_on_surface, tol)) @no_mysql @no_oracle def test21_scale(self): "Testing the `scale` GeoQuerySet method." xfac, yfac = 2, 3 tol = 5 # XXX The low precision tolerance is for SpatiaLite qs = Country.objects.scale(xfac, yfac, model_att='scaled') for c in qs: for p1, p2 in zip(c.mpoly, c.scaled): for r1, r2 in zip(p1, p2): for c1, c2 in zip(r1.coords, r2.coords): self.assertAlmostEqual(c1[0] * xfac, c2[0], tol) self.assertAlmostEqual(c1[1] * yfac, c2[1], tol) @no_mysql @no_oracle def test22_translate(self): "Testing the `translate` GeoQuerySet method." 
xfac, yfac = 5, -23 qs = Country.objects.translate(xfac, yfac, model_att='translated') for c in qs: for p1, p2 in zip(c.mpoly, c.translated): for r1, r2 in zip(p1, p2): for c1, c2 in zip(r1.coords, r2.coords): # XXX The low precision is for SpatiaLite self.assertAlmostEqual(c1[0] + xfac, c2[0], 5) self.assertAlmostEqual(c1[1] + yfac, c2[1], 5) @no_mysql def test23_numgeom(self): "Testing the `num_geom` GeoQuerySet method." # Both 'countries' only have two geometries. for c in Country.objects.num_geom(): self.assertEqual(2, c.num_geom) for c in City.objects.filter(point__isnull=False).num_geom(): # Oracle will return 1 for the number of geometries on non-collections, # whereas PostGIS will return None. if postgis: self.assertEqual(None, c.num_geom) else: self.assertEqual(1, c.num_geom) @no_mysql @no_spatialite # SpatiaLite can only count vertices in LineStrings def test24_numpoints(self): "Testing the `num_points` GeoQuerySet method." for c in Country.objects.num_points(): self.assertEqual(c.mpoly.num_points, c.num_points) if not oracle: # Oracle cannot count vertices in Point geometries. for c in City.objects.num_points(): self.assertEqual(1, c.num_points) @no_mysql def test25_geoset(self): "Testing the `difference`, `intersection`, `sym_difference`, and `union` GeoQuerySet methods." geom = Point(5, 23) tol = 1 qs = Country.objects.all().difference(geom).sym_difference(geom).union(geom) # XXX For some reason SpatiaLite does something screwey with the Texas geometry here. Also, # XXX it doesn't like the null intersection. if spatialite: qs = qs.exclude(name='Texas') else: qs = qs.intersection(geom) for c in qs: if oracle: # Should be able to execute the queries; however, they won't be the same # as GEOS (because Oracle doesn't use GEOS internally like PostGIS or # SpatiaLite). 
pass else: self.assertEqual(c.mpoly.difference(geom), c.difference) if not spatialite: self.assertEqual(c.mpoly.intersection(geom), c.intersection) self.assertEqual(c.mpoly.sym_difference(geom), c.sym_difference) self.assertEqual(c.mpoly.union(geom), c.union) @no_mysql def test26_inherited_geofields(self): "Test GeoQuerySet methods on inherited Geometry fields." # Creating a Pennsylvanian city. mansfield = PennsylvaniaCity.objects.create(name='Mansfield', county='Tioga', point='POINT(-77.071445 41.823881)') # All transformation SQL will need to be performed on the # _parent_ table. qs = PennsylvaniaCity.objects.transform(32128) self.assertEqual(1, qs.count()) for pc in qs: self.assertEqual(32128, pc.point.srid) @no_mysql @no_oracle @no_spatialite def test27_snap_to_grid(self): "Testing GeoQuerySet.snap_to_grid()." # Let's try and break snap_to_grid() with bad combinations of arguments. for bad_args in ((), range(3), range(5)): self.assertRaises(ValueError, Country.objects.snap_to_grid, *bad_args) for bad_args in (('1.0',), (1.0, None), tuple(map(unicode, range(4)))): self.assertRaises(TypeError, Country.objects.snap_to_grid, *bad_args) # Boundary for San Marino, courtesy of Bjorn Sandvik of thematicmapping.org # from the world borders dataset he provides. wkt = ('MULTIPOLYGON(((12.41580 43.95795,12.45055 43.97972,12.45389 43.98167,' '12.46250 43.98472,12.47167 43.98694,12.49278 43.98917,' '12.50555 43.98861,12.51000 43.98694,12.51028 43.98277,' '12.51167 43.94333,12.51056 43.93916,12.49639 43.92333,' '12.49500 43.91472,12.48778 43.90583,12.47444 43.89722,' '12.46472 43.89555,12.45917 43.89611,12.41639 43.90472,' '12.41222 43.90610,12.40782 43.91366,12.40389 43.92667,' '12.40500 43.94833,12.40889 43.95499,12.41580 43.95795)))') sm = Country.objects.create(name='San Marino', mpoly=fromstr(wkt)) # Because floating-point arithmitic isn't exact, we set a tolerance # to pass into GEOS `equals_exact`. 
tol = 0.000000001 # SELECT AsText(ST_SnapToGrid("geoapp_country"."mpoly", 0.1)) FROM "geoapp_country" WHERE "geoapp_country"."name" = 'San Marino'; ref = fromstr('MULTIPOLYGON(((12.4 44,12.5 44,12.5 43.9,12.4 43.9,12.4 44)))') self.failUnless(ref.equals_exact(Country.objects.snap_to_grid(0.1).get(name='San Marino').snap_to_grid, tol)) # SELECT AsText(ST_SnapToGrid("geoapp_country"."mpoly", 0.05, 0.23)) FROM "geoapp_country" WHERE "geoapp_country"."name" = 'San Marino'; ref = fromstr('MULTIPOLYGON(((12.4 43.93,12.45 43.93,12.5 43.93,12.45 43.93,12.4 43.93)))') self.failUnless(ref.equals_exact(Country.objects.snap_to_grid(0.05, 0.23).get(name='San Marino').snap_to_grid, tol)) # SELECT AsText(ST_SnapToGrid("geoapp_country"."mpoly", 0.5, 0.17, 0.05, 0.23)) FROM "geoapp_country" WHERE "geoapp_country"."name" = 'San Marino'; ref = fromstr('MULTIPOLYGON(((12.4 43.87,12.45 43.87,12.45 44.1,12.5 44.1,12.5 43.87,12.45 43.87,12.4 43.87)))') self.failUnless(ref.equals_exact(Country.objects.snap_to_grid(0.05, 0.23, 0.5, 0.17).get(name='San Marino').snap_to_grid, tol)) @no_mysql @no_spatialite def test28_reverse(self): "Testing GeoQuerySet.reverse_geom()." coords = [ (-95.363151, 29.763374), (-95.448601, 29.713803) ] Track.objects.create(name='Foo', line=LineString(coords)) t = Track.objects.reverse_geom().get(name='Foo') coords.reverse() self.assertEqual(tuple(coords), t.reverse_geom.coords) if oracle: self.assertRaises(TypeError, State.objects.reverse_geom) @no_mysql @no_oracle @no_spatialite def test29_force_rhr(self): "Testing GeoQuerySet.force_rhr()." rings = ( ( (0, 0), (5, 0), (0, 5), (0, 0) ), ( (1, 1), (1, 3), (3, 1), (1, 1) ), ) rhr_rings = ( ( (0, 0), (0, 5), (5, 0), (0, 0) ), ( (1, 1), (3, 1), (1, 3), (1, 1) ), ) State.objects.create(name='Foo', poly=Polygon(*rings)) s = State.objects.force_rhr().get(name='Foo') self.assertEqual(rhr_rings, s.force_rhr.coords) @no_mysql @no_oracle @no_spatialite def test29_force_rhr(self): "Testing GeoQuerySet.geohash()." 
if not connection.ops.geohash: return # Reference query: # SELECT ST_GeoHash(point) FROM geoapp_city WHERE name='Houston'; # SELECT ST_GeoHash(point, 5) FROM geoapp_city WHERE name='Houston'; ref_hash = '9vk1mfq8jx0c8e0386z6' h1 = City.objects.geohash().get(name='Houston') h2 = City.objects.geohash(precision=5).get(name='Houston') self.assertEqual(ref_hash, h1.geohash) self.assertEqual(ref_hash[:5], h2.geohash) from test_feeds import GeoFeedTest from test_regress import GeoRegressionTests from test_sitemaps import GeoSitemapTest def suite(): s = unittest.TestSuite() s.addTest(unittest.makeSuite(GeoModelTest)) s.addTest(unittest.makeSuite(GeoFeedTest)) s.addTest(unittest.makeSuite(GeoSitemapTest)) s.addTest(unittest.makeSuite(GeoRegressionTests)) return s
Addepar/buck
refs/heads/master
python-dsl/buck_parser/deterministic_set.py
5
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


class DeterministicSet(set):
    """
    Set-like data structure with deterministic iteration order.

    In addition to set operations it also adds ability to use '+' operator
    for joining two depsets and 'to_list' for convenient conversion to list.
    """

    def __init__(self, elements=None):
        """Create a depset from an optional iterable of elements."""
        set.__init__(self, elements or [])

    def __iter__(self):
        # make the order deterministic by sorting the underlying set.
        # Technically there are more efficient ways to implement this, but
        # this one is the easiest one :)
        for element in sorted(set.__iter__(self)):
            yield element

    def to_list(self):
        """Converts this depset into a deterministically ordered list."""
        return sorted(self)

    def __add__(self, other):
        """Joins two depsets into a single one."""
        # BUG FIX: ``set.union`` on a subclass returns a plain ``set``, so the
        # previous ``self.union(other)`` silently dropped the deterministic
        # ordering and the ``to_list``/``+`` API from the result. Re-wrap the
        # union so callers get a DeterministicSet back.
        return DeterministicSet(set.union(self, other))
grehx/spark-tk
refs/heads/master
regression-tests/sparktkregtests/testcases/frames/lda_groupby_flow_test.py
1
# vim: set encoding=utf-8 # Copyright (c) 2016 Intel Corporation  # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # #       http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Sample LDA/Groupby example""" import unittest from sparktkregtests.lib import sparktk_test import numpy class LDAExample(sparktk_test.SparkTKTestCase): def test_lda_example(self): """LDA demo from examples directory""" # this is a full worked example of lda and groupby # with known correct values data = [['nytimes', 'harry', 3], ['nytimes', 'economy', 35], ['nytimes', 'jobs', 40], ['nytimes', 'magic', 1], ['nytimes', 'realestate', 15], ['nytimes', 'movies', 6], ['economist', 'economy', 50], ['economist', 'jobs', 35], ['economist', 'realestate', 20], ['economist', 'movies', 1], ['economist', 'harry', 1], ['economist', 'magic', 1], ['harrypotter', 'harry', 40], ['harrypotter', 'magic', 30], ['harrypotter', 'chamber', 20], ['harrypotter', 'secrets', 30]] frame = self.context.frame.create( data, schema=[('doc_id', str), ('word_id', str), ('word_count', long)]) model = self.context.models.clustering.lda.train( frame, "doc_id", "word_id", "word_count", max_iterations=3, num_topics=2) doc_results = model.topics_given_doc_frame word_results = model.word_given_topics_frame doc_results.rename_columns({'topic_probabilities': 'lda_results_doc'}) word_results.rename_columns( {'topic_probabilities': 'lda_results_word'}) frame = frame.join_left( doc_results, left_on="doc_id", right_on="doc_id") frame = frame.join_left( word_results, left_on="word_id", right_on="word_id") # 
similar to calling predict on a model frame.dot_product( ['lda_results_doc'], ['lda_results_word'], 'lda_score') word_hist = frame.histogram('word_count', 4) lda_hist = frame.histogram('lda_score', 2) group_frame = frame.group_by( 'word_id_L', {'word_count': self.context.agg.histogram( cutoffs=word_hist.cutoffs, include_lowest=True, strict_binning=False), 'lda_score': self.context.agg.histogram(lda_hist.cutoffs)}) pandas = group_frame.to_pandas() for (index, row) in pandas.iterrows(): if str(row["word_id_L"]) == "magic": numpy.testing.assert_equal( list(row["word_count_HISTOGRAM"]), [float(2.0/3.0), 0, float(1.0/3.0), 0]) if __name__ == "__main__": unittest.main()
Raynxxx/CUIT-ACM-Website
refs/heads/master
view/__init__.py
1
__author__ = 'zhuzhiying' from flask import blueprints from flask import Flask, render_template, flash, request, redirect, url_for, abort, send_from_directory from flask.ext.login import LoginManager, login_user, login_required, logout_user, current_user from flask.ext.mail import Mail, Message from flask.ext.excel import make_response_from_array import pyexcel_xls from config import * mail = Mail()
shadowsocks/ChinaDNS
refs/heads/master
tests/test.py
165
#!/usr/bin/python # -*- coding: utf-8 -*- import sys import os import signal import time import argparse from subprocess import Popen parser = argparse.ArgumentParser(description='test ChinaDNS') parser.add_argument('-a', '--arguments', type=str, default=[]) parser.add_argument('-t', '--test-command', type=str, default=None) config = parser.parse_args() arguments = config.arguments chinadns = ['src/chinadns', '-p', '15353', '-v'] + arguments.split() print chinadns p1 = Popen(chinadns, shell=False, bufsize=0, close_fds=True) try: with open(config.test_command) as f: dig_cmd = f.read() time.sleep(1) p2 = Popen(dig_cmd.split() + ['-p', '15353'], shell=False, bufsize=0, close_fds=True) if p2 is not None: r = p2.wait() if r == 0: print 'test passed' sys.exit(r) finally: for p in [p1]: try: os.kill(p.pid, signal.SIGTERM) os.waitpid(p.pid, 0) except OSError: pass
mdaniel/intellij-community
refs/heads/master
python/helpers/py2only/docutils/parsers/rst/languages/gl.py
130
# -*- coding: utf-8 -*- # Author: David Goodger # Contact: goodger@users.sourceforge.net # Revision: $Revision: 4229 $ # Date: $Date: 2005-12-23 00:46:16 +0100 (Fri, 23 Dec 2005) $ # Copyright: This module has been placed in the public domain. # New language mappings are welcome. Before doing a new translation, please # read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be # translated for each language: one in docutils/languages, the other in # docutils/parsers/rst/languages. """ Galician-language mappings for language-dependent features of reStructuredText. """ __docformat__ = 'reStructuredText' directives = { # language-dependent: fixed u'atenci\u00f3n': 'attention', u'advertencia': 'caution', u'code (translation required)': 'code', u'perigo': 'danger', u'erro': 'error', u'pista': 'hint', u'importante': 'important', u'nota': 'note', u'consello': 'tip', u'aviso': 'warning', u'admonici\u00f3n': 'admonition', u'barra lateral': 'sidebar', u't\u00f3pico': 'topic', u'bloque-li\u00f1a': 'line-block', u'literal-analizado': 'parsed-literal', u'r\u00fabrica': 'rubric', u'ep\u00edgrafe': 'epigraph', u'realzados': 'highlights', u'coller-citaci\u00f3n': 'pull-quote', u'compor': 'compound', u'recipiente': 'container', #'questions': 'questions', u't\u00e1boa': 'table', u't\u00e1boa-csv': 'csv-table', u't\u00e1boa-listaxe': 'list-table', #'qa': 'questions', #'faq': 'questions', u'meta': 'meta', 'math (translation required)': 'math', #'imagemap': 'imagemap', u'imaxe': 'image', u'figura': 'figure', u'inclu\u00edr': 'include', u'cru': 'raw', u'substitu\u00edr': 'replace', u'unicode': 'unicode', u'data': 'date', u'clase': 'class', u'regra': 'role', u'regra-predeterminada': 'default-role', u't\u00edtulo': 'title', u'contido': 'contents', u'seccnum': 'sectnum', u'secci\u00f3n-numerar': 'sectnum', u'cabeceira': 'header', u'p\u00e9 de p\u00e1xina': 'footer', #'footnotes': 'footnotes', #'citations': 'citations', u'notas-destino': 'target-notes', u'texto 
restruturado-proba-directiva': 'restructuredtext-test-directive'} """Galician name to registered (in directives/__init__.py) directive name mapping.""" roles = { # language-dependent: fixed u'abreviatura': 'abbreviation', u'ab': 'abbreviation', u'acr\u00f3nimo': 'acronym', u'ac': 'acronym', u'code (translation required)': 'code', u'\u00edndice': 'index', u'i': 'index', u'sub\u00edndice': 'subscript', u'sub': 'subscript', u'super\u00edndice': 'superscript', u'sup': 'superscript', u'referencia t\u00edtulo': 'title-reference', u't\u00edtulo': 'title-reference', u't': 'title-reference', u'referencia-pep': 'pep-reference', u'pep': 'pep-reference', u'referencia-rfc': 'rfc-reference', u'rfc': 'rfc-reference', u'\u00e9nfase': 'emphasis', u'forte': 'strong', u'literal': 'literal', 'math (translation required)': 'math', u'referencia-nome': 'named-reference', u'referencia-an\u00f3nimo': 'anonymous-reference', u'referencia-nota ao p\u00e9': 'footnote-reference', u'referencia-citaci\u00f3n': 'citation-reference', u'referencia-substituci\u00f3n': 'substitution-reference', u'destino': 'target', u'referencia-uri': 'uri-reference', u'uri': 'uri-reference', u'url': 'uri-reference', u'cru': 'raw',} """Mapping of Galician role names to canonical role names for interpreted text. """
pong3489/TEST_Mission
refs/heads/master
Lib/UserDict.py
83
"""A more or less complete user-defined wrapper around dictionary objects.""" class UserDict: def __init__(self, dict=None, **kwargs): self.data = {} if dict is not None: self.update(dict) if len(kwargs): self.update(kwargs) def __repr__(self): return repr(self.data) def __cmp__(self, dict): if isinstance(dict, UserDict): return cmp(self.data, dict.data) else: return cmp(self.data, dict) __hash__ = None # Avoid Py3k warning def __len__(self): return len(self.data) def __getitem__(self, key): if key in self.data: return self.data[key] if hasattr(self.__class__, "__missing__"): return self.__class__.__missing__(self, key) raise KeyError(key) def __setitem__(self, key, item): self.data[key] = item def __delitem__(self, key): del self.data[key] def clear(self): self.data.clear() def copy(self): if self.__class__ is UserDict: return UserDict(self.data.copy()) import copy data = self.data try: self.data = {} c = copy.copy(self) finally: self.data = data c.update(self) return c def keys(self): return self.data.keys() def items(self): return self.data.items() def iteritems(self): return self.data.iteritems() def iterkeys(self): return self.data.iterkeys() def itervalues(self): return self.data.itervalues() def values(self): return self.data.values() def has_key(self, key): return key in self.data def update(self, dict=None, **kwargs): if dict is None: pass elif isinstance(dict, UserDict): self.data.update(dict.data) elif isinstance(dict, type({})) or not hasattr(dict, 'items'): self.data.update(dict) else: for k, v in dict.items(): self[k] = v if len(kwargs): self.data.update(kwargs) def get(self, key, failobj=None): if key not in self: return failobj return self[key] def setdefault(self, key, failobj=None): if key not in self: self[key] = failobj return self[key] def pop(self, key, *args): return self.data.pop(key, *args) def popitem(self): return self.data.popitem() def __contains__(self, key): return key in self.data @classmethod def fromkeys(cls, iterable, value=None): 
d = cls() for key in iterable: d[key] = value return d class IterableUserDict(UserDict): def __iter__(self): return iter(self.data) import _abcoll _abcoll.MutableMapping.register(IterableUserDict) class DictMixin: # Mixin defining all dictionary methods for classes that already have # a minimum dictionary interface including getitem, setitem, delitem, # and keys. Without knowledge of the subclass constructor, the mixin # does not define __init__() or copy(). In addition to the four base # methods, progressively more efficiency comes with defining # __contains__(), __iter__(), and iteritems(). # second level definitions support higher levels def __iter__(self): for k in self.keys(): yield k def has_key(self, key): try: self[key] except KeyError: return False return True def __contains__(self, key): return self.has_key(key) # third level takes advantage of second level definitions def iteritems(self): for k in self: yield (k, self[k]) def iterkeys(self): return self.__iter__() # fourth level uses definitions from lower levels def itervalues(self): for _, v in self.iteritems(): yield v def values(self): return [v for _, v in self.iteritems()] def items(self): return list(self.iteritems()) def clear(self): for key in self.keys(): del self[key] def setdefault(self, key, default=None): try: return self[key] except KeyError: self[key] = default return default def pop(self, key, *args): if len(args) > 1: raise TypeError, "pop expected at most 2 arguments, got "\ + repr(1 + len(args)) try: value = self[key] except KeyError: if args: return args[0] raise del self[key] return value def popitem(self): try: k, v = self.iteritems().next() except StopIteration: raise KeyError, 'container is empty' del self[k] return (k, v) def update(self, other=None, **kwargs): # Make progressively weaker assumptions about "other" if other is None: pass elif hasattr(other, 'iteritems'): # iteritems saves memory and lookups for k, v in other.iteritems(): self[k] = v elif hasattr(other, 'keys'): 
for k in other.keys(): self[k] = other[k] else: for k, v in other: self[k] = v if kwargs: self.update(kwargs) def get(self, key, default=None): try: return self[key] except KeyError: return default def __repr__(self): return repr(dict(self.iteritems())) def __cmp__(self, other): if other is None: return 1 if isinstance(other, DictMixin): other = dict(other.iteritems()) return cmp(dict(self.iteritems()), other) def __len__(self): return len(self.keys())
akatsoulas/mozillians
refs/heads/master
mozillians/users/migrations/0030_auto_20180404_0405.py
3
# -*- coding: utf-8 -*- # Generated by Django 1.11.11 on 2018-04-04 11:05 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('users', '0029_remove_userprofile_privacy_idp_profile'), ] operations = [ migrations.AlterField( model_name='idpprofile', name='type', field=models.IntegerField(choices=[(0, b'Unknown Provider'), (10, b'Passwordless Provider'), (20, b'Google Provider'), (30, b'Github Provider'), (31, b'Firefox Accounts Provider'), (40, b'LDAP Provider')], default=None, null=True), ), ]
abergeron/pylearn2
refs/heads/master
pylearn2/sandbox/lisa_rl/bandit/gaussian_bandit.py
49
__author__ = "Ian Goodfellow"

import numpy as np

from theano import config
from theano import function
from theano import tensor as T

from pylearn2.sandbox.lisa_rl.bandit.environment import Environment
from pylearn2.utils import sharedX
from pylearn2.utils.rng import make_np_rng, make_theano_rng


class GaussianBandit(Environment):
    """
    An n-armed bandit whose rewards are drawn from a different Gaussian
    distribution for each arm.

    The mean and standard deviation of the reward for each arm is drawn at
    initialization time from N(0, <corresponding std arg>). (For the
    standard deviation we use the absolute value of the Gaussian sample)

    Parameters
    ----------
    num_arms : int
        Number of arms (reward distributions) in the bandit.
    mean_std : float, optional
        Std of the zero-mean Gaussian that the per-arm reward means are
        sampled from.
    std_std : float, optional
        Std of the zero-mean Gaussian whose absolute-value samples become
        the per-arm reward standard deviations.
    """

    def __init__(self, num_arms, mean_std = 1.0, std_std = 1.0):
        # Fixed seed ([2013, 11, 12]) so the sampled arm parameters are
        # reproducible across runs.
        self.rng = make_np_rng(None, [2013, 11, 12], which_method="randn")
        # Per-arm reward means, stored as theano shared variables.
        self.means = sharedX(self.rng.randn(num_arms) * mean_std)
        # Per-arm reward stds; abs() keeps them non-negative.
        self.stds = sharedX(np.abs(self.rng.randn(num_arms) * std_std))
        # Theano RNG used to draw the stochastic rewards, seeded from the
        # numpy RNG so the whole environment derives from one seed.
        self.theano_rng = make_theano_rng(None, self.rng.randint(2 ** 16),
                which_method="normal")

    def get_action_func(self):
        """
        Returns a theano function that takes an action and returns a reward.

        The compiled function maps an arm index (int scalar) to a single
        stochastic reward sampled from that arm's Gaussian.
        """
        action = T.iscalar()
        reward_mean = self.means[action]
        reward_std = self.stds[action]
        # One sample from N(reward_mean, reward_std), in the configured
        # float dtype.
        reward = self.theano_rng.normal(avg=reward_mean, std=reward_std,
                dtype=config.floatX, size=reward_mean.shape)
        rval = function([action], reward)
        return rval
mahak/keystone
refs/heads/master
keystone/tests/unit/tests/test_utils.py
26
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from testtools import matchers from testtools import testcase from keystone.tests.unit import utils class TestWipDecorator(testcase.TestCase): def test_raises_SkipError_when_broken_test_fails(self): @utils.wip('waiting on bug #000000') def test(): raise Exception('i expected a failure - this is a WIP') e = self.assertRaises(testcase.TestSkipped, test) self.assertThat(str(e), matchers.Contains('#000000')) def test_raises_AssertionError_when_test_passes(self): @utils.wip('waiting on bug #000000') def test(): pass # literally e = self.assertRaises(AssertionError, test) self.assertThat(str(e), matchers.Contains('#000000'))
sergiopasra/numina
refs/heads/master
numina/tests/test_datamodel.py
3
import datetime import astropy.io.fits as fits import numpy import numina.types.qc as qc from ..datamodel import DataModel def create_test_data(): return numpy.ones((10,10), dtype='int32') def create_test_image(hdr=None): data = create_test_data() hdu = fits.PrimaryHDU(data) hdr = {} if hdr is None else hdr for key, val in hdr.items(): hdu.header[key] = val img = fits.HDUList([hdu]) return img def test_datamodel1(): datamodel = DataModel() assert datamodel.name == 'UNKNOWN' datamodel = DataModel('CLODIA') assert datamodel.name == 'CLODIA' def test_datamodel2(): img = create_test_image() testdata = create_test_data() datamodel = DataModel('CLODIA') data = datamodel.get_data(img) assert numpy.allclose(data, testdata) def test_qc(): img = create_test_image() datamodel = DataModel('CLODIA') qcontrol = datamodel.get_quality_control(img) assert qcontrol == qc.QC.UNKNOWN def test_imgid(): CHECKSUM = 'RfAdUd2cRd9cRd9c' hdr = {'CHECKSUM': CHECKSUM} img = create_test_image(hdr) datamodel = DataModel('CLODIA') imgid_chsum = datamodel.get_imgid(img) assert imgid_chsum == CHECKSUM def test_ginfo(): CHECKSUM = 'RfAdUd2cRd9cRd9c' uuid_str = 'b2f3d815-6f59-48e3-bea1-4d1ea1a3abc1' hdr = { 'CHECKSUM': CHECKSUM, 'instrume': 'CLODIA', 'object': '', 'obsmode': 'TEST', 'numtype': 'test_img', 'exptime': 560, 'darktime': 573, 'uuid': uuid_str, 'DATE-OBS': '1975-03-31T12:23:45.00', 'blckuuid': 1, 'insconf': 'v1' } date_obs = datetime.datetime(1975, 3, 31, 12, 23, 45) ref = { 'instrument': 'CLODIA', 'object': '', 'n_ext': 1, 'name_ext': ['PRIMARY'], 'quality_control': qc.QC.UNKNOWN, 'mode': 'TEST', 'type': 'test_img', 'exptime': 560, 'darktime': 573, 'uuid': uuid_str, 'observation_date': date_obs, 'blckuuid': '1', 'block_uuid': 1, 'imgid': uuid_str, 'insconf': 'v1', 'insconf_uuid': 'v1' } img = create_test_image(hdr) datamodel = DataModel('CLODIA') imgid_chsum = datamodel.gather_info_hdu(img) print(imgid_chsum) #assert False assert imgid_chsum == ref
campbe13/openhatch
refs/heads/master
vendor/packages/twisted/doc/historic/2003/pycon/deferex/deferex-listing1.py
20
def prettyRequest(server, requestName):
    """Issue *requestName* against *server* and attach display handlers.

    On success the deferred's result becomes the comma-joined elements of
    ``result.asList()``; on failure the failure's traceback is printed via
    ``failure.printTraceback()``.  Returns the resulting deferred.
    """
    def _format_result(result):
        return ', '.join(result.asList())

    def _report_failure(failure):
        return failure.printTraceback()

    deferred = server.makeRequest(requestName)
    deferred = deferred.addCallback(_format_result)
    return deferred.addErrback(_report_failure)
sliz1/servo
refs/heads/master
tests/wpt/web-platform-tests/tools/html5lib/html5lib/tests/test_sanitizer.py
430
from __future__ import absolute_import, division, unicode_literals try: import json except ImportError: import simplejson as json from html5lib import html5parser, sanitizer, constants, treebuilders def toxmlFactory(): tree = treebuilders.getTreeBuilder("etree") def toxml(element): # encode/decode roundtrip required for Python 2.6 compatibility result_bytes = tree.implementation.tostring(element, encoding="utf-8") return result_bytes.decode("utf-8") return toxml def runSanitizerTest(name, expected, input, toxml=None): if toxml is None: toxml = toxmlFactory() expected = ''.join([toxml(token) for token in html5parser.HTMLParser(). parseFragment(expected)]) expected = json.loads(json.dumps(expected)) assert expected == sanitize_html(input) def sanitize_html(stream, toxml=None): if toxml is None: toxml = toxmlFactory() return ''.join([toxml(token) for token in html5parser.HTMLParser(tokenizer=sanitizer.HTMLSanitizer). parseFragment(stream)]) def test_should_handle_astral_plane_characters(): assert '<html:p xmlns:html="http://www.w3.org/1999/xhtml">\U0001d4b5 \U0001d538</html:p>' == sanitize_html("<p>&#x1d4b5; &#x1d538;</p>") def test_sanitizer(): toxml = toxmlFactory() for tag_name in sanitizer.HTMLSanitizer.allowed_elements: if tag_name in ['caption', 'col', 'colgroup', 'optgroup', 'option', 'table', 'tbody', 'td', 'tfoot', 'th', 'thead', 'tr']: continue # TODO if tag_name != tag_name.lower(): continue # TODO if tag_name == 'image': yield (runSanitizerTest, "test_should_allow_%s_tag" % tag_name, "<img title=\"1\"/>foo &lt;bad&gt;bar&lt;/bad&gt; baz", "<%s title='1'>foo <bad>bar</bad> baz</%s>" % (tag_name, tag_name), toxml) elif tag_name == 'br': yield (runSanitizerTest, "test_should_allow_%s_tag" % tag_name, "<br title=\"1\"/>foo &lt;bad&gt;bar&lt;/bad&gt; baz<br/>", "<%s title='1'>foo <bad>bar</bad> baz</%s>" % (tag_name, tag_name), toxml) elif tag_name in constants.voidElements: yield (runSanitizerTest, "test_should_allow_%s_tag" % tag_name, "<%s title=\"1\"/>foo 
&lt;bad&gt;bar&lt;/bad&gt; baz" % tag_name, "<%s title='1'>foo <bad>bar</bad> baz</%s>" % (tag_name, tag_name), toxml) else: yield (runSanitizerTest, "test_should_allow_%s_tag" % tag_name, "<%s title=\"1\">foo &lt;bad&gt;bar&lt;/bad&gt; baz</%s>" % (tag_name, tag_name), "<%s title='1'>foo <bad>bar</bad> baz</%s>" % (tag_name, tag_name), toxml) for tag_name in sanitizer.HTMLSanitizer.allowed_elements: tag_name = tag_name.upper() yield (runSanitizerTest, "test_should_forbid_%s_tag" % tag_name, "&lt;%s title=\"1\"&gt;foo &lt;bad&gt;bar&lt;/bad&gt; baz&lt;/%s&gt;" % (tag_name, tag_name), "<%s title='1'>foo <bad>bar</bad> baz</%s>" % (tag_name, tag_name), toxml) for attribute_name in sanitizer.HTMLSanitizer.allowed_attributes: if attribute_name != attribute_name.lower(): continue # TODO if attribute_name == 'style': continue yield (runSanitizerTest, "test_should_allow_%s_attribute" % attribute_name, "<p %s=\"foo\">foo &lt;bad&gt;bar&lt;/bad&gt; baz</p>" % attribute_name, "<p %s='foo'>foo <bad>bar</bad> baz</p>" % attribute_name, toxml) for attribute_name in sanitizer.HTMLSanitizer.allowed_attributes: attribute_name = attribute_name.upper() yield (runSanitizerTest, "test_should_forbid_%s_attribute" % attribute_name, "<p>foo &lt;bad&gt;bar&lt;/bad&gt; baz</p>", "<p %s='display: none;'>foo <bad>bar</bad> baz</p>" % attribute_name, toxml) for protocol in sanitizer.HTMLSanitizer.allowed_protocols: yield (runSanitizerTest, "test_should_allow_%s_uris" % protocol, "<a href=\"%s\">foo</a>" % protocol, """<a href="%s">foo</a>""" % protocol, toxml) for protocol in sanitizer.HTMLSanitizer.allowed_protocols: yield (runSanitizerTest, "test_should_allow_uppercase_%s_uris" % protocol, "<a href=\"%s\">foo</a>" % protocol, """<a href="%s">foo</a>""" % protocol, toxml)
petabricks/petabricks
refs/heads/master
scripts/learningcompiler.py
1
#!/usr/bin/env python """This script compiles multiple instances of a program trying out different heuristics, and storing in the database the best one that is found""" import sys import os import shutil import sqlite3 import random import xml.dom.minidom import re import pbutil import tunerwarnings import maximaparser from candidatetester import Candidate from xml.sax.saxutils import escape from sgatuner import autotune from tunerconfig import config #--------- Config ------------------ conf_deleteTempDir = True conf_minTrialNumber = 10 conf_probabilityExploration = 0.7 conf_pickBestN = 3 #--------- Autotuner config -------- config.max_time=30 #Seconds #----------------------------------- class HeuristicDB: def __init__(self): #Open DB try: self.__db = sqlite3.connect(self.computeDBPath()) except: self.__db = sqlite3.connect(":memory:") self.__createTables() self.__bestNCache= dict() def __createTable(self, name, params): cur = self.__db.cursor() query = "CREATE TABLE IF NOT EXISTS '"+name+"' "+params cur.execute(query) cur.close() self.__db.commit() def __createTables(self): self.__createTable("HeuristicKind", "('ID' INTEGER PRIMARY KEY AUTOINCREMENT, " "'name' TEXT UNIQUE)") self.__createTable("Heuristic", "('kindID' INTEGER, 'formula' TEXT, " "'useCount' INTEGER, 'score' FLOAT," "PRIMARY KEY (kindID, formula), " "FOREIGN KEY ('kindID') REFERENCES 'HeuristicKind' ('ID')" "ON DELETE CASCADE ON UPDATE CASCADE)") #TODO:self.__createTable("InSet", "('setID' INTEGER, 'heuristicID' INTEGER)" def computeDBPath(self): #TODO: make the path more flexible dbPath= os.path.expanduser(config.output_dir+"/knowledge.db") return dbPath def getHeuristicKindID(self, kindName): cur = self.__db.cursor() query = "SELECT ID From HeuristicKind WHERE name='"+kindName+"'" cur.execute(query) kindID = cur.fetchone()[0] cur.close() return kindID def storeHeuristicKind(self, kindName): cur = self.__db.cursor() query = "INSERT OR IGNORE INTO HeuristicKind ('name') VALUES ('"+kindName+"')" 
cur.execute(query) cur.close() self.__db.commit() return self.getHeuristicKindID(kindName) def increaseHeuristicScore(self, name, formula, score): kindID=self.storeHeuristicKind(name) cur = self.__db.cursor() query = "UPDATE Heuristic SET score=score+? WHERE kindID=? AND formula=?" cur.execute(query, (score, kindID, formula)) if cur.rowcount == 0: #There was no such heuristic in the DB: probably it was taken from the defaults query = "INSERT INTO Heuristic (kindID, formula, useCount, score) VALUES (?, ?, 1, ?)" cur.execute(query, (kindID, formula, score)) cur.close() self.__db.commit() def increaseHeuristicUseCount(self, name, formula): kindID=self.storeHeuristicKind(name) cur = self.__db.cursor() query = "UPDATE Heuristic SET useCount=useCount+1 WHERE kindID=? AND formula=?" cur.execute(query, (kindID, formula)) if cur.rowcount == 0: #There was no such heuristic in the DB: let's add it query = "INSERT INTO Heuristic (kindID, formula, useCount, score) VALUES (?, ?, 1, 0)" cur.execute(query, (kindID, formula)) cur.close() self.__db.commit() def increaseScore(self, hSet, score): """Mark a set of heuristics as selected as the best one for an executable""" #TODO: also store it as a set for name, formula in hSet.iteritems(): self.increaseHeuristicScore(name, formula, score) def markAsUsed(self, hSet): """Mark a set of heuristics as used for generating a candidate executable""" #TODO: also store it as a set for name, formula in hSet.iteritems(): self.increaseHeuristicUseCount(name, formula) def getBestNHeuristics(self, name, N): try: cached = self.__bestNCache[name] return cached except: #Not in the cache #Fall back to accessing the db pass cur = self.__db.cursor() query = "SELECT formula FROM Heuristic JOIN HeuristicKind ON Heuristic.kindID=HeuristicKind.ID WHERE HeuristicKind.name=? ORDER BY Heuristic.score/Heuristic.useCount DESC LIMIT ?" 
cur.execute(query, (name, N)) result = [row[0] for row in cur.fetchall()] cur.close() self.__bestNCache[name]=result return result class HeuristicSet(dict): def toXmlStrings(self): return ["<heuristic name=\""+name+"\" formula=\""+escape(self[name])+"\" />" for name in self] def toXmlFile(self, filename): outfile = open(filename, "w") outfile.write("<heuristics>\n") for xmlstring in self.toXmlStrings(): outfile.write("\t") outfile.write(xmlstring) outfile.write("\n") outfile.write("</heuristics>\n") outfile.close() def importFromXml(self, xmlDOM): for heuristicXML in xmlDOM.getElementsByTagName("heuristic"): name=heuristicXML.getAttribute("name") formula=heuristicXML.getAttribute("formula") self[name] = formula def complete(self, heuristicNames, db, N): """Complete the sets using the given db, so that it contains all the heuristics specified in the heuristicNames list. Every missing heuristic is completed with one randomly taken from the best N heuristics in the database """ #Find the missing heuristics missingHeuristics = list(heuristicNames) for name in self: try: missingHeuristics.remove(name) except ValueError: #A heuristic could be in the input file, but useless, therefore not in #the missingHeuristic list pass #Complete the set for heuristic in missingHeuristics: bestN=db.getBestNHeuristics(heuristic, N) if len(bestN) == 0: #No such heuristic in the DB. Do not complete the set #This is not a problem. 
It's probably a new heuristic: #just ignore it and it will fall back on the default implemented #into the compiler continue formula=random.choice(bestN) if random.random() < conf_probabilityExploration: #Generete a new formula by modifying the existing one formulaObj = maximaparser.parse(formula) formulaObj.evolveValue() formula = str(formulaObj) self[heuristic] = formula class HeuristicManager: """Manages sets of heuristics stored in a file with the following format: <heuristics> <set> <heuristic name="heuristicName" formula="a+b+c" /> <heuristic name="heuristicName2" formula="a+b+d" /> </set> <set> <heuristic name="heuristicName3" formula="x+y*z" /> <heuristic name="heuristicName4" formula="a+g+s" /> </set> </heuristics> """ def __init__(self, heuristicSetFileName=None): self.__heuristicSets = [] if heuristicSetFileName is not None: self.__xml = xml.dom.minidom.parse(heuristicSetFileName) # Extract information for hSet in self.__xml.getElementsByTagName("set"): self.__heuristicSets.append(self.__parseHeuristicSet(hSet)) def __parseHeuristicSet(self, hSetXML): """Parses a xml heuristic set returning it as a list of pairs name-formula""" hSet = HeuristicSet() hSet.importFromXml(hSetXML) return hSet def heuristicSet(self, i): """Get the i-th heuristic set""" return self.__heuristicSets[i] def allHeuristicSets(self): return self.__heuristicSets def candidateKey(candidate): """Generates a comparison key for a candidate. 
Candidates are sorted by the number of dimensions (the highest, the better), then by average execution time of the biggest dimension (the lower the better)""" if candidate is None: return (float('inf'), float('inf')) numDimensions = len(candidate.metrics[0]) executionTime = candidate.metrics[0][2**(numDimensions-1)].mean() return (1/numDimensions, executionTime) class CandidateList(list): def addOriginalIndex(self): count = 0 for candidate in self: if candidate is None: continue candidate.originalIndex = count; count = count + 1 def sortBySpeed(self): """Adds the "score" and "originalIndex" attributes to every candidate. Also, sorts the list by score""" self.sort(key=candidateKey) class LearningCompiler: def __init__(self, pbcExe, heuristicSetFileName=None, jobs=None): self.__heuristicManager = HeuristicManager(heuristicSetFileName) self.__minTrialNumber = conf_minTrialNumber self.__pbcExe = pbcExe self.__jobs=jobs self.__db = HeuristicDB() random.seed() def storeCandidatesDataInDB(self, candidates, basesubdir, basename): """Store data from all the info file, with score. 
The candidates should already be ordered (from the best to the worst) and with the originalIndex field added""" numCandidates = len(candidates) count=0 for candidate in candidates: infoFile=os.path.join(basesubdir, str(candidate.originalIndex), basename+".info") score = (numCandidates - count) / float(numCandidates) #Take the data about the used heuristics scores and store it into the DB infoxml = xml.dom.minidom.parse(infoFile) hSet = HeuristicSet() hSet.importFromXml(infoxml) self.__db.increaseScore(hSet, score) self.__db.markAsUsed(hSet) count = count +1 def compileLearningHeuristics(self, benchmark, finalBinary=None): #Define file names path, basenameExt = os.path.split(benchmark) if path == "": path="./" basename, ext = os.path.splitext(basenameExt) basesubdir=os.path.join(path,basename+".tmp") #Init variables candidates=CandidateList() #Compile with current best heuristics outDir = os.path.join(basesubdir, "0") if not os.path.isdir(outDir): #Create the output directory os.makedirs(outDir) binary= os.path.join(outDir, basename) status=pbutil.compileBenchmark(self.__pbcExe, benchmark, binary=binary, jobs=self.__jobs) if status != 0: return status try: autotune(binary, candidates) except tunerwarnings.AlwaysCrashes: print "Current best Candidate always crashes!" #Add an empty entry for the candidate candidates.append(None) #Get the full set of heuristics used infoFile=binary+".info" currentBestHSet = HeuristicSet() currentBestHSet.importFromXml(xml.dom.minidom.parse(infoFile)) neededHeuristics = currentBestHSet.keys() #Get hSets allHSets = self.__heuristicManager.allHeuristicSets() while len(allHSets) < (self.__minTrialNumber): #Not enough hSets! 
allHSets.append(HeuristicSet()) numSets = len(allHSets) count=1 for hSet in allHSets: hSet.complete(neededHeuristics, self.__db, conf_pickBestN) #Define more file names outDir = os.path.join(basesubdir, str(count)) if not os.path.isdir(outDir): #Create the output directory os.makedirs(outDir) binary= os.path.join(outDir, basename) heuristicsFile= os.path.join(outDir, "heuristics.txt") hSet.toXmlFile(heuristicsFile) status = pbutil.compileBenchmark(self.__pbcExe, benchmark, binary=binary, heuristics=heuristicsFile, jobs=self.__jobs) if status != 0: print "Compile FAILED" print "while using heuristics: " print hSet return status #Autotune try: autotune(binary, candidates) except tunerwarnings.AlwaysCrashes: print "Candidate "+str(count)+" always crashes!" #Add an empty entry for the candidate candidates.append(None) count = count + 1 candidates.addOriginalIndex() candidates.sortBySpeed() if candidates[0] is None: raise tunerwarnings.AlwaysCrashes() self.storeCandidatesDataInDB(candidates, basesubdir, basename) bestIndex = candidates[0].originalIndex print "The best candidate is: "+str(bestIndex) #Move every file to the right place bestSubDir=os.path.join(basesubdir, str(bestIndex)) # compiled program: bestBin=os.path.join(bestSubDir, basename) if finalBinary is not None: finalBin=finalBinary else: finalBin=os.path.join(path, basename) shutil.move(bestBin, finalBin) # .cfg file bestCfg=os.path.join(bestSubDir, basename+".cfg") finalCfg=finalBin + ".cfg" shutil.move(bestCfg, finalCfg) # .info file bestInfo=os.path.join(bestSubDir, basename+".info") finalInfo=finalBin+".info" shutil.move(bestInfo, finalInfo) # .obj directory bestObjDir=os.path.join(bestSubDir, basename+".obj") destObjDir=finalBin+".obj" if os.path.isdir(destObjDir): shutil.rmtree(destObjDir) shutil.move(bestObjDir, destObjDir) # input heuristic file if bestIndex != 0: #Program 0 is run with only the best heuristics in the DB bestHeurFile=os.path.join(bestSubDir, "heuristics.txt") 
finalHeurFile=finalBin+".heur" shutil.move(bestHeurFile, finalHeurFile) #Delete all the rest if conf_deleteTempDir: shutil.rmtree(basesubdir) return 0 #TEST if __name__ == "__main__": #basedir="/afs/csail.mit.edu/u/m/mtartara/programs/petabricks/" basedir="/home/mikyt/programmi/petabricks/" pbc=basedir+"src/pbc" l=LearningCompiler(pbc, sys.argv[1], conf_minTrialNumber) l.compileLearningHeuristics(sys.argv[2])
wontonst/will
refs/heads/master
will/backends/analysis/nothing.py
5
import requests from will import settings from will.decorators import require_settings from .base import AnalysisBackend class NoAnalysis(AnalysisBackend): def do_analyze(self, message): return {}
tareqalayan/pytest
refs/heads/master
testing/test_pluginmanager.py
1
# encoding: UTF-8 from __future__ import absolute_import, division, print_function import pytest import os import re import sys import types from _pytest.config import get_config, PytestPluginManager from _pytest.main import EXIT_NOTESTSCOLLECTED, Session @pytest.fixture def pytestpm(): return PytestPluginManager() class TestPytestPluginInteractions(object): def test_addhooks_conftestplugin(self, testdir): testdir.makepyfile(newhooks=""" def pytest_myhook(xyz): "new hook" """) conf = testdir.makeconftest(""" import sys ; sys.path.insert(0, '.') import newhooks def pytest_addhooks(pluginmanager): pluginmanager.addhooks(newhooks) def pytest_myhook(xyz): return xyz + 1 """) config = get_config() pm = config.pluginmanager pm.hook.pytest_addhooks.call_historic( kwargs=dict(pluginmanager=config.pluginmanager)) config.pluginmanager._importconftest(conf) # print(config.pluginmanager.get_plugins()) res = config.hook.pytest_myhook(xyz=10) assert res == [11] def test_addhooks_nohooks(self, testdir): testdir.makeconftest(""" import sys def pytest_addhooks(pluginmanager): pluginmanager.addhooks(sys) """) res = testdir.runpytest() assert res.ret != 0 res.stderr.fnmatch_lines([ "*did not find*sys*" ]) def test_namespace_early_from_import(self, testdir): p = testdir.makepyfile(""" from pytest import Item from pytest import Item as Item2 assert Item is Item2 """) result = testdir.runpython(p) assert result.ret == 0 def test_do_ext_namespace(self, testdir): testdir.makeconftest(""" def pytest_namespace(): return {'hello': 'world'} """) p = testdir.makepyfile(""" from pytest import hello import pytest def test_hello(): assert hello == "world" assert 'hello' in pytest.__all__ """) reprec = testdir.inline_run(p) reprec.assertoutcome(passed=1) def test_do_option_postinitialize(self, testdir): config = testdir.parseconfigure() assert not hasattr(config.option, 'test123') p = testdir.makepyfile(""" def pytest_addoption(parser): parser.addoption('--test123', action="store_true", 
default=True) """) config.pluginmanager._importconftest(p) assert config.option.test123 def test_configure(self, testdir): config = testdir.parseconfig() values = [] class A(object): def pytest_configure(self, config): values.append(self) config.pluginmanager.register(A()) assert len(values) == 0 config._do_configure() assert len(values) == 1 config.pluginmanager.register(A()) # leads to a configured() plugin assert len(values) == 2 assert values[0] != values[1] config._ensure_unconfigure() config.pluginmanager.register(A()) assert len(values) == 2 def test_hook_tracing(self): pytestpm = get_config().pluginmanager # fully initialized with plugins saveindent = [] class api1(object): def pytest_plugin_registered(self): saveindent.append(pytestpm.trace.root.indent) class api2(object): def pytest_plugin_registered(self): saveindent.append(pytestpm.trace.root.indent) raise ValueError() values = [] pytestpm.trace.root.setwriter(values.append) undo = pytestpm.enable_tracing() try: indent = pytestpm.trace.root.indent p = api1() pytestpm.register(p) assert pytestpm.trace.root.indent == indent assert len(values) >= 2 assert 'pytest_plugin_registered' in values[0] assert 'finish' in values[1] values[:] = [] with pytest.raises(ValueError): pytestpm.register(api2()) assert pytestpm.trace.root.indent == indent assert saveindent[0] > indent finally: undo() def test_hook_proxy(self, testdir): """Test the gethookproxy function(#2016)""" config = testdir.parseconfig() session = Session(config) testdir.makepyfile(**{ 'tests/conftest.py': '', 'tests/subdir/conftest.py': '', }) conftest1 = testdir.tmpdir.join('tests/conftest.py') conftest2 = testdir.tmpdir.join('tests/subdir/conftest.py') config.pluginmanager._importconftest(conftest1) ihook_a = session.gethookproxy(testdir.tmpdir.join('tests')) assert ihook_a is not None config.pluginmanager._importconftest(conftest2) ihook_b = session.gethookproxy(testdir.tmpdir.join('tests')) assert ihook_a is not ihook_b def 
test_warn_on_deprecated_addhooks(self, pytestpm): warnings = [] class get_warnings(object): def pytest_logwarning(self, code, fslocation, message, nodeid): warnings.append(message) class Plugin(object): def pytest_testhook(): pass pytestpm.register(get_warnings()) before = list(warnings) pytestpm.addhooks(Plugin()) assert len(warnings) == len(before) + 1 assert "deprecated" in warnings[-1] def test_namespace_has_default_and_env_plugins(testdir): p = testdir.makepyfile(""" import pytest pytest.mark """) result = testdir.runpython(p) assert result.ret == 0 def test_default_markers(testdir): result = testdir.runpytest("--markers") result.stdout.fnmatch_lines([ "*tryfirst*first*", "*trylast*last*", ]) def test_importplugin_error_message(testdir, pytestpm): """Don't hide import errors when importing plugins and provide an easy to debug message. See #375 and #1998. """ testdir.syspathinsert(testdir.tmpdir) testdir.makepyfile(qwe=""" # encoding: UTF-8 def test_traceback(): raise ImportError(u'Not possible to import: ☺') test_traceback() """) with pytest.raises(ImportError) as excinfo: pytestpm.import_plugin("qwe") expected_message = '.*Error importing plugin "qwe": Not possible to import: .' 
expected_traceback = ".*in test_traceback" assert re.match(expected_message, str(excinfo.value)) assert re.match(expected_traceback, str(excinfo.traceback[-1])) class TestPytestPluginManager(object): def test_register_imported_modules(self): pm = PytestPluginManager() mod = types.ModuleType("x.y.pytest_hello") pm.register(mod) assert pm.is_registered(mod) values = pm.get_plugins() assert mod in values pytest.raises(ValueError, "pm.register(mod)") pytest.raises(ValueError, lambda: pm.register(mod)) # assert not pm.is_registered(mod2) assert pm.get_plugins() == values def test_canonical_import(self, monkeypatch): mod = types.ModuleType("pytest_xyz") monkeypatch.setitem(sys.modules, 'pytest_xyz', mod) pm = PytestPluginManager() pm.import_plugin('pytest_xyz') assert pm.get_plugin('pytest_xyz') == mod assert pm.is_registered(mod) def test_consider_module(self, testdir, pytestpm): testdir.syspathinsert() testdir.makepyfile(pytest_p1="#") testdir.makepyfile(pytest_p2="#") mod = types.ModuleType("temp") mod.pytest_plugins = ["pytest_p1", "pytest_p2"] pytestpm.consider_module(mod) assert pytestpm.get_plugin("pytest_p1").__name__ == "pytest_p1" assert pytestpm.get_plugin("pytest_p2").__name__ == "pytest_p2" def test_consider_module_import_module(self, testdir): pytestpm = get_config().pluginmanager mod = types.ModuleType("x") mod.pytest_plugins = "pytest_a" aplugin = testdir.makepyfile(pytest_a="#") reprec = testdir.make_hook_recorder(pytestpm) # syspath.prepend(aplugin.dirpath()) sys.path.insert(0, str(aplugin.dirpath())) pytestpm.consider_module(mod) call = reprec.getcall(pytestpm.hook.pytest_plugin_registered.name) assert call.plugin.__name__ == "pytest_a" # check that it is not registered twice pytestpm.consider_module(mod) values = reprec.getcalls("pytest_plugin_registered") assert len(values) == 1 def test_consider_env_fails_to_import(self, monkeypatch, pytestpm): monkeypatch.setenv('PYTEST_PLUGINS', 'nonexisting', prepend=",") with pytest.raises(ImportError): 
pytestpm.consider_env() def test_plugin_skip(self, testdir, monkeypatch): p = testdir.makepyfile(skipping1=""" import pytest pytest.skip("hello") """) p.copy(p.dirpath("skipping2.py")) monkeypatch.setenv("PYTEST_PLUGINS", "skipping2") result = testdir.runpytest("-rw", "-p", "skipping1", syspathinsert=True) assert result.ret == EXIT_NOTESTSCOLLECTED result.stdout.fnmatch_lines([ "*skipped plugin*skipping1*hello*", "*skipped plugin*skipping2*hello*", ]) def test_consider_env_plugin_instantiation(self, testdir, monkeypatch, pytestpm): testdir.syspathinsert() testdir.makepyfile(xy123="#") monkeypatch.setitem(os.environ, 'PYTEST_PLUGINS', 'xy123') l1 = len(pytestpm.get_plugins()) pytestpm.consider_env() l2 = len(pytestpm.get_plugins()) assert l2 == l1 + 1 assert pytestpm.get_plugin('xy123') pytestpm.consider_env() l3 = len(pytestpm.get_plugins()) assert l2 == l3 def test_pluginmanager_ENV_startup(self, testdir, monkeypatch): testdir.makepyfile(pytest_x500="#") p = testdir.makepyfile(""" import pytest def test_hello(pytestconfig): plugin = pytestconfig.pluginmanager.get_plugin('pytest_x500') assert plugin is not None """) monkeypatch.setenv('PYTEST_PLUGINS', 'pytest_x500', prepend=",") result = testdir.runpytest(p, syspathinsert=True) assert result.ret == 0 result.stdout.fnmatch_lines(["*1 passed*"]) def test_import_plugin_importname(self, testdir, pytestpm): pytest.raises(ImportError, 'pytestpm.import_plugin("qweqwex.y")') pytest.raises(ImportError, 'pytestpm.import_plugin("pytest_qweqwx.y")') testdir.syspathinsert() pluginname = "pytest_hello" testdir.makepyfile(**{pluginname: ""}) pytestpm.import_plugin("pytest_hello") len1 = len(pytestpm.get_plugins()) pytestpm.import_plugin("pytest_hello") len2 = len(pytestpm.get_plugins()) assert len1 == len2 plugin1 = pytestpm.get_plugin("pytest_hello") assert plugin1.__name__.endswith('pytest_hello') plugin2 = pytestpm.get_plugin("pytest_hello") assert plugin2 is plugin1 def test_import_plugin_dotted_name(self, testdir, 
pytestpm): pytest.raises(ImportError, 'pytestpm.import_plugin("qweqwex.y")') pytest.raises(ImportError, 'pytestpm.import_plugin("pytest_qweqwex.y")') testdir.syspathinsert() testdir.mkpydir("pkg").join("plug.py").write("x=3") pluginname = "pkg.plug" pytestpm.import_plugin(pluginname) mod = pytestpm.get_plugin("pkg.plug") assert mod.x == 3 def test_consider_conftest_deps(self, testdir, pytestpm): mod = testdir.makepyfile("pytest_plugins='xyz'").pyimport() with pytest.raises(ImportError): pytestpm.consider_conftest(mod) class TestPytestPluginManagerBootstrapming(object): def test_preparse_args(self, pytestpm): pytest.raises(ImportError, lambda: pytestpm.consider_preparse(["xyz", "-p", "hello123"])) def test_plugin_prevent_register(self, pytestpm): pytestpm.consider_preparse(["xyz", "-p", "no:abc"]) l1 = pytestpm.get_plugins() pytestpm.register(42, name="abc") l2 = pytestpm.get_plugins() assert len(l2) == len(l1) assert 42 not in l2 def test_plugin_prevent_register_unregistered_alredy_registered(self, pytestpm): pytestpm.register(42, name="abc") l1 = pytestpm.get_plugins() assert 42 in l1 pytestpm.consider_preparse(["xyz", "-p", "no:abc"]) l2 = pytestpm.get_plugins() assert 42 not in l2
lariodiniz/OramaBankTest
refs/heads/master
OramaBank/pynetbanking/admin.py
1
# coding: utf-8 #--------------//////////---------------------- #Projeto Criado por: Lário Diniz #Contatos: developer.lario@gmail.com #data: 30/09/2015 #--------------//////////---------------------- from django.contrib import admin from django.utils.translation import ugettext as _ from datetime import datetime from django.utils import timezone from .models import Cliente_Model, Conta_Model, Operacao_model class Cliente_ModelnAdmin(admin.ModelAdmin): list_display = ('user', 'codigo', 'cpf', 'saldo_total','conta_corrente','poupanca') search_fields = ['user', 'codigo', 'cpf'] def saldo_total(self, obj): contas = Conta_Model.objects.filter(user=Cliente_Model.objects.get(user=obj.user)) saldo = float() for conta in contas: saldo += conta.saldo return saldo def conta_corrente(self, obj): contas = Conta_Model.objects.filter(user=Cliente_Model.objects.get(user=obj.user)) val=False for conta in contas: if conta.tipo=='0': val=True return val saldo_total.short_description=_('Saldo Total') conta_corrente.short_description=_('Possui Conta Corrente?') conta_corrente.boolean = True def poupanca(self, obj): contas = Conta_Model.objects.filter(user=Cliente_Model.objects.get(user=obj.user)) val=False for conta in contas: if conta.tipo=='1': val=True return val saldo_total.short_description=_('Saldo Total') conta_corrente.short_description=_('Possui Conta Corrente?') conta_corrente.boolean = True poupanca.short_description=_('Possui Poupanca?') poupanca.boolean = True class Conta_ModelAdmin(admin.ModelAdmin): list_display = ('numero','user','tipo', 'saldo', 'data') list_filter = ['data'] search_fields = ['numero'] class Operacao_ModelAdmin(admin.ModelAdmin): list_display = ('conta','tipo','valor','data') list_filter = ['data', 'tipo'] admin.site.register(Cliente_Model, Cliente_ModelnAdmin) admin.site.register(Conta_Model, Conta_ModelAdmin) admin.site.register(Operacao_model, Operacao_ModelAdmin)
dynaryu/inasafe
refs/heads/develop
safe/impact_functions/volcanic/volcano_polygon_population/test/__init__.py
229
__author__ = 'akbar'
mmmavis/lightbeam-bedrock-website
refs/heads/master
bedrock/facebookapps/tests/test_views.py
7
# -*- coding: utf-8 -*- # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. import urllib from django.conf import settings from funfactory.urlresolvers import reverse from mock import patch from nose.tools import eq_, ok_ from pyquery import PyQuery as pq from bedrock.facebookapps import tests @patch.object(settings, 'FACEBOOK_PAGE_NAMESPACE', 'some-namespace') @patch.object(settings, 'FACEBOOK_APP_ID', '123456789') class TestTabRedirect(tests.TestCase): def setUp(self): self.tab_url = '//www.facebook.com/some-namespace/app_123456789' def create_response(self, js_redirect=False, method='get', data={}): kwargs = {'redirect_type': 'js'} if js_redirect else None with self.activate('en-US'): url = reverse('facebookapps.tab_redirect', kwargs=kwargs) return getattr(self.client, method)(url, data) def test_facebook_tab_url(self): eq_(settings.FACEBOOK_TAB_URL, self.tab_url) def test_normal_redirect(self): """ Redirect to Facebook tab URL. """ response = self.create_response() # Django's redirect adds the protocol url = 'http:{url}'.format(url=self.tab_url) self.assert_response_redirect(response, url) def test_iframe_header(self): """ Should allow rendering in iframe. """ response = self.create_response(method='post') self.assert_iframe_able(response) def test_js_redirect(self): """ Redirect using JavaScript and window.top.location if `redirect_type` is `js`. """ response = self.create_response(js_redirect=True, method='post') self.assert_js_redirect(response, self.tab_url) def test_convert_query_string(self): """ Convert query string to app_data query string. 
""" response = self.create_response(data=tests.DUMMY_DICT) url = 'http:{url}?{query_string}'.format(url=self.tab_url, query_string=tests.DUMMY_APP_DATA_QUERY) eq_(urllib.unquote(response['Location']), url) class TestDownloadTab(tests.TestCase): def create_response(self): with self.activate('en-US'): url = reverse('facebookapps.downloadtab') return self.client.post(url) def test_normal_downloadtab(self): """ Should have normal Download Tab response code and content. """ response = self.create_response() eq_(response.status_code, 200) doc = pq(response.content) download_selector = '.download-button' share_selector = '.js-share' invite_selector = '.js-invite' ok_(doc(download_selector), 'Download Button element with selector' ' `{sel}` not found.'.format(sel=download_selector)) ok_(doc(share_selector), 'Facebook share button with selector `{sel}` ' 'not found.'.format(sel=share_selector)) ok_(doc(invite_selector), 'Facebook friend invite button with selector' ' `{sel}` not found.'.format(sel=invite_selector)) def test_iframe_header(self): """ Should allow rendering in iframe. """ response = self.create_response() self.assert_iframe_able(response)
sv-dev1/odoo
refs/heads/8.0
addons/mass_mailing/wizard/__init__.py
432
# -*- coding: utf-8 -*- import test_mailing import mail_compose_message
bussiere/gitfs
refs/heads/master
gitfs/merges/accept_mine.py
3
# Copyright 2014 PressLabs SRL # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pygit2 from gitfs.log import log from .base import Merger class AcceptMine(Merger): def _create_remote_copy(self, branch_name, upstream, new_branch): reference = "%s/%s" % (upstream, branch_name) remote = self.repository.lookup_branch(reference, pygit2.GIT_BRANCH_REMOTE) remote_commit = remote.get_object() local = self.repository.create_branch(new_branch, remote_commit) ref = self.repository.lookup_reference("refs/heads/%s" % new_branch) self.repository.checkout(ref, strategy=pygit2.GIT_CHECKOUT_FORCE) return local def _create_local_copy(self, branch_name, new_branch): old_branch = self.repository.lookup_branch(branch_name, pygit2.GIT_BRANCH_LOCAL) return old_branch.rename(new_branch, True) def __call__(self, local_branch, remote_branch, upstream): log.debug("AcceptMine: Copy local branch to merging_local") local = self._create_local_copy(local_branch, "merging_local") log.debug("AcceptMine: Copy remote branch to merging_remote") remote = self._create_remote_copy(remote_branch, upstream, "merging_remote") log.debug("AcceptMine: Find diverge commis") diverge_commits = self.repository.find_diverge_commits(local, remote) reference = "refs/heads/%s" % "merging_remote" log.debug("AcceptMine: Checkout to %s", reference) self.repository.checkout(reference, strategy=pygit2.GIT_CHECKOUT_FORCE) # actual merging for commit in diverge_commits.first_commits: log.debug("AcceptMine: Merging %s", commit.hex) 
self.repository.merge(commit.hex) log.debug("AcceptMine: Solving conflicts") self.solve_conflicts(self.repository.index.conflicts) log.debug("AcceptMine: Commiting changes") ref = self.repository.lookup_reference(reference) message = "merging: %s" % commit.message parents = [ref.target, commit.id] new_commit = self.repository.commit(message, self.author, self.commiter, ref=reference, parents=parents) if new_commit is not None: log.debug("AcceptMine: We have a non-empty commit") self.repository.create_reference(reference, new_commit, force=True) log.debug("AcceptMine: Checkout to %s", reference) self.repository.checkout(reference, strategy=pygit2.GIT_CHECKOUT_FORCE) log.debug("AcceptMine: Clean the state") self.repository.state_cleanup() log.debug("AcceptMine: Checkout to %s", local_branch) ref = self.repository.lookup_reference(reference) self.repository.create_reference("refs/heads/%s" % local_branch, ref.target, force=True) self.repository.checkout("refs/heads/%s" % local_branch, strategy=pygit2.GIT_CHECKOUT_FORCE) log.debug("AcceptMine: Delete merging_local") ref = self.repository.lookup_reference("refs/heads/merging_local") ref.delete() log.debug("AcceptMine: Delete merging_remote") ref = self.repository.lookup_reference("refs/heads/merging_remote") ref.delete() def solve_conflicts(self, conflicts): if conflicts: for common, theirs, ours in conflicts: if not ours and theirs: log.debug("AcceptMine: if we deleted the file and they " "didn't, remove the file") self.repository.index.remove(theirs.path, 2) self.repository.index.remove(theirs.path, 1) elif ours and not theirs: log.debug("AcceptMine: if they deleted the file and we " "didn't, add the file") self.repository.index.add(ours.path) else: log.debug("AcceptMine: overwrite all file with our " "content") with open(self.repository._full_path(ours.path), "w") as f: f.write(self.repository.get(ours.id).data) self.repository.index.add(ours.path) else: log.info("AcceptMine: No conflicts to solve")
igordejanovic/textX
refs/heads/master
tests/functional/regressions/test_issue166.py
1
from __future__ import unicode_literals import textx def test_issue166_wrong_multiple_rule_reference(): """ Test wrongly detected referencing of multiple rules from a single abstract rule alternative. Reported in issue #166. """ grammar = """ DSL: commands*=BaseCommand; BaseCommand: (Require | Group) '.'?; Require: 'require' /[a-z]/; Group: 'group' hello=/[0-9]/; """ metamodel = textx.metamodel_from_str(grammar) assert metamodel model = metamodel.model_from_str('require a. group 4') assert model.commands[0] == 'requirea' assert model.commands[1].hello == '4'
michalliu/OpenWrt-Firefly-Libraries
refs/heads/master
staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python3.4/test/test_asyncio/test_tasks.py
10
"""Tests for tasks.py.""" import os import re import sys import types import unittest import weakref from unittest import mock import asyncio from asyncio import coroutines from asyncio import test_utils try: from test import support from test.script_helper import assert_python_ok except ImportError: from asyncio import test_support as support from asyncio.test_support import assert_python_ok PY34 = (sys.version_info >= (3, 4)) PY35 = (sys.version_info >= (3, 5)) @asyncio.coroutine def coroutine_function(): pass def format_coroutine(qualname, state, src, source_traceback, generator=False): if generator: state = '%s' % state else: state = '%s, defined' % state if source_traceback is not None: frame = source_traceback[-1] return ('coro=<%s() %s at %s> created at %s:%s' % (qualname, state, src, frame[0], frame[1])) else: return 'coro=<%s() %s at %s>' % (qualname, state, src) class Dummy: def __repr__(self): return '<Dummy>' def __call__(self, *args): pass class TaskTests(test_utils.TestCase): def setUp(self): self.loop = self.new_test_loop() def test_task_class(self): @asyncio.coroutine def notmuch(): return 'ok' t = asyncio.Task(notmuch(), loop=self.loop) self.loop.run_until_complete(t) self.assertTrue(t.done()) self.assertEqual(t.result(), 'ok') self.assertIs(t._loop, self.loop) loop = asyncio.new_event_loop() self.set_event_loop(loop) t = asyncio.Task(notmuch(), loop=loop) self.assertIs(t._loop, loop) loop.run_until_complete(t) loop.close() def test_async_coroutine(self): @asyncio.coroutine def notmuch(): return 'ok' t = asyncio.async(notmuch(), loop=self.loop) self.loop.run_until_complete(t) self.assertTrue(t.done()) self.assertEqual(t.result(), 'ok') self.assertIs(t._loop, self.loop) loop = asyncio.new_event_loop() self.set_event_loop(loop) t = asyncio.async(notmuch(), loop=loop) self.assertIs(t._loop, loop) loop.run_until_complete(t) loop.close() def test_async_future(self): f_orig = asyncio.Future(loop=self.loop) f_orig.set_result('ko') f = 
asyncio.async(f_orig) self.loop.run_until_complete(f) self.assertTrue(f.done()) self.assertEqual(f.result(), 'ko') self.assertIs(f, f_orig) loop = asyncio.new_event_loop() self.set_event_loop(loop) with self.assertRaises(ValueError): f = asyncio.async(f_orig, loop=loop) loop.close() f = asyncio.async(f_orig, loop=self.loop) self.assertIs(f, f_orig) def test_async_task(self): @asyncio.coroutine def notmuch(): return 'ok' t_orig = asyncio.Task(notmuch(), loop=self.loop) t = asyncio.async(t_orig) self.loop.run_until_complete(t) self.assertTrue(t.done()) self.assertEqual(t.result(), 'ok') self.assertIs(t, t_orig) loop = asyncio.new_event_loop() self.set_event_loop(loop) with self.assertRaises(ValueError): t = asyncio.async(t_orig, loop=loop) loop.close() t = asyncio.async(t_orig, loop=self.loop) self.assertIs(t, t_orig) def test_async_neither(self): with self.assertRaises(TypeError): asyncio.async('ok') def test_task_repr(self): self.loop.set_debug(False) @asyncio.coroutine def notmuch(): yield from [] return 'abc' # test coroutine function self.assertEqual(notmuch.__name__, 'notmuch') if PY35: self.assertEqual(notmuch.__qualname__, 'TaskTests.test_task_repr.<locals>.notmuch') self.assertEqual(notmuch.__module__, __name__) filename, lineno = test_utils.get_function_source(notmuch) src = "%s:%s" % (filename, lineno) # test coroutine object gen = notmuch() if coroutines._DEBUG or PY35: coro_qualname = 'TaskTests.test_task_repr.<locals>.notmuch' else: coro_qualname = 'notmuch' self.assertEqual(gen.__name__, 'notmuch') if PY35: self.assertEqual(gen.__qualname__, coro_qualname) # test pending Task t = asyncio.Task(gen, loop=self.loop) t.add_done_callback(Dummy()) coro = format_coroutine(coro_qualname, 'running', src, t._source_traceback, generator=True) self.assertEqual(repr(t), '<Task pending %s cb=[<Dummy>()]>' % coro) # test cancelling Task t.cancel() # Does not take immediate effect! 
self.assertEqual(repr(t), '<Task cancelling %s cb=[<Dummy>()]>' % coro) # test cancelled Task self.assertRaises(asyncio.CancelledError, self.loop.run_until_complete, t) coro = format_coroutine(coro_qualname, 'done', src, t._source_traceback) self.assertEqual(repr(t), '<Task cancelled %s>' % coro) # test finished Task t = asyncio.Task(notmuch(), loop=self.loop) self.loop.run_until_complete(t) coro = format_coroutine(coro_qualname, 'done', src, t._source_traceback) self.assertEqual(repr(t), "<Task finished %s result='abc'>" % coro) def test_task_repr_coro_decorator(self): self.loop.set_debug(False) @asyncio.coroutine def notmuch(): # notmuch() function doesn't use yield from: it will be wrapped by # @coroutine decorator return 123 # test coroutine function self.assertEqual(notmuch.__name__, 'notmuch') if PY35: self.assertEqual(notmuch.__qualname__, 'TaskTests.test_task_repr_coro_decorator' '.<locals>.notmuch') self.assertEqual(notmuch.__module__, __name__) # test coroutine object gen = notmuch() if coroutines._DEBUG or PY35: # On Python >= 3.5, generators now inherit the name of the # function, as expected, and have a qualified name (__qualname__ # attribute). coro_name = 'notmuch' coro_qualname = ('TaskTests.test_task_repr_coro_decorator' '.<locals>.notmuch') else: # On Python < 3.5, generators inherit the name of the code, not of # the function. 
See: http://bugs.python.org/issue21205 coro_name = coro_qualname = 'coro' self.assertEqual(gen.__name__, coro_name) if PY35: self.assertEqual(gen.__qualname__, coro_qualname) # test repr(CoroWrapper) if coroutines._DEBUG: # format the coroutine object if coroutines._DEBUG: filename, lineno = test_utils.get_function_source(notmuch) frame = gen._source_traceback[-1] coro = ('%s() running, defined at %s:%s, created at %s:%s' % (coro_qualname, filename, lineno, frame[0], frame[1])) else: code = gen.gi_code coro = ('%s() running at %s:%s' % (coro_qualname, code.co_filename, code.co_firstlineno)) self.assertEqual(repr(gen), '<CoroWrapper %s>' % coro) # test pending Task t = asyncio.Task(gen, loop=self.loop) t.add_done_callback(Dummy()) # format the coroutine object if coroutines._DEBUG: src = '%s:%s' % test_utils.get_function_source(notmuch) else: code = gen.gi_code src = '%s:%s' % (code.co_filename, code.co_firstlineno) coro = format_coroutine(coro_qualname, 'running', src, t._source_traceback, generator=not coroutines._DEBUG) self.assertEqual(repr(t), '<Task pending %s cb=[<Dummy>()]>' % coro) self.loop.run_until_complete(t) def test_task_repr_wait_for(self): self.loop.set_debug(False) @asyncio.coroutine def wait_for(fut): return (yield from fut) fut = asyncio.Future(loop=self.loop) task = asyncio.Task(wait_for(fut), loop=self.loop) test_utils.run_briefly(self.loop) self.assertRegex(repr(task), '<Task .* wait_for=%s>' % re.escape(repr(fut))) fut.set_result(None) self.loop.run_until_complete(task) def test_task_basics(self): @asyncio.coroutine def outer(): a = yield from inner1() b = yield from inner2() return a+b @asyncio.coroutine def inner1(): return 42 @asyncio.coroutine def inner2(): return 1000 t = outer() self.assertEqual(self.loop.run_until_complete(t), 1042) def test_cancel(self): def gen(): when = yield self.assertAlmostEqual(10.0, when) yield 0 loop = self.new_test_loop(gen) @asyncio.coroutine def task(): yield from asyncio.sleep(10.0, loop=loop) return 12 t 
= asyncio.Task(task(), loop=loop) loop.call_soon(t.cancel) with self.assertRaises(asyncio.CancelledError): loop.run_until_complete(t) self.assertTrue(t.done()) self.assertTrue(t.cancelled()) self.assertFalse(t.cancel()) def test_cancel_yield(self): @asyncio.coroutine def task(): yield yield return 12 t = asyncio.Task(task(), loop=self.loop) test_utils.run_briefly(self.loop) # start coro t.cancel() self.assertRaises( asyncio.CancelledError, self.loop.run_until_complete, t) self.assertTrue(t.done()) self.assertTrue(t.cancelled()) self.assertFalse(t.cancel()) def test_cancel_inner_future(self): f = asyncio.Future(loop=self.loop) @asyncio.coroutine def task(): yield from f return 12 t = asyncio.Task(task(), loop=self.loop) test_utils.run_briefly(self.loop) # start task f.cancel() with self.assertRaises(asyncio.CancelledError): self.loop.run_until_complete(t) self.assertTrue(f.cancelled()) self.assertTrue(t.cancelled()) def test_cancel_both_task_and_inner_future(self): f = asyncio.Future(loop=self.loop) @asyncio.coroutine def task(): yield from f return 12 t = asyncio.Task(task(), loop=self.loop) test_utils.run_briefly(self.loop) f.cancel() t.cancel() with self.assertRaises(asyncio.CancelledError): self.loop.run_until_complete(t) self.assertTrue(t.done()) self.assertTrue(f.cancelled()) self.assertTrue(t.cancelled()) def test_cancel_task_catching(self): fut1 = asyncio.Future(loop=self.loop) fut2 = asyncio.Future(loop=self.loop) @asyncio.coroutine def task(): yield from fut1 try: yield from fut2 except asyncio.CancelledError: return 42 t = asyncio.Task(task(), loop=self.loop) test_utils.run_briefly(self.loop) self.assertIs(t._fut_waiter, fut1) # White-box test. fut1.set_result(None) test_utils.run_briefly(self.loop) self.assertIs(t._fut_waiter, fut2) # White-box test. 
t.cancel() self.assertTrue(fut2.cancelled()) res = self.loop.run_until_complete(t) self.assertEqual(res, 42) self.assertFalse(t.cancelled()) def test_cancel_task_ignoring(self): fut1 = asyncio.Future(loop=self.loop) fut2 = asyncio.Future(loop=self.loop) fut3 = asyncio.Future(loop=self.loop) @asyncio.coroutine def task(): yield from fut1 try: yield from fut2 except asyncio.CancelledError: pass res = yield from fut3 return res t = asyncio.Task(task(), loop=self.loop) test_utils.run_briefly(self.loop) self.assertIs(t._fut_waiter, fut1) # White-box test. fut1.set_result(None) test_utils.run_briefly(self.loop) self.assertIs(t._fut_waiter, fut2) # White-box test. t.cancel() self.assertTrue(fut2.cancelled()) test_utils.run_briefly(self.loop) self.assertIs(t._fut_waiter, fut3) # White-box test. fut3.set_result(42) res = self.loop.run_until_complete(t) self.assertEqual(res, 42) self.assertFalse(fut3.cancelled()) self.assertFalse(t.cancelled()) def test_cancel_current_task(self): loop = asyncio.new_event_loop() self.set_event_loop(loop) @asyncio.coroutine def task(): t.cancel() self.assertTrue(t._must_cancel) # White-box test. # The sleep should be cancelled immediately. yield from asyncio.sleep(100, loop=loop) return 12 t = asyncio.Task(task(), loop=loop) self.assertRaises( asyncio.CancelledError, loop.run_until_complete, t) self.assertTrue(t.done()) self.assertFalse(t._must_cancel) # White-box test. 
self.assertFalse(t.cancel()) def test_stop_while_run_in_complete(self): def gen(): when = yield self.assertAlmostEqual(0.1, when) when = yield 0.1 self.assertAlmostEqual(0.2, when) when = yield 0.1 self.assertAlmostEqual(0.3, when) yield 0.1 loop = self.new_test_loop(gen) x = 0 waiters = [] @asyncio.coroutine def task(): nonlocal x while x < 10: waiters.append(asyncio.sleep(0.1, loop=loop)) yield from waiters[-1] x += 1 if x == 2: loop.stop() t = asyncio.Task(task(), loop=loop) with self.assertRaises(RuntimeError) as cm: loop.run_until_complete(t) self.assertEqual(str(cm.exception), 'Event loop stopped before Future completed.') self.assertFalse(t.done()) self.assertEqual(x, 2) self.assertAlmostEqual(0.3, loop.time()) # close generators for w in waiters: w.close() t.cancel() self.assertRaises(asyncio.CancelledError, loop.run_until_complete, t) def test_wait_for(self): def gen(): when = yield self.assertAlmostEqual(0.2, when) when = yield 0 self.assertAlmostEqual(0.1, when) when = yield 0.1 loop = self.new_test_loop(gen) foo_running = None @asyncio.coroutine def foo(): nonlocal foo_running foo_running = True try: yield from asyncio.sleep(0.2, loop=loop) finally: foo_running = False return 'done' fut = asyncio.Task(foo(), loop=loop) with self.assertRaises(asyncio.TimeoutError): loop.run_until_complete(asyncio.wait_for(fut, 0.1, loop=loop)) self.assertTrue(fut.done()) # it should have been cancelled due to the timeout self.assertTrue(fut.cancelled()) self.assertAlmostEqual(0.1, loop.time()) self.assertEqual(foo_running, False) def test_wait_for_blocking(self): loop = self.new_test_loop() @asyncio.coroutine def coro(): return 'done' res = loop.run_until_complete(asyncio.wait_for(coro(), timeout=None, loop=loop)) self.assertEqual(res, 'done') def test_wait_for_with_global_loop(self): def gen(): when = yield self.assertAlmostEqual(0.2, when) when = yield 0 self.assertAlmostEqual(0.01, when) yield 0.01 loop = self.new_test_loop(gen) @asyncio.coroutine def foo(): yield 
from asyncio.sleep(0.2, loop=loop) return 'done' asyncio.set_event_loop(loop) try: fut = asyncio.Task(foo(), loop=loop) with self.assertRaises(asyncio.TimeoutError): loop.run_until_complete(asyncio.wait_for(fut, 0.01)) finally: asyncio.set_event_loop(None) self.assertAlmostEqual(0.01, loop.time()) self.assertTrue(fut.done()) self.assertTrue(fut.cancelled()) def test_wait_for_race_condition(self): def gen(): yield 0.1 yield 0.1 yield 0.1 loop = self.new_test_loop(gen) fut = asyncio.Future(loop=loop) task = asyncio.wait_for(fut, timeout=0.2, loop=loop) loop.call_later(0.1, fut.set_result, "ok") res = loop.run_until_complete(task) self.assertEqual(res, "ok") def test_wait(self): def gen(): when = yield self.assertAlmostEqual(0.1, when) when = yield 0 self.assertAlmostEqual(0.15, when) yield 0.15 loop = self.new_test_loop(gen) a = asyncio.Task(asyncio.sleep(0.1, loop=loop), loop=loop) b = asyncio.Task(asyncio.sleep(0.15, loop=loop), loop=loop) @asyncio.coroutine def foo(): done, pending = yield from asyncio.wait([b, a], loop=loop) self.assertEqual(done, set([a, b])) self.assertEqual(pending, set()) return 42 res = loop.run_until_complete(asyncio.Task(foo(), loop=loop)) self.assertEqual(res, 42) self.assertAlmostEqual(0.15, loop.time()) # Doing it again should take no time and exercise a different path. 
res = loop.run_until_complete(asyncio.Task(foo(), loop=loop)) self.assertAlmostEqual(0.15, loop.time()) self.assertEqual(res, 42) def test_wait_with_global_loop(self): def gen(): when = yield self.assertAlmostEqual(0.01, when) when = yield 0 self.assertAlmostEqual(0.015, when) yield 0.015 loop = self.new_test_loop(gen) a = asyncio.Task(asyncio.sleep(0.01, loop=loop), loop=loop) b = asyncio.Task(asyncio.sleep(0.015, loop=loop), loop=loop) @asyncio.coroutine def foo(): done, pending = yield from asyncio.wait([b, a]) self.assertEqual(done, set([a, b])) self.assertEqual(pending, set()) return 42 asyncio.set_event_loop(loop) res = loop.run_until_complete( asyncio.Task(foo(), loop=loop)) self.assertEqual(res, 42) def test_wait_duplicate_coroutines(self): @asyncio.coroutine def coro(s): return s c = coro('test') task = asyncio.Task( asyncio.wait([c, c, coro('spam')], loop=self.loop), loop=self.loop) done, pending = self.loop.run_until_complete(task) self.assertFalse(pending) self.assertEqual(set(f.result() for f in done), {'test', 'spam'}) def test_wait_errors(self): self.assertRaises( ValueError, self.loop.run_until_complete, asyncio.wait(set(), loop=self.loop)) # -1 is an invalid return_when value sleep_coro = asyncio.sleep(10.0, loop=self.loop) wait_coro = asyncio.wait([sleep_coro], return_when=-1, loop=self.loop) self.assertRaises(ValueError, self.loop.run_until_complete, wait_coro) sleep_coro.close() def test_wait_first_completed(self): def gen(): when = yield self.assertAlmostEqual(10.0, when) when = yield 0 self.assertAlmostEqual(0.1, when) yield 0.1 loop = self.new_test_loop(gen) a = asyncio.Task(asyncio.sleep(10.0, loop=loop), loop=loop) b = asyncio.Task(asyncio.sleep(0.1, loop=loop), loop=loop) task = asyncio.Task( asyncio.wait([b, a], return_when=asyncio.FIRST_COMPLETED, loop=loop), loop=loop) done, pending = loop.run_until_complete(task) self.assertEqual({b}, done) self.assertEqual({a}, pending) self.assertFalse(a.done()) self.assertTrue(b.done()) 
self.assertIsNone(b.result()) self.assertAlmostEqual(0.1, loop.time()) # move forward to close generator loop.advance_time(10) loop.run_until_complete(asyncio.wait([a, b], loop=loop)) def test_wait_really_done(self): # there is possibility that some tasks in the pending list # became done but their callbacks haven't all been called yet @asyncio.coroutine def coro1(): yield @asyncio.coroutine def coro2(): yield yield a = asyncio.Task(coro1(), loop=self.loop) b = asyncio.Task(coro2(), loop=self.loop) task = asyncio.Task( asyncio.wait([b, a], return_when=asyncio.FIRST_COMPLETED, loop=self.loop), loop=self.loop) done, pending = self.loop.run_until_complete(task) self.assertEqual({a, b}, done) self.assertTrue(a.done()) self.assertIsNone(a.result()) self.assertTrue(b.done()) self.assertIsNone(b.result()) def test_wait_first_exception(self): def gen(): when = yield self.assertAlmostEqual(10.0, when) yield 0 loop = self.new_test_loop(gen) # first_exception, task already has exception a = asyncio.Task(asyncio.sleep(10.0, loop=loop), loop=loop) @asyncio.coroutine def exc(): raise ZeroDivisionError('err') b = asyncio.Task(exc(), loop=loop) task = asyncio.Task( asyncio.wait([b, a], return_when=asyncio.FIRST_EXCEPTION, loop=loop), loop=loop) done, pending = loop.run_until_complete(task) self.assertEqual({b}, done) self.assertEqual({a}, pending) self.assertAlmostEqual(0, loop.time()) # move forward to close generator loop.advance_time(10) loop.run_until_complete(asyncio.wait([a, b], loop=loop)) def test_wait_first_exception_in_wait(self): def gen(): when = yield self.assertAlmostEqual(10.0, when) when = yield 0 self.assertAlmostEqual(0.01, when) yield 0.01 loop = self.new_test_loop(gen) # first_exception, exception during waiting a = asyncio.Task(asyncio.sleep(10.0, loop=loop), loop=loop) @asyncio.coroutine def exc(): yield from asyncio.sleep(0.01, loop=loop) raise ZeroDivisionError('err') b = asyncio.Task(exc(), loop=loop) task = asyncio.wait([b, a], 
return_when=asyncio.FIRST_EXCEPTION, loop=loop) done, pending = loop.run_until_complete(task) self.assertEqual({b}, done) self.assertEqual({a}, pending) self.assertAlmostEqual(0.01, loop.time()) # move forward to close generator loop.advance_time(10) loop.run_until_complete(asyncio.wait([a, b], loop=loop)) def test_wait_with_exception(self): def gen(): when = yield self.assertAlmostEqual(0.1, when) when = yield 0 self.assertAlmostEqual(0.15, when) yield 0.15 loop = self.new_test_loop(gen) a = asyncio.Task(asyncio.sleep(0.1, loop=loop), loop=loop) @asyncio.coroutine def sleeper(): yield from asyncio.sleep(0.15, loop=loop) raise ZeroDivisionError('really') b = asyncio.Task(sleeper(), loop=loop) @asyncio.coroutine def foo(): done, pending = yield from asyncio.wait([b, a], loop=loop) self.assertEqual(len(done), 2) self.assertEqual(pending, set()) errors = set(f for f in done if f.exception() is not None) self.assertEqual(len(errors), 1) loop.run_until_complete(asyncio.Task(foo(), loop=loop)) self.assertAlmostEqual(0.15, loop.time()) loop.run_until_complete(asyncio.Task(foo(), loop=loop)) self.assertAlmostEqual(0.15, loop.time()) def test_wait_with_timeout(self): def gen(): when = yield self.assertAlmostEqual(0.1, when) when = yield 0 self.assertAlmostEqual(0.15, when) when = yield 0 self.assertAlmostEqual(0.11, when) yield 0.11 loop = self.new_test_loop(gen) a = asyncio.Task(asyncio.sleep(0.1, loop=loop), loop=loop) b = asyncio.Task(asyncio.sleep(0.15, loop=loop), loop=loop) @asyncio.coroutine def foo(): done, pending = yield from asyncio.wait([b, a], timeout=0.11, loop=loop) self.assertEqual(done, set([a])) self.assertEqual(pending, set([b])) loop.run_until_complete(asyncio.Task(foo(), loop=loop)) self.assertAlmostEqual(0.11, loop.time()) # move forward to close generator loop.advance_time(10) loop.run_until_complete(asyncio.wait([a, b], loop=loop)) def test_wait_concurrent_complete(self): def gen(): when = yield self.assertAlmostEqual(0.1, when) when = yield 0 
self.assertAlmostEqual(0.15, when) when = yield 0 self.assertAlmostEqual(0.1, when) yield 0.1 loop = self.new_test_loop(gen) a = asyncio.Task(asyncio.sleep(0.1, loop=loop), loop=loop) b = asyncio.Task(asyncio.sleep(0.15, loop=loop), loop=loop) done, pending = loop.run_until_complete( asyncio.wait([b, a], timeout=0.1, loop=loop)) self.assertEqual(done, set([a])) self.assertEqual(pending, set([b])) self.assertAlmostEqual(0.1, loop.time()) # move forward to close generator loop.advance_time(10) loop.run_until_complete(asyncio.wait([a, b], loop=loop)) def test_as_completed(self): def gen(): yield 0 yield 0 yield 0.01 yield 0 loop = self.new_test_loop(gen) # disable "slow callback" warning loop.slow_callback_duration = 1.0 completed = set() time_shifted = False @asyncio.coroutine def sleeper(dt, x): nonlocal time_shifted yield from asyncio.sleep(dt, loop=loop) completed.add(x) if not time_shifted and 'a' in completed and 'b' in completed: time_shifted = True loop.advance_time(0.14) return x a = sleeper(0.01, 'a') b = sleeper(0.01, 'b') c = sleeper(0.15, 'c') @asyncio.coroutine def foo(): values = [] for f in asyncio.as_completed([b, c, a], loop=loop): values.append((yield from f)) return values res = loop.run_until_complete(asyncio.Task(foo(), loop=loop)) self.assertAlmostEqual(0.15, loop.time()) self.assertTrue('a' in res[:2]) self.assertTrue('b' in res[:2]) self.assertEqual(res[2], 'c') # Doing it again should take no time and exercise a different path. 
res = loop.run_until_complete(asyncio.Task(foo(), loop=loop)) self.assertAlmostEqual(0.15, loop.time()) def test_as_completed_with_timeout(self): def gen(): yield yield 0 yield 0 yield 0.1 loop = self.new_test_loop(gen) a = asyncio.sleep(0.1, 'a', loop=loop) b = asyncio.sleep(0.15, 'b', loop=loop) @asyncio.coroutine def foo(): values = [] for f in asyncio.as_completed([a, b], timeout=0.12, loop=loop): if values: loop.advance_time(0.02) try: v = yield from f values.append((1, v)) except asyncio.TimeoutError as exc: values.append((2, exc)) return values res = loop.run_until_complete(asyncio.Task(foo(), loop=loop)) self.assertEqual(len(res), 2, res) self.assertEqual(res[0], (1, 'a')) self.assertEqual(res[1][0], 2) self.assertIsInstance(res[1][1], asyncio.TimeoutError) self.assertAlmostEqual(0.12, loop.time()) # move forward to close generator loop.advance_time(10) loop.run_until_complete(asyncio.wait([a, b], loop=loop)) def test_as_completed_with_unused_timeout(self): def gen(): yield yield 0 yield 0.01 loop = self.new_test_loop(gen) a = asyncio.sleep(0.01, 'a', loop=loop) @asyncio.coroutine def foo(): for f in asyncio.as_completed([a], timeout=1, loop=loop): v = yield from f self.assertEqual(v, 'a') loop.run_until_complete(asyncio.Task(foo(), loop=loop)) def test_as_completed_reverse_wait(self): def gen(): yield 0 yield 0.05 yield 0 loop = self.new_test_loop(gen) a = asyncio.sleep(0.05, 'a', loop=loop) b = asyncio.sleep(0.10, 'b', loop=loop) fs = {a, b} futs = list(asyncio.as_completed(fs, loop=loop)) self.assertEqual(len(futs), 2) x = loop.run_until_complete(futs[1]) self.assertEqual(x, 'a') self.assertAlmostEqual(0.05, loop.time()) loop.advance_time(0.05) y = loop.run_until_complete(futs[0]) self.assertEqual(y, 'b') self.assertAlmostEqual(0.10, loop.time()) def test_as_completed_concurrent(self): def gen(): when = yield self.assertAlmostEqual(0.05, when) when = yield 0 self.assertAlmostEqual(0.05, when) yield 0.05 loop = self.new_test_loop(gen) a = 
asyncio.sleep(0.05, 'a', loop=loop) b = asyncio.sleep(0.05, 'b', loop=loop) fs = {a, b} futs = list(asyncio.as_completed(fs, loop=loop)) self.assertEqual(len(futs), 2) waiter = asyncio.wait(futs, loop=loop) done, pending = loop.run_until_complete(waiter) self.assertEqual(set(f.result() for f in done), {'a', 'b'}) def test_as_completed_duplicate_coroutines(self): @asyncio.coroutine def coro(s): return s @asyncio.coroutine def runner(): result = [] c = coro('ham') for f in asyncio.as_completed([c, c, coro('spam')], loop=self.loop): result.append((yield from f)) return result fut = asyncio.Task(runner(), loop=self.loop) self.loop.run_until_complete(fut) result = fut.result() self.assertEqual(set(result), {'ham', 'spam'}) self.assertEqual(len(result), 2) def test_sleep(self): def gen(): when = yield self.assertAlmostEqual(0.05, when) when = yield 0.05 self.assertAlmostEqual(0.1, when) yield 0.05 loop = self.new_test_loop(gen) @asyncio.coroutine def sleeper(dt, arg): yield from asyncio.sleep(dt/2, loop=loop) res = yield from asyncio.sleep(dt/2, arg, loop=loop) return res t = asyncio.Task(sleeper(0.1, 'yeah'), loop=loop) loop.run_until_complete(t) self.assertTrue(t.done()) self.assertEqual(t.result(), 'yeah') self.assertAlmostEqual(0.1, loop.time()) def test_sleep_cancel(self): def gen(): when = yield self.assertAlmostEqual(10.0, when) yield 0 loop = self.new_test_loop(gen) t = asyncio.Task(asyncio.sleep(10.0, 'yeah', loop=loop), loop=loop) handle = None orig_call_later = loop.call_later def call_later(delay, callback, *args): nonlocal handle handle = orig_call_later(delay, callback, *args) return handle loop.call_later = call_later test_utils.run_briefly(loop) self.assertFalse(handle._cancelled) t.cancel() test_utils.run_briefly(loop) self.assertTrue(handle._cancelled) def test_task_cancel_sleeping_task(self): def gen(): when = yield self.assertAlmostEqual(0.1, when) when = yield 0 self.assertAlmostEqual(5000, when) yield 0.1 loop = self.new_test_loop(gen) 
@asyncio.coroutine def sleep(dt): yield from asyncio.sleep(dt, loop=loop) @asyncio.coroutine def doit(): sleeper = asyncio.Task(sleep(5000), loop=loop) loop.call_later(0.1, sleeper.cancel) try: yield from sleeper except asyncio.CancelledError: return 'cancelled' else: return 'slept in' doer = doit() self.assertEqual(loop.run_until_complete(doer), 'cancelled') self.assertAlmostEqual(0.1, loop.time()) def test_task_cancel_waiter_future(self): fut = asyncio.Future(loop=self.loop) @asyncio.coroutine def coro(): yield from fut task = asyncio.Task(coro(), loop=self.loop) test_utils.run_briefly(self.loop) self.assertIs(task._fut_waiter, fut) task.cancel() test_utils.run_briefly(self.loop) self.assertRaises( asyncio.CancelledError, self.loop.run_until_complete, task) self.assertIsNone(task._fut_waiter) self.assertTrue(fut.cancelled()) def test_step_in_completed_task(self): @asyncio.coroutine def notmuch(): return 'ko' gen = notmuch() task = asyncio.Task(gen, loop=self.loop) task.set_result('ok') self.assertRaises(AssertionError, task._step) gen.close() def test_step_result(self): @asyncio.coroutine def notmuch(): yield None yield 1 return 'ko' self.assertRaises( RuntimeError, self.loop.run_until_complete, notmuch()) def test_step_result_future(self): # If coroutine returns future, task waits on this future. 
class Fut(asyncio.Future): def __init__(self, *args, **kwds): self.cb_added = False super().__init__(*args, **kwds) def add_done_callback(self, fn): self.cb_added = True super().add_done_callback(fn) fut = Fut(loop=self.loop) result = None @asyncio.coroutine def wait_for_future(): nonlocal result result = yield from fut t = asyncio.Task(wait_for_future(), loop=self.loop) test_utils.run_briefly(self.loop) self.assertTrue(fut.cb_added) res = object() fut.set_result(res) test_utils.run_briefly(self.loop) self.assertIs(res, result) self.assertTrue(t.done()) self.assertIsNone(t.result()) def test_step_with_baseexception(self): @asyncio.coroutine def notmutch(): raise BaseException() task = asyncio.Task(notmutch(), loop=self.loop) self.assertRaises(BaseException, task._step) self.assertTrue(task.done()) self.assertIsInstance(task.exception(), BaseException) def test_baseexception_during_cancel(self): def gen(): when = yield self.assertAlmostEqual(10.0, when) yield 0 loop = self.new_test_loop(gen) @asyncio.coroutine def sleeper(): yield from asyncio.sleep(10, loop=loop) base_exc = BaseException() @asyncio.coroutine def notmutch(): try: yield from sleeper() except asyncio.CancelledError: raise base_exc task = asyncio.Task(notmutch(), loop=loop) test_utils.run_briefly(loop) task.cancel() self.assertFalse(task.done()) self.assertRaises(BaseException, test_utils.run_briefly, loop) self.assertTrue(task.done()) self.assertFalse(task.cancelled()) self.assertIs(task.exception(), base_exc) def test_iscoroutinefunction(self): def fn(): pass self.assertFalse(asyncio.iscoroutinefunction(fn)) def fn1(): yield self.assertFalse(asyncio.iscoroutinefunction(fn1)) @asyncio.coroutine def fn2(): yield self.assertTrue(asyncio.iscoroutinefunction(fn2)) def test_yield_vs_yield_from(self): fut = asyncio.Future(loop=self.loop) @asyncio.coroutine def wait_for_future(): yield fut task = wait_for_future() with self.assertRaises(RuntimeError): self.loop.run_until_complete(task) 
self.assertFalse(fut.done()) def test_yield_vs_yield_from_generator(self): @asyncio.coroutine def coro(): yield @asyncio.coroutine def wait_for_future(): gen = coro() try: yield gen finally: gen.close() task = wait_for_future() self.assertRaises( RuntimeError, self.loop.run_until_complete, task) def test_coroutine_non_gen_function(self): @asyncio.coroutine def func(): return 'test' self.assertTrue(asyncio.iscoroutinefunction(func)) coro = func() self.assertTrue(asyncio.iscoroutine(coro)) res = self.loop.run_until_complete(coro) self.assertEqual(res, 'test') def test_coroutine_non_gen_function_return_future(self): fut = asyncio.Future(loop=self.loop) @asyncio.coroutine def func(): return fut @asyncio.coroutine def coro(): fut.set_result('test') t1 = asyncio.Task(func(), loop=self.loop) t2 = asyncio.Task(coro(), loop=self.loop) res = self.loop.run_until_complete(t1) self.assertEqual(res, 'test') self.assertIsNone(t2.result()) def test_current_task(self): self.assertIsNone(asyncio.Task.current_task(loop=self.loop)) @asyncio.coroutine def coro(loop): self.assertTrue(asyncio.Task.current_task(loop=loop) is task) task = asyncio.Task(coro(self.loop), loop=self.loop) self.loop.run_until_complete(task) self.assertIsNone(asyncio.Task.current_task(loop=self.loop)) def test_current_task_with_interleaving_tasks(self): self.assertIsNone(asyncio.Task.current_task(loop=self.loop)) fut1 = asyncio.Future(loop=self.loop) fut2 = asyncio.Future(loop=self.loop) @asyncio.coroutine def coro1(loop): self.assertTrue(asyncio.Task.current_task(loop=loop) is task1) yield from fut1 self.assertTrue(asyncio.Task.current_task(loop=loop) is task1) fut2.set_result(True) @asyncio.coroutine def coro2(loop): self.assertTrue(asyncio.Task.current_task(loop=loop) is task2) fut1.set_result(True) yield from fut2 self.assertTrue(asyncio.Task.current_task(loop=loop) is task2) task1 = asyncio.Task(coro1(self.loop), loop=self.loop) task2 = asyncio.Task(coro2(self.loop), loop=self.loop) 
self.loop.run_until_complete(asyncio.wait((task1, task2), loop=self.loop)) self.assertIsNone(asyncio.Task.current_task(loop=self.loop)) # Some thorough tests for cancellation propagation through # coroutines, tasks and wait(). def test_yield_future_passes_cancel(self): # Cancelling outer() cancels inner() cancels waiter. proof = 0 waiter = asyncio.Future(loop=self.loop) @asyncio.coroutine def inner(): nonlocal proof try: yield from waiter except asyncio.CancelledError: proof += 1 raise else: self.fail('got past sleep() in inner()') @asyncio.coroutine def outer(): nonlocal proof try: yield from inner() except asyncio.CancelledError: proof += 100 # Expect this path. else: proof += 10 f = asyncio.async(outer(), loop=self.loop) test_utils.run_briefly(self.loop) f.cancel() self.loop.run_until_complete(f) self.assertEqual(proof, 101) self.assertTrue(waiter.cancelled()) def test_yield_wait_does_not_shield_cancel(self): # Cancelling outer() makes wait() return early, leaves inner() # running. proof = 0 waiter = asyncio.Future(loop=self.loop) @asyncio.coroutine def inner(): nonlocal proof yield from waiter proof += 1 @asyncio.coroutine def outer(): nonlocal proof d, p = yield from asyncio.wait([inner()], loop=self.loop) proof += 100 f = asyncio.async(outer(), loop=self.loop) test_utils.run_briefly(self.loop) f.cancel() self.assertRaises( asyncio.CancelledError, self.loop.run_until_complete, f) waiter.set_result(None) test_utils.run_briefly(self.loop) self.assertEqual(proof, 1) def test_shield_result(self): inner = asyncio.Future(loop=self.loop) outer = asyncio.shield(inner) inner.set_result(42) res = self.loop.run_until_complete(outer) self.assertEqual(res, 42) def test_shield_exception(self): inner = asyncio.Future(loop=self.loop) outer = asyncio.shield(inner) test_utils.run_briefly(self.loop) exc = RuntimeError('expected') inner.set_exception(exc) test_utils.run_briefly(self.loop) self.assertIs(outer.exception(), exc) def test_shield_cancel(self): inner = 
asyncio.Future(loop=self.loop) outer = asyncio.shield(inner) test_utils.run_briefly(self.loop) inner.cancel() test_utils.run_briefly(self.loop) self.assertTrue(outer.cancelled()) def test_shield_shortcut(self): fut = asyncio.Future(loop=self.loop) fut.set_result(42) res = self.loop.run_until_complete(asyncio.shield(fut)) self.assertEqual(res, 42) def test_shield_effect(self): # Cancelling outer() does not affect inner(). proof = 0 waiter = asyncio.Future(loop=self.loop) @asyncio.coroutine def inner(): nonlocal proof yield from waiter proof += 1 @asyncio.coroutine def outer(): nonlocal proof yield from asyncio.shield(inner(), loop=self.loop) proof += 100 f = asyncio.async(outer(), loop=self.loop) test_utils.run_briefly(self.loop) f.cancel() with self.assertRaises(asyncio.CancelledError): self.loop.run_until_complete(f) waiter.set_result(None) test_utils.run_briefly(self.loop) self.assertEqual(proof, 1) def test_shield_gather(self): child1 = asyncio.Future(loop=self.loop) child2 = asyncio.Future(loop=self.loop) parent = asyncio.gather(child1, child2, loop=self.loop) outer = asyncio.shield(parent, loop=self.loop) test_utils.run_briefly(self.loop) outer.cancel() test_utils.run_briefly(self.loop) self.assertTrue(outer.cancelled()) child1.set_result(1) child2.set_result(2) test_utils.run_briefly(self.loop) self.assertEqual(parent.result(), [1, 2]) def test_gather_shield(self): child1 = asyncio.Future(loop=self.loop) child2 = asyncio.Future(loop=self.loop) inner1 = asyncio.shield(child1, loop=self.loop) inner2 = asyncio.shield(child2, loop=self.loop) parent = asyncio.gather(inner1, inner2, loop=self.loop) test_utils.run_briefly(self.loop) parent.cancel() # This should cancel inner1 and inner2 but bot child1 and child2. 
test_utils.run_briefly(self.loop) self.assertIsInstance(parent.exception(), asyncio.CancelledError) self.assertTrue(inner1.cancelled()) self.assertTrue(inner2.cancelled()) child1.set_result(1) child2.set_result(2) test_utils.run_briefly(self.loop) def test_as_completed_invalid_args(self): fut = asyncio.Future(loop=self.loop) # as_completed() expects a list of futures, not a future instance self.assertRaises(TypeError, self.loop.run_until_complete, asyncio.as_completed(fut, loop=self.loop)) coro = coroutine_function() self.assertRaises(TypeError, self.loop.run_until_complete, asyncio.as_completed(coro, loop=self.loop)) coro.close() def test_wait_invalid_args(self): fut = asyncio.Future(loop=self.loop) # wait() expects a list of futures, not a future instance self.assertRaises(TypeError, self.loop.run_until_complete, asyncio.wait(fut, loop=self.loop)) coro = coroutine_function() self.assertRaises(TypeError, self.loop.run_until_complete, asyncio.wait(coro, loop=self.loop)) coro.close() # wait() expects at least a future self.assertRaises(ValueError, self.loop.run_until_complete, asyncio.wait([], loop=self.loop)) def test_corowrapper_mocks_generator(self): def check(): # A function that asserts various things. # Called twice, with different debug flag values. @asyncio.coroutine def coro(): # The actual coroutine. self.assertTrue(gen.gi_running) yield from fut # A completed Future used to run the coroutine. fut = asyncio.Future(loop=self.loop) fut.set_result(None) # Call the coroutine. gen = coro() # Check some properties. self.assertTrue(asyncio.iscoroutine(gen)) self.assertIsInstance(gen.gi_frame, types.FrameType) self.assertFalse(gen.gi_running) self.assertIsInstance(gen.gi_code, types.CodeType) # Run it. self.loop.run_until_complete(gen) # The frame should have changed. self.assertIsNone(gen.gi_frame) # Save debug flag. old_debug = asyncio.coroutines._DEBUG try: # Test with debug flag cleared. asyncio.coroutines._DEBUG = False check() # Test with debug flag set. 
asyncio.coroutines._DEBUG = True check() finally: # Restore original debug flag. asyncio.coroutines._DEBUG = old_debug def test_yield_from_corowrapper(self): old_debug = asyncio.coroutines._DEBUG asyncio.coroutines._DEBUG = True try: @asyncio.coroutine def t1(): return (yield from t2()) @asyncio.coroutine def t2(): f = asyncio.Future(loop=self.loop) asyncio.Task(t3(f), loop=self.loop) return (yield from f) @asyncio.coroutine def t3(f): f.set_result((1, 2, 3)) task = asyncio.Task(t1(), loop=self.loop) val = self.loop.run_until_complete(task) self.assertEqual(val, (1, 2, 3)) finally: asyncio.coroutines._DEBUG = old_debug def test_yield_from_corowrapper_send(self): def foo(): a = yield return a def call(arg): cw = asyncio.coroutines.CoroWrapper(foo(), foo) cw.send(None) try: cw.send(arg) except StopIteration as ex: return ex.args[0] else: raise AssertionError('StopIteration was expected') self.assertEqual(call((1, 2)), (1, 2)) self.assertEqual(call('spam'), 'spam') def test_corowrapper_weakref(self): wd = weakref.WeakValueDictionary() def foo(): yield from [] cw = asyncio.coroutines.CoroWrapper(foo(), foo) wd['cw'] = cw # Would fail without __weakref__ slot. cw.gen = None # Suppress warning from __del__. 
    @unittest.skipUnless(PY34, 'need python 3.4 or later')
    def test_log_destroyed_pending_task(self):
        # A pending task that gets garbage-collected must be reported to
        # the loop's exception handler with its creation traceback.
        @asyncio.coroutine
        def kill_me(loop):
            future = asyncio.Future(loop=loop)
            yield from future
            # at this point, the only reference to kill_me() task is
            # the Task._wakeup() method in future._callbacks
            raise Exception("code never reached")

        mock_handler = mock.Mock()
        # Debug mode is required for the task to record _source_traceback.
        self.loop.set_debug(True)
        self.loop.set_exception_handler(mock_handler)

        # schedule the task
        coro = kill_me(self.loop)
        task = asyncio.async(coro, loop=self.loop)
        self.assertEqual(asyncio.Task.all_tasks(loop=self.loop), {task})

        # execute the task so it waits for future
        self.loop._run_once()
        self.assertEqual(len(self.loop._ready), 0)

        # remove the future used in kill_me(), and references to the task
        # (the frame-local deletion breaks the future->task callback cycle)
        del coro.gi_frame.f_locals['future']
        coro = None
        source_traceback = task._source_traceback
        task = None

        # no more reference to kill_me() task: the task is destroyed by the GC
        support.gc_collect()

        self.assertEqual(asyncio.Task.all_tasks(loop=self.loop), set())

        # The handler must have been invoked with the pending-task warning
        # and the traceback captured when the task was created above.
        mock_handler.assert_called_with(self.loop, {
            'message': 'Task was destroyed but it is pending!',
            'task': mock.ANY,
            'source_traceback': source_traceback,
        })
        mock_handler.reset_mock()
re.escape(func_filename), func_lineno, re.escape(tb_filename), tb_lineno)) self.assertRegex(message, re.compile(regex, re.DOTALL)) def test_task_source_traceback(self): self.loop.set_debug(True) task = asyncio.Task(coroutine_function(), loop=self.loop) lineno = sys._getframe().f_lineno - 1 self.assertIsInstance(task._source_traceback, list) self.assertEqual(task._source_traceback[-1][:3], (__file__, lineno, 'test_task_source_traceback')) self.loop.run_until_complete(task) def _test_cancel_wait_for(self, timeout): loop = asyncio.new_event_loop() self.addCleanup(loop.close) @asyncio.coroutine def blocking_coroutine(): fut = asyncio.Future(loop=loop) # Block: fut result is never set yield from fut task = loop.create_task(blocking_coroutine()) wait = loop.create_task(asyncio.wait_for(task, timeout, loop=loop)) loop.call_soon(wait.cancel) self.assertRaises(asyncio.CancelledError, loop.run_until_complete, wait) # Python issue #23219: cancelling the wait must also cancel the task self.assertTrue(task.cancelled()) def test_cancel_blocking_wait_for(self): self._test_cancel_wait_for(None) def test_cancel_wait_for(self): self._test_cancel_wait_for(60.0) class GatherTestsBase: def setUp(self): self.one_loop = self.new_test_loop() self.other_loop = self.new_test_loop() self.set_event_loop(self.one_loop, cleanup=False) def _run_loop(self, loop): while loop._ready: test_utils.run_briefly(loop) def _check_success(self, **kwargs): a, b, c = [asyncio.Future(loop=self.one_loop) for i in range(3)] fut = asyncio.gather(*self.wrap_futures(a, b, c), **kwargs) cb = test_utils.MockCallback() fut.add_done_callback(cb) b.set_result(1) a.set_result(2) self._run_loop(self.one_loop) self.assertEqual(cb.called, False) self.assertFalse(fut.done()) c.set_result(3) self._run_loop(self.one_loop) cb.assert_called_once_with(fut) self.assertEqual(fut.result(), [2, 1, 3]) def test_success(self): self._check_success() self._check_success(return_exceptions=False) def test_result_exception_success(self): 
self._check_success(return_exceptions=True) def test_one_exception(self): a, b, c, d, e = [asyncio.Future(loop=self.one_loop) for i in range(5)] fut = asyncio.gather(*self.wrap_futures(a, b, c, d, e)) cb = test_utils.MockCallback() fut.add_done_callback(cb) exc = ZeroDivisionError() a.set_result(1) b.set_exception(exc) self._run_loop(self.one_loop) self.assertTrue(fut.done()) cb.assert_called_once_with(fut) self.assertIs(fut.exception(), exc) # Does nothing c.set_result(3) d.cancel() e.set_exception(RuntimeError()) e.exception() def test_return_exceptions(self): a, b, c, d = [asyncio.Future(loop=self.one_loop) for i in range(4)] fut = asyncio.gather(*self.wrap_futures(a, b, c, d), return_exceptions=True) cb = test_utils.MockCallback() fut.add_done_callback(cb) exc = ZeroDivisionError() exc2 = RuntimeError() b.set_result(1) c.set_exception(exc) a.set_result(3) self._run_loop(self.one_loop) self.assertFalse(fut.done()) d.set_exception(exc2) self._run_loop(self.one_loop) self.assertTrue(fut.done()) cb.assert_called_once_with(fut) self.assertEqual(fut.result(), [3, 1, exc, exc2]) def test_env_var_debug(self): aio_path = os.path.dirname(os.path.dirname(asyncio.__file__)) code = '\n'.join(( 'import asyncio.coroutines', 'print(asyncio.coroutines._DEBUG)')) # Test with -E to not fail if the unit test was run with # PYTHONASYNCIODEBUG set to a non-empty string sts, stdout, stderr = assert_python_ok('-E', '-c', code, PYTHONPATH=aio_path) self.assertEqual(stdout.rstrip(), b'False') sts, stdout, stderr = assert_python_ok('-c', code, PYTHONASYNCIODEBUG='', PYTHONPATH=aio_path) self.assertEqual(stdout.rstrip(), b'False') sts, stdout, stderr = assert_python_ok('-c', code, PYTHONASYNCIODEBUG='1', PYTHONPATH=aio_path) self.assertEqual(stdout.rstrip(), b'True') sts, stdout, stderr = assert_python_ok('-E', '-c', code, PYTHONASYNCIODEBUG='1', PYTHONPATH=aio_path) self.assertEqual(stdout.rstrip(), b'False') class FutureGatherTests(GatherTestsBase, test_utils.TestCase): def 
wrap_futures(self, *futures): return futures def _check_empty_sequence(self, seq_or_iter): asyncio.set_event_loop(self.one_loop) self.addCleanup(asyncio.set_event_loop, None) fut = asyncio.gather(*seq_or_iter) self.assertIsInstance(fut, asyncio.Future) self.assertIs(fut._loop, self.one_loop) self._run_loop(self.one_loop) self.assertTrue(fut.done()) self.assertEqual(fut.result(), []) fut = asyncio.gather(*seq_or_iter, loop=self.other_loop) self.assertIs(fut._loop, self.other_loop) def test_constructor_empty_sequence(self): self._check_empty_sequence([]) self._check_empty_sequence(()) self._check_empty_sequence(set()) self._check_empty_sequence(iter("")) def test_constructor_heterogenous_futures(self): fut1 = asyncio.Future(loop=self.one_loop) fut2 = asyncio.Future(loop=self.other_loop) with self.assertRaises(ValueError): asyncio.gather(fut1, fut2) with self.assertRaises(ValueError): asyncio.gather(fut1, loop=self.other_loop) def test_constructor_homogenous_futures(self): children = [asyncio.Future(loop=self.other_loop) for i in range(3)] fut = asyncio.gather(*children) self.assertIs(fut._loop, self.other_loop) self._run_loop(self.other_loop) self.assertFalse(fut.done()) fut = asyncio.gather(*children, loop=self.other_loop) self.assertIs(fut._loop, self.other_loop) self._run_loop(self.other_loop) self.assertFalse(fut.done()) def test_one_cancellation(self): a, b, c, d, e = [asyncio.Future(loop=self.one_loop) for i in range(5)] fut = asyncio.gather(a, b, c, d, e) cb = test_utils.MockCallback() fut.add_done_callback(cb) a.set_result(1) b.cancel() self._run_loop(self.one_loop) self.assertTrue(fut.done()) cb.assert_called_once_with(fut) self.assertFalse(fut.cancelled()) self.assertIsInstance(fut.exception(), asyncio.CancelledError) # Does nothing c.set_result(3) d.cancel() e.set_exception(RuntimeError()) e.exception() def test_result_exception_one_cancellation(self): a, b, c, d, e, f = [asyncio.Future(loop=self.one_loop) for i in range(6)] fut = asyncio.gather(a, b, c, 
d, e, f, return_exceptions=True) cb = test_utils.MockCallback() fut.add_done_callback(cb) a.set_result(1) zde = ZeroDivisionError() b.set_exception(zde) c.cancel() self._run_loop(self.one_loop) self.assertFalse(fut.done()) d.set_result(3) e.cancel() rte = RuntimeError() f.set_exception(rte) res = self.one_loop.run_until_complete(fut) self.assertIsInstance(res[2], asyncio.CancelledError) self.assertIsInstance(res[4], asyncio.CancelledError) res[2] = res[4] = None self.assertEqual(res, [1, zde, None, 3, None, rte]) cb.assert_called_once_with(fut) class CoroutineGatherTests(GatherTestsBase, test_utils.TestCase): def setUp(self): super().setUp() asyncio.set_event_loop(self.one_loop) def wrap_futures(self, *futures): coros = [] for fut in futures: @asyncio.coroutine def coro(fut=fut): return (yield from fut) coros.append(coro()) return coros def test_constructor_loop_selection(self): @asyncio.coroutine def coro(): return 'abc' gen1 = coro() gen2 = coro() fut = asyncio.gather(gen1, gen2) self.assertIs(fut._loop, self.one_loop) self.one_loop.run_until_complete(fut) self.set_event_loop(self.other_loop, cleanup=False) gen3 = coro() gen4 = coro() fut2 = asyncio.gather(gen3, gen4, loop=self.other_loop) self.assertIs(fut2._loop, self.other_loop) self.other_loop.run_until_complete(fut2) def test_duplicate_coroutines(self): @asyncio.coroutine def coro(s): return s c = coro('abc') fut = asyncio.gather(c, c, coro('def'), c, loop=self.one_loop) self._run_loop(self.one_loop) self.assertEqual(fut.result(), ['abc', 'abc', 'def', 'abc']) def test_cancellation_broadcast(self): # Cancelling outer() cancels all children. 
    def test_exception_marking(self):
        # Test for the first line marked "Mark exception retrieved."
        # Two children raise the same way; gather() must mark the second
        # exception as retrieved so no "exception never retrieved"
        # warning is logged when only the first one propagates.

        @asyncio.coroutine
        def inner(f):
            yield from f
            raise RuntimeError('should not be ignored')

        a = asyncio.Future(loop=self.one_loop)
        b = asyncio.Future(loop=self.one_loop)

        @asyncio.coroutine
        def outer():
            yield from asyncio.gather(inner(a), inner(b),
                                      loop=self.one_loop)

        f = asyncio.async(outer(), loop=self.one_loop)
        test_utils.run_briefly(self.one_loop)
        # Completing each future lets the corresponding inner() raise.
        a.set_result(None)
        test_utils.run_briefly(self.one_loop)
        b.set_result(None)
        test_utils.run_briefly(self.one_loop)
        # The first child's RuntimeError is what outer() propagates.
        self.assertIsInstance(f.exception(), RuntimeError)
dablak/boto
refs/heads/develop
tests/unit/glacier/test_writer.py
18
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
def partify(data, part_size):
    """Yield successive ``part_size``-sized slices of ``data``.

    The final slice may be shorter than ``part_size``; iteration stops
    as soon as a slice comes back empty.
    """
    offset = 0
    while True:
        piece = data[offset:offset + part_size]
        if not piece:
            return
        yield piece
        offset += part_size
power of 2 self.part_size = 4 # power of 2 upload_id = sentinel.upload_id self.writer = Writer( self.vault, upload_id, self.part_size, self.chunk_size) def check_write(self, write_list): for write_data in write_list: self.writer.write(write_data) self.writer.close() data = ''.join(write_list) upload_part_calls, data_tree_hashes = calculate_mock_vault_calls( data, self.part_size, self.chunk_size) check_mock_vault_calls( self.vault, upload_part_calls, data_tree_hashes, len(data)) def test_single_byte_write(self): self.check_write(['1']) def test_one_part_write(self): self.check_write(['1234']) def test_split_write_1(self): self.check_write(['1', '234']) def test_split_write_2(self): self.check_write(['12', '34']) def test_split_write_3(self): self.check_write(['123', '4']) def test_one_part_plus_one_write(self): self.check_write(['12345']) def test_returns_archive_id(self): self.writer.write('1') self.writer.close() self.assertEquals(sentinel.archive_id, self.writer.get_archive_id()) def test_current_tree_hash(self): self.writer.write('1234') self.writer.write('567') hash_1 = self.writer.current_tree_hash self.assertEqual(hash_1, '\x0e\xb0\x11Z\x1d\x1f\n\x10|\xf76\xa6\xf5' + '\x83\xd1\xd5"bU\x0c\x95\xa8<\xf5\x81\xef\x0e\x0f\x95\n\xb7k' ) # This hash will be different, since the content has changed. self.writer.write('22i3uy') hash_2 = self.writer.current_tree_hash self.assertEqual(hash_2, '\x7f\xf4\x97\x82U]\x81R\x05#^\xe8\x1c\xd19' + '\xe8\x1f\x9e\xe0\x1aO\xaad\xe5\x06"\xa5\xc0\xa8AdL' ) self.writer.close() # Check the final tree hash, post-close. final_hash = self.writer.current_tree_hash self.assertEqual(final_hash, ';\x1a\xb8!=\xf0\x14#\x83\x11\xd5\x0b\x0f' + '\xc7D\xe4\x8e\xd1W\x99z\x14\x06\xb9D\xd0\xf0*\x93\xa2\x8e\xf9' ) # Then assert we don't get a different one on a subsequent call. 
self.assertEqual(final_hash, self.writer.current_tree_hash) def test_current_uploaded_size(self): self.writer.write('1234') self.writer.write('567') size_1 = self.writer.current_uploaded_size self.assertEqual(size_1, 4) # This hash will be different, since the content has changed. self.writer.write('22i3uy') size_2 = self.writer.current_uploaded_size self.assertEqual(size_2, 12) self.writer.close() # Get the final size, post-close. final_size = self.writer.current_uploaded_size self.assertEqual(final_size, 13) # Then assert we don't get a different one on a subsequent call. self.assertEqual(final_size, self.writer.current_uploaded_size) def test_upload_id(self): self.assertEquals(sentinel.upload_id, self.writer.upload_id) class TestResume(unittest.TestCase): def setUp(self): super(TestResume, self).setUp() self.vault = create_mock_vault() self.chunk_size = 2 # power of 2 self.part_size = 4 # power of 2 def check_no_resume(self, data, resume_set=set()): fobj = StringIO(data) part_hash_map = {} for part_index in resume_set: start = self.part_size * part_index end = start + self.part_size part_data = data[start:end] part_hash_map[part_index] = tree_hash( chunk_hashes(part_data, self.chunk_size)) resume_file_upload( self.vault, sentinel.upload_id, self.part_size, fobj, part_hash_map, self.chunk_size) upload_part_calls, data_tree_hashes = calculate_mock_vault_calls( data, self.part_size, self.chunk_size) resume_upload_part_calls = [ call for part_index, call in enumerate(upload_part_calls) if part_index not in resume_set] check_mock_vault_calls( self.vault, resume_upload_part_calls, data_tree_hashes, len(data)) def test_one_part_no_resume(self): self.check_no_resume('1234') def test_two_parts_no_resume(self): self.check_no_resume('12345678') def test_one_part_resume(self): self.check_no_resume('1234', resume_set=set([0])) def test_two_parts_one_resume(self): self.check_no_resume('12345678', resume_set=set([1])) def test_returns_archive_id(self): archive_id = 
resume_file_upload( self.vault, sentinel.upload_id, self.part_size, StringIO('1'), {}, self.chunk_size) self.assertEquals(sentinel.archive_id, archive_id)
harshilasu/LinkurApp
refs/heads/master
y/google-cloud-sdk/platform/gsutil/third_party/boto/tests/integration/sns/test_sns_sqs_subscription.py
12
# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/ # Copyright (c) 2010, Eucalyptus Systems, Inc. # All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. """ Unit tests for subscribing SQS queues to SNS topics. 
""" import hashlib import time from tests.unit import unittest from boto.compat import json from boto.sqs.connection import SQSConnection from boto.sns.connection import SNSConnection class SNSSubcribeSQSTest(unittest.TestCase): sqs = True sns = True def setUp(self): self.sqsc = SQSConnection() self.snsc = SNSConnection() def get_policy_statements(self, queue): attrs = queue.get_attributes('Policy') policy = json.loads(attrs.get('Policy', "{}")) return policy.get('Statement', {}) def test_correct_sid(self): now = time.time() topic_name = queue_name = "test_correct_sid%d" % (now) timeout = 60 queue = self.sqsc.create_queue(queue_name, timeout) self.addCleanup(self.sqsc.delete_queue, queue, True) queue_arn = queue.arn topic = self.snsc.create_topic(topic_name) topic_arn = topic['CreateTopicResponse']['CreateTopicResult']\ ['TopicArn'] self.addCleanup(self.snsc.delete_topic, topic_arn) expected_sid = hashlib.md5(topic_arn + queue_arn).hexdigest() resp = self.snsc.subscribe_sqs_queue(topic_arn, queue) found_expected_sid = False statements = self.get_policy_statements(queue) for statement in statements: if statement['Sid'] == expected_sid: found_expected_sid = True break self.assertTrue(found_expected_sid) def test_idempotent_subscribe(self): now = time.time() topic_name = queue_name = "test_idempotent_subscribe%d" % (now) timeout = 60 queue = self.sqsc.create_queue(queue_name, timeout) self.addCleanup(self.sqsc.delete_queue, queue, True) initial_statements = self.get_policy_statements(queue) queue_arn = queue.arn topic = self.snsc.create_topic(topic_name) topic_arn = topic['CreateTopicResponse']['CreateTopicResult']\ ['TopicArn'] self.addCleanup(self.snsc.delete_topic, topic_arn) resp = self.snsc.subscribe_sqs_queue(topic_arn, queue) time.sleep(3) first_subscribe_statements = self.get_policy_statements(queue) self.assertEqual(len(first_subscribe_statements), len(initial_statements) + 1) resp2 = self.snsc.subscribe_sqs_queue(topic_arn, queue) time.sleep(3) 
second_subscribe_statements = self.get_policy_statements(queue) self.assertEqual(len(second_subscribe_statements), len(first_subscribe_statements))
methane/FrameworkBenchmarks
refs/heads/master
frameworks/CSharp/nancy/setup_iis.py
65
def stop(logfile, errfile):
    """Stop the IIS-hosted benchmark app.

    Mirrors start(): a no-op (success) on non-Windows hosts; returns 0 on
    success and 1 on failure.

    Fix: the sibling start() catches subprocess.CalledProcessError and
    returns 1, but stop() let it propagate to the benchmark harness.
    The teardown script's failure is now reported the same way.
    """
    if os.name != 'nt':
        # IIS only exists on Windows; nothing to stop elsewhere.
        return 0
    try:
        subprocess.check_call(
            "powershell -Command .\\setup_iis.ps1 stop",
            cwd="nancy", stderr=errfile, stdout=logfile)
        return 0
    except subprocess.CalledProcessError:
        return 1
sugarlabs/sugar
refs/heads/master
src/jarabe/controlpanel/cmd.py
2
def load_modules():
    """Import every available control-panel section model.

    Scans the ``cpsection`` extensions directory and imports the
    ``model`` module of each section directory that provides one,
    returning the list of successfully imported modules.
    """
    base = os.path.join(config.ext_path, 'cpsection')
    modules = []
    for entry in os.listdir(base):
        section_dir = os.path.join(base, entry)
        if not os.path.isdir(section_dir):
            continue
        if not os.path.exists(os.path.join(section_dir, 'model.py')):
            continue
        try:
            module = __import__('.'.join(('cpsection', entry, 'model')),
                                globals(), locals(), ['model'])
        except Exception:
            # A broken extension must not keep the others from loading.
            logging.exception('Exception while loading extension:')
        else:
            modules.append(module)
    return modules
0 found += 1 if found == 1: try: note = method(*args) except Exception as detail: print(_general_error % detail) if note == _RESTART: note_restart() else: print(_same_option_warning % {'key': key, 'module': module}) if found == 0: print(_no_option_error % key)
kleientertainment/ds_mod_tools
refs/heads/master
pkg/win32/Python27/Lib/multiprocessing/managers.py
5
# # Module providing the `SyncManager` class for dealing # with shared objects # # multiprocessing/managers.py # # Copyright (c) 2006-2008, R Oudkerk # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of author nor the names of any contributors may be # used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. 
def convert_to_error(kind, result):
    '''
    Turn a (kind, payload) reply from the manager's server into an
    exception instance for the caller to raise.
    '''
    if kind == '#ERROR':
        # The payload already is the exception instance.
        return result
    if kind in ('#TRACEBACK', '#UNSERIALIZABLE'):
        assert type(result) is str
        if kind == '#UNSERIALIZABLE':
            result = 'Unserializable message: %s\n' % result
        return RemoteError(result)
    return ValueError('Unrecognized message type')
def all_methods(obj):
    '''
    Return a list of names of methods of `obj`
    '''
    return [name for name in dir(obj)
            if hasattr(getattr(obj, name), '__call__')]
*args, **kwds) except Exception: msg = ('#TRACEBACK', format_exc()) else: msg = ('#RETURN', result) try: c.send(msg) except Exception, e: try: c.send(('#TRACEBACK', format_exc())) except Exception: pass util.info('Failure to send message: %r', msg) util.info(' ... request was %r', request) util.info(' ... exception was %r', e) c.close() def serve_client(self, conn): ''' Handle requests from the proxies in a particular process/thread ''' util.debug('starting server thread to service %r', threading.current_thread().name) recv = conn.recv send = conn.send id_to_obj = self.id_to_obj while not self.stop: try: methodname = obj = None request = recv() ident, methodname, args, kwds = request obj, exposed, gettypeid = id_to_obj[ident] if methodname not in exposed: raise AttributeError( 'method %r of %r object is not in exposed=%r' % (methodname, type(obj), exposed) ) function = getattr(obj, methodname) try: res = function(*args, **kwds) except Exception, e: msg = ('#ERROR', e) else: typeid = gettypeid and gettypeid.get(methodname, None) if typeid: rident, rexposed = self.create(conn, typeid, res) token = Token(typeid, self.address, rident) msg = ('#PROXY', (rexposed, token)) else: msg = ('#RETURN', res) except AttributeError: if methodname is None: msg = ('#TRACEBACK', format_exc()) else: try: fallback_func = self.fallback_mapping[methodname] result = fallback_func( self, conn, ident, obj, *args, **kwds ) msg = ('#RETURN', result) except Exception: msg = ('#TRACEBACK', format_exc()) except EOFError: util.debug('got EOF -- exiting thread serving %r', threading.current_thread().name) sys.exit(0) except Exception: msg = ('#TRACEBACK', format_exc()) try: try: send(msg) except Exception, e: send(('#UNSERIALIZABLE', repr(msg))) except Exception, e: util.info('exception in thread serving %r', threading.current_thread().name) util.info(' ... message was %r', msg) util.info(' ... 
exception was %r', e) conn.close() sys.exit(1) def fallback_getvalue(self, conn, ident, obj): return obj def fallback_str(self, conn, ident, obj): return str(obj) def fallback_repr(self, conn, ident, obj): return repr(obj) fallback_mapping = { '__str__':fallback_str, '__repr__':fallback_repr, '#GETVALUE':fallback_getvalue } def dummy(self, c): pass def debug_info(self, c): ''' Return some info --- useful to spot problems with refcounting ''' self.mutex.acquire() try: result = [] keys = self.id_to_obj.keys() keys.sort() for ident in keys: if ident != '0': result.append(' %s: refcount=%s\n %s' % (ident, self.id_to_refcount[ident], str(self.id_to_obj[ident][0])[:75])) return '\n'.join(result) finally: self.mutex.release() def number_of_objects(self, c): ''' Number of shared objects ''' return len(self.id_to_obj) - 1 # don't count ident='0' def shutdown(self, c): ''' Shutdown this process ''' try: try: util.debug('manager received shutdown message') c.send(('#RETURN', None)) if sys.stdout != sys.__stdout__: util.debug('resetting stdout, stderr') sys.stdout = sys.__stdout__ sys.stderr = sys.__stderr__ util._run_finalizers(0) for p in active_children(): util.debug('terminating a child process of manager') p.terminate() for p in active_children(): util.debug('terminating a child process of manager') p.join() util._run_finalizers() util.info('manager exiting with exitcode 0') except: import traceback traceback.print_exc() finally: exit(0) def create(self, c, typeid, *args, **kwds): ''' Create a new shared object and return its id ''' self.mutex.acquire() try: callable, exposed, method_to_typeid, proxytype = \ self.registry[typeid] if callable is None: assert len(args) == 1 and not kwds obj = args[0] else: obj = callable(*args, **kwds) if exposed is None: exposed = public_methods(obj) if method_to_typeid is not None: assert type(method_to_typeid) is dict exposed = list(exposed) + list(method_to_typeid) ident = '%x' % id(obj) # convert to string because xmlrpclib # only has 
32 bit signed integers util.debug('%r callable returned object with id %r', typeid, ident) self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid) if ident not in self.id_to_refcount: self.id_to_refcount[ident] = 0 # increment the reference count immediately, to avoid # this object being garbage collected before a Proxy # object for it can be created. The caller of create() # is responsible for doing a decref once the Proxy object # has been created. self.incref(c, ident) return ident, tuple(exposed) finally: self.mutex.release() def get_methods(self, c, token): ''' Return the methods of the shared object indicated by token ''' return tuple(self.id_to_obj[token.id][1]) def accept_connection(self, c, name): ''' Spawn a new thread to serve this connection ''' threading.current_thread().name = name c.send(('#RETURN', None)) self.serve_client(c) def incref(self, c, ident): self.mutex.acquire() try: self.id_to_refcount[ident] += 1 finally: self.mutex.release() def decref(self, c, ident): self.mutex.acquire() try: assert self.id_to_refcount[ident] >= 1 self.id_to_refcount[ident] -= 1 if self.id_to_refcount[ident] == 0: del self.id_to_obj[ident], self.id_to_refcount[ident] util.debug('disposing of obj with id %r', ident) finally: self.mutex.release() # # Class to represent state of a manager # class State(object): __slots__ = ['value'] INITIAL = 0 STARTED = 1 SHUTDOWN = 2 # # Mapping from serializer name to Listener and Client types # listener_client = { 'pickle' : (connection.Listener, connection.Client), 'xmlrpclib' : (connection.XmlListener, connection.XmlClient) } # # Definition of BaseManager # class BaseManager(object): ''' Base class for managers ''' _registry = {} _Server = Server def __init__(self, address=None, authkey=None, serializer='pickle'): if authkey is None: authkey = current_process().authkey self._address = address # XXX not final address if eg ('', 0) self._authkey = AuthenticationString(authkey) self._state = State() self._state.value = 
State.INITIAL self._serializer = serializer self._Listener, self._Client = listener_client[serializer] def __reduce__(self): return type(self).from_address, \ (self._address, self._authkey, self._serializer) def get_server(self): ''' Return server object with serve_forever() method and address attribute ''' assert self._state.value == State.INITIAL return Server(self._registry, self._address, self._authkey, self._serializer) def connect(self): ''' Connect manager object to the server process ''' Listener, Client = listener_client[self._serializer] conn = Client(self._address, authkey=self._authkey) dispatch(conn, None, 'dummy') self._state.value = State.STARTED def start(self, initializer=None, initargs=()): ''' Spawn a server process for this manager object ''' assert self._state.value == State.INITIAL if initializer is not None and not hasattr(initializer, '__call__'): raise TypeError('initializer must be a callable') # pipe over which we will retrieve address of server reader, writer = connection.Pipe(duplex=False) # spawn process which runs a server self._process = Process( target=type(self)._run_server, args=(self._registry, self._address, self._authkey, self._serializer, writer, initializer, initargs), ) ident = ':'.join(str(i) for i in self._process._identity) self._process.name = type(self).__name__ + '-' + ident self._process.start() # get address of server writer.close() self._address = reader.recv() reader.close() # register a finalizer self._state.value = State.STARTED self.shutdown = util.Finalize( self, type(self)._finalize_manager, args=(self._process, self._address, self._authkey, self._state, self._Client), exitpriority=0 ) @classmethod def _run_server(cls, registry, address, authkey, serializer, writer, initializer=None, initargs=()): ''' Create a server, report its address and run it ''' if initializer is not None: initializer(*initargs) # create server server = cls._Server(registry, address, authkey, serializer) # inform parent process of the 
server's address writer.send(server.address) writer.close() # run the manager util.info('manager serving at %r', server.address) server.serve_forever() def _create(self, typeid, *args, **kwds): ''' Create a new shared object; return the token and exposed tuple ''' assert self._state.value == State.STARTED, 'server not yet started' conn = self._Client(self._address, authkey=self._authkey) try: id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds) finally: conn.close() return Token(typeid, self._address, id), exposed def join(self, timeout=None): ''' Join the manager process (if it has been spawned) ''' self._process.join(timeout) def _debug_info(self): ''' Return some info about the servers shared objects and connections ''' conn = self._Client(self._address, authkey=self._authkey) try: return dispatch(conn, None, 'debug_info') finally: conn.close() def _number_of_objects(self): ''' Return the number of shared objects ''' conn = self._Client(self._address, authkey=self._authkey) try: return dispatch(conn, None, 'number_of_objects') finally: conn.close() def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.shutdown() @staticmethod def _finalize_manager(process, address, authkey, state, _Client): ''' Shutdown the manager process; will be registered as a finalizer ''' if process.is_alive(): util.info('sending shutdown message to manager') try: conn = _Client(address, authkey=authkey) try: dispatch(conn, None, 'shutdown') finally: conn.close() except Exception: pass process.join(timeout=0.2) if process.is_alive(): util.info('manager still alive') if hasattr(process, 'terminate'): util.info('trying to `terminate()` manager process') process.terminate() process.join(timeout=0.1) if process.is_alive(): util.info('manager still alive after terminate') state.value = State.SHUTDOWN try: del BaseProxy._address_to_local[address] except KeyError: pass address = property(lambda self: self._address) @classmethod def register(cls, 
typeid, callable=None, proxytype=None, exposed=None, method_to_typeid=None, create_method=True): ''' Register a typeid with the manager type ''' if '_registry' not in cls.__dict__: cls._registry = cls._registry.copy() if proxytype is None: proxytype = AutoProxy exposed = exposed or getattr(proxytype, '_exposed_', None) method_to_typeid = method_to_typeid or \ getattr(proxytype, '_method_to_typeid_', None) if method_to_typeid: for key, value in method_to_typeid.items(): assert type(key) is str, '%r is not a string' % key assert type(value) is str, '%r is not a string' % value cls._registry[typeid] = ( callable, exposed, method_to_typeid, proxytype ) if create_method: def temp(self, *args, **kwds): util.debug('requesting creation of a shared %r object', typeid) token, exp = self._create(typeid, *args, **kwds) proxy = proxytype( token, self._serializer, manager=self, authkey=self._authkey, exposed=exp ) conn = self._Client(token.address, authkey=self._authkey) dispatch(conn, None, 'decref', (token.id,)) return proxy temp.__name__ = typeid setattr(cls, typeid, temp) # # Subclass of set which get cleared after a fork # class ProcessLocalSet(set): def __init__(self): util.register_after_fork(self, lambda obj: obj.clear()) def __reduce__(self): return type(self), () # # Definition of BaseProxy # class BaseProxy(object): ''' A base for proxies of shared objects ''' _address_to_local = {} _mutex = util.ForkAwareThreadLock() def __init__(self, token, serializer, manager=None, authkey=None, exposed=None, incref=True): BaseProxy._mutex.acquire() try: tls_idset = BaseProxy._address_to_local.get(token.address, None) if tls_idset is None: tls_idset = util.ForkAwareLocal(), ProcessLocalSet() BaseProxy._address_to_local[token.address] = tls_idset finally: BaseProxy._mutex.release() # self._tls is used to record the connection used by this # thread to communicate with the manager at token.address self._tls = tls_idset[0] # self._idset is used to record the identities of all shared # 
objects for which the current process owns references and # which are in the manager at token.address self._idset = tls_idset[1] self._token = token self._id = self._token.id self._manager = manager self._serializer = serializer self._Client = listener_client[serializer][1] if authkey is not None: self._authkey = AuthenticationString(authkey) elif self._manager is not None: self._authkey = self._manager._authkey else: self._authkey = current_process().authkey if incref: self._incref() util.register_after_fork(self, BaseProxy._after_fork) def _connect(self): util.debug('making connection to manager') name = current_process().name if threading.current_thread().name != 'MainThread': name += '|' + threading.current_thread().name conn = self._Client(self._token.address, authkey=self._authkey) dispatch(conn, None, 'accept_connection', (name,)) self._tls.connection = conn def _callmethod(self, methodname, args=(), kwds={}): ''' Try to call a method of the referrent and return a copy of the result ''' try: conn = self._tls.connection except AttributeError: util.debug('thread %r does not own a connection', threading.current_thread().name) self._connect() conn = self._tls.connection conn.send((self._id, methodname, args, kwds)) kind, result = conn.recv() if kind == '#RETURN': return result elif kind == '#PROXY': exposed, token = result proxytype = self._manager._registry[token.typeid][-1] proxy = proxytype( token, self._serializer, manager=self._manager, authkey=self._authkey, exposed=exposed ) conn = self._Client(token.address, authkey=self._authkey) dispatch(conn, None, 'decref', (token.id,)) return proxy raise convert_to_error(kind, result) def _getvalue(self): ''' Get a copy of the value of the referent ''' return self._callmethod('#GETVALUE') def _incref(self): conn = self._Client(self._token.address, authkey=self._authkey) dispatch(conn, None, 'incref', (self._id,)) util.debug('INCREF %r', self._token.id) self._idset.add(self._id) state = self._manager and 
self._manager._state self._close = util.Finalize( self, BaseProxy._decref, args=(self._token, self._authkey, state, self._tls, self._idset, self._Client), exitpriority=10 ) @staticmethod def _decref(token, authkey, state, tls, idset, _Client): idset.discard(token.id) # check whether manager is still alive if state is None or state.value == State.STARTED: # tell manager this process no longer cares about referent try: util.debug('DECREF %r', token.id) conn = _Client(token.address, authkey=authkey) dispatch(conn, None, 'decref', (token.id,)) except Exception, e: util.debug('... decref failed %s', e) else: util.debug('DECREF %r -- manager already shutdown', token.id) # check whether we can close this thread's connection because # the process owns no more references to objects for this manager if not idset and hasattr(tls, 'connection'): util.debug('thread %r has no more proxies so closing conn', threading.current_thread().name) tls.connection.close() del tls.connection def _after_fork(self): self._manager = None try: self._incref() except Exception, e: # the proxy may just be for a manager which has shutdown util.info('incref failed: %s' % e) def __reduce__(self): kwds = {} if Popen.thread_is_spawning(): kwds['authkey'] = self._authkey if getattr(self, '_isauto', False): kwds['exposed'] = self._exposed_ return (RebuildProxy, (AutoProxy, self._token, self._serializer, kwds)) else: return (RebuildProxy, (type(self), self._token, self._serializer, kwds)) def __deepcopy__(self, memo): return self._getvalue() def __repr__(self): return '<%s object, typeid %r at %s>' % \ (type(self).__name__, self._token.typeid, '0x%x' % id(self)) def __str__(self): ''' Return representation of the referent (or a fall-back if that fails) ''' try: return self._callmethod('__repr__') except Exception: return repr(self)[:-1] + "; '__str__()' failed>" # # Function used for unpickling # def RebuildProxy(func, token, serializer, kwds): ''' Function used for unpickling proxy objects. 
If possible the shared object is returned, or otherwise a proxy for it. ''' server = getattr(current_process(), '_manager_server', None) if server and server.address == token.address: return server.id_to_obj[token.id][0] else: incref = ( kwds.pop('incref', True) and not getattr(current_process(), '_inheriting', False) ) return func(token, serializer, incref=incref, **kwds) # # Functions to create proxies and proxy types # def MakeProxyType(name, exposed, _cache={}): ''' Return an proxy type whose methods are given by `exposed` ''' exposed = tuple(exposed) try: return _cache[(name, exposed)] except KeyError: pass dic = {} for meth in exposed: exec '''def %s(self, *args, **kwds): return self._callmethod(%r, args, kwds)''' % (meth, meth) in dic ProxyType = type(name, (BaseProxy,), dic) ProxyType._exposed_ = exposed _cache[(name, exposed)] = ProxyType return ProxyType def AutoProxy(token, serializer, manager=None, authkey=None, exposed=None, incref=True): ''' Return an auto-proxy for `token` ''' _Client = listener_client[serializer][1] if exposed is None: conn = _Client(token.address, authkey=authkey) try: exposed = dispatch(conn, None, 'get_methods', (token,)) finally: conn.close() if authkey is None and manager is not None: authkey = manager._authkey if authkey is None: authkey = current_process().authkey ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed) proxy = ProxyType(token, serializer, manager=manager, authkey=authkey, incref=incref) proxy._isauto = True return proxy # # Types/callables which we will register with SyncManager # class Namespace(object): def __init__(self, **kwds): self.__dict__.update(kwds) def __repr__(self): items = self.__dict__.items() temp = [] for name, value in items: if not name.startswith('_'): temp.append('%s=%r' % (name, value)) temp.sort() return 'Namespace(%s)' % str.join(', ', temp) class Value(object): def __init__(self, typecode, value, lock=True): self._typecode = typecode self._value = value def get(self): 
return self._value def set(self, value): self._value = value def __repr__(self): return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value) value = property(get, set) def Array(typecode, sequence, lock=True): return array.array(typecode, sequence) # # Proxy types used by SyncManager # class IteratorProxy(BaseProxy): # XXX remove methods for Py3.0 and Py2.6 _exposed_ = ('__next__', 'next', 'send', 'throw', 'close') def __iter__(self): return self def __next__(self, *args): return self._callmethod('__next__', args) def next(self, *args): return self._callmethod('next', args) def send(self, *args): return self._callmethod('send', args) def throw(self, *args): return self._callmethod('throw', args) def close(self, *args): return self._callmethod('close', args) class AcquirerProxy(BaseProxy): _exposed_ = ('acquire', 'release') def acquire(self, blocking=True): return self._callmethod('acquire', (blocking,)) def release(self): return self._callmethod('release') def __enter__(self): return self._callmethod('acquire') def __exit__(self, exc_type, exc_val, exc_tb): return self._callmethod('release') class ConditionProxy(AcquirerProxy): # XXX will Condition.notfyAll() name be available in Py3.0? 
_exposed_ = ('acquire', 'release', 'wait', 'notify', 'notify_all') def wait(self, timeout=None): return self._callmethod('wait', (timeout,)) def notify(self): return self._callmethod('notify') def notify_all(self): return self._callmethod('notify_all') class EventProxy(BaseProxy): _exposed_ = ('is_set', 'set', 'clear', 'wait') def is_set(self): return self._callmethod('is_set') def set(self): return self._callmethod('set') def clear(self): return self._callmethod('clear') def wait(self, timeout=None): return self._callmethod('wait', (timeout,)) class NamespaceProxy(BaseProxy): _exposed_ = ('__getattribute__', '__setattr__', '__delattr__') def __getattr__(self, key): if key[0] == '_': return object.__getattribute__(self, key) callmethod = object.__getattribute__(self, '_callmethod') return callmethod('__getattribute__', (key,)) def __setattr__(self, key, value): if key[0] == '_': return object.__setattr__(self, key, value) callmethod = object.__getattribute__(self, '_callmethod') return callmethod('__setattr__', (key, value)) def __delattr__(self, key): if key[0] == '_': return object.__delattr__(self, key) callmethod = object.__getattribute__(self, '_callmethod') return callmethod('__delattr__', (key,)) class ValueProxy(BaseProxy): _exposed_ = ('get', 'set') def get(self): return self._callmethod('get') def set(self, value): return self._callmethod('set', (value,)) value = property(get, set) BaseListProxy = MakeProxyType('BaseListProxy', ( '__add__', '__contains__', '__delitem__', '__delslice__', '__getitem__', '__getslice__', '__len__', '__mul__', '__reversed__', '__rmul__', '__setitem__', '__setslice__', 'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove', 'reverse', 'sort', '__imul__' )) # XXX __getslice__ and __setslice__ unneeded in Py3.0 class ListProxy(BaseListProxy): def __iadd__(self, value): self._callmethod('extend', (value,)) return self def __imul__(self, value): self._callmethod('__imul__', (value,)) return self DictProxy = 
MakeProxyType('DictProxy', ( '__contains__', '__delitem__', '__getitem__', '__len__', '__setitem__', 'clear', 'copy', 'get', 'has_key', 'items', 'keys', 'pop', 'popitem', 'setdefault', 'update', 'values' )) ArrayProxy = MakeProxyType('ArrayProxy', ( '__len__', '__getitem__', '__setitem__', '__getslice__', '__setslice__' )) # XXX __getslice__ and __setslice__ unneeded in Py3.0 PoolProxy = MakeProxyType('PoolProxy', ( 'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join', 'map', 'map_async', 'terminate' )) PoolProxy._method_to_typeid_ = { 'apply_async': 'AsyncResult', 'map_async': 'AsyncResult', 'imap': 'Iterator', 'imap_unordered': 'Iterator' } # # Definition of SyncManager # class SyncManager(BaseManager): ''' Subclass of `BaseManager` which supports a number of shared object types. The types registered are those intended for the synchronization of threads, plus `dict`, `list` and `Namespace`. The `multiprocessing.Manager()` function creates started instances of this class. ''' SyncManager.register('Queue', Queue.Queue) SyncManager.register('JoinableQueue', Queue.Queue) SyncManager.register('Event', threading.Event, EventProxy) SyncManager.register('Lock', threading.Lock, AcquirerProxy) SyncManager.register('RLock', threading.RLock, AcquirerProxy) SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy) SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore, AcquirerProxy) SyncManager.register('Condition', threading.Condition, ConditionProxy) SyncManager.register('Pool', Pool, PoolProxy) SyncManager.register('list', list, ListProxy) SyncManager.register('dict', dict, DictProxy) SyncManager.register('Value', Value, ValueProxy) SyncManager.register('Array', Array, ArrayProxy) SyncManager.register('Namespace', Namespace, NamespaceProxy) # types returned by methods of PoolProxy SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False) SyncManager.register('AsyncResult', create_method=False)
Zlash65/erpnext
refs/heads/develop
erpnext/patches/v12_0/move_item_tax_to_item_tax_template.py
1
import frappe
import json
from six import iteritems


def execute():
	"""Migrate legacy per-item `Item Tax` rows into shared Item Tax Templates.

	Pre-v12 items stored tax rates directly in the `Item Tax` child table
	(tax_type/tax_rate columns).  This patch groups identical tax maps into
	`Item Tax Template` documents, links every item and every historical
	transaction item row to the matching template, and finally adjusts
	Accounts Settings so the old behaviour is preserved.
	"""
	# Nothing to migrate when the old schema columns are already gone.
	if "tax_type" not in frappe.db.get_table_columns("Item Tax"):
		return

	old_item_taxes = {}              # item_code -> [rows from `tabItem Tax`]
	item_tax_templates = {}          # template title -> {tax_type: tax_rate}
	rename_template_to_untitled = []

	for row in frappe.db.sql("""select parent as item_code, tax_type, tax_rate from `tabItem Tax`""", as_dict=1):
		old_item_taxes.setdefault(row.item_code, []).append(row)

	# Make sure the new/changed doctypes exist on disk before any documents
	# are touched.
	for module, doctype in (
			("accounts", "item_tax_template_detail"),
			("accounts", "item_tax_template"),
			("stock", "item"),
			("stock", "item_tax"),
			("selling", "quotation_item"),
			("selling", "sales_order_item"),
			("stock", "delivery_note_item"),
			("accounts", "sales_invoice_item"),
			("buying", "supplier_quotation_item"),
			("buying", "purchase_order_item"),
			("stock", "purchase_receipt_item"),
			("accounts", "purchase_invoice_item"),
			("accounts", "accounts_settings")):
		frappe.reload_doc(module, "doctype", doctype, force=1)

	# Replace each item's legacy tax rows with a single template reference.
	for item_code, tax_rows in old_item_taxes.items():
		item_tax_map = dict((row.tax_type, row.tax_rate) for row in tax_rows)
		item_tax_template_name = get_item_tax_template(item_tax_templates,
			rename_template_to_untitled, item_tax_map, item_code)

		item = frappe.get_doc("Item", item_code)
		item.set("taxes", [])
		item.append("taxes", {"item_tax_template": item_tax_template_name, "tax_category": ""})
		item.save()

	# Historical transaction item rows carry their tax map as JSON in
	# item_tax_rate; link each of them to a template as well.
	transaction_doctypes = [
		'Quotation', 'Sales Order', 'Delivery Note', 'Sales Invoice',
		'Supplier Quotation', 'Purchase Order', 'Purchase Receipt', 'Purchase Invoice'
	]
	for dt in transaction_doctypes:
		for row in frappe.db.sql("""select name, parent, item_code, item_tax_rate from `tab{0} Item` where ifnull(item_tax_rate, '') not in ('', '{{}}')""".format(dt), as_dict=1):
			item_tax_map = json.loads(row.item_tax_rate)
			item_tax_template = get_item_tax_template(item_tax_templates,
				rename_template_to_untitled, item_tax_map, row.item_code, row.parent)
			frappe.db.set_value(dt + " Item", row.name, "item_tax_template", item_tax_template)

	# Templates that ended up shared (matched without a parent transaction)
	# lose their item-specific titles and become "Untitled N".
	for idx, oldname in enumerate(rename_template_to_untitled, 1):
		frappe.rename_doc("Item Tax Template", oldname, "Untitled {}".format(idx))

	settings = frappe.get_single("Accounts Settings")
	settings.add_taxes_from_item_tax_template = 0
	settings.determine_address_tax_category_from = "Billing Address"
	settings.save()


def get_item_tax_template(item_tax_templates, rename_template_to_untitled, item_tax_map, item_code, parent=None):
	"""Return the name of an Item Tax Template matching *item_tax_map*.

	Reuses a previously created template whose tax map is identical,
	otherwise creates a new one (recreating any missing tax Accounts on the
	way).  Templates matched without a *parent* transaction are queued in
	*rename_template_to_untitled* for generic renaming by the caller.
	"""
	# Reuse an existing template when its tax map matches exactly.
	for template, known_map in iteritems(item_tax_templates):
		if item_tax_map == known_map:
			if not parent:
				rename_template_to_untitled.append(template)
			return template

	# No match found: build a fresh template.
	template_doc = frappe.new_doc("Item Tax Template")
	if parent:
		template_doc.title = "{}--{}".format(parent, item_code)
	else:
		template_doc.title = "Item-{}".format(item_code)

	for tax_type, tax_rate in iteritems(item_tax_map):
		# The tax head may reference an account that no longer exists;
		# recreate it under the first non-group liability Tax account found.
		if not frappe.db.exists("Account", tax_type):
			parts = tax_type.strip().split(" - ")
			account_name = " - ".join(parts[:-1])
			company = frappe.db.get_value("Company", filters={"abbr": parts[-1]})
			parent_account = frappe.db.get_value("Account",
				filters={"account_type": "Tax", "root_type": "Liability", "is_group": 0},
				fieldname="parent_account")
			frappe.get_doc({
				"doctype": "Account",
				"account_name": account_name,
				"company": company,
				"account_type": "Tax",
				"parent_account": parent_account
			}).insert()

		template_doc.append("taxes", {"tax_type": tax_type, "tax_rate": tax_rate})
		item_tax_templates.setdefault(template_doc.title, {})[tax_type] = tax_rate

	template_doc.save()
	return template_doc.name
Tomsod/gemrb
refs/heads/master
gemrb/GUIScripts/iwd2/Spells.py
1
# GemRB - Infinity Engine Emulator
# Copyright (C) 2014 The GemRB Project
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# character generation, spells (GUISPL2)

import GemRB
import CommonTables
import GUICommon
import LUSpellSelection
import IDLUCommon
import Spellbook
from ie_stats import IE_CLASS, IE_CLASSLEVELSUM, IE_KIT
from GUIDefines import IE_IWD2_SPELL_DOMAIN

def OnLoad ():
	"""Entry point invoked by the engine; runs spell setup in chargen mode."""
	SetupSpellsWindow (1)

def SetupSpellsWindow(chargen=0):
	"""Prepare the spell-selection step for chargen or level-up.

	chargen=1 during character generation, 0 during level-up (inferred
	from the two call paths below — confirm against callers).  Resets and
	relearns class spells as needed, then either opens the spell picker or
	advances straight to the next screen for classes with nothing to pick.
	"""
	if chargen:
		# Character generation: the slot being created, first level.
		MyChar = GemRB.GetVar ("Slot")
		Class = GemRB.GetPlayerStat (MyChar, IE_CLASS)
		ClassName = GUICommon.GetClassRowName (Class, "class")
		Level = 0
		LevelDiff = 1
		KitValue = GemRB.GetPlayerStat (MyChar, IE_KIT)
	else:
		# Level-up: class/kit and level delta come from the LU* variables.
		MyChar = GemRB.GameGetSelectedPCSingle ()
		ClassIndex = GemRB.GetVar ("LUClass")
		ClassName = GUICommon.GetClassRowName (ClassIndex, "index")
		LevelDiff = GemRB.GetVar ("LevelDiff")
		Level = GemRB.GetPlayerStat (MyChar, IE_CLASSLEVELSUM)
		# this is only used for detecting specialists!
		KitValue = GemRB.GetVar ("LUKit")

	SpellTableName = CommonTables.ClassSkills.GetValue (ClassName, "MAGESPELL")
	# mxsplbon.2da is handled in core and does not affect learning
	# sorcerer types need this change to not get too many spells
	# for them the castable and known count progress differently
	# TODO: create an extra clsskills column to hold this data
	if SpellTableName == "MXSPLSOR":
		SpellTableName = "SPLSRCKN"
	elif SpellTableName == "MXSPLBRD":
		SpellTableName = "SPLBRDKN"

	# charbase has domain slots reserved, so nuke them
	if chargen:
		Spellbook.UnsetupSpellLevels (MyChar, "MXSPLCLR", IE_IWD2_SPELL_DOMAIN, 1)

	# learn priest spells if any and setup spell levels
	# but first nullify any previous spells
	if chargen:
		# Clear every spellbook type listed in clsskills, then domain spells.
		for row in range(CommonTables.ClassSkills.GetRowCount()):
			rowname = CommonTables.ClassSkills.GetRowName (row)
			SpellBookType = CommonTables.ClassSkills.GetValue (rowname, "SPLTYPE")
			if SpellBookType != "*":
				Spellbook.RemoveKnownSpells (MyChar, SpellBookType, 1,9, 0)
		Spellbook.RemoveKnownSpells (MyChar, IE_IWD2_SPELL_DOMAIN, 1,9, 0)

	IDLUCommon.LearnAnySpells (MyChar, ClassName, chargen)

	# make sure we have a correct table and that we're eligible
	BookType = CommonTables.ClassSkills.GetValue (ClassName, "BOOKTYPE")
	canLearn = chargen or Spellbook.IsSorcererBook (BookType) # bard / sorcerer
	if SpellTableName == "*" or not canLearn:
		# Nothing to pick manually: skip ahead to the next chargen screen
		# or finish the level-up.
		if chargen:
			GemRB.SetNextScript ("CharGen7")
		else:
			import GUIREC
			GUIREC.FinishLevelUp ()
		return

	SpellBookType = CommonTables.ClassSkills.GetValue (ClassName, "SPLTYPE")
	LUSpellSelection.OpenSpellsWindow (MyChar, SpellTableName, Level+LevelDiff, LevelDiff, KitValue, chargen, True, SpellBookType)
pbirsinger/asp_backup
refs/heads/master
asp/jit/variant_history.py
5
class CodeVariantPerformanceDatabase(object):
    """Records measured runtimes of code variants per input key, and tracks
    for each input key the best ("oracular") variant seen so far."""

    def __init__(self):
        self.variant_times = {}
        # The measured performance data for a particular method.
        # Dict of dicts, key: input key, value: dict.
        # Inner dict of times, key: v_id, value: time.
        # The set of v_ids contained in the dict may be much larger than the
        # set of v_ids compiled by a particular instance of a specializer.
        self.oracular_best = {}
        # The variant id of the best variant out of all currently compiled
        # variants of a particular method.
        # Dict of v_ids, key: input key, value: v_id or False.

    def set_oracular_best(self, key, time_dict, v_id_set):
        """Choose the fastest successful, currently-compiled variant for key.

        Entries with time <= 0 are failure markers (see add_time callers);
        entries outside v_id_set are variants not compiled by this instance.
        """
        # Note: a list comprehension (not bare filter()) so emptiness testing
        # works identically on Python 2 and 3.
        succeeded = [item for item in time_dict.items()
                     if item[1] > 0 and item[0] in v_id_set]
        if not succeeded:
            print("Warning: ASP has tried every currently compiled variant for this input and none have run successfully. Add different variants.")
            self.oracular_best[key] = False
        else:
            # v_id of the entry with the minimal measured time
            self.oracular_best[key] = min(succeeded, key=lambda p: p[1])[0]

    def get_oracular_best(self, key):
        """Return the best-known v_id for key, or False if not yet decided."""
        return self.oracular_best.get(key, False)

    def clear_oracle(self):
        # A newly added variant might be the best, so forget previous winners.
        self.oracular_best.clear()

    def add_time(self, key, elapsed, v_id, v_id_set):
        """Record one timing sample; once every v_id in v_id_set has a sample
        for this key, finalize the oracular best."""
        time_dict = self.variant_times.get(key, {})
        # TODO: Overwrite old times with new data? If so, reset best when?
        if v_id not in time_dict:
            time_dict[v_id] = elapsed
            if set(time_dict.keys()) >= v_id_set:
                self.set_oracular_best(key, time_dict, v_id_set)
        self.variant_times[key] = time_dict

    def get_measured_v_ids(self, key):
        """Return the v_ids that already have a timing sample for key."""
        return list(self.variant_times.get(key, {}).keys())

    def clear(self):
        self.variant_times.clear()
        self.oracular_best.clear()

    def get_picklable_obj(self):
        """Export the persistent part of the database (times only; the
        oracle is recomputed on load)."""
        return {
            'variant_times': self.variant_times
        }

    def set_from_pickled_obj(self, obj, v_id_set):
        """Restore timing data and recompute oracular bests for every key
        whose samples already cover v_id_set."""
        self.variant_times = obj['variant_times']
        for k, time_dict in self.variant_times.items():
            if set(time_dict.keys()) >= v_id_set:
                self.set_oracular_best(k, time_dict, v_id_set)


class CodeVariantUseCaseLimiter(object):
    """Decides whether a given variant may run on this machine with a
    particular input."""

    def __init__(self):
        self.compilable = {}
        # Track whether or not a variant is compilable on this machine.
        # Dict of bools, key: v_id, val: bool.
        self.input_limits_funcs = {}
        # Return a function determining if a particular input is runnable
        # with a particular variant.
        # Dict of closures, key: v_id, val: closure returning bool.

    def is_allowed(self, v_id, *args, **kwargs):
        """True iff v_id compiled here and its limit closure accepts args."""
        return self.compilable[v_id] and \
            self.input_limits_funcs[v_id](*args, **kwargs)

    def append(self, v_id_list, limit_funcs, compilables):
        """Register limit closures and compilability flags, positionally
        matched to v_id_list."""
        for v, lim, c in zip(v_id_list, limit_funcs, compilables):
            self.input_limits_funcs[v] = lim
            self.compilable[v] = c


class CodeVariantSelector(object):
    """Picks which variant to run for a given input key."""

    def __init__(self, perf_database, use_case_limiter):
        self.perf_database = perf_database
        self.use_case_limiter = use_case_limiter

    def get_v_id_to_run(self, v_id_set, key, *args, **kwargs):
        """Return the known-best v_id for key if one exists; otherwise the
        next unmeasured, allowed variant to try, or None if none remain.

        Disallowed candidates are recorded with a -1 time so they are never
        retried for this key.
        """
        def exhaustive_search():
            candidates = v_id_set - set(self.perf_database.get_measured_v_ids(key))
            while candidates:
                v_id = candidates.pop()
                if self.use_case_limiter.is_allowed(v_id, *args, **kwargs):
                    return v_id
                self.perf_database.add_time(key, -1., v_id, v_id_set)
            return None
        best = self.perf_database.get_oracular_best(key)
        # BUGFIX: removed unreachable trailing statement
        # "return ret_func or error_func" which referenced undefined names
        # and would have raised NameError had it ever executed.
        return best if best else exhaustive_search()


def use_supplied_function_to_generate_a_new_variant():
    # Placeholder kept for interface compatibility; not yet implemented.
    pass


class CodeVariants(object):
    """Bundles the variant id list with its performance database, use-case
    limiter and selector."""

    def __init__(self, variant_names, key_func, param_names):
        self.v_id_list = variant_names
        self.v_id_set = set(variant_names)
        self.make_key = key_func
        self.param_names = param_names
        self.database = CodeVariantPerformanceDatabase()
        self.limiter = CodeVariantUseCaseLimiter()
        self.selector = CodeVariantSelector(self.database, self.limiter)

    def __contains__(self, v_id):
        return v_id in self.v_id_list

    def append(self, variant_names):
        """Add newly generated variant names to both the list and the set."""
        self.v_id_list.extend(variant_names)
        self.v_id_set.update(variant_names)

    def get_picklable_obj(self):
        return {
            'variant_names': self.v_id_list,
            'param_names': self.param_names,
        }

    def set_from_pickled_obj(self, obj):
        """Load pickled metadata, refusing mismatched variant spaces."""
        if self.v_id_list != obj['variant_names']:
            print("Warning: Attempted to load pickled performance data for non-matching space of code variants.")
            return
        self.param_names = obj['param_names']
kaiyuanl/gem5
refs/heads/master
src/unittest/stattestmain.py
76
def main():
    """Run the gem5 statistics package unit test.

    Initializes the global stats, registers the text output backend, then
    enables/resets the stats package before running the native test and
    dumping results. The call order matters: registration must be complete
    before enable(), and reset() puts counters in a consistent state before
    stattest_run() mutates them.
    """
    from m5.internal.stattest import stattest_init, stattest_run
    import m5.stats

    stattest_init()

    # Initialize the global statistics
    m5.stats.initSimStats()
    # "cout" routes the text stats output to standard output
    m5.stats.initText("cout")

    # We're done registering statistics. Enable the stats package now.
    m5.stats.enable()
    # Reset to put the stats in a consistent state.
    m5.stats.reset()

    stattest_run()
    m5.stats.dump()
linktlh/Toontown-journey
refs/heads/master
otp/chat/TalkMessage.py
3
class TalkMessage:
    """One chat message exchanged between two parties.

    Holds the message id, timestamp, body text, talk-type code, optional
    extra payload, and the avatar/account identity of both the sender and
    the receiver. All fields are exposed through get/set accessor pairs.
    """

    def __init__(self, messageId, timeStamp, body, senderAvatarId, senderAvatarName, senderAccountId, senderAccountName, receiverAvatarId, receiverAvatarName, receiverAccountId, receiverAccountName, talkType, extraInfo = None):
        # Message identity and content
        self.messageId = messageId
        self.timeStamp = timeStamp
        self.body = body
        self.talkType = talkType
        self.extraInfo = extraInfo
        # Sender identity
        self.senderAvatarId = senderAvatarId
        self.senderAvatarName = senderAvatarName
        self.senderAccountId = senderAccountId
        self.senderAccountName = senderAccountName
        # Receiver identity
        self.receiverAvatarId = receiverAvatarId
        self.receiverAvatarName = receiverAvatarName
        self.receiverAccountId = receiverAccountId
        self.receiverAccountName = receiverAccountName

    # -- message id --

    def getMessageId(self):
        return self.messageId

    def setMessageId(self, id):
        self.messageId = id

    # -- timestamp --

    def getTimeStamp(self):
        return self.timeStamp

    def setTimeStamp(self, timeStamp):
        self.timeStamp = timeStamp

    # -- body text --

    def getBody(self):
        return self.body

    def setBody(self, body):
        self.body = body

    # -- sender --

    def getSenderAvatarId(self):
        return self.senderAvatarId

    def setSenderAvatarId(self, senderAvatarId):
        self.senderAvatarId = senderAvatarId

    def getSenderAvatarName(self):
        return self.senderAvatarName

    def setSenderAvatarName(self, senderAvatarName):
        self.senderAvatarName = senderAvatarName

    def getSenderAccountId(self):
        return self.senderAccountId

    def setSenderAccountId(self, senderAccountId):
        self.senderAccountId = senderAccountId

    def getSenderAccountName(self):
        return self.senderAccountName

    def setSenderAccountName(self, senderAccountName):
        self.senderAccountName = senderAccountName

    # -- receiver --

    def getReceiverAvatarId(self):
        return self.receiverAvatarId

    def setReceiverAvatarId(self, receiverAvatarId):
        self.receiverAvatarId = receiverAvatarId

    def getReceiverAvatarName(self):
        return self.receiverAvatarName

    def setReceiverAvatarName(self, receiverAvatarName):
        self.receiverAvatarName = receiverAvatarName

    def getReceiverAccountId(self):
        return self.receiverAccountId

    def setReceiverAccountId(self, receiverAccountId):
        self.receiverAccountId = receiverAccountId

    def getReceiverAccountName(self):
        return self.receiverAccountName

    def setReceiverAccountName(self, receiverAccountName):
        self.receiverAccountName = receiverAccountName

    # -- talk type / extra payload --

    def getTalkType(self):
        return self.talkType

    def setTalkType(self, talkType):
        self.talkType = talkType

    def getExtraInfo(self):
        return self.extraInfo

    def setExtraInfo(self, extraInfo):
        self.extraInfo = extraInfo
shanil-puri/mase
refs/heads/master
src/ntilesok.py
10
# Demo/test driver exercising the ntiles helpers through the "ok" micro test
# framework. Python 2 module (uses xrange).
from __future__ import print_function, division
import sys,random
sys.dont_write_bytecode = True

from ok import *
from ntiles import *

"""

# showing off ntiles

"""

# Short aliases for the random helpers used below.
r = random.random
rseed = random.seed  # NOTE(review): aliased but unused here; kept for callers/REPL use


@ok
def _ntiles():
    # Print ntile summaries of 1000 uniform samples and of their squares,
    # exercising the ordered/tiles keyword options.
    r1 = [r() for _ in xrange(1000)]
    r2 = [x ** 2 for x in r1]
    print("\nlong", ntiles(r1, ordered=False))
    print("\nshort", ntiles(r1, tiles=[0.25, 0.5, 0.75]))
    print("\nother", ntiles(r2))


@ok
def _isSorted2():
    assert isSorted([1, 2, 3])
    # NOTE(review): [1,4,3] is not ascending; this assert presumably fails on
    # purpose to demonstrate the ok framework's failure reporting — confirm
    # against isSorted's definition in ntiles before "fixing".
    assert isSorted([1, 4, 3])
HarmonyEnterpriseSolutions/harmony-platform
refs/heads/master
src/gnue/forms/uidrivers/wx26/widgets/url_resource/PDF.py
1
import os

from wx.lib.pdfwin import PDFWindow, get_min_adobe_version

from TempFileMixIn import TempFileMixIn
from gnue import paths
from src.gnue.forms.uidrivers.wx26.widgets.url_resource import PdfCheckerMixIn

# Python 2 module (old-style raise). The ActiveX-backed PDFWindow requires a
# local Adobe Reader install; fail the import early if none is present.
if get_min_adobe_version() is None:
    raise ImportError, 'Adobe Acrobat Reader 5.0, 7.0 or greater is required to be installed'


class PDF(TempFileMixIn, PdfCheckerMixIn, PDFWindow):
    """
    An implementation of a wx widget used for displaying pdfs

    TempFileMixIn drives the open/close/busy callbacks; this class maps them
    onto PDFWindow.LoadFile, substituting bundled placeholder documents
    (blank.pdf / busy.pdf) when there is nothing to show or a load is pending.
    """

    def __init__(self, *args, **kwargs):
        PDFWindow.__init__(self, *args, **kwargs)
        TempFileMixIn.__init__(self, self._openFile, self._closeFile, checkFile=self.checkPdfFile, openBusyFile=self._openBusyFile)
        # Directory holding the bundled placeholder PDFs.
        self.__path = os.path.join(paths.data, 'share', 'gnue', 'pdf')

    def _closeFile(self):
        # Show an empty document instead of leaving the last PDF on screen.
        self.LoadFile(os.path.join(self.__path, "blank.pdf"))

    def _openBusyFile(self):
        # Shown while the real document is being prepared.
        self.LoadFile(os.path.join(self.__path, "busy.pdf"))

    def _openFile(self, url):
        self.LoadFile(url)
junhuac/MQUIC
refs/heads/master
src/tools/android/findbugs_plugin/test/run_findbugs_plugin_tests.py
63
#!/usr/bin/env python
#
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# This is used to test the findbugs plugin, it calls
# build/android/pylib/utils/findbugs.py to analyze the classes in
# org.chromium.tools.findbugs.plugin package, and expects to get the same
# issue with those in expected_result.txt.
#
# Useful command line:
#   --rebaseline to generate the expected_result.txt, please make sure don't
#     remove the expected result of exsting tests.
# Python 2 script (print statements).

import argparse
import os
import sys

# Make build/android/pylib importable relative to this script's location.
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
                                             '..', '..', '..', '..',
                                             'build', 'android')))

from pylib import constants
from pylib.utils import findbugs


# The exact set of warnings the plugin is expected to emit for the test
# classes; compared set-wise against the actual findbugs output below.
_EXPECTED_WARNINGS = set([
    findbugs.FindBugsWarning(
        bug_type='CHROMIUM_SYNCHRONIZED_THIS',
        start_line=15,
        end_line=15,
        file_name='SimpleSynchronizedThis.java',
        message=(
            "Shouldn't use synchronized(this)",
            'In class org.chromium.tools.findbugs.plugin.'
                + 'SimpleSynchronizedThis',
            'In method org.chromium.tools.findbugs.plugin.'
                + 'SimpleSynchronizedThis.synchronizedThis()',
            'At SimpleSynchronizedThis.java:[line 15]',
        )),
    findbugs.FindBugsWarning(
        bug_type='CHROMIUM_SYNCHRONIZED_METHOD',
        start_line=14,
        end_line=14,
        file_name='SimpleSynchronizedStaticMethod.java',
        message=(
            "Shouldn't use synchronized method",
            'In class org.chromium.tools.findbugs.plugin.'
                + 'SimpleSynchronizedStaticMethod',
            'In method org.chromium.tools.findbugs.plugin.'
                + 'SimpleSynchronizedStaticMethod.synchronizedStaticMethod()',
            'At SimpleSynchronizedStaticMethod.java:[line 14]',
        )),
    findbugs.FindBugsWarning(
        bug_type='CHROMIUM_SYNCHRONIZED_METHOD',
        start_line=15,
        end_line=15,
        file_name='SimpleSynchronizedMethod.java',
        message=(
            "Shouldn't use synchronized method",
            'In class org.chromium.tools.findbugs.plugin.'
                + 'SimpleSynchronizedMethod',
            'In method org.chromium.tools.findbugs.plugin.'
                + 'SimpleSynchronizedMethod.synchronizedMethod()',
            'At SimpleSynchronizedMethod.java:[line 15]',
        )),
    ])


def main(argv):
    """Run findbugs over the test jar and diff warnings against the expected
    set. Returns the number of mismatches (0 == pass), which becomes the
    process exit code."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-l', '--release-build', action='store_true', dest='release',
        help='Run the release build of the findbugs plugin test.')

    args = parser.parse_args()

    test_jar_path = os.path.join(
        constants.GetOutDirectory(
            'Release' if args.release else 'Debug'),
        'lib.java', 'findbugs_plugin_test.jar')

    findbugs_command, findbugs_warnings = findbugs.Run(
        None, 'org.chromium.tools.findbugs.plugin.*', None, None, None,
        [test_jar_path])

    # Expected-but-absent warnings.
    missing_warnings = _EXPECTED_WARNINGS.difference(findbugs_warnings)
    if missing_warnings:
        print 'Missing warnings:'
        for w in missing_warnings:
            print '%s' % str(w)

    # Present-but-unexpected warnings.
    unexpected_warnings = findbugs_warnings.difference(_EXPECTED_WARNINGS)
    if unexpected_warnings:
        print 'Unexpected warnings:'
        for w in unexpected_warnings:
            print '%s' % str(w)

    return len(unexpected_warnings) + len(missing_warnings)


if __name__ == '__main__':
    sys.exit(main(sys.argv))
abhikumar22/MYBLOG
refs/heads/master
blg/Lib/site-packages/django-1.11.7-py3.6.egg/django/contrib/gis/db/backends/spatialite/operations.py
28
""" SQL functions reference lists: https://web.archive.org/web/20130407175746/https://www.gaia-gis.it/gaia-sins/spatialite-sql-4.0.0.html https://www.gaia-gis.it/gaia-sins/spatialite-sql-4.2.1.html """ import re import sys from django.contrib.gis.db.backends.base.operations import ( BaseSpatialOperations, ) from django.contrib.gis.db.backends.spatialite.adapter import SpatiaLiteAdapter from django.contrib.gis.db.backends.utils import SpatialOperator from django.contrib.gis.db.models import aggregates from django.contrib.gis.geometry.backend import Geometry from django.contrib.gis.measure import Distance from django.core.exceptions import ImproperlyConfigured from django.db.backends.sqlite3.operations import DatabaseOperations from django.utils import six from django.utils.functional import cached_property class SpatiaLiteDistanceOperator(SpatialOperator): def as_sql(self, connection, lookup, template_params, sql_params): if lookup.lhs.output_field.geodetic(connection): # SpatiaLite returns NULL instead of zero on geodetic coordinates sql_template = 'COALESCE(%(func)s(%(lhs)s, %(rhs)s, %%s), 0) %(op)s %(value)s' template_params.update({ 'op': self.op, 'func': connection.ops.spatial_function_name('Distance'), }) sql_params.insert(1, len(lookup.rhs) == 3 and lookup.rhs[-1] == 'spheroid') return sql_template % template_params, sql_params return super(SpatiaLiteDistanceOperator, self).as_sql(connection, lookup, template_params, sql_params) class SpatiaLiteOperations(BaseSpatialOperations, DatabaseOperations): name = 'spatialite' spatialite = True version_regex = re.compile(r'^(?P<major>\d)\.(?P<minor1>\d)\.(?P<minor2>\d+)') Adapter = SpatiaLiteAdapter area = 'Area' centroid = 'Centroid' collect = 'Collect' contained = 'MbrWithin' difference = 'Difference' distance = 'Distance' envelope = 'Envelope' extent = 'Extent' geojson = 'AsGeoJSON' gml = 'AsGML' intersection = 'Intersection' kml = 'AsKML' length = 'GLength' # OpenGis defines Length, but this conflicts with an 
SQLite reserved keyword makeline = 'MakeLine' num_geom = 'NumGeometries' num_points = 'NumPoints' point_on_surface = 'PointOnSurface' scale = 'ScaleCoords' svg = 'AsSVG' sym_difference = 'SymDifference' transform = 'Transform' translate = 'ShiftCoords' union = 'GUnion' # OpenGis defines Union, but this conflicts with an SQLite reserved keyword unionagg = 'GUnion' from_text = 'GeomFromText' from_wkb = 'GeomFromWKB' select = 'AsText(%s)' gis_operators = { # Unary predicates 'isvalid': SpatialOperator(func='IsValid'), # Binary predicates 'equals': SpatialOperator(func='Equals'), 'disjoint': SpatialOperator(func='Disjoint'), 'touches': SpatialOperator(func='Touches'), 'crosses': SpatialOperator(func='Crosses'), 'within': SpatialOperator(func='Within'), 'overlaps': SpatialOperator(func='Overlaps'), 'contains': SpatialOperator(func='Contains'), 'intersects': SpatialOperator(func='Intersects'), 'relate': SpatialOperator(func='Relate'), # Returns true if B's bounding box completely contains A's bounding box. 'contained': SpatialOperator(func='MbrWithin'), # Returns true if A's bounding box completely contains B's bounding box. 'bbcontains': SpatialOperator(func='MbrContains'), # Returns true if A's bounding box overlaps B's bounding box. 
'bboverlaps': SpatialOperator(func='MbrOverlaps'), # These are implemented here as synonyms for Equals 'same_as': SpatialOperator(func='Equals'), 'exact': SpatialOperator(func='Equals'), # Distance predicates 'dwithin': SpatialOperator(func='PtDistWithin'), 'distance_gt': SpatiaLiteDistanceOperator(func='Distance', op='>'), 'distance_gte': SpatiaLiteDistanceOperator(func='Distance', op='>='), 'distance_lt': SpatiaLiteDistanceOperator(func='Distance', op='<'), 'distance_lte': SpatiaLiteDistanceOperator(func='Distance', op='<='), } disallowed_aggregates = (aggregates.Extent3D,) @cached_property def function_names(self): return { 'Length': 'ST_Length', 'Reverse': 'ST_Reverse', 'Scale': 'ScaleCoords', 'Translate': 'ST_Translate', 'Union': 'ST_Union', } @cached_property def unsupported_functions(self): unsupported = {'BoundingCircle', 'ForceRHR', 'MemSize'} if not self.lwgeom_version(): unsupported |= {'GeoHash', 'IsValid', 'MakeValid'} return unsupported @cached_property def spatial_version(self): """Determine the version of the SpatiaLite library.""" try: version = self.spatialite_version_tuple()[1:] except Exception as msg: new_msg = ( 'Cannot determine the SpatiaLite version for the "%s" ' 'database (error was "%s"). Was the SpatiaLite initialization ' 'SQL loaded on this database?') % (self.connection.settings_dict['NAME'], msg) six.reraise(ImproperlyConfigured, ImproperlyConfigured(new_msg), sys.exc_info()[2]) if version < (4, 0, 0): raise ImproperlyConfigured('GeoDjango only supports SpatiaLite versions 4.0.0 and above.') return version def convert_extent(self, box, srid): """ Convert the polygon data received from SpatiaLite to min/max values. """ if box is None: return None shell = Geometry(box, srid).shell xmin, ymin = shell[0][:2] xmax, ymax = shell[2][:2] return (xmin, ymin, xmax, ymax) def geo_db_type(self, f): """ Returns None because geometry columns are added via the `AddGeometryColumn` stored procedure on SpatiaLite. 
""" return None def get_distance(self, f, value, lookup_type, **kwargs): """ Returns the distance parameters for the given geometry field, lookup value, and lookup type. """ if not value: return [] value = value[0] if isinstance(value, Distance): if f.geodetic(self.connection): if lookup_type == 'dwithin': raise ValueError( 'Only numeric values of degree units are allowed on ' 'geographic DWithin queries.' ) dist_param = value.m else: dist_param = getattr(value, Distance.unit_attname(f.units_name(self.connection))) else: dist_param = value return [dist_param] def get_geom_placeholder(self, f, value, compiler): """ Provides a proper substitution value for Geometries that are not in the SRID of the field. Specifically, this routine will substitute in the Transform() and GeomFromText() function call(s). """ def transform_value(value, srid): return not (value is None or value.srid == srid) if hasattr(value, 'as_sql'): if transform_value(value, f.srid): placeholder = '%s(%%s, %s)' % (self.transform, f.srid) else: placeholder = '%s' # No geometry value used for F expression, substitute in # the column name instead. sql, _ = compiler.compile(value) return placeholder % sql else: if transform_value(value, f.srid): # Adding Transform() to the SQL placeholder. return '%s(%s(%%s,%s), %s)' % (self.transform, self.from_text, value.srid, f.srid) else: return '%s(%%s,%s)' % (self.from_text, f.srid) def _get_spatialite_func(self, func): """ Helper routine for calling SpatiaLite functions and returning their result. Any error occurring in this method should be handled by the caller. """ cursor = self.connection._cursor() try: cursor.execute('SELECT %s' % func) row = cursor.fetchone() finally: cursor.close() return row[0] def geos_version(self): "Returns the version of GEOS used by SpatiaLite as a string." return self._get_spatialite_func('geos_version()') def proj4_version(self): "Returns the version of the PROJ.4 library used by SpatiaLite." 
return self._get_spatialite_func('proj4_version()') def lwgeom_version(self): """Return the version of LWGEOM library used by SpatiaLite.""" return self._get_spatialite_func('lwgeom_version()') def spatialite_version(self): "Returns the SpatiaLite library version as a string." return self._get_spatialite_func('spatialite_version()') def spatialite_version_tuple(self): """ Returns the SpatiaLite version as a tuple (version string, major, minor, subminor). """ version = self.spatialite_version() m = self.version_regex.match(version) if m: major = int(m.group('major')) minor1 = int(m.group('minor1')) minor2 = int(m.group('minor2')) else: raise Exception('Could not parse SpatiaLite version string: %s' % version) return (version, major, minor1, minor2) def spatial_aggregate_name(self, agg_name): """ Returns the spatial aggregate SQL template and function for the given Aggregate instance. """ agg_name = 'unionagg' if agg_name.lower() == 'union' else agg_name.lower() return getattr(self, agg_name) # Routines for getting the OGC-compliant models. def geometry_columns(self): from django.contrib.gis.db.backends.spatialite.models import SpatialiteGeometryColumns return SpatialiteGeometryColumns def spatial_ref_sys(self): from django.contrib.gis.db.backends.spatialite.models import SpatialiteSpatialRefSys return SpatialiteSpatialRefSys def get_db_converters(self, expression): converters = super(SpatiaLiteOperations, self).get_db_converters(expression) if hasattr(expression.output_field, 'geom_type'): converters.append(self.convert_geometry) return converters def convert_geometry(self, value, expression, connection, context): if value: value = Geometry(value) if 'transformed_srid' in context: value.srid = context['transformed_srid'] return value
BigDataforYou/movie_recommendation_workshop_1
refs/heads/master
big_data_4_you_demo_1/venv/lib/python2.7/site-packages/setuptools/py26compat.py
22
""" Compatibility Support for Python 2.6 and earlier """ import sys try: from urllib.parse import splittag except ImportError: from urllib import splittag def strip_fragment(url): """ In `Python 8280 <http://bugs.python.org/issue8280>`_, Python 2.7 and later was patched to disregard the fragment when making URL requests. Do the same for Python 2.6 and earlier. """ url, fragment = splittag(url) return url if sys.version_info >= (2, 7): strip_fragment = lambda x: x
ttglennhall/DjangoGirlsTutorial
refs/heads/master
myvenv/lib/python3.4/site-packages/pip/_vendor/distlib/index.py
185
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import hashlib
import logging
import os
import shutil
import subprocess
import tempfile
try:
    from threading import Thread
except ImportError:
    from dummy_threading import Thread

from . import DistlibException
from .compat import (HTTPBasicAuthHandler, Request, HTTPPasswordMgr,
                     urlparse, build_opener, string_types)
from .util import cached_property, zip_dir, ServerProxy

logger = logging.getLogger(__name__)

DEFAULT_INDEX = 'https://pypi.python.org/pypi'
DEFAULT_REALM = 'pypi'


class PackageIndex(object):
    """
    This class represents a package index compatible with PyPI, the Python
    Package Index.
    """

    # Multipart boundary used by encode_request; bytes, as required for the
    # raw request body.
    boundary = b'----------ThIs_Is_tHe_distlib_index_bouNdaRY_$'

    def __init__(self, url=None):
        """
        Initialise an instance.

        :param url: The URL of the index. If not specified, the URL for
                    PyPI is used.
        """
        self.url = url or DEFAULT_INDEX
        self.read_configuration()
        scheme, netloc, path, params, query, frag = urlparse(self.url)
        if params or query or frag or scheme not in ('http', 'https'):
            raise DistlibException('invalid repository: %s' % self.url)
        self.password_handler = None
        self.ssl_verifier = None
        self.gpg = None
        self.gpg_home = None
        self.rpc_proxy = None
        # Probe for an available GnuPG binary (gpg2 preferred); used for
        # signing/verifying uploads. Absence is not an error.
        with open(os.devnull, 'w') as sink:
            for s in ('gpg2', 'gpg'):
                try:
                    rc = subprocess.check_call([s, '--version'], stdout=sink,
                                               stderr=sink)
                    if rc == 0:
                        self.gpg = s
                        break
                except OSError:
                    pass

    def _get_pypirc_command(self):
        """
        Get the distutils command for interacting with PyPI configurations.
        :return: the command.
        """
        from distutils.core import Distribution
        from distutils.config import PyPIRCCommand
        d = Distribution()
        return PyPIRCCommand(d)

    def read_configuration(self):
        """
        Read the PyPI access configuration as supported by distutils, getting
        PyPI to do the acutal work. This populates ``username``, ``password``,
        ``realm`` and ``url`` attributes from the configuration.
        """
        # get distutils to do the work
        c = self._get_pypirc_command()
        c.repository = self.url
        cfg = c._read_pypirc()
        self.username = cfg.get('username')
        self.password = cfg.get('password')
        self.realm = cfg.get('realm', 'pypi')
        self.url = cfg.get('repository', self.url)

    def save_configuration(self):
        """
        Save the PyPI access configuration. You must have set ``username`` and
        ``password`` attributes before calling this method.

        Again, distutils is used to do the actual work.
        """
        self.check_credentials()
        # get distutils to do the work
        c = self._get_pypirc_command()
        c._store_pypirc(self.username, self.password)

    def check_credentials(self):
        """
        Check that ``username`` and ``password`` have been set, and raise an
        exception if not.
        """
        if self.username is None or self.password is None:
            raise DistlibException('username and password must be set')
        # Install an auth handler so subsequent send_request calls can
        # authenticate against the index.
        pm = HTTPPasswordMgr()
        _, netloc, _, _, _, _ = urlparse(self.url)
        pm.add_password(self.realm, netloc, self.username, self.password)
        self.password_handler = HTTPBasicAuthHandler(pm)

    def register(self, metadata):
        """
        Register a distribution on PyPI, using the provided metadata.

        :param metadata: A :class:`Metadata` instance defining at least a name
                         and version number for the distribution to be
                         registered.
        :return: The HTTP response received from PyPI upon submission of the
                request.
        """
        self.check_credentials()
        metadata.validate()
        d = metadata.todict()
        # Two-step protocol: verify the metadata first, then submit it.
        d[':action'] = 'verify'
        request = self.encode_request(d.items(), [])
        response = self.send_request(request)
        d[':action'] = 'submit'
        request = self.encode_request(d.items(), [])
        return self.send_request(request)

    def _reader(self, name, stream, outbuf):
        """
        Thread runner for reading lines of from a subprocess into a buffer.

        :param name: The logical name of the stream (used for logging only).
        :param stream: The stream to read from. This will typically a pipe
                       connected to the output stream of a subprocess.
        :param outbuf: The list to append the read lines to.
        """
        while True:
            s = stream.readline()
            if not s:
                break
            s = s.decode('utf-8').rstrip()
            outbuf.append(s)
            logger.debug('%s: %s' % (name, s))
        stream.close()

    def get_sign_command(self, filename, signer, sign_password):
        """
        Return a suitable command for signing a file.

        :param filename: The pathname to the file to be signed.
        :param signer: The identifier of the signer of the file.
        :param sign_password: The passphrase for the signer's
                              private key used for signing.
        :return: The signing command as a list suitable to be
                 passed to :class:`subprocess.Popen`.
        """
        cmd = [self.gpg, '--status-fd', '2', '--no-tty']
        if self.gpg_home:
            cmd.extend(['--homedir', self.gpg_home])
        if sign_password is not None:
            # Passphrase will be fed on stdin by run_command.
            cmd.extend(['--batch', '--passphrase-fd', '0'])
        td = tempfile.mkdtemp()
        sf = os.path.join(td, os.path.basename(filename) + '.asc')
        cmd.extend(['--detach-sign', '--armor', '--local-user',
                    signer, '--output', sf, filename])
        logger.debug('invoking: %s', ' '.join(cmd))
        return cmd, sf

    def run_command(self, cmd, input_data=None):
        """
        Run a command in a child process , passing it any input data specified.

        :param cmd: The command to run.
        :param input_data: If specified, this must be a byte string containing
                           data to be sent to the child process.
        :return: A tuple consisting of the subprocess' exit code, a list of
                 lines read from the subprocess' ``stdout``, and a list of
                 lines read from the subprocess' ``stderr``.
        """
        kwargs = {
            'stdout': subprocess.PIPE,
            'stderr': subprocess.PIPE,
        }
        if input_data is not None:
            kwargs['stdin'] = subprocess.PIPE
        stdout = []
        stderr = []
        p = subprocess.Popen(cmd, **kwargs)
        # We don't use communicate() here because we may need to
        # get clever with interacting with the command
        t1 = Thread(target=self._reader, args=('stdout', p.stdout, stdout))
        t1.start()
        t2 = Thread(target=self._reader, args=('stderr', p.stderr, stderr))
        t2.start()
        if input_data is not None:
            p.stdin.write(input_data)
            p.stdin.close()

        p.wait()
        t1.join()
        t2.join()
        return p.returncode, stdout, stderr

    def sign_file(self, filename, signer, sign_password):
        """
        Sign a file.

        :param filename: The pathname to the file to be signed.
        :param signer: The identifier of the signer of the file.
        :param sign_password: The passphrase for the signer's
                              private key used for signing.
        :return: The absolute pathname of the file where the signature is
                 stored.
        """
        cmd, sig_file = self.get_sign_command(filename, signer,
                                              sign_password)
        rc, stdout, stderr = self.run_command(cmd,
                                              sign_password.encode('utf-8'))
        if rc != 0:
            raise DistlibException('sign command failed with error '
                                   'code %s' % rc)
        return sig_file

    def upload_file(self, metadata, filename, signer=None, sign_password=None,
                    filetype='sdist', pyversion='source'):
        """
        Upload a release file to the index.

        :param metadata: A :class:`Metadata` instance defining at least a name
                         and version number for the file to be uploaded.
        :param filename: The pathname of the file to be uploaded.
        :param signer: The identifier of the signer of the file.
        :param sign_password: The passphrase for the signer's
                              private key used for signing.
        :param filetype: The type of the file being uploaded. This is the
                        distutils command which produced that file, e.g.
                        ``sdist`` or ``bdist_wheel``.
        :param pyversion: The version of Python which the release relates
                          to. For code compatible with any Python, this would
                          be ``source``, otherwise it would be e.g. ``3.2``.
        :return: The HTTP response received from PyPI upon submission of the
                request.
        """
        self.check_credentials()
        if not os.path.exists(filename):
            raise DistlibException('not found: %s' % filename)
        metadata.validate()
        d = metadata.todict()
        sig_file = None
        if signer:
            if not self.gpg:
                logger.warning('no signing program available - not signed')
            else:
                sig_file = self.sign_file(filename, signer, sign_password)
        with open(filename, 'rb') as f:
            file_data = f.read()
        md5_digest = hashlib.md5(file_data).hexdigest()
        sha256_digest = hashlib.sha256(file_data).hexdigest()
        d.update({
            ':action': 'file_upload',
            'protcol_version': '1',
            'filetype': filetype,
            'pyversion': pyversion,
            'md5_digest': md5_digest,
            'sha256_digest': sha256_digest,
        })
        files = [('content', os.path.basename(filename), file_data)]
        if sig_file:
            with open(sig_file, 'rb') as f:
                sig_data = f.read()
            files.append(('gpg_signature', os.path.basename(sig_file),
                         sig_data))
            # Signature file lives in a throwaway temp dir made by
            # get_sign_command; clean the whole dir up now.
            shutil.rmtree(os.path.dirname(sig_file))
        request = self.encode_request(d.items(), files)
        return self.send_request(request)

    def upload_documentation(self, metadata, doc_dir):
        """
        Upload documentation to the index.

        :param metadata: A :class:`Metadata` instance defining at least a name
                         and version number for the documentation to be
                         uploaded.
        :param doc_dir: The pathname of the directory which contains the
                        documentation. This should be the directory that
                        contains the ``index.html`` for the documentation.
        :return: The HTTP response received from PyPI upon submission of the
                request.
        """
        self.check_credentials()
        if not os.path.isdir(doc_dir):
            raise DistlibException('not a directory: %r' % doc_dir)
        fn = os.path.join(doc_dir, 'index.html')
        if not os.path.exists(fn):
            raise DistlibException('not found: %r' % fn)
        metadata.validate()
        name, version = metadata.name, metadata.version
        # The whole doc tree is zipped in memory and posted as one file.
        zip_data = zip_dir(doc_dir).getvalue()
        fields = [(':action', 'doc_upload'),
                  ('name', name), ('version', version)]
        files = [('content', name, zip_data)]
        request = self.encode_request(fields, files)
        return self.send_request(request)

    def get_verify_command(self, signature_filename, data_filename):
        """
        Return a suitable command for verifying a file.

        :param signature_filename: The pathname to the file containing the
                                   signature.
        :param data_filename: The pathname to the file containing the
                              signed data.
        :return: The verifying command as a list suitable to be
                 passed to :class:`subprocess.Popen`.
        """
        cmd = [self.gpg, '--status-fd', '2', '--no-tty']
        if self.gpg_home:
            cmd.extend(['--homedir', self.gpg_home])
        cmd.extend(['--verify', signature_filename, data_filename])
        logger.debug('invoking: %s', ' '.join(cmd))
        return cmd

    def verify_signature(self, signature_filename, data_filename):
        """
        Verify a signature for a file.

        :param signature_filename: The pathname to the file containing the
                                   signature.
        :param data_filename: The pathname to the file containing the
                              signed data.
        :return: True if the signature was verified, else False.
        """
        if not self.gpg:
            raise DistlibException('verification unavailable because gpg '
                                   'unavailable')
        cmd = self.get_verify_command(signature_filename, data_filename)
        rc, stdout, stderr = self.run_command(cmd)
        # gpg exits 0 for good signature, 1 for bad; anything else is an
        # operational failure.
        if rc not in (0, 1):
            raise DistlibException('verify command failed with error '
                             'code %s' % rc)
        return rc == 0

    def download_file(self, url, destfile, digest=None, reporthook=None):
        """
        This is a convenience method for downloading a file from an URL.
        Normally, this will be a file from the index, though currently
        no check is made for this (i.e. a file can be downloaded from
        anywhere).

        The method is just like the :func:`urlretrieve` function in the
        standard library, except that it allows digest computation to be
        done during download and checking that the downloaded data
        matched any expected value.

        :param url: The URL of the file to be downloaded (assumed to be
                    available via an HTTP GET request).
        :param destfile: The pathname where the downloaded file is to be
                         saved.
        :param digest: If specified, this must be a (hasher, value)
                       tuple, where hasher is the algorithm used (e.g.
                       ``'md5'``) and ``value`` is the expected value.
        :param reporthook: The same as for :func:`urlretrieve` in the
                           standard library.
        """
        if digest is None:
            digester = None
            logger.debug('No digest specified')
        else:
            if isinstance(digest, (list, tuple)):
                hasher, digest = digest
            else:
                hasher = 'md5'
            digester = getattr(hashlib, hasher)()
            logger.debug('Digest specified: %s' % digest)
        # The following code is equivalent to urlretrieve.
        # We need to do it this way so that we can compute the
        # digest of the file as we go.
        with open(destfile, 'wb') as dfp:
            # addinfourl is not a context manager on 2.x
            # so we have to use try/finally
            sfp = self.send_request(Request(url))
            try:
                headers = sfp.info()
                blocksize = 8192
                size = -1
                read = 0
                blocknum = 0
                if "content-length" in headers:
                    size = int(headers["Content-Length"])
                if reporthook:
                    reporthook(blocknum, blocksize, size)
                while True:
                    block = sfp.read(blocksize)
                    if not block:
                        break
                    read += len(block)
                    dfp.write(block)
                    if digester:
                        digester.update(block)
                    blocknum += 1
                    if reporthook:
                        reporthook(blocknum, blocksize, size)
            finally:
                sfp.close()

        # check that we got the whole file, if we can
        if size >= 0 and read < size:
            raise DistlibException(
                'retrieval incomplete: got only %d out of %d bytes'
                % (read, size))
        # if we have a digest, it must match.
        if digester:
            actual = digester.hexdigest()
            if digest != actual:
                raise DistlibException('%s digest mismatch for %s: expected '
                                       '%s, got %s' % (hasher, destfile,
                                                       digest, actual))
            logger.debug('Digest verified: %s', digest)

    def send_request(self, req):
        """
        Send a standard library :class:`Request` to PyPI and return its
        response.

        :param req: The request to send.
        :return: The HTTP response from PyPI (a standard library HTTPResponse).
        """
        handlers = []
        if self.password_handler:
            handlers.append(self.password_handler)
        if self.ssl_verifier:
            handlers.append(self.ssl_verifier)
        opener = build_opener(*handlers)
        return opener.open(req)

    def encode_request(self, fields, files):
        """
        Encode fields and files for posting to an HTTP server.

        :param fields: The fields to send as a list of (fieldname, value)
                       tuples.
        :param files: The files to send as a list of (fieldname, filename,
                      file_bytes) tuple.
        """
        # Adapted from packaging, which in turn was adapted from
        # http://code.activestate.com/recipes/146306
        parts = []
        boundary = self.boundary
        for k, values in fields:
            if not isinstance(values, (list, tuple)):
                values = [values]

            for v in values:
                parts.extend((
                    b'--' + boundary,
                    ('Content-Disposition: form-data; name="%s"' %
                     k).encode('utf-8'),
                    b'',
                    v.encode('utf-8')))
        for key, filename, value in files:
            parts.extend((
                b'--' + boundary,
                ('Content-Disposition: form-data; name="%s"; filename="%s"' %
                 (key, filename)).encode('utf-8'),
                b'',
                value))

        parts.extend((b'--' + boundary + b'--', b''))

        body = b'\r\n'.join(parts)
        ct = b'multipart/form-data; boundary=' + boundary
        headers = {
            'Content-type': ct,
            'Content-length': str(len(body))
        }
        return Request(self.url, body, headers)

    def search(self, terms, operator=None):
        # A bare string searches the name field; a dict searches per-field.
        if isinstance(terms, string_types):
            terms = {'name': terms}
        if self.rpc_proxy is None:
            self.rpc_proxy = ServerProxy(self.url, timeout=3.0)
        return self.rpc_proxy.search(terms, operator or 'and')
thomasaarholt/hyperspy
refs/heads/master
hyperspy/datasets/example_signals.py
11
from hyperspy.misc.example_signals_loading import load_1D_EDS_SEM_spectrum as\ EDS_SEM_Spectrum from hyperspy.misc.example_signals_loading import load_1D_EDS_TEM_spectrum as\ EDS_TEM_Spectrum from hyperspy.misc.example_signals_loading import load_object_hologram as object_hologram from hyperspy.misc.example_signals_loading import load_reference_hologram as reference_hologram
storm-computers/odoo
refs/heads/9.0
addons/crm/wizard/__init__.py
41
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. import crm_lead_lost import crm_partner_binding import crm_lead_to_opportunity import crm_merge_opportunities
Deepakkothandan/ansible
refs/heads/devel
lib/ansible/playbook/role/definition.py
62
# (c) 2014 Michael DeHaan, <michael@ansible.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os from ansible import constants as C from ansible.errors import AnsibleError from ansible.module_utils.six import iteritems, string_types from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping from ansible.playbook.attribute import Attribute, FieldAttribute from ansible.playbook.base import Base from ansible.playbook.become import Become from ansible.playbook.conditional import Conditional from ansible.playbook.taggable import Taggable from ansible.template import Templar from ansible.utils.path import unfrackpath try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() __all__ = ['RoleDefinition'] class RoleDefinition(Base, Become, Conditional, Taggable): _role = FieldAttribute(isa='string') def __init__(self, play=None, role_basedir=None, variable_manager=None, loader=None): super(RoleDefinition, self).__init__() self._play = play self._variable_manager = variable_manager self._loader = loader self._role_path = None self._role_basedir = role_basedir self._role_params = dict() # def __repr__(self): # return 'ROLEDEF: ' + self._attributes.get('role', '<no name set>') 
    @staticmethod
    def load(data, variable_manager=None, loader=None):
        # RoleDefinition objects are not built through the generic Base.load()
        # entry point; callers go through preprocess_data()/load_data() on an
        # instance instead, so this static constructor is deliberately disabled.
        raise AnsibleError("not implemented")

    def preprocess_data(self, ds):
        """Normalize a raw role definition into a clean AnsibleMapping.

        ``ds`` may be a bare string (the role name), a dict, or an
        AnsibleBaseYAMLObject from the YAML parser.  Side effects: stores the
        original structure in ``self._ds``, the resolved path in
        ``self._role_path`` and any extra role params in ``self._role_params``.
        Returns the cleaned-up mapping with the resolved role name under 'role'.
        """
        # role names that are simply numbers can be parsed by PyYAML
        # as integers even when quoted, so turn it into a string type
        if isinstance(ds, int):
            ds = "%s" % ds

        # NOTE(review): assert is stripped when Python runs with -O, so this
        # validation silently disappears under optimization — consider raising
        # an explicit error instead.
        assert isinstance(ds, dict) or isinstance(ds, string_types) or isinstance(ds, AnsibleBaseYAMLObject)

        if isinstance(ds, dict):
            ds = super(RoleDefinition, self).preprocess_data(ds)

        # save the original ds for use later (e.g. for error reporting)
        self._ds = ds

        # we create a new data structure here, using the same
        # object used internally by the YAML parsing code so we
        # can preserve file:line:column information if it exists
        new_ds = AnsibleMapping()
        if isinstance(ds, AnsibleBaseYAMLObject):
            new_ds.ansible_pos = ds.ansible_pos

        # first we pull the role name out of the data structure,
        # and then use that to determine the role path (which may
        # result in a new role name, if it was a file path)
        role_name = self._load_role_name(ds)
        (role_name, role_path) = self._load_role_path(role_name)

        # next, we split the role params out from the valid role
        # attributes and update the new datastructure with that
        # result and the role name
        if isinstance(ds, dict):
            (new_role_def, role_params) = self._split_role_params(ds)
            new_ds.update(new_role_def)
            self._role_params = role_params

        # set the role name in the new ds
        new_ds['role'] = role_name

        # we store the role path internally
        self._role_path = role_path

        # and return the cleaned-up data structure
        return new_ds

    def _load_role_name(self, ds):
        '''
        Returns the role name (either the role: or name: field) from
        the role definition, or (when the role definition is a simple
        string), just that string
        '''

        if isinstance(ds, string_types):
            return ds

        role_name = ds.get('role', ds.get('name'))
        if not role_name or not isinstance(role_name, string_types):
            raise AnsibleError('role definitions must contain a role name', obj=ds)

        # if we have the required datastructures, and if the role_name
        # contains a variable, try and template it now
        if self._variable_manager:
            all_vars = self._variable_manager.get_vars(play=self._play)
            templar = Templar(loader=self._loader, variables=all_vars)
            if templar._contains_vars(role_name):
                role_name = templar.template(role_name)

        return role_name

    def _load_role_path(self, role_name):
        '''
        the 'role', as specified in the ds (or as a bare string), can either
        be a simple name or a full path. If it is a full path, we use the
        basename as the role name, otherwise we take the name as-given and
        append it to the default role path
        '''

        # Search-path priority (first hit wins):
        #   1. <playbook basedir>/roles
        #   2. configured C.DEFAULT_ROLES_PATH entries
        #   3. the dependent-role basedir, if one was given at construction
        #   4. the playbook basedir itself (no roles/ suffix)
        #   5. the role name interpreted directly as a path

        # we always start the search for roles in the base directory of the playbook
        role_search_paths = [
            os.path.join(self._loader.get_basedir(), u'roles'),
        ]

        # also search in the configured roles path
        if C.DEFAULT_ROLES_PATH:
            role_search_paths.extend(C.DEFAULT_ROLES_PATH)

        # next, append the roles basedir, if it was set, so we can
        # search relative to that directory for dependent roles
        if self._role_basedir:
            role_search_paths.append(self._role_basedir)

        # finally as a last resort we look in the current basedir as set
        # in the loader (which should be the playbook dir itself) but without
        # the roles/ dir appended
        role_search_paths.append(self._loader.get_basedir())

        # create a templar class to template the dependency names, in
        # case they contain variables
        if self._variable_manager is not None:
            all_vars = self._variable_manager.get_vars(play=self._play)
        else:
            all_vars = dict()
        templar = Templar(loader=self._loader, variables=all_vars)
        role_name = templar.template(role_name)

        # now iterate through the possible paths and return the first one we find
        for path in role_search_paths:
            path = templar.template(path)
            role_path = unfrackpath(os.path.join(path, role_name))
            if self._loader.path_exists(role_path):
                return (role_name, role_path)

        # if not found elsewhere try to extract path from name
        role_path = unfrackpath(role_name)
        if self._loader.path_exists(role_path):
            # the role was given as a path, so the basename becomes the name
            role_name = os.path.basename(role_name)
            return (role_name, role_path)

        raise AnsibleError("the role '%s' was not found in %s" % (role_name, ":".join(role_search_paths)), obj=self._ds)

    def _split_role_params(self, ds):
        '''
        Splits any random role params off from the role spec and store
        them in a dictionary of params for parsing later
        '''

        role_def = dict()
        role_params = dict()
        base_attribute_names = frozenset(self._valid_attrs.keys())
        for (key, value) in iteritems(ds):
            # use the list of FieldAttribute values to determine what is and is not
            # an extra parameter for this role (or sub-class of this role)
            # FIXME: hard-coded list of exception key names here corresponds to the
            #        connection fields in the Base class. There may need to be some
            #        other mechanism where we exclude certain kinds of field attributes,
            #        or make this list more automatic in some way so we don't have to
            #        remember to update it manually.
            if key not in base_attribute_names or key in ('connection', 'port', 'remote_user'):
                if key in ('connection', 'port', 'remote_user'):
                    # deprecated keys are stored as BOTH a param and an attribute
                    display.deprecated("Using '%s' as a role param has been deprecated. " % key +
                                       "In the future, these values should be entered in the `vars:` " +
                                       "section for roles, but for now we'll store it as both a param and an attribute.", version="2.7")
                    role_def[key] = value
                # this key does not match a field attribute, so it must be a role param
                role_params[key] = value
            else:
                # this is a field attribute, so copy it over directly
                role_def[key] = value

        return (role_def, role_params)

    def get_role_params(self):
        """Return a shallow copy of the extra params split off the role spec."""
        return self._role_params.copy()

    def get_role_path(self):
        """Return the resolved on-disk path of the role (set by preprocess_data)."""
        return self._role_path
mcgachey/edx-platform
refs/heads/master
common/lib/xmodule/xmodule/templates.py
231
""" This module handles loading xmodule templates These templates are used by the CMS to provide content that overrides xmodule defaults for samples. ``Template``s are defined in x_module. They contain 2 attributes: :metadata: A dictionary with the template metadata :data: A JSON value that defines the template content """ # should this move to cms since it's really only for module crud? import logging from collections import defaultdict from xblock.core import XBlock log = logging.getLogger(__name__) def all_templates(): """ Returns all templates for enabled modules, grouped by descriptor type """ # TODO use memcache to memoize w/ expiration templates = defaultdict(list) for category, descriptor in XBlock.load_classes(): if not hasattr(descriptor, 'templates'): continue templates[category] = descriptor.templates() return templates
misterdanb/micropython
refs/heads/master
tests/pyb/pin.py
71
# Hardware regression test for the pyb.Pin API on pin X1: construction,
# introspection, re-initialisation with pull resistors, and output control.
# Output is compared against an expected-output file by the test harness,
# so every print() below is part of the test contract.
from pyb import Pin

# construct as a plain input and exercise the identity/location accessors
p = Pin('X1', Pin.IN)
print(p)
print(p.name())
print(p.pin())
print(p.port())

# the pull resistor may be passed positionally, by keyword, or with mode
# also given by keyword — all three spellings must be accepted
p = Pin('X1', Pin.IN, Pin.PULL_UP)
p = Pin('X1', Pin.IN, pull=Pin.PULL_UP)
p = Pin('X1', mode=Pin.IN, pull=Pin.PULL_UP)
print(p)
print(p.value())

# init() accepts the same positional/keyword argument variants
p.init(p.IN, p.PULL_DOWN)
p.init(p.IN, pull=p.PULL_DOWN)
p.init(mode=p.IN, pull=p.PULL_DOWN)
print(p)
print(p.value())

# switch to push-pull output and drive the level through every setter
# form: low()/high(), and value() with int and bool arguments
p.init(p.OUT_PP)
p.low()
print(p.value())
p.high()
print(p.value())
p.value(0)
print(p.value())
p.value(1)
print(p.value())
p.value(False)
print(p.value())
p.value(True)
print(p.value())
paolodedios/tensorflow
refs/heads/master
tensorflow/python/keras/layers/local_test.py
5
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for locally-connected layers.""" import os from absl.testing import parameterized import numpy as np from tensorflow.python import keras from tensorflow.python.framework import dtypes from tensorflow.python.framework import test_util as tf_test_util from tensorflow.python.keras import combinations from tensorflow.python.keras import testing_utils from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn from tensorflow.python.platform import test from tensorflow.python.keras.optimizer_v2 import rmsprop from tensorflow.python.training.rmsprop import RMSPropOptimizer _DATA_FORMAT_PADDING_IMPLEMENTATION = [{ 'data_format': 'channels_first', 'padding': 'valid', 'implementation': 1 }, { 'data_format': 'channels_first', 'padding': 'same', 'implementation': 1 }, { 'data_format': 'channels_last', 'padding': 'valid', 'implementation': 1 }, { 'data_format': 'channels_last', 'padding': 'same', 'implementation': 1 }, { 'data_format': 'channels_first', 'padding': 'valid', 'implementation': 2 }, { 'data_format': 'channels_first', 'padding': 'same', 'implementation': 2 }, { 'data_format': 'channels_last', 'padding': 'valid', 'implementation': 2 }, { 'data_format': 'channels_last', 'padding': 'same', 'implementation': 2 }, { 
'data_format': 'channels_first', 'padding': 'valid', 'implementation': 3 }, { 'data_format': 'channels_first', 'padding': 'same', 'implementation': 3 }, { 'data_format': 'channels_last', 'padding': 'valid', 'implementation': 3 }, { 'data_format': 'channels_last', 'padding': 'same', 'implementation': 3 }] @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class LocallyConnected1DLayersTest(test.TestCase, parameterized.TestCase): @parameterized.parameters(_DATA_FORMAT_PADDING_IMPLEMENTATION) def test_locallyconnected_1d(self, data_format, padding, implementation): with self.cached_session(): num_samples = 2 num_steps = 8 input_dim = 5 filter_length = 3 filters = 4 for strides in [1]: if padding == 'same' and strides != 1: continue kwargs = { 'filters': filters, 'kernel_size': filter_length, 'padding': padding, 'strides': strides, 'data_format': data_format, 'implementation': implementation } if padding == 'same' and implementation == 1: self.assertRaises(ValueError, keras.layers.LocallyConnected1D, **kwargs) else: testing_utils.layer_test( keras.layers.LocallyConnected1D, kwargs=kwargs, input_shape=(num_samples, num_steps, input_dim)) @parameterized.parameters(_DATA_FORMAT_PADDING_IMPLEMENTATION) def test_locallyconnected_1d_regularization(self, data_format, padding, implementation): num_samples = 2 num_steps = 8 input_dim = 5 filter_length = 3 filters = 4 kwargs = { 'filters': filters, 'kernel_size': filter_length, 'kernel_regularizer': 'l2', 'bias_regularizer': 'l2', 'activity_regularizer': 'l2', 'data_format': data_format, 'implementation': implementation, 'padding': padding } if padding == 'same' and implementation == 1: self.assertRaises(ValueError, keras.layers.LocallyConnected1D, **kwargs) else: with self.cached_session(): layer = keras.layers.LocallyConnected1D(**kwargs) layer.build((num_samples, num_steps, input_dim)) self.assertEqual(len(layer.losses), 2) layer( keras.backend.variable( np.ones((num_samples, num_steps, input_dim)))) 
self.assertEqual(len(layer.losses), 3) k_constraint = keras.constraints.max_norm(0.01) b_constraint = keras.constraints.max_norm(0.01) kwargs = { 'filters': filters, 'kernel_size': filter_length, 'kernel_constraint': k_constraint, 'bias_constraint': b_constraint, } with self.cached_session(): layer = keras.layers.LocallyConnected1D(**kwargs) layer.build((num_samples, num_steps, input_dim)) self.assertEqual(layer.kernel.constraint, k_constraint) self.assertEqual(layer.bias.constraint, b_constraint) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class LocallyConnected2DLayersTest(test.TestCase, parameterized.TestCase): @parameterized.parameters(_DATA_FORMAT_PADDING_IMPLEMENTATION) def test_locallyconnected_2d(self, data_format, padding, implementation): with self.cached_session(): num_samples = 8 filters = 3 stack_size = 4 num_row = 6 num_col = 10 for strides in [(1, 1), (2, 2)]: if padding == 'same' and strides != (1, 1): continue kwargs = { 'filters': filters, 'kernel_size': 3, 'padding': padding, 'kernel_regularizer': 'l2', 'bias_regularizer': 'l2', 'strides': strides, 'data_format': data_format, 'implementation': implementation } if padding == 'same' and implementation == 1: self.assertRaises(ValueError, keras.layers.LocallyConnected2D, **kwargs) else: testing_utils.layer_test( keras.layers.LocallyConnected2D, kwargs=kwargs, input_shape=(num_samples, num_row, num_col, stack_size)) @parameterized.parameters(_DATA_FORMAT_PADDING_IMPLEMENTATION) def test_locallyconnected_2d_channels_first(self, data_format, padding, implementation): with self.cached_session(): num_samples = 8 filters = 3 stack_size = 4 num_row = 6 num_col = 10 kwargs = { 'filters': filters, 'kernel_size': 3, 'data_format': data_format, 'implementation': implementation, 'padding': padding } if padding == 'same' and implementation == 1: self.assertRaises(ValueError, keras.layers.LocallyConnected2D, **kwargs) else: testing_utils.layer_test( keras.layers.LocallyConnected2D, 
kwargs=kwargs, input_shape=(num_samples, num_row, num_col, stack_size)) @parameterized.parameters(_DATA_FORMAT_PADDING_IMPLEMENTATION) def test_locallyconnected_2d_regularization(self, data_format, padding, implementation): num_samples = 2 filters = 3 stack_size = 4 num_row = 6 num_col = 7 kwargs = { 'filters': filters, 'kernel_size': 3, 'kernel_regularizer': 'l2', 'bias_regularizer': 'l2', 'activity_regularizer': 'l2', 'implementation': implementation, 'padding': padding, 'data_format': data_format } if padding == 'same' and implementation == 1: self.assertRaises(ValueError, keras.layers.LocallyConnected2D, **kwargs) else: with self.cached_session(): layer = keras.layers.LocallyConnected2D(**kwargs) layer.build((num_samples, num_row, num_col, stack_size)) self.assertEqual(len(layer.losses), 2) layer( keras.backend.variable( np.ones((num_samples, num_row, num_col, stack_size)))) self.assertEqual(len(layer.losses), 3) k_constraint = keras.constraints.max_norm(0.01) b_constraint = keras.constraints.max_norm(0.01) kwargs = { 'filters': filters, 'kernel_size': 3, 'kernel_constraint': k_constraint, 'bias_constraint': b_constraint, } with self.cached_session(): layer = keras.layers.LocallyConnected2D(**kwargs) layer.build((num_samples, num_row, num_col, stack_size)) self.assertEqual(layer.kernel.constraint, k_constraint) self.assertEqual(layer.bias.constraint, b_constraint) @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class LocallyConnectedImplementationModeTest(test.TestCase, parameterized.TestCase): @parameterized.parameters([ {'width': 1, 'data_format': 'channels_first'}, {'width': 1, 'data_format': 'channels_last'}, {'width': 6, 'data_format': 'channels_first'}, {'width': 6, 'data_format': 'channels_last'}, ]) def test_locallyconnected_implementation(self, width, data_format): with self.cached_session(): num_samples = 4 num_classes = 3 num_epochs = 2 np.random.seed(1) tf_test_util.random_seed.set_seed(1) targets = np.random.randint(0, 
num_classes, (num_samples,)) height = 7 filters = 2 inputs = get_inputs(data_format, filters, height, num_samples, width) kernel_x = (3,) kernel_y = () if width == 1 else (2,) stride_x = (1,) stride_y = () if width == 1 else (3,) layers = 2 kwargs = { 'layers': layers, 'filters': filters, 'kernel_size': kernel_x + kernel_y, 'strides': stride_x + stride_y, 'data_format': data_format, 'num_classes': num_classes } model_1 = get_model(implementation=1, **kwargs) model_2 = get_model(implementation=2, **kwargs) model_3 = get_model(implementation=3, **kwargs) # Build models. model_1.train_on_batch(inputs, targets) model_2.train_on_batch(inputs, targets) model_3.train_on_batch(inputs, targets) # Copy weights. copy_model_weights(model_from=model_2, model_to=model_1) copy_model_weights(model_from=model_2, model_to=model_3) # Compare outputs at initialization. out_1 = model_1(inputs) out_2 = model_2(inputs) out_3 = model_3(inputs) self.assertAllCloseAccordingToType( out_2, out_1, rtol=1e-5, atol=1e-5) self.assertAllCloseAccordingToType( out_2, out_3, rtol=1e-5, atol=1e-5) self.assertAllCloseAccordingToType( out_1, out_3, rtol=1e-5, atol=1e-5) # Train. model_1.fit( x=inputs, y=targets, epochs=num_epochs, batch_size=num_samples, shuffle=False) model_2.fit( x=inputs, y=targets, epochs=num_epochs, batch_size=num_samples, shuffle=False) model_3.fit( x=inputs, y=targets, epochs=num_epochs, batch_size=num_samples, shuffle=False) # Compare outputs after a few training steps. 
out_1 = model_1(inputs) out_2 = model_2(inputs) out_3 = model_3(inputs) self.assertAllCloseAccordingToType( out_2, out_1, atol=2e-4) self.assertAllCloseAccordingToType( out_2, out_3, atol=2e-4) self.assertAllCloseAccordingToType( out_1, out_3, atol=2e-4) @parameterized.parameters([ { 'width': 1, 'data_format': 'channels_first' }, { 'width': 1, 'data_format': 'channels_last' }, { 'width': 6, 'data_format': 'channels_first' }, { 'width': 6, 'data_format': 'channels_last' }, ]) def test_locallyconnected_save(self, width, data_format): with self.cached_session(): num_samples = 4 num_classes = 3 num_epochs = 2 np.random.seed(1) tf_test_util.random_seed.set_seed(1) targets = np.random.randint(0, num_classes, (num_samples,)) height = 7 filters = 2 inputs = get_inputs(data_format, filters, height, num_samples, width) kernel_x = (3,) kernel_y = () if width == 1 else (2,) stride_x = (1,) stride_y = () if width == 1 else (3,) layers = 2 kwargs = { 'layers': layers, 'filters': filters, 'kernel_size': kernel_x + kernel_y, 'strides': stride_x + stride_y, 'data_format': data_format, 'num_classes': num_classes } model_1 = get_model_saveable(implementation=1, **kwargs) model_2 = get_model_saveable(implementation=2, **kwargs) model_3 = get_model_saveable(implementation=3, **kwargs) # Train. 
model_1.fit( x=inputs, y=targets, epochs=num_epochs, batch_size=num_samples, shuffle=False) model_2.fit( x=inputs, y=targets, epochs=num_epochs, batch_size=num_samples, shuffle=False) model_3.fit( x=inputs, y=targets, epochs=num_epochs, batch_size=num_samples, shuffle=False) out_1_before = model_1(inputs) out_2_before = model_2(inputs) out_3_before = model_3(inputs) path_1 = os.path.join(self.get_temp_dir(), 'model_1_path') model_1.save(path_1) model_1 = keras.models.load_model(path_1, custom_objects={'xent': xent}) path_2 = os.path.join(self.get_temp_dir(), 'model_2_path') model_2.save(path_2) model_2 = keras.models.load_model(path_2, custom_objects={'xent': xent}) path_3 = os.path.join(self.get_temp_dir(), 'model_3_path') model_3.save(path_3) model_3 = keras.models.load_model(path_3, custom_objects={'xent': xent}) out_1_after = model_1(inputs) out_2_after = model_2(inputs) out_3_after = model_3(inputs) self.assertAllCloseAccordingToType(out_1_before, out_1_after, atol=2e-4) self.assertAllCloseAccordingToType(out_2_before, out_2_after, atol=2e-4) self.assertAllCloseAccordingToType(out_3_before, out_3_after, atol=2e-4) def test_make_2d(self): input_shapes = [ (0,), (0, 0), (1,), (2,), (3,), (1, 0), (0, 3), (1, 1), (1, 2), (3, 1), (2, 2), (3, 3), (1, 0, 1), (5, 2, 3), (3, 5, 6, 7, 0), (3, 2, 2, 4, 4), (1, 2, 3, 4, 7, 2), ] np.random.seed(1) for input_shape in input_shapes: inputs = np.random.normal(0, 1, input_shape) inputs_tf = keras.backend.variable(inputs) split_dim = np.random.randint(0, inputs.ndim + 1) shape_2d = (int(np.prod(inputs.shape[:split_dim])), int(np.prod(inputs.shape[split_dim:]))) inputs_2d = np.reshape(inputs, shape_2d) inputs_2d_tf = keras.layers.local.make_2d(inputs_tf, split_dim) inputs_2d_tf = keras.backend.get_value(inputs_2d_tf) self.assertAllCloseAccordingToType(inputs_2d, inputs_2d_tf) def get_inputs(data_format, filters, height, num_samples, width): if data_format == 'channels_first': if width == 1: input_shape = (filters, height) else: 
input_shape = (filters, height, width) elif data_format == 'channels_last': if width == 1: input_shape = (height, filters) else: input_shape = (height, width, filters) else: raise NotImplementedError(data_format) inputs = np.random.normal(0, 1, (num_samples,) + input_shape).astype(np.float32) return inputs def xent(y_true, y_pred): y_true = keras.backend.cast( keras.backend.reshape(y_true, (-1,)), dtypes.int32) return nn.sparse_softmax_cross_entropy_with_logits( labels=y_true, logits=y_pred) def get_model(implementation, filters, kernel_size, strides, layers, num_classes, data_format): model = keras.Sequential() if len(kernel_size) == 1: lc_layer = keras.layers.LocallyConnected1D elif len(kernel_size) == 2: lc_layer = keras.layers.LocallyConnected2D else: raise NotImplementedError(kernel_size) for _ in range(layers): model.add(lc_layer( padding='valid', kernel_initializer=keras.initializers.random_normal(), bias_initializer=keras.initializers.random_normal(), filters=filters, strides=strides, kernel_size=kernel_size, activation=keras.activations.relu, data_format=data_format, implementation=implementation)) model.add(keras.layers.Flatten()) model.add(keras.layers.Dense(num_classes)) model.compile( optimizer=RMSPropOptimizer(0.01), metrics=[keras.metrics.categorical_accuracy], loss=xent ) return model def get_model_saveable(implementation, filters, kernel_size, strides, layers, num_classes, data_format): model = keras.Sequential() if len(kernel_size) == 1: lc_layer = keras.layers.LocallyConnected1D elif len(kernel_size) == 2: lc_layer = keras.layers.LocallyConnected2D else: raise NotImplementedError(kernel_size) for _ in range(layers): model.add( lc_layer( padding='valid', kernel_initializer=keras.initializers.random_normal(), bias_initializer=keras.initializers.random_normal(), filters=filters, strides=strides, kernel_size=kernel_size, activation=keras.activations.relu, data_format=data_format, implementation=implementation)) model.add(keras.layers.Flatten()) 
model.add(keras.layers.Dense(num_classes)) model.compile( optimizer=rmsprop.RMSProp(learning_rate=0.01), metrics=[keras.metrics.categorical_accuracy], loss=xent) return model def copy_lc_weights_2_to_1(lc_layer_2_from, lc_layer_1_to): lc_2_kernel, lc_2_bias = lc_layer_2_from.weights lc_2_kernel_masked = lc_2_kernel * lc_layer_2_from.kernel_mask data_format = lc_layer_2_from.data_format if data_format == 'channels_first': if isinstance(lc_layer_2_from, keras.layers.LocallyConnected1D): permutation = (3, 0, 1, 2) elif isinstance(lc_layer_2_from, keras.layers.LocallyConnected2D): permutation = (4, 5, 0, 1, 2, 3) else: raise NotImplementedError(lc_layer_2_from) elif data_format == 'channels_last': if isinstance(lc_layer_2_from, keras.layers.LocallyConnected1D): permutation = (2, 0, 1, 3) elif isinstance(lc_layer_2_from, keras.layers.LocallyConnected2D): permutation = (3, 4, 0, 1, 2, 5) else: raise NotImplementedError(lc_layer_2_from) else: raise NotImplementedError(data_format) lc_2_kernel_masked = keras.backend.permute_dimensions( lc_2_kernel_masked, permutation) lc_2_kernel_mask = math_ops.not_equal( lc_2_kernel_masked, 0) lc_2_kernel_flat = array_ops.boolean_mask( lc_2_kernel_masked, lc_2_kernel_mask) lc_2_kernel_reshaped = keras.backend.reshape(lc_2_kernel_flat, lc_layer_1_to.kernel.shape) lc_2_kernel_reshaped = keras.backend.get_value(lc_2_kernel_reshaped) lc_2_bias = keras.backend.get_value(lc_2_bias) lc_layer_1_to.set_weights([lc_2_kernel_reshaped, lc_2_bias]) def copy_lc_weights_2_to_3(lc_layer_2_from, lc_layer_3_to): lc_2_kernel, lc_2_bias = lc_layer_2_from.weights lc_2_kernel_masked = lc_2_kernel * lc_layer_2_from.kernel_mask lc_2_kernel_masked = keras.layers.local.make_2d( lc_2_kernel_masked, split_dim=keras.backend.ndim(lc_2_kernel_masked) // 2) lc_2_kernel_masked = keras.backend.transpose(lc_2_kernel_masked) lc_2_kernel_mask = math_ops.not_equal(lc_2_kernel_masked, 0) lc_2_kernel_flat = array_ops.boolean_mask( lc_2_kernel_masked, lc_2_kernel_mask) 
lc_2_kernel_flat = keras.backend.get_value(lc_2_kernel_flat) lc_2_bias = keras.backend.get_value(lc_2_bias) lc_layer_3_to.set_weights([lc_2_kernel_flat, lc_2_bias]) def copy_model_weights(model_from, model_to): for l in range(len(model_from.layers)): layer_from = model_from.layers[l] layer_to = model_to.layers[l] if (isinstance( layer_from, (keras.layers.LocallyConnected2D, keras.layers.LocallyConnected1D)) and isinstance(layer_to, (keras.layers.LocallyConnected2D, keras.layers.LocallyConnected1D))): if layer_from.implementation == 2: if layer_to.implementation == 1: copy_lc_weights_2_to_1(layer_from, layer_to) elif layer_to.implementation == 3: copy_lc_weights_2_to_3(layer_from, layer_to) else: raise NotImplementedError else: raise NotImplementedError elif isinstance(layer_from, keras.layers.Dense): weights_2, bias_2 = layer_from.weights weights_2 = keras.backend.get_value(weights_2) bias_2 = keras.backend.get_value(bias_2) layer_to.set_weights([weights_2, bias_2]) else: continue if __name__ == '__main__': test.main()
uwdata/termite-data-server
refs/heads/master
web2py/gluon/fileutils.py
9
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
| This file is part of the web2py Web Framework
| Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)

File operations
---------------
"""

import storage
import os
import re
import tarfile
import glob
import time
import datetime
import logging
from http import HTTP
from gzip import open as gzopen

__all__ = [
    'parse_version',
    'read_file',
    'write_file',
    'readlines_file',
    'up',
    'abspath',
    'mktree',
    'listdir',
    'recursive_unlink',
    'cleanpath',
    'tar',
    'untar',
    'tar_compiled',
    'get_session',
    'check_credentials',
    'w2p_pack',
    'w2p_unpack',
    'w2p_pack_plugin',
    'w2p_unpack_plugin',
    'fix_newlines',
    'make_fake_file_like_object',
]


def parse_semantic(version="Version 1.99.0-rc.1+timestamp.2011.09.19.08.23.26"):
    """Parses a version string according to http://semver.org/ rules

    Args:
        version(str): the SemVer string

    Returns:
        tuple: Major, Minor, Patch, Release, Build Date
    """
    # NOTE(review): the "+build" group is not optional in this regex, so a
    # SemVer string without a build part returns None and the caller falls
    # back to parse_legacy() -- confirm this is intentional.
    re_version = re.compile('(\d+)\.(\d+)\.(\d+)(\-(?P<pre>[^\s+]*))?(\+(?P<build>\S*))')
    # Only the last whitespace-separated token is matched, so a leading
    # "Version " prefix is ignored.
    m = re_version.match(version.strip().split()[-1])
    if not m:
        return None
    a, b, c = int(m.group(1)), int(m.group(2)), int(m.group(3))
    pre_release = m.group('pre') or ''
    build = m.group('build') or ''
    if build.startswith('timestamp'):
        # Convert "timestamp.YYYY.MM.DD.HH.MM.SS" into a datetime object.
        build = datetime.datetime.strptime(build.split('.',1)[1], '%Y.%m.%d.%H.%M.%S')
    return (a, b, c, pre_release, build)


def parse_legacy(version="Version 1.99.0 (2011-09-19 08:23:26)"):
    """Parses "legacy" version string

    Args:
        version(str): the version string

    Returns:
        tuple: Major, Minor, Patch, Release, Build Date
    """
    re_version = re.compile('[^\d]+ (\d+)\.(\d+)\.(\d+)\s*\((?P<datetime>.+?)\)\s*(?P<type>[a-z]+)?')
    m = re_version.match(version)
    a, b, c = int(m.group(1)), int(m.group(2)), int(m.group(3)),
    # Legacy strings without an explicit type are treated as dev builds.
    pre_release = m.group('type') or 'dev'
    build = datetime.datetime.strptime(m.group('datetime'), '%Y-%m-%d %H:%M:%S')
    return (a, b, c, pre_release, build)


def parse_version(version):
    """Attempts to parse SemVer, fallbacks on legacy

    Returns:
        tuple: (Major, Minor, Patch, Release, Build Date) or the result of
        parse_legacy() when the string is not valid SemVer
    """
    version_tuple = parse_semantic(version)
    if not version_tuple:
        version_tuple = parse_legacy(version)
    return version_tuple


def read_file(filename, mode='r'):
    """Returns content from filename, making sure to close the file
    explicitly on exit.
    """
    f = open(filename, mode)
    try:
        return f.read()
    finally:
        f.close()


def write_file(filename, value, mode='w'):
    """Writes <value> to filename, making sure to close the file
    explicitly on exit.
    """
    f = open(filename, mode)
    try:
        return f.write(value)
    finally:
        f.close()


def readlines_file(filename, mode='r'):
    """Applies .split('\n') to the output of `read_file()`
    """
    return read_file(filename, mode).split('\n')


def mktree(path):
    """Recursively creates every missing parent directory of `path`.

    The final component (`tail`) itself is not created.
    """
    head, tail = os.path.split(path)
    if head:
        if tail:
            mktree(head)
        if not os.path.exists(head):
            os.mkdir(head)


def listdir(
    path,
    expression='^.+$',
    drop=True,
    add_dirs=False,
    sort=True,
    maxnum = None,
):
    """
    Like `os.listdir()` but you can specify a regex pattern to filter files.
    If `add_dirs` is True, the returned items will have the full path.

    Args:
        path(str): directory to walk (recursively)
        expression(str): regex a file basename must match to be included
        drop(bool): if True, strip the leading `path` prefix from results
        add_dirs(bool): if True, also include directory entries
        sort(bool): if True, return results sorted
        maxnum(int): stop after collecting this many items (approximate:
            checked per-file, per-directory)
    """
    if path[-1:] != os.path.sep:
        path = path + os.path.sep
    if drop:
        n = len(path)
    else:
        n = 0
    regex = re.compile(expression)
    items = []
    for (root, dirs, files) in os.walk(path, topdown=True):
        # prune hidden directories in place so os.walk does not descend
        for dir in dirs[:]:
            if dir.startswith('.'):
                dirs.remove(dir)
        if add_dirs:
            items.append(root[n:])
        for file in sorted(files):
            if regex.match(file) and not file.startswith('.'):
                items.append(os.path.join(root, file)[n:])
                if maxnum and len(items) >= maxnum:
                    break
    if sort:
        return sorted(items)
    else:
        return items


def recursive_unlink(f):
    """Deletes `f`. If it's a folder, also its contents will be deleted
    """
    if os.path.isdir(f):
        for s in os.listdir(f):
            recursive_unlink(os.path.join(f, s))
        os.rmdir(f)
    elif os.path.isfile(f):
        os.unlink(f)


def cleanpath(path):
    """Turns any expression/path into a valid filename. replaces / with _ and
    removes special characters.
    """
    items = path.split('.')
    if len(items) > 1:
        # keep the last dot (extension separator), sanitize everything else
        path = re.sub('[^\w\.]+', '_', '_'.join(items[:-1]) + '.'
                      + ''.join(items[-1:]))
    else:
        path = re.sub('[^\w\.]+', '_', ''.join(items[-1:]))
    return path


def _extractall(filename, path='.', members=None):
    """Extracts a tar archive, providing a fallback `extractall`
    implementation for very old Python versions lacking it.
    """
    # FIXME: this should be dropped because python 2.4 support was dropped
    if not hasattr(tarfile.TarFile, 'extractall'):
        from tarfile import ExtractError

        class TarFile(tarfile.TarFile):
            def extractall(self, path='.', members=None):
                """Extract all members from the archive to the current working
                directory and set owner, modification time and permissions on
                directories afterwards. `path' specifies a different directory
                to extract to. `members' is optional and must be a subset of the
                list returned by getmembers().
                """
                directories = []
                if members is None:
                    members = self
                for tarinfo in members:
                    if tarinfo.isdir():
                        # Extract directory with a safe mode, so that
                        # all files below can be extracted as well.
                        try:
                            os.makedirs(os.path.join(path, tarinfo.name), 0777)
                        except EnvironmentError:
                            pass
                        directories.append(tarinfo)
                    else:
                        self.extract(tarinfo, path)

                # Reverse sort directories.
                directories.sort(lambda a, b: cmp(a.name, b.name))
                directories.reverse()

                # Set correct owner, mtime and filemode on directories.
                # NOTE(review): `path` is rebound on every iteration here,
                # so after the first directory the join accumulates paths --
                # looks like a bug in this legacy backport; verify before
                # touching (only reachable on Python < 2.5).
                for tarinfo in directories:
                    path = os.path.join(path, tarinfo.name)
                    try:
                        self.chown(tarinfo, path)
                        self.utime(tarinfo, path)
                        self.chmod(tarinfo, path)
                    except ExtractError, e:
                        if self.errorlevel > 1:
                            raise
                        else:
                            self._dbg(1, 'tarfile: %s' % e)
        _cls = TarFile
    else:
        _cls = tarfile.TarFile

    tar = _cls(filename, 'r')
    ret = tar.extractall(path, members)
    tar.close()
    return ret


def tar(file, dir, expression='^.+$', filenames=None):
    """Tars dir into file, only tars file that match expression
    """
    # NOTE: the local name `tar` shadows this function and `file` shadows
    # the builtin; kept as-is for byte-compatibility.
    tar = tarfile.TarFile(file, 'w')
    try:
        if filenames is None:
            filenames = listdir(dir, expression, add_dirs=True)
        for file in filenames:
            tar.add(os.path.join(dir, file), file, False)
    finally:
        tar.close()


def untar(file, dir):
    """Untar file into dir
    """
    _extractall(file, dir)


def w2p_pack(filename, path, compiled=False, filenames=None):
    """Packs a web2py application.

    Args:
        filename(str): path to the resulting archive
        path(str): path to the application
        compiled(bool): if `True` packs the compiled version
        filenames(list): adds filenames to the archive
    """
    filename = abspath(filename)
    path = abspath(path)
    tarname = filename + '.tar'
    if compiled:
        tar_compiled(tarname, path, '^[\w\.\-]+$')
    else:
        tar(tarname, path, '^[\w\.\-]+$', filenames=filenames)
    # a .w2p file is simply the gzipped tar
    w2pfp = gzopen(filename, 'wb')
    tarfp = open(tarname, 'rb')
    w2pfp.write(tarfp.read())
    w2pfp.close()
    tarfp.close()
    os.unlink(tarname)


def create_welcome_w2p():
    """Packs the bundled welcome application into welcome.w2p on first run.

    Triggered when welcome.w2p is missing or a NEWINSTALL marker exists.
    Best-effort: any failure is only logged (bare except is deliberate).
    """
    if not os.path.exists('welcome.w2p') or os.path.exists('NEWINSTALL'):
        try:
            w2p_pack('welcome.w2p', 'applications/welcome')
            os.unlink('NEWINSTALL')
            logging.info("New installation: created welcome.w2p file")
        except:
            logging.error("New installation error: unable to create welcome.w2p file")


def w2p_unpack(filename, path, delete_tar=True):
    """Unpacks a .w2p (or .gz) archive into `path`.

    First gunzips the archive to a temporary .tar next to it, then untars;
    plain tar files are extracted directly.
    """
    if filename == 'welcome.w2p':
        # make sure the stock welcome.w2p exists before unpacking it
        create_welcome_w2p()
    filename = abspath(filename)
    path = abspath(path)
    if filename[-4:] == '.w2p' or filename[-3:] == '.gz':
        if filename[-4:] == '.w2p':
            tarname = filename[:-4] + '.tar'
        else:
            tarname = filename[:-3] + '.tar'
        # NOTE: the local name `tarfile` shadows the tarfile module within
        # this function; kept as-is for byte-compatibility.
        fgzipped = gzopen(filename, 'rb')
        tarfile = open(tarname, 'wb')
        tarfile.write(fgzipped.read())
        tarfile.close()
        fgzipped.close()
    else:
        tarname = filename
    untar(tarname, path)
    if delete_tar:
        os.unlink(tarname)


def w2p_pack_plugin(filename, path, plugin_name):
    """Packs the given plugin into a w2p file.
    Will match files at::

        <path>/*/plugin_[name].*
        <path>/*/plugin_[name]/*

    """
    filename = abspath(filename)
    path = abspath(path)
    if not filename.endswith('web2py.plugin.%s.w2p' % plugin_name):
        raise Exception("Not a web2py plugin name")
    plugin_tarball = tarfile.open(filename, 'w:gz')
    try:
        app_dir = path
        while app_dir[-1] == '/':
            app_dir = app_dir[:-1]
        files1 = glob.glob(
            os.path.join(app_dir, '*/plugin_%s.*' % plugin_name))
        files2 = glob.glob(
            os.path.join(app_dir, '*/plugin_%s/*' % plugin_name))
        for file in files1 + files2:
            # store paths relative to the application directory
            plugin_tarball.add(file, arcname=file[len(app_dir) + 1:])
    finally:
        plugin_tarball.close()


def w2p_unpack_plugin(filename, path, delete_tar=True):
    """Unpacks a web2py plugin archive into an application directory.

    Raises:
        Exception: if the filename does not look like a web2py plugin.
    """
    filename = abspath(filename)
    path = abspath(path)
    if not os.path.basename(filename).startswith('web2py.plugin.'):
        raise Exception("Not a web2py plugin")
    w2p_unpack(filename, path, delete_tar)


def tar_compiled(file, dir, expression='^.+$'):
    """Used to tar a compiled application.
    The content of models, views, controllers is not stored in the tar file.
    """
    tar = tarfile.TarFile(file, 'w')
    for file in listdir(dir, expression, add_dirs=True):
        filename = os.path.join(dir, file)
        if os.path.islink(filename):
            continue
        if os.path.isfile(filename) and file[-4:] != '.pyc':
            # skip the sources that ship compiled as .pyc
            if file[:6] == 'models':
                continue
            if file[:5] == 'views':
                continue
            if file[:11] == 'controllers':
                continue
            if file[:7] == 'modules':
                continue
        tar.add(filename, file, False)
    tar.close()


def up(path):
    """Returns the parent directory of `path`."""
    return os.path.dirname(os.path.normpath(path))


def get_session(request, other_application='admin'):
    """Checks that user is authorized to access other_application"""
    if request.application == other_application:
        raise KeyError
    try:
        session_id = request.cookies['session_id_' + other_application].value
        session_filename = os.path.join(
            up(request.folder), other_application, 'sessions', session_id)
        osession = storage.load_storage(session_filename)
    except Exception, e:
        # missing cookie or unreadable session file: treat as empty session
        osession = storage.Storage()
    return osession


def set_session(request, session, other_application='admin'):
    """Saves `session` as the session of other_application.

    Raises:
        KeyError: if called from within other_application itself.
    """
    if request.application == other_application:
        raise KeyError
    session_id = request.cookies['session_id_' + other_application].value
    session_filename = os.path.join(
        up(request.folder), other_application, 'sessions', session_id)
    storage.save_storage(session, session_filename)


def check_credentials(request, other_application='admin',
                      expiration=60 * 60, gae_login=True):
    """Checks that user is authorized to access other_application"""
    if request.env.web2py_runtime_gae:
        from google.appengine.api import users
        if users.is_current_user_admin():
            return True
        elif gae_login:
            login_html = '<a href="%s">Sign in with your google account</a>.' \
                % users.create_login_url(request.env.path_info)
            raise HTTP(200, '<html><body>%s</body></html>' % login_html)
        else:
            return False
    else:
        t0 = time.time()
        dt = t0 - expiration
        s = get_session(request, other_application)
        # authorized and not expired
        r = (s.authorized and s.last_time and s.last_time > dt)
        if r:
            # sliding expiration: refresh the timestamp on every hit
            s.last_time = t0
            set_session(request, s, other_application)
        return r


def fix_newlines(path):
    """Normalizes CRLF/CR line endings to LF in all .py and .html files
    under `path` (rewrites files in place only when they change).
    """
    regex = re.compile(r'''(\r
|\r|
)''')
    for filename in listdir(path, '.*\.(py|html)$', drop=False):
        rdata = read_file(filename, 'rb')
        wdata = regex.sub('\n', rdata)
        if wdata != rdata:
            write_file(filename, wdata, 'wb')


def copystream(
    src,
    dest,
    size,
    chunk_size=10 ** 5,
):
    """
    this is here because I think there is a bug in shutil.copyfileobj
    """
    while size > 0:
        if size < chunk_size:
            data = src.read(size)
        else:
            data = src.read(chunk_size)
        length = len(data)
        if length > size:
            (data, length) = (data[:size], size)
        size -= length
        if length == 0:
            break
        dest.write(data)
        if length < chunk_size:
            break
    dest.seek(0)
    return


def make_fake_file_like_object():
    """Returns a no-op write/close object usable as a log-file stand-in."""
    class LogFile(object):
        def write(self, value):
            pass

        def close(self):
            pass
    return LogFile()


from settings import global_settings  # we need to import settings here because
# settings imports fileutils too


def abspath(*relpath, **base):
    """Converts relative path to absolute path based (by default) on
    applications_parent
    """
    path = os.path.join(*relpath)
    gluon = base.get('gluon', False)
    if os.path.isabs(path):
        return path
    if gluon:
        return os.path.join(global_settings.gluon_parent, path)
    return os.path.join(global_settings.applications_parent, path)
luzheqi1987/nova-annotation
refs/heads/master
nova/virt/xenapi/client/objects.py
97
# Copyright 2013 OpenStack Foundation
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

from nova import utils


class XenAPISessionObject(object):
    """Wrapper to make calling and mocking the session easier

    The XenAPI protocol is an XML RPC API that is based around the
    XenAPI database, and operations you can do on each of the objects
    stored in the database, such as VM, SR, VDI, etc.

    For more details see the XenAPI docs:
    http://docs.vmd.citrix.com/XenServer/6.2.0/1.0/en_gb/api/

    Most, objects like VM, SR, VDI, etc, share a common set of methods:
    * vm_ref = session.VM.create(vm_rec)
    * vm_ref = session.VM.get_by_uuid(uuid)
    * session.VM.destroy(vm_ref)
    * vm_refs = session.VM.get_all()

    Each object also has specific messages, or functions, such as:
    * session.VM.clean_reboot(vm_ref)

    Each object has fields, like "VBDs" that can be fetched like this:
    * vbd_refs = session.VM.get_VBDs(vm_ref)

    You can get all the fields by fetching the full record.
    However please note this is much more expensive than just
    fetching the field you require:
    * vm_rec = session.VM.get_record(vm_ref)

    When searching for particular objects, you may be tempted
    to use get_all(), but this often leads to races as objects
    get deleted under your feet. It is preferable to use the
    undocumented:
    * vms = session.VM.get_all_records_where(
                'field "is_control_domain"="true"')

    """

    def __init__(self, session, name):
        self.session = session
        self.name = name

    def _call_method(self, method_name, *args):
        # Build the fully-qualified XenAPI call name, e.g. "VM.start",
        # and dispatch it through the session.
        call = "%s.%s" % (self.name, method_name)
        return self.session.call_xenapi(call, *args)

    def __getattr__(self, method_name):
        # Any attribute not defined on the wrapper becomes a proxy for the
        # XenAPI message of the same name (e.g. self.get_record(ref)).
        return lambda *params: self._call_method(method_name, *params)


class VM(XenAPISessionObject):
    """Virtual Machine."""
    def __init__(self, session):
        super(VM, self).__init__(session, "VM")


class VBD(XenAPISessionObject):
    """Virtual block device."""
    def __init__(self, session):
        super(VBD, self).__init__(session, "VBD")

    def _synchronized_call(self, method_name, vbd_ref, vm_ref):
        """Run a VBD operation under a per-VM lock.

        NOTE(johngarbutt) we need to ensure there is only ever one
        VBD.unplug or VBD.plug happening at once per VM
        due to a bug in XenServer 6.1 and 6.2
        """
        @utils.synchronized('xenapi-vbd-' + vm_ref)
        def synchronized_call():
            self._call_method(method_name, vbd_ref)

        synchronized_call()

    def plug(self, vbd_ref, vm_ref):
        """Plug the VBD into the given VM (serialized per VM)."""
        self._synchronized_call("plug", vbd_ref, vm_ref)

    def unplug(self, vbd_ref, vm_ref):
        """Unplug the VBD from the given VM (serialized per VM)."""
        self._synchronized_call("unplug", vbd_ref, vm_ref)


class VDI(XenAPISessionObject):
    """Virtual disk image."""
    def __init__(self, session):
        super(VDI, self).__init__(session, "VDI")


class SR(XenAPISessionObject):
    """Storage Repository."""
    def __init__(self, session):
        super(SR, self).__init__(session, "SR")


class PBD(XenAPISessionObject):
    """Physical block device."""
    def __init__(self, session):
        super(PBD, self).__init__(session, "PBD")


class PIF(XenAPISessionObject):
    """Physical Network Interface."""
    def __init__(self, session):
        super(PIF, self).__init__(session, "PIF")


class VLAN(XenAPISessionObject):
    """VLAN."""
    def __init__(self, session):
        super(VLAN, self).__init__(session, "VLAN")


class Host(XenAPISessionObject):
    """XenServer hosts."""
    def __init__(self, session):
        super(Host, self).__init__(session, "host")


class Network(XenAPISessionObject):
    """Networks that VIFs are attached to."""
    def __init__(self, session):
        super(Network, self).__init__(session, "network")


class Pool(XenAPISessionObject):
    """Pool of hosts."""
    def __init__(self, session):
        super(Pool, self).__init__(session, "pool")
cosmoharrigan/rl-glue-ext
refs/heads/master
projects/codecs/Python/src/tests/test_taskspec.py
8
#
# Copyright (C) 2008, Brian Tanner
#
#http://rl-glue-ext.googlecode.com/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Last modifed 22-1-2009 by Jose Antonio Martin H.
# Improving the test processTaskSpec
#  $Revision$
#  $Date$
#  $Author$
# $HeadURL$

# NOTE: this is a Python 2 script (uses print statements).

import sys
from rlglue.utils import TaskSpecVRLGLUE3
from glue_test import glue_test

# Collects pass/fail counts for this test module.
tester = glue_test("test_taskspec")


def processTaskSpec(ts):
    """Parses one task-spec string and dumps every parsed field to stdout.

    `ts` is a single line in RL-Glue 3.0 task-spec format, e.g.:
    VERSION RL-Glue-3.0 PROBLEMTYPE episodic DISCOUNTFACTOR 1
    OBSERVATIONS INTS (3 0 1) DOUBLES (2 -1.2 0.5) (-.07 .07) CHARCOUNT 1024
    ACTIONS INTS (2 0 4) CHARCOUNT 1024 REWARDS (-5.0 UNSPEC)
    EXTRA some other stuff goes here
    """
    print
    print
    print "======================================================================================================="
    print ts
    print
    print
    TaskSpec = TaskSpecVRLGLUE3.TaskSpecParser(ts)
    if TaskSpec.valid:
        print "======================================================================================================="
        print "Version: ["+TaskSpec.getVersion()+"]"
        print "ProblemType: ["+TaskSpec.getProblemType()+"]"
        print "DiscountFactor: ["+TaskSpec.getDiscountFactor()+"]"
        print "======================================================================================================="
        print "\t \t \t \t Observations"
        print "======================================================================================================="
        print "Observations: ["+TaskSpec.getObservations()+"]"
        print "Integers:",TaskSpec.getIntObservations()
        print "Doubles: ",TaskSpec.getDoubleObservations()
        print "Chars: ",TaskSpec.getCharCountObservations()
        print "======================================================================================================="
        print "\t \t \t \t Actions"
        print "======================================================================================================"
        print "Actions: ["+TaskSpec.getActions()+"]"
        print "Integers:",TaskSpec.getIntActions()
        print "Doubles: ",TaskSpec.getDoubleActions()
        print "Chars: ",TaskSpec.getCharCountActions()
        print "======================================================================================================="
        print "Reward :["+TaskSpec.getReward()+"]"
        print "Reward Range:",TaskSpec.getRewardRange()
        print "Extra: ["+TaskSpec.getExtra()+"]"
        print "remeber that by using len() you get the cardinality of lists!"
        print "Thus:"
        print "len(Doubles) ==> ",len(TaskSpec.getDoubleObservations())," Double Observations"
    else:
        # parsing failed; the version should still be recoverable
        print "Task spec was invalid, but I can try to get version: "+TaskSpec.getVersion();


# Each line of the fixture file is one task-spec to exercise the parser on.
f = open('sample_task_specs.txt', 'r')
for ts in f:
    processTaskSpec(ts)
f.close()

print tester.get_summary()
# exit status is the number of failures, so CI can detect regressions
sys.exit(tester.getFailCount())
noslenfa/tdjangorest
refs/heads/master
uw/lib/python2.7/site-packages/paramiko/packet.py
7
# Copyright (C) 2003-2007  Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE.  See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.

"""
Packetizer.

Implements the SSH binary packet protocol (framing, padding, MAC,
encryption, compression and re-key accounting) on top of a raw socket.
"""

import errno
import select
import socket
import struct
import threading
import time

from paramiko.common import *
from paramiko import util
from paramiko.ssh_exception import SSHException, ProxyCommandFailure
from paramiko.message import Message


try:
    # prefer the optimized C HMAC if available
    from r_hmac import HMAC
except ImportError:
    from Crypto.Hash.HMAC import HMAC

def compute_hmac(key, message, digest_class):
    """Returns the raw HMAC digest of `message` under `key`."""
    return HMAC(key, message, digest_class).digest()


class NeedRekeyException (Exception):
    # Raised out of read_all() when a rekey is pending and no data arrived.
    pass


class Packetizer (object):
    """
    Implementation of the base SSH packet protocol.
    """

    # READ the secsh RFC's before raising these values.  if anything,
    # they should probably be lower.
    REKEY_PACKETS = pow(2, 29)
    REKEY_BYTES = pow(2, 29)

    REKEY_PACKETS_OVERFLOW_MAX = pow(2,29)      # Allow receiving this many packets after a re-key request before terminating
    REKEY_BYTES_OVERFLOW_MAX = pow(2,29)        # Allow receiving this many bytes after a re-key request before terminating

    def __init__(self, socket):
        self.__socket = socket
        self.__logger = None
        self.__closed = False
        self.__dump_packets = False
        self.__need_rekey = False
        self.__init_count = 0
        # leftover bytes from over-reads (e.g. past the banner line)
        self.__remainder = ''

        # used for noticing when to re-key:
        self.__sent_bytes = 0
        self.__sent_packets = 0
        self.__received_bytes = 0
        self.__received_packets = 0
        self.__received_bytes_overflow = 0
        self.__received_packets_overflow = 0

        # current inbound/outbound ciphering:
        self.__block_size_out = 8
        self.__block_size_in = 8
        self.__mac_size_out = 0
        self.__mac_size_in = 0
        self.__block_engine_out = None
        self.__block_engine_in = None
        self.__sdctr_out = False
        self.__mac_engine_out = None
        self.__mac_engine_in = None
        self.__mac_key_out = ''
        self.__mac_key_in = ''
        self.__compress_engine_out = None
        self.__compress_engine_in = None
        self.__sequence_number_out = 0L
        self.__sequence_number_in = 0L

        # lock around outbound writes (packet computation)
        self.__write_lock = threading.RLock()

        # keepalives:
        self.__keepalive_interval = 0
        self.__keepalive_last = time.time()
        self.__keepalive_callback = None

    def set_log(self, log):
        """
        Set the python log object to use for logging.
        """
        self.__logger = log

    def set_outbound_cipher(self, block_engine, block_size, mac_engine, mac_size, mac_key, sdctr=False):
        """
        Switch outbound data cipher.
        """
        self.__block_engine_out = block_engine
        self.__sdctr_out = sdctr
        self.__block_size_out = block_size
        self.__mac_engine_out = mac_engine
        self.__mac_size_out = mac_size
        self.__mac_key_out = mac_key
        self.__sent_bytes = 0
        self.__sent_packets = 0
        # wait until the reset happens in both directions before clearing rekey flag
        self.__init_count |= 1
        if self.__init_count == 3:
            self.__init_count = 0
            self.__need_rekey = False

    def set_inbound_cipher(self, block_engine, block_size, mac_engine, mac_size, mac_key):
        """
        Switch inbound data cipher.
        """
        self.__block_engine_in = block_engine
        self.__block_size_in = block_size
        self.__mac_engine_in = mac_engine
        self.__mac_size_in = mac_size
        self.__mac_key_in = mac_key
        self.__received_bytes = 0
        self.__received_packets = 0
        self.__received_bytes_overflow = 0
        self.__received_packets_overflow = 0
        # wait until the reset happens in both directions before clearing rekey flag
        self.__init_count |= 2
        if self.__init_count == 3:
            self.__init_count = 0
            self.__need_rekey = False

    def set_outbound_compressor(self, compressor):
        self.__compress_engine_out = compressor

    def set_inbound_compressor(self, compressor):
        self.__compress_engine_in = compressor

    def close(self):
        # only sets the flag; pending reads/writes notice it and raise EOFError
        self.__closed = True

    def set_hexdump(self, hexdump):
        self.__dump_packets = hexdump

    def get_hexdump(self):
        return self.__dump_packets

    def get_mac_size_in(self):
        return self.__mac_size_in

    def get_mac_size_out(self):
        return self.__mac_size_out

    def need_rekey(self):
        """
        Returns C{True} if a new set of keys needs to be negotiated.  This
        will be triggered during a packet read or write, so it should be
        checked after every read or write, or at least after every few.

        @return: C{True} if a new set of keys needs to be negotiated
        """
        return self.__need_rekey

    def set_keepalive(self, interval, callback):
        """
        Turn on/off the callback keepalive.  If C{interval} seconds pass with
        no data read from or written to the socket, the callback will be
        executed and the timer will be reset.
        """
        self.__keepalive_interval = interval
        self.__keepalive_callback = callback
        self.__keepalive_last = time.time()

    def read_all(self, n, check_rekey=False):
        """
        Read as close to N bytes as possible, blocking as long as necessary.

        @param n: number of bytes to read
        @type n: int
        @return: the data read
        @rtype: str
        @raise EOFError: if the socket was closed before all the bytes could
            be read
        """
        out = ''
        # handle over-reading from reading the banner line
        if len(self.__remainder) > 0:
            out = self.__remainder[:n]
            self.__remainder = self.__remainder[n:]
            n -= len(out)
        if PY22:
            return self._py22_read_all(n, out)
        while n > 0:
            got_timeout = False
            try:
                x = self.__socket.recv(n)
                if len(x) == 0:
                    raise EOFError()
                out += x
                n -= len(x)
            except socket.timeout:
                got_timeout = True
            except socket.error, e:
                # on Linux, sometimes instead of socket.timeout, we get
                # EAGAIN.  this is a bug in recent (> 2.6.9) kernels but
                # we need to work around it.
                if (type(e.args) is tuple) and (len(e.args) > 0) and (e.args[0] == errno.EAGAIN):
                    got_timeout = True
                elif (type(e.args) is tuple) and (len(e.args) > 0) and (e.args[0] == errno.EINTR):
                    # syscall interrupted; try again
                    pass
                elif self.__closed:
                    raise EOFError()
                else:
                    raise
            if got_timeout:
                if self.__closed:
                    raise EOFError()
                if check_rekey and (len(out) == 0) and self.__need_rekey:
                    raise NeedRekeyException()
                self._check_keepalive()
        return out

    def write_all(self, out):
        """Writes all of `out` to the socket, retrying on timeouts/EINTR.

        Raises EOFError when the socket is closed or the write fails.
        """
        self.__keepalive_last = time.time()
        while len(out) > 0:
            retry_write = False
            try:
                n = self.__socket.send(out)
            except socket.timeout:
                retry_write = True
            except socket.error, e:
                if (type(e.args) is tuple) and (len(e.args) > 0) and (e.args[0] == errno.EAGAIN):
                    retry_write = True
                elif (type(e.args) is tuple) and (len(e.args) > 0) and (e.args[0] == errno.EINTR):
                    # syscall interrupted; try again
                    retry_write = True
                else:
                    n = -1
            except ProxyCommandFailure:
                raise  # so it doesn't get swallowed by the below catchall
            except Exception:
                # could be: (32, 'Broken pipe')
                n = -1
            if retry_write:
                n = 0
            if self.__closed:
                n = -1
            if n < 0:
                raise EOFError()
            if n == len(out):
                break
            out = out[n:]
        return

    def readline(self, timeout):
        """
        Read a line from the socket.  We assume no data is pending after the
        line, so it's okay to attempt large reads.
        """
        buf = self.__remainder
        while not '\n' in buf:
            buf += self._read_timeout(timeout)
        n = buf.index('\n')
        # keep anything read past the newline for later packet reads
        self.__remainder = buf[n+1:]
        buf = buf[:n]
        if (len(buf) > 0) and (buf[-1] == '\r'):
            buf = buf[:-1]
        return buf

    def send_message(self, data):
        """
        Write a block of data using the current cipher, as an SSH block.
        """
        # encrypt this sucka
        data = str(data)
        cmd = ord(data[0])
        if cmd in MSG_NAMES:
            cmd_name = MSG_NAMES[cmd]
        else:
            cmd_name = '$%x' % cmd
        orig_len = len(data)
        self.__write_lock.acquire()
        try:
            if self.__compress_engine_out is not None:
                data = self.__compress_engine_out(data)
            packet = self._build_packet(data)
            if self.__dump_packets:
                self._log(DEBUG, 'Write packet <%s>, length %d' % (cmd_name, orig_len))
                self._log(DEBUG, util.format_binary(packet, 'OUT: '))
            if self.__block_engine_out != None:
                out = self.__block_engine_out.encrypt(packet)
            else:
                out = packet
            # + mac
            # the MAC covers the outbound sequence number plus the
            # *plaintext* packet
            if self.__block_engine_out != None:
                payload = struct.pack('>I', self.__sequence_number_out) + packet
                out += compute_hmac(self.__mac_key_out, payload, self.__mac_engine_out)[:self.__mac_size_out]
            self.__sequence_number_out = (self.__sequence_number_out + 1) & 0xffffffffL
            self.write_all(out)

            self.__sent_bytes += len(out)
            self.__sent_packets += 1
            if ((self.__sent_packets >= self.REKEY_PACKETS) or (self.__sent_bytes >= self.REKEY_BYTES)) \
                   and not self.__need_rekey:
                # only ask once for rekeying
                self._log(DEBUG, 'Rekeying (hit %d packets, %d bytes sent)' %
                          (self.__sent_packets, self.__sent_bytes))
                self.__received_bytes_overflow = 0
                self.__received_packets_overflow = 0
                self._trigger_rekey()
        finally:
            self.__write_lock.release()

    def read_message(self):
        """
        Only one thread should ever be in this function (no other locking is
        done).

        @raise SSHException: if the packet is mangled
        @raise NeedRekeyException: if the transport should rekey
        """
        header = self.read_all(self.__block_size_in, check_rekey=True)
        if self.__block_engine_in != None:
            header = self.__block_engine_in.decrypt(header)
        if self.__dump_packets:
            self._log(DEBUG, util.format_binary(header, 'IN: '));
        packet_size = struct.unpack('>I', header[:4])[0]
        # leftover contains decrypted bytes from the first block (after the length field)
        leftover = header[4:]
        if (packet_size - len(leftover)) % self.__block_size_in != 0:
            raise SSHException('Invalid packet blocking')
        buf = self.read_all(packet_size + self.__mac_size_in - len(leftover))
        packet = buf[:packet_size - len(leftover)]
        post_packet = buf[packet_size - len(leftover):]
        if self.__block_engine_in != None:
            packet = self.__block_engine_in.decrypt(packet)
        if self.__dump_packets:
            self._log(DEBUG, util.format_binary(packet, 'IN: '));
        packet = leftover + packet

        if self.__mac_size_in > 0:
            mac = post_packet[:self.__mac_size_in]
            mac_payload = struct.pack('>II', self.__sequence_number_in, packet_size) + packet
            my_mac = compute_hmac(self.__mac_key_in, mac_payload, self.__mac_engine_in)[:self.__mac_size_in]
            if my_mac != mac:
                raise SSHException('Mismatched MAC')
        padding = ord(packet[0])
        payload = packet[1:packet_size - padding]

        if self.__dump_packets:
            self._log(DEBUG, 'Got payload (%d bytes, %d padding)' % (packet_size, padding))

        if self.__compress_engine_in is not None:
            payload = self.__compress_engine_in(payload)

        msg = Message(payload[1:])
        msg.seqno = self.__sequence_number_in
        self.__sequence_number_in = (self.__sequence_number_in + 1) & 0xffffffffL

        # check for rekey
        raw_packet_size = packet_size + self.__mac_size_in + 4
        self.__received_bytes += raw_packet_size
        self.__received_packets += 1
        if self.__need_rekey:
            # we've asked to rekey -- give them some packets to comply before
            # dropping the connection
            self.__received_bytes_overflow += raw_packet_size
            self.__received_packets_overflow += 1
            if (self.__received_packets_overflow >= self.REKEY_PACKETS_OVERFLOW_MAX) or \
               (self.__received_bytes_overflow >= self.REKEY_BYTES_OVERFLOW_MAX):
                raise SSHException('Remote transport is ignoring rekey requests')
        elif (self.__received_packets >= self.REKEY_PACKETS) or \
             (self.__received_bytes >= self.REKEY_BYTES):
            # only ask once for rekeying
            self._log(DEBUG, 'Rekeying (hit %d packets, %d bytes received)' %
                      (self.__received_packets, self.__received_bytes))
            self.__received_bytes_overflow = 0
            self.__received_packets_overflow = 0
            self._trigger_rekey()

        cmd = ord(payload[0])
        if cmd in MSG_NAMES:
            cmd_name = MSG_NAMES[cmd]
        else:
            cmd_name = '$%x' % cmd
        if self.__dump_packets:
            self._log(DEBUG, 'Read packet <%s>, length %d' % (cmd_name, len(payload)))
        return cmd, msg

    ##########  protected

    def _log(self, level, msg):
        # accepts either a single message or a list of lines
        if self.__logger is None:
            return
        if issubclass(type(msg), list):
            for m in msg:
                self.__logger.log(level, m)
        else:
            self.__logger.log(level, msg)

    def _check_keepalive(self):
        # fires the keepalive callback when the link has been idle too long
        if (not self.__keepalive_interval) or (not self.__block_engine_out) or \
            self.__need_rekey:
            # wait till we're encrypting, and not in the middle of rekeying
            return
        now = time.time()
        if now > self.__keepalive_last + self.__keepalive_interval:
            self.__keepalive_callback()
            self.__keepalive_last = now

    def _py22_read_all(self, n, out):
        # Python 2.2 fallback: poll with select() instead of socket timeouts
        while n > 0:
            r, w, e = select.select([self.__socket], [], [], 0.1)
            if self.__socket not in r:
                if self.__closed:
                    raise EOFError()
                self._check_keepalive()
            else:
                x = self.__socket.recv(n)
                if len(x) == 0:
                    raise EOFError()
                out += x
                n -= len(x)
        return out

    def _py22_read_timeout(self, timeout):
        # Python 2.2 fallback for _read_timeout
        start = time.time()
        while True:
            r, w, e = select.select([self.__socket], [], [], 0.1)
            if self.__socket in r:
                x = self.__socket.recv(1)
                if len(x) == 0:
                    raise EOFError()
                break
            if self.__closed:
                raise EOFError()
            now = time.time()
            if now - start >= timeout:
                raise socket.timeout()
        return x

    def _read_timeout(self, timeout):
        # reads a chunk, raising socket.timeout after `timeout` seconds
        if PY22:
            return self._py22_read_timeout(timeout)
        start = time.time()
        while True:
            try:
                x = self.__socket.recv(128)
                if len(x) == 0:
                    raise EOFError()
                break
            except socket.timeout:
                pass
            except EnvironmentError, e:
                if ((type(e.args) is tuple) and (len(e.args) > 0) and
                    (e.args[0] == errno.EINTR)):
                    pass
                else:
                    raise
            if self.__closed:
                raise EOFError()
            now = time.time()
            if now - start >= timeout:
                raise socket.timeout()
        return x

    def _build_packet(self, payload):
        """Frames `payload` as [length][padding-length][payload][padding]."""
        # pad up at least 4 bytes, to nearest block-size (usually 8)
        bsize = self.__block_size_out
        padding = 3 + bsize - ((len(payload) + 8) % bsize)
        packet = struct.pack('>IB', len(payload) + padding + 1, padding)
        packet += payload
        if self.__sdctr_out or self.__block_engine_out is None:
            # cute trick i caught openssh doing: if we're not encrypting or SDCTR mode (RFC4344),
            # don't waste random bytes for the padding
            packet += (chr(0) * padding)
        else:
            packet += rng.read(padding)
        return packet

    def _trigger_rekey(self):
        # outside code should check for this flag
        self.__need_rekey = True
rsampaio/cobbler
refs/heads/master
cobbler/couch.py
12
import httplib, simplejson # http://cheeseshop.python.org/pypi/simplejson # Here only used for prettyprinting def prettyPrint(s): """Prettyprints the json response of an HTTPResponse object""" # HTTPResponse instance -> Python object -> str print simplejson.dumps(simplejson.loads(s.read()), sort_keys=True, indent=4) class Couch: """Basic wrapper class for operations on a couchDB""" def __init__(self, host, port=5984, options=None): self.host = host self.port = port def connect(self): return httplib.HTTPConnection(self.host, self.port) # No close() # Database operations def createDb(self, dbName): """Creates a new database on the server""" r = self.put(''.join(['/',dbName,'/']), "") return r.read() def deleteDb(self, dbName): """Deletes the database on the server""" r = self.delete(''.join(['/',dbName,'/'])) return r.read() def listDb(self): """List the databases on the server""" r = self.get('/_all_dbs') return r.read() def infoDb(self, dbName): """Returns info about the couchDB""" r = self.get(''.join(['/', dbName, '/'])) return r.read() # Document operations def listDoc(self, dbName): """List all documents in a given database""" r = self.get(''.join(['/', dbName, '/', '_all_docs'])) return r.read() def openDoc(self, dbName, docId): """Open a document in a given database""" r = self.get(''.join(['/', dbName, '/', docId,])) return r.read() def saveDoc(self, dbName, body, docId=None): """Save/create a document to/in a given database""" if docId: r = self.put(''.join(['/', dbName, '/', docId]), body) else: r = self.post(''.join(['/', dbName, '/']), body) return r.read() def deleteDoc(self, dbName, docId): # XXX Crashed if resource is non-existent; not so for DELETE on db. Bug? 
# XXX Does not work any more, on has to specify an revid # Either do html head to get the recten revid or provide it as parameter r = self.delete(''.join(['/', dbName, '/', docId])) return r.read() # Basic http methods def get(self, uri): c = self.connect() headers = {"Accept": "application/json"} c.request("GET", uri, None, headers) return c.getresponse() def post(self, uri, body): c = self.connect() headers = {"Content-type": "application/json"} c.request('POST', uri, body, headers) return c.getresponse() def put(self, uri, body): c = self.connect() if len(body) > 0: headers = {"Content-type": "application/json"} c.request("PUT", uri, body, headers) else: c.request("PUT", uri, body) return c.getresponse() def delete(self, uri): c = self.connect() c.request("DELETE", uri) return c.getresponse()
sgiavasis/nipype
refs/heads/master
nipype/workflows/dmri/mrtrix/diffusion.py
10
from ....interfaces import utility as util # utility from ....pipeline import engine as pe # pypeline engine from ....interfaces import fsl as fsl from ....interfaces import mrtrix as mrtrix def create_mrtrix_dti_pipeline(name="dtiproc", tractography_type='probabilistic'): """Creates a pipeline that does the same diffusion processing as in the :doc:`../../users/examples/dmri_mrtrix_dti` example script. Given a diffusion-weighted image, b-values, and b-vectors, the workflow will return the tractography computed from spherical deconvolution and probabilistic streamline tractography Example ------- >>> dti = create_mrtrix_dti_pipeline("mrtrix_dti") >>> dti.inputs.inputnode.dwi = 'data.nii' >>> dti.inputs.inputnode.bvals = 'bvals' >>> dti.inputs.inputnode.bvecs = 'bvecs' >>> dti.run() # doctest: +SKIP Inputs:: inputnode.dwi inputnode.bvecs inputnode.bvals Outputs:: outputnode.fa outputnode.tdi outputnode.tracts_tck outputnode.tracts_trk outputnode.csdeconv """ inputnode = pe.Node(interface=util.IdentityInterface(fields=["dwi", "bvecs", "bvals"]), name="inputnode") bet = pe.Node(interface=fsl.BET(), name="bet") bet.inputs.mask = True fsl2mrtrix = pe.Node(interface=mrtrix.FSL2MRTrix(), name='fsl2mrtrix') fsl2mrtrix.inputs.invert_y = True dwi2tensor = pe.Node(interface=mrtrix.DWI2Tensor(), name='dwi2tensor') tensor2vector = pe.Node(interface=mrtrix.Tensor2Vector(), name='tensor2vector') tensor2adc = pe.Node(interface=mrtrix.Tensor2ApparentDiffusion(), name='tensor2adc') tensor2fa = pe.Node(interface=mrtrix.Tensor2FractionalAnisotropy(), name='tensor2fa') erode_mask_firstpass = pe.Node(interface=mrtrix.Erode(), name='erode_mask_firstpass') erode_mask_secondpass = pe.Node(interface=mrtrix.Erode(), name='erode_mask_secondpass') threshold_b0 = pe.Node(interface=mrtrix.Threshold(), name='threshold_b0') threshold_FA = pe.Node(interface=mrtrix.Threshold(), name='threshold_FA') threshold_FA.inputs.absolute_threshold_value = 0.7 threshold_wmmask = 
pe.Node(interface=mrtrix.Threshold(), name='threshold_wmmask') threshold_wmmask.inputs.absolute_threshold_value = 0.4 MRmultiply = pe.Node(interface=mrtrix.MRMultiply(), name='MRmultiply') MRmult_merge = pe.Node(interface=util.Merge(2), name='MRmultiply_merge') median3d = pe.Node(interface=mrtrix.MedianFilter3D(), name='median3D') MRconvert = pe.Node(interface=mrtrix.MRConvert(), name='MRconvert') MRconvert.inputs.extract_at_axis = 3 MRconvert.inputs.extract_at_coordinate = [0] csdeconv = pe.Node(interface=mrtrix.ConstrainedSphericalDeconvolution(), name='csdeconv') gen_WM_mask = pe.Node(interface=mrtrix.GenerateWhiteMatterMask(), name='gen_WM_mask') estimateresponse = pe.Node(interface=mrtrix.EstimateResponseForSH(), name='estimateresponse') if tractography_type == 'probabilistic': CSDstreamtrack = pe.Node(interface=mrtrix.ProbabilisticSphericallyDeconvolutedStreamlineTrack(), name='CSDstreamtrack') else: CSDstreamtrack = pe.Node(interface=mrtrix.SphericallyDeconvolutedStreamlineTrack(), name='CSDstreamtrack') CSDstreamtrack.inputs.desired_number_of_tracks = 15000 tracks2prob = pe.Node(interface=mrtrix.Tracks2Prob(), name='tracks2prob') tracks2prob.inputs.colour = True tck2trk = pe.Node(interface=mrtrix.MRTrix2TrackVis(), name='tck2trk') workflow = pe.Workflow(name=name) workflow.base_output_dir = name workflow.connect([(inputnode, fsl2mrtrix, [("bvecs", "bvec_file"), ("bvals", "bval_file")])]) workflow.connect([(inputnode, dwi2tensor, [("dwi", "in_file")])]) workflow.connect([(fsl2mrtrix, dwi2tensor, [("encoding_file", "encoding_file")])]) workflow.connect([(dwi2tensor, tensor2vector, [['tensor', 'in_file']]), (dwi2tensor, tensor2adc, [['tensor', 'in_file']]), (dwi2tensor, tensor2fa, [['tensor', 'in_file']]), ]) workflow.connect([(inputnode, MRconvert, [("dwi", "in_file")])]) workflow.connect([(MRconvert, threshold_b0, [("converted", "in_file")])]) workflow.connect([(threshold_b0, median3d, [("out_file", "in_file")])]) workflow.connect([(median3d, 
erode_mask_firstpass, [("out_file", "in_file")])]) workflow.connect([(erode_mask_firstpass, erode_mask_secondpass, [("out_file", "in_file")])]) workflow.connect([(tensor2fa, MRmult_merge, [("FA", "in1")])]) workflow.connect([(erode_mask_secondpass, MRmult_merge, [("out_file", "in2")])]) workflow.connect([(MRmult_merge, MRmultiply, [("out", "in_files")])]) workflow.connect([(MRmultiply, threshold_FA, [("out_file", "in_file")])]) workflow.connect([(threshold_FA, estimateresponse, [("out_file", "mask_image")])]) workflow.connect([(inputnode, bet, [("dwi", "in_file")])]) workflow.connect([(inputnode, gen_WM_mask, [("dwi", "in_file")])]) workflow.connect([(bet, gen_WM_mask, [("mask_file", "binary_mask")])]) workflow.connect([(fsl2mrtrix, gen_WM_mask, [("encoding_file", "encoding_file")])]) workflow.connect([(inputnode, estimateresponse, [("dwi", "in_file")])]) workflow.connect([(fsl2mrtrix, estimateresponse, [("encoding_file", "encoding_file")])]) workflow.connect([(inputnode, csdeconv, [("dwi", "in_file")])]) workflow.connect([(gen_WM_mask, csdeconv, [("WMprobabilitymap", "mask_image")])]) workflow.connect([(estimateresponse, csdeconv, [("response", "response_file")])]) workflow.connect([(fsl2mrtrix, csdeconv, [("encoding_file", "encoding_file")])]) workflow.connect([(gen_WM_mask, threshold_wmmask, [("WMprobabilitymap", "in_file")])]) workflow.connect([(threshold_wmmask, CSDstreamtrack, [("out_file", "seed_file")])]) workflow.connect([(csdeconv, CSDstreamtrack, [("spherical_harmonics_image", "in_file")])]) if tractography_type == 'probabilistic': workflow.connect([(CSDstreamtrack, tracks2prob, [("tracked", "in_file")])]) workflow.connect([(inputnode, tracks2prob, [("dwi", "template_file")])]) workflow.connect([(CSDstreamtrack, tck2trk, [("tracked", "in_file")])]) workflow.connect([(inputnode, tck2trk, [("dwi", "image_file")])]) output_fields = ["fa", "tracts_trk", "csdeconv", "tracts_tck"] if tractography_type == 'probabilistic': output_fields.append("tdi") outputnode 
= pe.Node(interface=util.IdentityInterface(fields=output_fields), name="outputnode") workflow.connect([(CSDstreamtrack, outputnode, [("tracked", "tracts_tck")]), (csdeconv, outputnode, [("spherical_harmonics_image", "csdeconv")]), (tensor2fa, outputnode, [("FA", "fa")]), (tck2trk, outputnode, [("out_file", "tracts_trk")]) ]) if tractography_type == 'probabilistic': workflow.connect([(tracks2prob, outputnode, [("tract_image", "tdi")])]) return workflow
smart-developerr/my-first-blog
refs/heads/master
blog/admin.py
36
from django.contrib import admin from .models import Post admin.site.register(Post)
nonrational/qt-everywhere-opensource-src-4.8.6
refs/heads/master
src/3rdparty/webkit/Source/ThirdParty/gyp/PRESUBMIT.py
12
# Copyright 2010, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
EXCLUDED_PATHS = () def CheckChangeOnUpload(input_api, output_api): report = [] black_list = input_api.DEFAULT_BLACK_LIST + EXCLUDED_PATHS sources = lambda x: input_api.FilterSourceFile(x, black_list=black_list) report.extend(input_api.canned_checks.CheckChangeSvnEolStyle( input_api, output_api, sources)) return report def CheckChangeOnCommit(input_api, output_api): report = [] black_list = input_api.DEFAULT_BLACK_LIST + EXCLUDED_PATHS sources = lambda x: input_api.FilterSourceFile(x, black_list=black_list) report.extend(input_api.canned_checks.CheckChangeSvnEolStyle( input_api, output_api, sources)) report.extend(input_api.canned_checks.CheckTreeIsOpen( input_api, output_api, 'http://gyp-status.appspot.com/status', 'http://gyp-status.appspot.com/current')) return report
sephii/django
refs/heads/master
tests/inspectdb/models.py
17
# -*- encoding: utf-8 -*- from __future__ import unicode_literals from django.db import models class People(models.Model): name = models.CharField(max_length=255) parent = models.ForeignKey('self') class Message(models.Model): from_field = models.ForeignKey(People, db_column='from_id') class PeopleData(models.Model): people_pk = models.ForeignKey(People, primary_key=True) ssn = models.CharField(max_length=11) class PeopleMoreData(models.Model): people_unique = models.ForeignKey(People, unique=True) license = models.CharField(max_length=255) class DigitsInColumnName(models.Model): all_digits = models.CharField(max_length=11, db_column='123') leading_digit = models.CharField(max_length=11, db_column='4extra') leading_digits = models.CharField(max_length=11, db_column='45extra') class SpecialName(models.Model): field = models.IntegerField(db_column='field') # Underscores field_field_0 = models.IntegerField(db_column='Field_') field_field_1 = models.IntegerField(db_column='Field__') field_field_2 = models.IntegerField(db_column='__field') # Other chars prc_x = models.IntegerField(db_column='prc(%) x') non_ascii = models.IntegerField(db_column='tamaño') class Meta: db_table = "inspectdb_special.table name" class ColumnTypes(models.Model): id = models.AutoField(primary_key=True) big_int_field = models.BigIntegerField() bool_field = models.BooleanField(default=False) null_bool_field = models.NullBooleanField() char_field = models.CharField(max_length=10) null_char_field = models.CharField(max_length=10, blank=True, null=True) comma_separated_int_field = models.CommaSeparatedIntegerField(max_length=99) date_field = models.DateField() date_time_field = models.DateTimeField() decimal_field = models.DecimalField(max_digits=6, decimal_places=1) email_field = models.EmailField() file_field = models.FileField(upload_to="unused") file_path_field = models.FilePathField() float_field = models.FloatField() int_field = models.IntegerField() ip_address_field = models.IPAddressField() 
gen_ip_adress_field = models.GenericIPAddressField(protocol="ipv4") pos_int_field = models.PositiveIntegerField() pos_small_int_field = models.PositiveSmallIntegerField() slug_field = models.SlugField() small_int_field = models.SmallIntegerField() text_field = models.TextField() time_field = models.TimeField() url_field = models.URLField() class UniqueTogether(models.Model): field1 = models.IntegerField() field2 = models.CharField(max_length=10) class Meta: unique_together = ('field1', 'field2')
sander76/home-assistant
refs/heads/dev
tests/components/mqtt_json/test_device_tracker.py
8
"""The tests for the JSON MQTT device tracker platform.""" import json import logging import os from unittest.mock import patch import pytest from homeassistant.components.device_tracker.legacy import ( DOMAIN as DT_DOMAIN, YAML_DEVICES, ) from homeassistant.const import CONF_PLATFORM from homeassistant.setup import async_setup_component from tests.common import async_fire_mqtt_message LOCATION_MESSAGE = { "longitude": 1.0, "gps_accuracy": 60, "latitude": 2.0, "battery_level": 99.9, } LOCATION_MESSAGE_INCOMPLETE = {"longitude": 2.0} @pytest.fixture(autouse=True) def setup_comp(hass, mqtt_mock): """Initialize components.""" yaml_devices = hass.config.path(YAML_DEVICES) yield if os.path.isfile(yaml_devices): os.remove(yaml_devices) async def test_ensure_device_tracker_platform_validation(hass): """Test if platform validation was done.""" async def mock_setup_scanner(hass, config, see, discovery_info=None): """Check that Qos was added by validation.""" assert "qos" in config with patch( "homeassistant.components.mqtt_json.device_tracker.async_setup_scanner", autospec=True, side_effect=mock_setup_scanner, ) as mock_sp: dev_id = "paulus" topic = "location/paulus" assert await async_setup_component( hass, DT_DOMAIN, {DT_DOMAIN: {CONF_PLATFORM: "mqtt_json", "devices": {dev_id: topic}}}, ) assert mock_sp.call_count == 1 async def test_json_message(hass): """Test json location message.""" dev_id = "zanzito" topic = "location/zanzito" location = json.dumps(LOCATION_MESSAGE) assert await async_setup_component( hass, DT_DOMAIN, {DT_DOMAIN: {CONF_PLATFORM: "mqtt_json", "devices": {dev_id: topic}}}, ) async_fire_mqtt_message(hass, topic, location) await hass.async_block_till_done() state = hass.states.get("device_tracker.zanzito") assert state.attributes.get("latitude") == 2.0 assert state.attributes.get("longitude") == 1.0 async def test_non_json_message(hass, caplog): """Test receiving a non JSON message.""" dev_id = "zanzito" topic = "location/zanzito" location = "home" 
assert await async_setup_component( hass, DT_DOMAIN, {DT_DOMAIN: {CONF_PLATFORM: "mqtt_json", "devices": {dev_id: topic}}}, ) caplog.set_level(logging.ERROR) caplog.clear() async_fire_mqtt_message(hass, topic, location) await hass.async_block_till_done() assert "Error parsing JSON payload: home" in caplog.text async def test_incomplete_message(hass, caplog): """Test receiving an incomplete message.""" dev_id = "zanzito" topic = "location/zanzito" location = json.dumps(LOCATION_MESSAGE_INCOMPLETE) assert await async_setup_component( hass, DT_DOMAIN, {DT_DOMAIN: {CONF_PLATFORM: "mqtt_json", "devices": {dev_id: topic}}}, ) caplog.set_level(logging.ERROR) caplog.clear() async_fire_mqtt_message(hass, topic, location) await hass.async_block_till_done() assert ( "Skipping update for following data because of missing " 'or malformatted data: {"longitude": 2.0}' in caplog.text ) async def test_single_level_wildcard_topic(hass): """Test single level wildcard topic.""" dev_id = "zanzito" subscription = "location/+/zanzito" topic = "location/room/zanzito" location = json.dumps(LOCATION_MESSAGE) assert await async_setup_component( hass, DT_DOMAIN, {DT_DOMAIN: {CONF_PLATFORM: "mqtt_json", "devices": {dev_id: subscription}}}, ) async_fire_mqtt_message(hass, topic, location) await hass.async_block_till_done() state = hass.states.get("device_tracker.zanzito") assert state.attributes.get("latitude") == 2.0 assert state.attributes.get("longitude") == 1.0 async def test_multi_level_wildcard_topic(hass): """Test multi level wildcard topic.""" dev_id = "zanzito" subscription = "location/#" topic = "location/zanzito" location = json.dumps(LOCATION_MESSAGE) assert await async_setup_component( hass, DT_DOMAIN, {DT_DOMAIN: {CONF_PLATFORM: "mqtt_json", "devices": {dev_id: subscription}}}, ) async_fire_mqtt_message(hass, topic, location) await hass.async_block_till_done() state = hass.states.get("device_tracker.zanzito") assert state.attributes.get("latitude") == 2.0 assert 
state.attributes.get("longitude") == 1.0 async def test_single_level_wildcard_topic_not_matching(hass): """Test not matching single level wildcard topic.""" dev_id = "zanzito" entity_id = f"{DT_DOMAIN}.{dev_id}" subscription = "location/+/zanzito" topic = "location/zanzito" location = json.dumps(LOCATION_MESSAGE) assert await async_setup_component( hass, DT_DOMAIN, {DT_DOMAIN: {CONF_PLATFORM: "mqtt_json", "devices": {dev_id: subscription}}}, ) async_fire_mqtt_message(hass, topic, location) await hass.async_block_till_done() assert hass.states.get(entity_id) is None async def test_multi_level_wildcard_topic_not_matching(hass): """Test not matching multi level wildcard topic.""" dev_id = "zanzito" entity_id = f"{DT_DOMAIN}.{dev_id}" subscription = "location/#" topic = "somewhere/zanzito" location = json.dumps(LOCATION_MESSAGE) assert await async_setup_component( hass, DT_DOMAIN, {DT_DOMAIN: {CONF_PLATFORM: "mqtt_json", "devices": {dev_id: subscription}}}, ) async_fire_mqtt_message(hass, topic, location) await hass.async_block_till_done() assert hass.states.get(entity_id) is None
RDAP1337/jive-turkey
refs/heads/master
app/models/jobModel.py
1
from app import db class Job(db.Model): __tablename__ = 'jobs' id = db.Column(db.Integer, primary_key=True) job_id = db.Column(db.String(64), unique=True) job_created = db.Column(db.DateTime) job_modified = db.Column(db.DateTime) job_sales_rep_id = db.Column(db.String(64), unique=True) job_customer_id = db.Column(db.String(64), unique=True) def __init__( self, job_id, job_created, job_modified, job_sales_rep_id, job_customer_id ): self.job_id = job_id self.job_created = job_created self.job_modified = job_modified self.job_sales_rep_id = job_sales_rep_id self.job_customer_id = job_customer_id
thejens/luigi
refs/heads/master
test/subtask_test.py
36
# -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import abc from helpers import unittest import luigi class AbstractTask(luigi.Task): k = luigi.IntParameter() @abc.abstractproperty def foo(self): raise NotImplementedError @abc.abstractmethod def helper_function(self): raise NotImplementedError def run(self): return ",".join([self.foo, self.helper_function()]) class Implementation(AbstractTask): @property def foo(self): return "bar" def helper_function(self): return "hello" * self.k class AbstractSubclassTest(unittest.TestCase): def test_instantiate_abstract(self): def try_instantiate(): AbstractTask(k=1) self.assertRaises(TypeError, try_instantiate) def test_instantiate(self): self.assertEqual("bar,hellohello", Implementation(k=2).run()) if __name__ == '__main__': luigi.run()
EmadMokhtar/Django
refs/heads/master
tests/sites_tests/tests.py
26
from django.apps import apps from django.apps.registry import Apps from django.conf import settings from django.contrib.sites import models from django.contrib.sites.management import create_default_site from django.contrib.sites.middleware import CurrentSiteMiddleware from django.contrib.sites.models import Site, clear_site_cache from django.contrib.sites.requests import RequestSite from django.contrib.sites.shortcuts import get_current_site from django.core.exceptions import ObjectDoesNotExist, ValidationError from django.db.models.signals import post_migrate from django.http import HttpRequest, HttpResponse from django.test import ( SimpleTestCase, TestCase, modify_settings, override_settings, ) from django.test.utils import captured_stdout @modify_settings(INSTALLED_APPS={'append': 'django.contrib.sites'}) class SitesFrameworkTests(TestCase): databases = {'default', 'other'} @classmethod def setUpTestData(cls): cls.site = Site(id=settings.SITE_ID, domain='example.com', name='example.com') cls.site.save() def tearDown(self): Site.objects.clear_cache() def test_site_manager(self): # Make sure that get_current() does not return a deleted Site object. s = Site.objects.get_current() self.assertIsInstance(s, Site) s.delete() with self.assertRaises(ObjectDoesNotExist): Site.objects.get_current() def test_site_cache(self): # After updating a Site object (e.g. via the admin), we shouldn't return a # bogus value from the SITE_CACHE. site = Site.objects.get_current() self.assertEqual("example.com", site.name) s2 = Site.objects.get(id=settings.SITE_ID) s2.name = "Example site" s2.save() site = Site.objects.get_current() self.assertEqual("Example site", site.name) def test_delete_all_sites_clears_cache(self): # When all site objects are deleted the cache should also # be cleared and get_current() should raise a DoesNotExist. 
self.assertIsInstance(Site.objects.get_current(), Site) Site.objects.all().delete() with self.assertRaises(Site.DoesNotExist): Site.objects.get_current() @override_settings(ALLOWED_HOSTS=['example.com']) def test_get_current_site(self): # The correct Site object is returned request = HttpRequest() request.META = { "SERVER_NAME": "example.com", "SERVER_PORT": "80", } site = get_current_site(request) self.assertIsInstance(site, Site) self.assertEqual(site.id, settings.SITE_ID) # An exception is raised if the sites framework is installed # but there is no matching Site site.delete() with self.assertRaises(ObjectDoesNotExist): get_current_site(request) # A RequestSite is returned if the sites framework is not installed with self.modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'}): site = get_current_site(request) self.assertIsInstance(site, RequestSite) self.assertEqual(site.name, "example.com") @override_settings(SITE_ID='', ALLOWED_HOSTS=['example.com']) def test_get_current_site_no_site_id(self): request = HttpRequest() request.META = { "SERVER_NAME": "example.com", "SERVER_PORT": "80", } del settings.SITE_ID site = get_current_site(request) self.assertEqual(site.name, "example.com") @override_settings(SITE_ID='', ALLOWED_HOSTS=['example.com']) def test_get_current_site_host_with_trailing_dot(self): """ The site is matched if the name in the request has a trailing dot. 
""" request = HttpRequest() request.META = { 'SERVER_NAME': 'example.com.', 'SERVER_PORT': '80', } site = get_current_site(request) self.assertEqual(site.name, 'example.com') @override_settings(SITE_ID='', ALLOWED_HOSTS=['example.com', 'example.net']) def test_get_current_site_no_site_id_and_handle_port_fallback(self): request = HttpRequest() s1 = self.site s2 = Site.objects.create(domain='example.com:80', name='example.com:80') # Host header without port request.META = {'HTTP_HOST': 'example.com'} site = get_current_site(request) self.assertEqual(site, s1) # Host header with port - match, no fallback without port request.META = {'HTTP_HOST': 'example.com:80'} site = get_current_site(request) self.assertEqual(site, s2) # Host header with port - no match, fallback without port request.META = {'HTTP_HOST': 'example.com:81'} site = get_current_site(request) self.assertEqual(site, s1) # Host header with non-matching domain request.META = {'HTTP_HOST': 'example.net'} with self.assertRaises(ObjectDoesNotExist): get_current_site(request) # Ensure domain for RequestSite always matches host header with self.modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'}): request.META = {'HTTP_HOST': 'example.com'} site = get_current_site(request) self.assertEqual(site.name, 'example.com') request.META = {'HTTP_HOST': 'example.com:80'} site = get_current_site(request) self.assertEqual(site.name, 'example.com:80') def test_domain_name_with_whitespaces(self): # Regression for #17320 # Domain names are not allowed contain whitespace characters site = Site(name="test name", domain="test test") with self.assertRaises(ValidationError): site.full_clean() site.domain = "test\ttest" with self.assertRaises(ValidationError): site.full_clean() site.domain = "test\ntest" with self.assertRaises(ValidationError): site.full_clean() @override_settings(ALLOWED_HOSTS=['example.com']) def test_clear_site_cache(self): request = HttpRequest() request.META = { "SERVER_NAME": "example.com", 
"SERVER_PORT": "80", } self.assertEqual(models.SITE_CACHE, {}) get_current_site(request) expected_cache = {self.site.id: self.site} self.assertEqual(models.SITE_CACHE, expected_cache) with self.settings(SITE_ID=''): get_current_site(request) expected_cache.update({self.site.domain: self.site}) self.assertEqual(models.SITE_CACHE, expected_cache) clear_site_cache(Site, instance=self.site, using='default') self.assertEqual(models.SITE_CACHE, {}) @override_settings(SITE_ID='', ALLOWED_HOSTS=['example2.com']) def test_clear_site_cache_domain(self): site = Site.objects.create(name='example2.com', domain='example2.com') request = HttpRequest() request.META = { "SERVER_NAME": "example2.com", "SERVER_PORT": "80", } get_current_site(request) # prime the models.SITE_CACHE expected_cache = {site.domain: site} self.assertEqual(models.SITE_CACHE, expected_cache) # Site exists in 'default' database so using='other' shouldn't clear. clear_site_cache(Site, instance=site, using='other') self.assertEqual(models.SITE_CACHE, expected_cache) # using='default' should clear. clear_site_cache(Site, instance=site, using='default') self.assertEqual(models.SITE_CACHE, {}) def test_unique_domain(self): site = Site(domain=self.site.domain) msg = 'Site with this Domain name already exists.' with self.assertRaisesMessage(ValidationError, msg): site.validate_unique() def test_site_natural_key(self): self.assertEqual(Site.objects.get_by_natural_key(self.site.domain), self.site) self.assertEqual(self.site.natural_key(), (self.site.domain,)) @override_settings(ALLOWED_HOSTS=['example.com']) class RequestSiteTests(SimpleTestCase): def setUp(self): request = HttpRequest() request.META = {'HTTP_HOST': 'example.com'} self.site = RequestSite(request) def test_init_attributes(self): self.assertEqual(self.site.domain, 'example.com') self.assertEqual(self.site.name, 'example.com') def test_str(self): self.assertEqual(str(self.site), 'example.com') def test_save(self): msg = 'RequestSite cannot be saved.' 
with self.assertRaisesMessage(NotImplementedError, msg): self.site.save() def test_delete(self): msg = 'RequestSite cannot be deleted.' with self.assertRaisesMessage(NotImplementedError, msg): self.site.delete() class JustOtherRouter: def allow_migrate(self, db, app_label, **hints): return db == 'other' @modify_settings(INSTALLED_APPS={'append': 'django.contrib.sites'}) class CreateDefaultSiteTests(TestCase): databases = {'default', 'other'} @classmethod def setUpTestData(cls): # Delete the site created as part of the default migration process. Site.objects.all().delete() def setUp(self): self.app_config = apps.get_app_config('sites') def test_basic(self): """ #15346, #15573 - create_default_site() creates an example site only if none exist. """ with captured_stdout() as stdout: create_default_site(self.app_config) self.assertEqual(Site.objects.count(), 1) self.assertIn("Creating example.com", stdout.getvalue()) with captured_stdout() as stdout: create_default_site(self.app_config) self.assertEqual(Site.objects.count(), 1) self.assertEqual("", stdout.getvalue()) @override_settings(DATABASE_ROUTERS=[JustOtherRouter()]) def test_multi_db_with_router(self): """ #16353, #16828 - The default site creation should respect db routing. """ create_default_site(self.app_config, using='default', verbosity=0) create_default_site(self.app_config, using='other', verbosity=0) self.assertFalse(Site.objects.using('default').exists()) self.assertTrue(Site.objects.using('other').exists()) def test_multi_db(self): create_default_site(self.app_config, using='default', verbosity=0) create_default_site(self.app_config, using='other', verbosity=0) self.assertTrue(Site.objects.using('default').exists()) self.assertTrue(Site.objects.using('other').exists()) def test_save_another(self): """ #17415 - Another site can be created right after the default one. On some backends the sequence needs to be reset after saving with an explicit ID. 
There shouldn't be a sequence collisions by saving another site. This test is only meaningful with databases that use sequences for automatic primary keys such as PostgreSQL and Oracle. """ create_default_site(self.app_config, verbosity=0) Site(domain='example2.com', name='example2.com').save() def test_signal(self): """ #23641 - Sending the ``post_migrate`` signal triggers creation of the default site. """ post_migrate.send(sender=self.app_config, app_config=self.app_config, verbosity=0) self.assertTrue(Site.objects.exists()) @override_settings(SITE_ID=35696) def test_custom_site_id(self): """ #23945 - The configured ``SITE_ID`` should be respected. """ create_default_site(self.app_config, verbosity=0) self.assertEqual(Site.objects.get().pk, 35696) @override_settings() # Restore original ``SITE_ID`` afterwards. def test_no_site_id(self): """ #24488 - The pk should default to 1 if no ``SITE_ID`` is configured. """ del settings.SITE_ID create_default_site(self.app_config, verbosity=0) self.assertEqual(Site.objects.get().pk, 1) def test_unavailable_site_model(self): """ #24075 - A Site shouldn't be created if the model isn't available. """ apps = Apps() create_default_site(self.app_config, verbosity=0, apps=apps) self.assertFalse(Site.objects.exists()) class MiddlewareTest(TestCase): def test_old_style_request(self): """The request has correct `site` attribute.""" middleware = CurrentSiteMiddleware() request = HttpRequest() middleware.process_request(request) self.assertEqual(request.site.id, settings.SITE_ID) def test_request(self): def get_response(request): return HttpResponse(str(request.site.id)) response = CurrentSiteMiddleware(get_response)(HttpRequest()) self.assertContains(response, settings.SITE_ID)
switchkiller/ProjDjanko
refs/heads/master
lib/python2.7/site-packages/pip/commands/wheel.py
239
# -*- coding: utf-8 -*-
from __future__ import absolute_import

import logging
import os
import warnings

from pip.basecommand import RequirementCommand
from pip.index import PackageFinder
from pip.exceptions import CommandError, PreviousBuildDirError
from pip.req import RequirementSet
from pip.utils import import_or_raise, normalize_path
from pip.utils.build import BuildDirectory
from pip.utils.deprecation import RemovedInPip8Warning
from pip.wheel import WheelCache, WheelBuilder
from pip import cmdoptions


# Default output directory for built wheels: '<cwd>/wheelhouse'.
DEFAULT_WHEEL_DIR = os.path.join(normalize_path(os.curdir), 'wheelhouse')

logger = logging.getLogger(__name__)


class WheelCommand(RequirementCommand):
    """
    Build Wheel archives for your requirements and dependencies.

    Wheel is a built-package format, and offers the advantage of not
    recompiling your software during every install. For more details, see the
    wheel docs: http://wheel.readthedocs.org/en/latest.

    Requirements: setuptools>=0.8, and wheel.

    'pip wheel' uses the bdist_wheel setuptools extension from the wheel
    package to build individual wheels.

    """

    name = 'wheel'
    usage = """
      %prog [options] <requirement specifier> ...
      %prog [options] -r <requirements file> ...
      %prog [options] [-e] <vcs project url> ...
      %prog [options] [-e] <local project path> ...
      %prog [options] <archive url/path> ..."""

    summary = 'Build wheels from your requirements.'

    def __init__(self, *args, **kw):
        """Register all 'pip wheel' command-line options on the parser."""
        super(WheelCommand, self).__init__(*args, **kw)

        cmd_opts = self.cmd_opts

        cmd_opts.add_option(
            '-w', '--wheel-dir',
            dest='wheel_dir',
            metavar='dir',
            default=DEFAULT_WHEEL_DIR,
            help=("Build wheels into <dir>, where the default is "
                  "'<cwd>/wheelhouse'."),
        )
        cmd_opts.add_option(cmdoptions.use_wheel())
        cmd_opts.add_option(cmdoptions.no_use_wheel())
        cmd_opts.add_option(cmdoptions.no_binary())
        cmd_opts.add_option(cmdoptions.only_binary())
        cmd_opts.add_option(
            '--build-option',
            dest='build_options',
            metavar='options',
            action='append',
            help="Extra arguments to be supplied to 'setup.py bdist_wheel'.")
        cmd_opts.add_option(cmdoptions.constraints())
        cmd_opts.add_option(cmdoptions.editable())
        cmd_opts.add_option(cmdoptions.requirements())
        # --download-cache is deprecated; run() emits RemovedInPip8Warning
        # when it is supplied.
        cmd_opts.add_option(cmdoptions.download_cache())
        cmd_opts.add_option(cmdoptions.src())
        cmd_opts.add_option(cmdoptions.no_deps())
        cmd_opts.add_option(cmdoptions.build_dir())
        cmd_opts.add_option(
            '--global-option',
            dest='global_options',
            action='append',
            metavar='options',
            help="Extra global options to be supplied to the setup.py "
                 "call before the 'bdist_wheel' command.")
        cmd_opts.add_option(
            '--pre',
            action='store_true',
            default=False,
            help=("Include pre-release and development versions. By default, "
                  "pip only finds stable versions."),
        )
        cmd_opts.add_option(cmdoptions.no_clean())

        index_opts = cmdoptions.make_option_group(
            cmdoptions.index_group,
            self.parser,
        )

        # Index options go first so the command-specific group is printed
        # last in --help output.
        self.parser.insert_option_group(0, index_opts)
        self.parser.insert_option_group(0, cmd_opts)

    def check_required_packages(self):
        """Verify 'wheel' and setuptools>=0.8 are importable.

        Raises CommandError with an actionable message when either
        prerequisite is missing or too old (no DistInfoDistribution means
        setuptools predates dist-info support).
        """
        import_or_raise(
            'wheel.bdist_wheel', CommandError,
            "'pip wheel' requires the 'wheel' package. To fix this, run: "
            "pip install wheel"
        )
        pkg_resources = import_or_raise(
            'pkg_resources', CommandError,
            "'pip wheel' requires setuptools >= 0.8 for dist-info support."
            " To fix this, run: pip install --upgrade setuptools"
        )
        if not hasattr(pkg_resources, 'DistInfoDistribution'):
            raise CommandError(
                "'pip wheel' requires setuptools >= 0.8 for dist-info "
                "support. To fix this, run: pip install --upgrade "
                "setuptools"
            )

    def run(self, options, args):
        """Resolve the requirements in args/options and build their wheels.

        Builds a PackageFinder + RequirementSet from the parsed options,
        then delegates the actual bdist_wheel invocations to WheelBuilder.
        Raises CommandError if any wheel fails to build.
        """
        self.check_required_packages()
        cmdoptions.resolve_wheel_no_use_binary(options)
        cmdoptions.check_install_build_global(options)

        index_urls = [options.index_url] + options.extra_index_urls
        if options.no_index:
            logger.info('Ignoring indexes: %s', ','.join(index_urls))
            index_urls = []

        if options.download_cache:
            warnings.warn(
                "--download-cache has been deprecated and will be removed in "
                "the future. Pip now automatically uses and configures its "
                "cache.",
                RemovedInPip8Warning,
            )

        if options.build_dir:
            options.build_dir = os.path.abspath(options.build_dir)

        with self._build_session(options) as session:
            finder = PackageFinder(
                find_links=options.find_links,
                format_control=options.format_control,
                index_urls=index_urls,
                allow_external=options.allow_external,
                allow_unverified=options.allow_unverified,
                allow_all_external=options.allow_all_external,
                allow_all_prereleases=options.pre,
                trusted_hosts=options.trusted_hosts,
                process_dependency_links=options.process_dependency_links,
                session=session,
            )

            # Only auto-delete the build dir when the user neither asked to
            # keep it (--no-clean) nor supplied an explicit --build-dir.
            build_delete = (not (options.no_clean or options.build_dir))
            wheel_cache = WheelCache(options.cache_dir, options.format_control)
            with BuildDirectory(options.build_dir,
                                delete=build_delete) as build_dir:
                requirement_set = RequirementSet(
                    build_dir=build_dir,
                    src_dir=options.src_dir,
                    download_dir=None,
                    ignore_dependencies=options.ignore_dependencies,
                    ignore_installed=True,
                    isolated=options.isolated_mode,
                    session=session,
                    wheel_cache=wheel_cache,
                    wheel_download_dir=options.wheel_dir
                )

                self.populate_requirement_set(
                    requirement_set, args, options, finder, session, self.name,
                    wheel_cache
                )

                if not requirement_set.has_requirements:
                    return

                try:
                    # build wheels
                    wb = WheelBuilder(
                        requirement_set,
                        finder,
                        build_options=options.build_options or [],
                        global_options=options.global_options or [],
                    )
                    if not wb.build():
                        raise CommandError(
                            "Failed to build one or more wheels"
                        )
                except PreviousBuildDirError:
                    # Keep the build dir so the user can inspect/clean it.
                    options.no_clean = True
                    raise
                finally:
                    if not options.no_clean:
                        requirement_set.cleanup_files()
cpausmit/Kraken
refs/heads/master
filefi/021/writeCfg.py
1
#!/usr/bin/env python
"""
Re-write config file and optionally convert to python
"""

__revision__ = "$Id: writeCfg.py,v 1.2 2011/06/19 01:46:53 paus Exp $"
__version__ = "$Revision: 1.2 $"

import getopt
import imp
import os
import pickle
import sys
import xml.dom.minidom

from random import SystemRandom
from ProdCommon.CMSConfigTools.ConfigAPI.CfgInterface import CfgInterface
import FWCore.ParameterSet.Types as CfgTypes

# Cryptographically-seeded RNG; module-level so it is shared by any caller.
MyRandom = SystemRandom()


class ConfigException(Exception):
    """
    Exceptions raised by writeCfg
    """

    def __init__(self, msg):
        Exception.__init__(self, msg)
        self._msg = msg
        return

    def __str__(self):
        return self._msg


def main(argv):
    """
    writeCfg

    - Read in existing, user supplied pycfg or pickled pycfg file
    - Modify job specific parameters based on environment variables and
      arguments.xml
    - Write out pickled pycfg file

    required parameters: none

    optional parameters:
    --help             : help
    --debug            : debug statements
    """

    # defaults
    inputFileNames = None
    parentFileNames = None
    debug = False
    _MAXINT = 900000000

    try:
        opts, args = getopt.getopt(argv, "", ["debug", "help"])
    except getopt.GetoptError:
        print main.__doc__
        sys.exit(2)

    # CMSSW_VERSION looks like "CMSSW_5_3_14"; split out the numeric parts.
    try:
        CMSSW = os.environ['CMSSW_VERSION']
        parts = CMSSW.split('_')
        CMSSW_major = int(parts[1])
        CMSSW_minor = int(parts[2])
        CMSSW_patch = int(parts[3])
    except (KeyError, ValueError):
        msg = "Your environment doesn't specify the CMSSW version or specifies it incorrectly"
        raise ConfigException(msg)

    # Parse command line options
    for opt, arg in opts:
        if opt == "--help":
            print main.__doc__
            sys.exit()
        elif opt == "--debug":
            debug = True

    # Parse remaining parameters: input config and output file are positional.
    try:
        fileName = args[0]
        outFileName = args[1]
    except IndexError:
        print main.__doc__
        sys.exit()

    # Read in Environment, XML and get optional Parameters
    nJob = int(os.environ.get('NJob', '0'))
    preserveSeeds = os.environ.get('PreserveSeeds','')
    incrementSeeds = os.environ.get('IncrementSeeds','')

    # Defaults
    maxEvents = 0
    skipEvents = 0
    firstEvent = -1
    compHEPFirstEvent = 0
    firstRun = 0  # FUTURE:  Remove firstRun
    firstLumi = 0

    # Pull this job's per-job parameters out of arguments.xml, matching on
    # the NJob environment variable.
    # NOTE(review): generator/inputFiles/parentFiles/lumis are only bound
    # when a <Job> element with a matching JobID exists; a missing entry
    # would raise NameError below — presumably guaranteed by the submitter.
    dom = xml.dom.minidom.parse(os.environ['RUNTIME_AREA']+'/arguments.xml')
    for elem in dom.getElementsByTagName("Job"):
        if nJob == int(elem.getAttribute("JobID")):
            if elem.getAttribute("MaxEvents"):
                maxEvents = int(elem.getAttribute("MaxEvents"))
            if elem.getAttribute("SkipEvents"):
                skipEvents = int(elem.getAttribute("SkipEvents"))
            if elem.getAttribute("FirstEvent"):
                firstEvent = int(elem.getAttribute("FirstEvent"))
            if elem.getAttribute("FirstRun"):
                firstRun = int(elem.getAttribute("FirstRun"))
            if elem.getAttribute("FirstLumi"):
                firstLumi = int(elem.getAttribute("FirstLumi"))
            generator = str(elem.getAttribute('Generator'))
            inputFiles = str(elem.getAttribute('InputFiles'))
            parentFiles = str(elem.getAttribute('ParentFiles'))
            lumis = str(elem.getAttribute('Lumis'))

    # Read Input python config file
    handle = open(fileName, 'r')
    try:   # Nested form for Python < 2.5
        try:
            print "Importing .py file"
            cfo = imp.load_source("pycfg", fileName, handle)
            cmsProcess = cfo.process
        except Exception, ex:
            msg = "Your pycfg file is not valid python: %s" % str(ex)
            raise ConfigException(msg)
    finally:
        handle.close()

    cfg = CfgInterface(cmsProcess)

    # Set parameters for job
    print "Setting parameters"
    inModule = cfg.inputSource
    if maxEvents:
        cfg.maxEvents.setMaxEventsInput(maxEvents)
    if skipEvents:
        inModule.setSkipEvents(skipEvents)

    # Set "skip events" for various generators: each generator type exposes
    # a different parameter for starting mid-sample.
    if generator == 'comphep':
        cmsProcess.source.CompHEPFirstEvent = CfgTypes.int32(firstEvent)
    elif generator == 'lhe':
        cmsProcess.source.skipEvents = CfgTypes.untracked(CfgTypes.uint32(firstEvent))
        cmsProcess.source.firstEvent = CfgTypes.untracked(CfgTypes.uint32(firstEvent+1))
    elif firstEvent != -1:  # (Old?  Madgraph)
        cmsProcess.source.firstEvent = CfgTypes.untracked(CfgTypes.uint32(firstEvent))

    if inputFiles:
        inputFileNames = inputFiles.split(',')
        inModule.setFileNames(*inputFileNames)

        # handle parent files if needed
        if parentFiles:
            parentFileNames = parentFiles.split(',')
            inModule.setSecondaryFileNames(*parentFileNames)

    if lumis:
        if CMSSW_major < 3:  # FUTURE: Can remove this check
            print "Cannot skip lumis for CMSSW 2_x"
        else:
            lumiRanges = lumis.split(',')
            inModule.setLumisToProcess(*lumiRanges)

    # Pythia parameters
    if (firstRun):
        inModule.setFirstRun(firstRun)
    if (firstLumi):
        inModule.setFirstLumi(firstLumi)

    # Check if there are random #'s to deal with
    if cfg.data.services.has_key('RandomNumberGeneratorService'):
        print "RandomNumberGeneratorService found, will attempt to change seeds"
        from IOMC.RandomEngine.RandomServiceHelper import RandomNumberServiceHelper
        ranGenerator = cfg.data.services['RandomNumberGeneratorService']
        randSvc = RandomNumberServiceHelper(ranGenerator)
        incrementSeedList = []
        preserveSeedList = []
        if incrementSeeds:
            incrementSeedList = incrementSeeds.split(',')
        if preserveSeeds:
            preserveSeedList = preserveSeeds.split(',')

        # Increment requested seed sets by the job number so every job gets
        # distinct but reproducible seeds; mark them preserved so the
        # randomize step below leaves them alone.
        for seedName in incrementSeedList:
            curSeeds = randSvc.getNamedSeed(seedName)
            newSeeds = [x+nJob for x in curSeeds]
            randSvc.setNamedSeed(seedName, *newSeeds)
            preserveSeedList.append(seedName)

        # Randomize remaining seeds
        randSvc.populate(*preserveSeedList)

    # Write out new config file: a small python stub that unpickles the
    # modified process object at load time.
    outFile = open(outFileName,"w")
    outFile.write("import FWCore.ParameterSet.Config as cms\n")
    outFile.write("import pickle\n")
    outFile.write("pickledCfg=\"\"\"%s\"\"\"\n" % pickle.dumps(cmsProcess))
    outFile.write("process = pickle.loads(pickledCfg)\n")
    outFile.close()

    if (debug):
        print "writeCfg output (May not be exact):"
        print "import FWCore.ParameterSet.Config as cms"
        print cmsProcess.dumpPython()


if __name__ == '__main__':
    exit_status = main(sys.argv[1:])
    sys.exit(exit_status)
maxsocl/django
refs/heads/master
django/db/backends/sqlite3/utils.py
108
from django.conf import settings
from django.utils import timezone
from django.utils.dateparse import parse_datetime


def parse_datetime_with_timezone_support(value):
    """Parse *value* into a datetime, honoring the USE_TZ setting.

    Returns None when *value* is not a valid datetime string. When time
    zone support is enabled and the parsed datetime is naive, it is
    re-interpreted as UTC by attaching tzinfo (never shifting the clock
    value of an already-aware datetime).
    """
    parsed = parse_datetime(value)
    if parsed is None:
        return None
    # Only attach tzinfo when the value is naive, so an explicit offset in
    # the input is never clobbered.
    if settings.USE_TZ and timezone.is_naive(parsed):
        return parsed.replace(tzinfo=timezone.utc)
    return parsed
fluxus-study/kimjmin
refs/heads/master
node_modules/meanio/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/xcodeproj_file.py
505
# Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Xcode project file generator. This module is both an Xcode project file generator and a documentation of the Xcode project file format. Knowledge of the project file format was gained based on extensive experience with Xcode, and by making changes to projects in Xcode.app and observing the resultant changes in the associated project files. XCODE PROJECT FILES The generator targets the file format as written by Xcode 3.2 (specifically, 3.2.6), but past experience has taught that the format has not changed significantly in the past several years, and future versions of Xcode are able to read older project files. Xcode project files are "bundled": the project "file" from an end-user's perspective is actually a directory with an ".xcodeproj" extension. The project file from this module's perspective is actually a file inside this directory, always named "project.pbxproj". This file contains a complete description of the project and is all that is needed to use the xcodeproj. Other files contained in the xcodeproj directory are simply used to store per-user settings, such as the state of various UI elements in the Xcode application. The project.pbxproj file is a property list, stored in a format almost identical to the NeXTstep property list format. The file is able to carry Unicode data, and is encoded in UTF-8. The root element in the property list is a dictionary that contains several properties of minimal interest, and two properties of immense interest. The most important property is a dictionary named "objects". The entire structure of the project is represented by the children of this property. The objects dictionary is keyed by unique 96-bit values represented by 24 uppercase hexadecimal characters. Each value in the objects dictionary is itself a dictionary, describing an individual object. 
Each object in the dictionary is a member of a class, which is identified by the "isa" property of each object. A variety of classes are represented in a project file. Objects can refer to other objects by ID, using the 24-character hexadecimal object key. A project's objects form a tree, with a root object of class PBXProject at the root. As an example, the PBXProject object serves as parent to an XCConfigurationList object defining the build configurations used in the project, a PBXGroup object serving as a container for all files referenced in the project, and a list of target objects, each of which defines a target in the project. There are several different types of target object, such as PBXNativeTarget and PBXAggregateTarget. In this module, this relationship is expressed by having each target type derive from an abstract base named XCTarget. The project.pbxproj file's root dictionary also contains a property, sibling to the "objects" dictionary, named "rootObject". The value of rootObject is a 24-character object key referring to the root PBXProject object in the objects dictionary. In Xcode, every file used as input to a target or produced as a final product of a target must appear somewhere in the hierarchy rooted at the PBXGroup object referenced by the PBXProject's mainGroup property. A PBXGroup is generally represented as a folder in the Xcode application. PBXGroups can contain other PBXGroups as well as PBXFileReferences, which are pointers to actual files. Each XCTarget contains a list of build phases, represented in this module by the abstract base XCBuildPhase. Examples of concrete XCBuildPhase derivations are PBXSourcesBuildPhase and PBXFrameworksBuildPhase, which correspond to the "Compile Sources" and "Link Binary With Libraries" phases displayed in the Xcode application. 
Files used as input to these phases (for example, source files in the former
case and libraries and frameworks in the latter) are represented by
PBXBuildFile objects, referenced by elements of "files" lists in XCTarget
objects.  Each PBXBuildFile object refers to a PBXFileReference object as a
"weak" reference: it does not "own" the PBXFileReference, which is owned by
the root object's mainGroup or a descendant group.  In most cases, the layer
of indirection between an XCBuildPhase and a PBXFileReference via a
PBXBuildFile appears extraneous, but there's actually one reason for this:
file-specific compiler flags are added to the PBXBuildFile object so as to
allow a single file to be a member of multiple targets while having distinct
compiler flags for each.  These flags can be modified in the Xcode
application in the "Build" tab of a File Info window.

When a project is open in the Xcode application, Xcode will rewrite it.  As
such, this module is careful to adhere to the formatting used by Xcode, to
avoid insignificant changes appearing in the file when it is used in the
Xcode application.  This will keep version control repositories happy, and
makes it possible to compare a project file used in Xcode to one generated by
this module to determine if any significant changes were made in the
application.

Xcode has its own way of assigning 24-character identifiers to each object,
which is not duplicated here.  Because the identifier is only generated once,
when an object is created, and is then left unchanged, there is no need to
attempt to duplicate Xcode's behavior in this area.  The generator is free to
select any identifier, even at random, to refer to the objects it creates,
and Xcode will retain those identifiers and use them when subsequently
rewriting the project file.
However, the generator would choose new random identifiers each time the project files are generated, leading to difficulties comparing "used" project files to "pristine" ones produced by this module, and causing the appearance of changes as every object identifier is changed when updated projects are checked in to a version control repository. To mitigate this problem, this module chooses identifiers in a more deterministic way, by hashing a description of each object as well as its parent and ancestor objects. This strategy should result in minimal "shift" in IDs as successive generations of project files are produced. THIS MODULE This module introduces several classes, all derived from the XCObject class. Nearly all of the "brains" are built into the XCObject class, which understands how to create and modify objects, maintain the proper tree structure, compute identifiers, and print objects. For the most part, classes derived from XCObject need only provide a _schema class object, a dictionary that expresses what properties objects of the class may contain. Given this structure, it's possible to build a minimal project file by creating objects of the appropriate types and making the proper connections: config_list = XCConfigurationList() group = PBXGroup() project = PBXProject({'buildConfigurationList': config_list, 'mainGroup': group}) With the project object set up, it can be added to an XCProjectFile object. XCProjectFile is a pseudo-class in the sense that it is a concrete XCObject subclass that does not actually correspond to a class type found in a project file. Rather, it is used to represent the project file's root dictionary. Printing an XCProjectFile will print the entire project file, including the full "objects" dictionary. project_file = XCProjectFile({'rootObject': project}) project_file.ComputeIDs() project_file.Print() Xcode project files are always encoded in UTF-8. This module will accept strings of either the str class or the unicode class. 
Strings of class str are assumed to already be encoded in UTF-8. Obviously, if you're just using ASCII, you won't encounter difficulties because ASCII is a UTF-8 subset. Strings of class unicode are handled properly and encoded in UTF-8 when a project file is output. """ import gyp.common import posixpath import re import struct import sys # hashlib is supplied as of Python 2.5 as the replacement interface for sha # and other secure hashes. In 2.6, sha is deprecated. Import hashlib if # available, avoiding a deprecation warning under 2.6. Import sha otherwise, # preserving 2.4 compatibility. try: import hashlib _new_sha1 = hashlib.sha1 except ImportError: import sha _new_sha1 = sha.new # See XCObject._EncodeString. This pattern is used to determine when a string # can be printed unquoted. Strings that match this pattern may be printed # unquoted. Strings that do not match must be quoted and may be further # transformed to be properly encoded. Note that this expression matches the # characters listed with "+", for 1 or more occurrences: if a string is empty, # it must not match this pattern, because it needs to be encoded as "". _unquoted = re.compile('^[A-Za-z0-9$./_]+$') # Strings that match this pattern are quoted regardless of what _unquoted says. # Oddly, Xcode will quote any string with a run of three or more underscores. _quoted = re.compile('___') # This pattern should match any character that needs to be escaped by # XCObject._EncodeString. See that function. _escaped = re.compile('[\\\\"]|[\x00-\x1f]') # Used by SourceTreeAndPathFromPath _path_leading_variable = re.compile('^\$\((.*?)\)(/(.*))?$') def SourceTreeAndPathFromPath(input_path): """Given input_path, returns a tuple with sourceTree and path values. 
Examples: input_path (source_tree, output_path) '$(VAR)/path' ('VAR', 'path') '$(VAR)' ('VAR', None) 'path' (None, 'path') """ source_group_match = _path_leading_variable.match(input_path) if source_group_match: source_tree = source_group_match.group(1) output_path = source_group_match.group(3) # This may be None. else: source_tree = None output_path = input_path return (source_tree, output_path) def ConvertVariablesToShellSyntax(input_string): return re.sub('\$\((.*?)\)', '${\\1}', input_string) class XCObject(object): """The abstract base of all class types used in Xcode project files. Class variables: _schema: A dictionary defining the properties of this class. The keys to _schema are string property keys as used in project files. Values are a list of four or five elements: [ is_list, property_type, is_strong, is_required, default ] is_list: True if the property described is a list, as opposed to a single element. property_type: The type to use as the value of the property, or if is_list is True, the type to use for each element of the value's list. property_type must be an XCObject subclass, or one of the built-in types str, int, or dict. is_strong: If property_type is an XCObject subclass, is_strong is True to assert that this class "owns," or serves as parent, to the property value (or, if is_list is True, values). is_strong must be False if property_type is not an XCObject subclass. is_required: True if the property is required for the class. Note that is_required being True does not preclude an empty string ("", in the case of property_type str) or list ([], in the case of is_list True) from being set for the property. default: Optional. If is_requried is True, default may be set to provide a default value for objects that do not supply their own value. If is_required is True and default is not provided, users of the class must supply their own value for the property. 
Note that although the values of the array are expressed in boolean terms, subclasses provide values as integers to conserve horizontal space. _should_print_single_line: False in XCObject. Subclasses whose objects should be written to the project file in the alternate single-line format, such as PBXFileReference and PBXBuildFile, should set this to True. _encode_transforms: Used by _EncodeString to encode unprintable characters. The index into this list is the ordinal of the character to transform; each value is a string used to represent the character in the output. XCObject provides an _encode_transforms list suitable for most XCObject subclasses. _alternate_encode_transforms: Provided for subclasses that wish to use the alternate encoding rules. Xcode seems to use these rules when printing objects in single-line format. Subclasses that desire this behavior should set _encode_transforms to _alternate_encode_transforms. _hashables: A list of XCObject subclasses that can be hashed by ComputeIDs to construct this object's ID. Most classes that need custom hashing behavior should do it by overriding Hashables, but in some cases an object's parent may wish to push a hashable value into its child, and it can do so by appending to _hashables. Attributes: id: The object's identifier, a 24-character uppercase hexadecimal string. Usually, objects being created should not set id until the entire project file structure is built. At that point, UpdateIDs() should be called on the root object to assign deterministic values for id to each object in the tree. parent: The object's parent. This is set by a parent XCObject when a child object is added to it. _properties: The object's property dictionary. An object's properties are described by its class' _schema variable. """ _schema = {} _should_print_single_line = False # See _EncodeString. 
_encode_transforms = [] i = 0 while i < ord(' '): _encode_transforms.append('\\U%04x' % i) i = i + 1 _encode_transforms[7] = '\\a' _encode_transforms[8] = '\\b' _encode_transforms[9] = '\\t' _encode_transforms[10] = '\\n' _encode_transforms[11] = '\\v' _encode_transforms[12] = '\\f' _encode_transforms[13] = '\\n' _alternate_encode_transforms = list(_encode_transforms) _alternate_encode_transforms[9] = chr(9) _alternate_encode_transforms[10] = chr(10) _alternate_encode_transforms[11] = chr(11) def __init__(self, properties=None, id=None, parent=None): self.id = id self.parent = parent self._properties = {} self._hashables = [] self._SetDefaultsFromSchema() self.UpdateProperties(properties) def __repr__(self): try: name = self.Name() except NotImplementedError: return '<%s at 0x%x>' % (self.__class__.__name__, id(self)) return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self)) def Copy(self): """Make a copy of this object. The new object will have its own copy of lists and dicts. Any XCObject objects owned by this object (marked "strong") will be copied in the new object, even those found in lists. If this object has any weak references to other XCObjects, the same references are added to the new object without making a copy. """ that = self.__class__(id=self.id, parent=self.parent) for key, value in self._properties.iteritems(): is_strong = self._schema[key][2] if isinstance(value, XCObject): if is_strong: new_value = value.Copy() new_value.parent = that that._properties[key] = new_value else: that._properties[key] = value elif isinstance(value, str) or isinstance(value, unicode) or \ isinstance(value, int): that._properties[key] = value elif isinstance(value, list): if is_strong: # If is_strong is True, each element is an XCObject, so it's safe to # call Copy. 
that._properties[key] = [] for item in value: new_item = item.Copy() new_item.parent = that that._properties[key].append(new_item) else: that._properties[key] = value[:] elif isinstance(value, dict): # dicts are never strong. if is_strong: raise TypeError, 'Strong dict for key ' + key + ' in ' + \ self.__class__.__name__ else: that._properties[key] = value.copy() else: raise TypeError, 'Unexpected type ' + value.__class__.__name__ + \ ' for key ' + key + ' in ' + self.__class__.__name__ return that def Name(self): """Return the name corresponding to an object. Not all objects necessarily need to be nameable, and not all that do have a "name" property. Override as needed. """ # If the schema indicates that "name" is required, try to access the # property even if it doesn't exist. This will result in a KeyError # being raised for the property that should be present, which seems more # appropriate than NotImplementedError in this case. if 'name' in self._properties or \ ('name' in self._schema and self._schema['name'][3]): return self._properties['name'] raise NotImplementedError, \ self.__class__.__name__ + ' must implement Name' def Comment(self): """Return a comment string for the object. Most objects just use their name as the comment, but PBXProject uses different values. The returned comment is not escaped and does not have any comment marker strings applied to it. """ return self.Name() def Hashables(self): hashables = [self.__class__.__name__] name = self.Name() if name != None: hashables.append(name) hashables.extend(self._hashables) return hashables def HashablesForChild(self): return None def ComputeIDs(self, recursive=True, overwrite=True, seed_hash=None): """Set "id" properties deterministically. An object's "id" property is set based on a hash of its class type and name, as well as the class type and name of all ancestor objects. As such, it is only advisable to call ComputeIDs once an entire project file tree is built. 
If recursive is True, recurse into all descendant objects and update their hashes. If overwrite is True, any existing value set in the "id" property will be replaced. """ def _HashUpdate(hash, data): """Update hash with data's length and contents. If the hash were updated only with the value of data, it would be possible for clowns to induce collisions by manipulating the names of their objects. By adding the length, it's exceedingly less likely that ID collisions will be encountered, intentionally or not. """ hash.update(struct.pack('>i', len(data))) hash.update(data) if seed_hash is None: seed_hash = _new_sha1() hash = seed_hash.copy() hashables = self.Hashables() assert len(hashables) > 0 for hashable in hashables: _HashUpdate(hash, hashable) if recursive: hashables_for_child = self.HashablesForChild() if hashables_for_child is None: child_hash = hash else: assert len(hashables_for_child) > 0 child_hash = seed_hash.copy() for hashable in hashables_for_child: _HashUpdate(child_hash, hashable) for child in self.Children(): child.ComputeIDs(recursive, overwrite, child_hash) if overwrite or self.id is None: # Xcode IDs are only 96 bits (24 hex characters), but a SHA-1 digest is # is 160 bits. Instead of throwing out 64 bits of the digest, xor them # into the portion that gets used. assert hash.digest_size % 4 == 0 digest_int_count = hash.digest_size / 4 digest_ints = struct.unpack('>' + 'I' * digest_int_count, hash.digest()) id_ints = [0, 0, 0] for index in xrange(0, digest_int_count): id_ints[index % 3] ^= digest_ints[index] self.id = '%08X%08X%08X' % tuple(id_ints) def EnsureNoIDCollisions(self): """Verifies that no two objects have the same ID. Checks all descendants. 
""" ids = {} descendants = self.Descendants() for descendant in descendants: if descendant.id in ids: other = ids[descendant.id] raise KeyError, \ 'Duplicate ID %s, objects "%s" and "%s" in "%s"' % \ (descendant.id, str(descendant._properties), str(other._properties), self._properties['rootObject'].Name()) ids[descendant.id] = descendant def Children(self): """Returns a list of all of this object's owned (strong) children.""" children = [] for property, attributes in self._schema.iteritems(): (is_list, property_type, is_strong) = attributes[0:3] if is_strong and property in self._properties: if not is_list: children.append(self._properties[property]) else: children.extend(self._properties[property]) return children def Descendants(self): """Returns a list of all of this object's descendants, including this object. """ children = self.Children() descendants = [self] for child in children: descendants.extend(child.Descendants()) return descendants def PBXProjectAncestor(self): # The base case for recursion is defined at PBXProject.PBXProjectAncestor. if self.parent: return self.parent.PBXProjectAncestor() return None def _EncodeComment(self, comment): """Encodes a comment to be placed in the project file output, mimicing Xcode behavior. """ # This mimics Xcode behavior by wrapping the comment in "/*" and "*/". If # the string already contains a "*/", it is turned into "(*)/". This keeps # the file writer from outputting something that would be treated as the # end of a comment in the middle of something intended to be entirely a # comment. return '/* ' + comment.replace('*/', '(*)/') + ' */' def _EncodeTransform(self, match): # This function works closely with _EncodeString. It will only be called # by re.sub with match.group(0) containing a character matched by the # the _escaped expression. char = match.group(0) # Backslashes (\) and quotation marks (") are always replaced with a # backslash-escaped version of the same. 
Everything else gets its # replacement from the class' _encode_transforms array. if char == '\\': return '\\\\' if char == '"': return '\\"' return self._encode_transforms[ord(char)] def _EncodeString(self, value): """Encodes a string to be placed in the project file output, mimicing Xcode behavior. """ # Use quotation marks when any character outside of the range A-Z, a-z, 0-9, # $ (dollar sign), . (period), and _ (underscore) is present. Also use # quotation marks to represent empty strings. # # Escape " (double-quote) and \ (backslash) by preceding them with a # backslash. # # Some characters below the printable ASCII range are encoded specially: # 7 ^G BEL is encoded as "\a" # 8 ^H BS is encoded as "\b" # 11 ^K VT is encoded as "\v" # 12 ^L NP is encoded as "\f" # 127 ^? DEL is passed through as-is without escaping # - In PBXFileReference and PBXBuildFile objects: # 9 ^I HT is passed through as-is without escaping # 10 ^J NL is passed through as-is without escaping # 13 ^M CR is passed through as-is without escaping # - In other objects: # 9 ^I HT is encoded as "\t" # 10 ^J NL is encoded as "\n" # 13 ^M CR is encoded as "\n" rendering it indistinguishable from # 10 ^J NL # All other characters within the ASCII control character range (0 through # 31 inclusive) are encoded as "\U001f" referring to the Unicode code point # in hexadecimal. For example, character 14 (^N SO) is encoded as "\U000e". # Characters above the ASCII range are passed through to the output encoded # as UTF-8 without any escaping. These mappings are contained in the # class' _encode_transforms list. if _unquoted.search(value) and not _quoted.search(value): return value return '"' + _escaped.sub(self._EncodeTransform, value) + '"' def _XCPrint(self, file, tabs, line): file.write('\t' * tabs + line) def _XCPrintableValue(self, tabs, value, flatten_list=False): """Returns a representation of value that may be printed in a project file, mimicing Xcode's behavior. 
_XCPrintableValue can handle str and int values, XCObjects (which are made printable by returning their id property), and list and dict objects composed of any of the above types. When printing a list or dict, and _should_print_single_line is False, the tabs parameter is used to determine how much to indent the lines corresponding to the items in the list or dict. If flatten_list is True, single-element lists will be transformed into strings. """ printable = '' comment = None if self._should_print_single_line: sep = ' ' element_tabs = '' end_tabs = '' else: sep = '\n' element_tabs = '\t' * (tabs + 1) end_tabs = '\t' * tabs if isinstance(value, XCObject): printable += value.id comment = value.Comment() elif isinstance(value, str): printable += self._EncodeString(value) elif isinstance(value, unicode): printable += self._EncodeString(value.encode('utf-8')) elif isinstance(value, int): printable += str(value) elif isinstance(value, list): if flatten_list and len(value) <= 1: if len(value) == 0: printable += self._EncodeString('') else: printable += self._EncodeString(value[0]) else: printable = '(' + sep for item in value: printable += element_tabs + \ self._XCPrintableValue(tabs + 1, item, flatten_list) + \ ',' + sep printable += end_tabs + ')' elif isinstance(value, dict): printable = '{' + sep for item_key, item_value in sorted(value.iteritems()): printable += element_tabs + \ self._XCPrintableValue(tabs + 1, item_key, flatten_list) + ' = ' + \ self._XCPrintableValue(tabs + 1, item_value, flatten_list) + ';' + \ sep printable += end_tabs + '}' else: raise TypeError, "Can't make " + value.__class__.__name__ + ' printable' if comment != None: printable += ' ' + self._EncodeComment(comment) return printable def _XCKVPrint(self, file, tabs, key, value): """Prints a key and value, members of an XCObject's _properties dictionary, to file. tabs is an int identifying the indentation level. 
If the class' _should_print_single_line variable is True, tabs is ignored and the key-value pair will be followed by a space insead of a newline. """ if self._should_print_single_line: printable = '' after_kv = ' ' else: printable = '\t' * tabs after_kv = '\n' # Xcode usually prints remoteGlobalIDString values in PBXContainerItemProxy # objects without comments. Sometimes it prints them with comments, but # the majority of the time, it doesn't. To avoid unnecessary changes to # the project file after Xcode opens it, don't write comments for # remoteGlobalIDString. This is a sucky hack and it would certainly be # cleaner to extend the schema to indicate whether or not a comment should # be printed, but since this is the only case where the problem occurs and # Xcode itself can't seem to make up its mind, the hack will suffice. # # Also see PBXContainerItemProxy._schema['remoteGlobalIDString']. if key == 'remoteGlobalIDString' and isinstance(self, PBXContainerItemProxy): value_to_print = value.id else: value_to_print = value # PBXBuildFile's settings property is represented in the output as a dict, # but a hack here has it represented as a string. Arrange to strip off the # quotes so that it shows up in the output as expected. if key == 'settings' and isinstance(self, PBXBuildFile): strip_value_quotes = True else: strip_value_quotes = False # In another one-off, let's set flatten_list on buildSettings properties # of XCBuildConfiguration objects, because that's how Xcode treats them. 
if key == 'buildSettings' and isinstance(self, XCBuildConfiguration): flatten_list = True else: flatten_list = False try: printable_key = self._XCPrintableValue(tabs, key, flatten_list) printable_value = self._XCPrintableValue(tabs, value_to_print, flatten_list) if strip_value_quotes and len(printable_value) > 1 and \ printable_value[0] == '"' and printable_value[-1] == '"': printable_value = printable_value[1:-1] printable += printable_key + ' = ' + printable_value + ';' + after_kv except TypeError, e: gyp.common.ExceptionAppend(e, 'while printing key "%s"' % key) raise self._XCPrint(file, 0, printable) def Print(self, file=sys.stdout): """Prints a reprentation of this object to file, adhering to Xcode output formatting. """ self.VerifyHasRequiredProperties() if self._should_print_single_line: # When printing an object in a single line, Xcode doesn't put any space # between the beginning of a dictionary (or presumably a list) and the # first contained item, so you wind up with snippets like # ...CDEF = {isa = PBXFileReference; fileRef = 0123... # If it were me, I would have put a space in there after the opening # curly, but I guess this is just another one of those inconsistencies # between how Xcode prints PBXFileReference and PBXBuildFile objects as # compared to other objects. Mimic Xcode's behavior here by using an # empty string for sep. sep = '' end_tabs = 0 else: sep = '\n' end_tabs = 2 # Start the object. For example, '\t\tPBXProject = {\n'. self._XCPrint(file, 2, self._XCPrintableValue(2, self) + ' = {' + sep) # "isa" isn't in the _properties dictionary, it's an intrinsic property # of the class which the object belongs to. Xcode always outputs "isa" # as the first element of an object dictionary. self._XCKVPrint(file, 3, 'isa', self.__class__.__name__) # The remaining elements of an object dictionary are sorted alphabetically. for property, value in sorted(self._properties.iteritems()): self._XCKVPrint(file, 3, property, value) # End the object. 
self._XCPrint(file, end_tabs, '};\n') def UpdateProperties(self, properties, do_copy=False): """Merge the supplied properties into the _properties dictionary. The input properties must adhere to the class schema or a KeyError or TypeError exception will be raised. If adding an object of an XCObject subclass and the schema indicates a strong relationship, the object's parent will be set to this object. If do_copy is True, then lists, dicts, strong-owned XCObjects, and strong-owned XCObjects in lists will be copied instead of having their references added. """ if properties is None: return for property, value in properties.iteritems(): # Make sure the property is in the schema. if not property in self._schema: raise KeyError, property + ' not in ' + self.__class__.__name__ # Make sure the property conforms to the schema. (is_list, property_type, is_strong) = self._schema[property][0:3] if is_list: if value.__class__ != list: raise TypeError, \ property + ' of ' + self.__class__.__name__ + \ ' must be list, not ' + value.__class__.__name__ for item in value: if not isinstance(item, property_type) and \ not (item.__class__ == unicode and property_type == str): # Accept unicode where str is specified. str is treated as # UTF-8-encoded. raise TypeError, \ 'item of ' + property + ' of ' + self.__class__.__name__ + \ ' must be ' + property_type.__name__ + ', not ' + \ item.__class__.__name__ elif not isinstance(value, property_type) and \ not (value.__class__ == unicode and property_type == str): # Accept unicode where str is specified. str is treated as # UTF-8-encoded. raise TypeError, \ property + ' of ' + self.__class__.__name__ + ' must be ' + \ property_type.__name__ + ', not ' + value.__class__.__name__ # Checks passed, perform the assignment. 
if do_copy: if isinstance(value, XCObject): if is_strong: self._properties[property] = value.Copy() else: self._properties[property] = value elif isinstance(value, str) or isinstance(value, unicode) or \ isinstance(value, int): self._properties[property] = value elif isinstance(value, list): if is_strong: # If is_strong is True, each element is an XCObject, so it's safe # to call Copy. self._properties[property] = [] for item in value: self._properties[property].append(item.Copy()) else: self._properties[property] = value[:] elif isinstance(value, dict): self._properties[property] = value.copy() else: raise TypeError, "Don't know how to copy a " + \ value.__class__.__name__ + ' object for ' + \ property + ' in ' + self.__class__.__name__ else: self._properties[property] = value # Set up the child's back-reference to this object. Don't use |value| # any more because it may not be right if do_copy is true. if is_strong: if not is_list: self._properties[property].parent = self else: for item in self._properties[property]: item.parent = self def HasProperty(self, key): return key in self._properties def GetProperty(self, key): return self._properties[key] def SetProperty(self, key, value): self.UpdateProperties({key: value}) def DelProperty(self, key): if key in self._properties: del self._properties[key] def AppendProperty(self, key, value): # TODO(mark): Support ExtendProperty too (and make this call that)? # Schema validation. if not key in self._schema: raise KeyError, key + ' not in ' + self.__class__.__name__ (is_list, property_type, is_strong) = self._schema[key][0:3] if not is_list: raise TypeError, key + ' of ' + self.__class__.__name__ + ' must be list' if not isinstance(value, property_type): raise TypeError, 'item of ' + key + ' of ' + self.__class__.__name__ + \ ' must be ' + property_type.__name__ + ', not ' + \ value.__class__.__name__ # If the property doesn't exist yet, create a new empty list to receive the # item. 
if not key in self._properties: self._properties[key] = [] # Set up the ownership link. if is_strong: value.parent = self # Store the item. self._properties[key].append(value) def VerifyHasRequiredProperties(self): """Ensure that all properties identified as required by the schema are set. """ # TODO(mark): A stronger verification mechanism is needed. Some # subclasses need to perform validation beyond what the schema can enforce. for property, attributes in self._schema.iteritems(): (is_list, property_type, is_strong, is_required) = attributes[0:4] if is_required and not property in self._properties: raise KeyError, self.__class__.__name__ + ' requires ' + property def _SetDefaultsFromSchema(self): """Assign object default values according to the schema. This will not overwrite properties that have already been set.""" defaults = {} for property, attributes in self._schema.iteritems(): (is_list, property_type, is_strong, is_required) = attributes[0:4] if is_required and len(attributes) >= 5 and \ not property in self._properties: default = attributes[4] defaults[property] = default if len(defaults) > 0: # Use do_copy=True so that each new object gets its own copy of strong # objects, lists, and dicts. self.UpdateProperties(defaults, do_copy=True) class XCHierarchicalElement(XCObject): """Abstract base for PBXGroup and PBXFileReference. Not represented in a project file.""" # TODO(mark): Do name and path belong here? Probably so. # If path is set and name is not, name may have a default value. Name will # be set to the basename of path, if the basename of path is different from # the full value of path. If path is already just a leaf name, name will # not be set. 
  _schema = XCObject._schema.copy()
  _schema.update({
    'comments':       [0, str, 0, 0],
    'fileEncoding':   [0, str, 0, 0],
    'includeInIndex': [0, int, 0, 0],
    'indentWidth':    [0, int, 0, 0],
    'lineEnding':     [0, int, 0, 0],
    'sourceTree':     [0, str, 0, 1, '<group>'],
    'tabWidth':       [0, int, 0, 0],
    'usesTabs':       [0, int, 0, 0],
    'wrapsLines':     [0, int, 0, 0],
  })

  def __init__(self, properties=None, id=None, parent=None):
    # super
    XCObject.__init__(self, properties, id, parent)
    if 'path' in self._properties and not 'name' in self._properties:
      path = self._properties['path']
      name = posixpath.basename(path)
      if name != '' and path != name:
        self.SetProperty('name', name)

    if 'path' in self._properties and \
       (not 'sourceTree' in self._properties or \
        self._properties['sourceTree'] == '<group>'):
      # If the pathname begins with an Xcode variable like "$(SDKROOT)/", take
      # the variable out and make the path be relative to that variable by
      # assigning the variable name as the sourceTree.
      (source_tree, path) = SourceTreeAndPathFromPath(self._properties['path'])
      if source_tree != None:
        self._properties['sourceTree'] = source_tree
      if path != None:
        self._properties['path'] = path
      if source_tree != None and path is None and \
         not 'name' in self._properties:
        # The path was of the form "$(SDKROOT)" with no path following it.
        # This object is now relative to that variable, so it has no path
        # attribute of its own.  It does, however, keep a name.
        del self._properties['path']
        self._properties['name'] = source_tree

  def Name(self):
    """Returns the display name: the name property, falling back to path."""
    if 'name' in self._properties:
      return self._properties['name']
    elif 'path' in self._properties:
      return self._properties['path']
    else:
      # This happens in the case of the root PBXGroup.
      return None

  def Hashables(self):
    """Custom hashables for XCHierarchicalElements.

    XCHierarchicalElements are special.  Generally, their hashes shouldn't
    change if the paths don't change.  The normal XCObject implementation of
    Hashables adds a hashable for each object, which means that if the
    hierarchical structure changes (possibly due to changes caused when
    TakeOverOnlyChild runs and encounters slight changes in the hierarchy),
    the hashes will change.  For example, if a project file initially contains
    a/b/f1 and a/b becomes collapsed into a/b, f1 will have a single parent
    a/b.  If someone later adds a/f2 to the project file, a/b can no longer be
    collapsed, and f1 winds up with parent b and grandparent a.  That would be
    sufficient to change f1's hash.

    To counteract this problem, hashables for all XCHierarchicalElements
    except for the main group (which has neither a name nor a path) are taken
    to be just the set of path components.  Because hashables are inherited
    from parents, this provides assurance that a/b/f1 has the same set of
    hashables whether its parent is b or a/b.

    The main group is a special case.  As it is permitted to have no name or
    path, it is permitted to use the standard XCObject hash mechanism.  This
    is not considered a problem because there can be only one main group.
    """
    if self == self.PBXProjectAncestor()._properties['mainGroup']:
      # super
      return XCObject.Hashables(self)

    hashables = []

    # Put the name in first, ensuring that if TakeOverOnlyChild collapses
    # children into a top-level group like "Source", the name always goes
    # into the list of hashables without interfering with path components.
    if 'name' in self._properties:
      # Make it less likely for people to manipulate hashes by following the
      # pattern of always pushing an object type value onto the list first.
      hashables.append(self.__class__.__name__ + '.name')
      hashables.append(self._properties['name'])

    # NOTE: This still has the problem that if an absolute path is encountered,
    # including paths with a sourceTree, they'll still inherit their parents'
    # hashables, even though the paths aren't relative to their parents.  This
    # is not expected to be much of a problem in practice.
    path = self.PathFromSourceTreeAndPath()
    if path != None:
      components = path.split(posixpath.sep)
      for component in components:
        hashables.append(self.__class__.__name__ + '.path')
        hashables.append(component)

    hashables.extend(self._hashables)

    return hashables

  def Compare(self, other):
    # Allow comparison of these types.  PBXGroup has the highest sort rank;
    # PBXVariantGroup is treated as equal to PBXFileReference.
    valid_class_types = {
      PBXFileReference: 'file',
      PBXGroup:         'group',
      PBXVariantGroup:  'file',
    }
    self_type = valid_class_types[self.__class__]
    other_type = valid_class_types[other.__class__]

    if self_type == other_type:
      # If the two objects are of the same sort rank, compare their names.
      return cmp(self.Name(), other.Name())

    # Otherwise, sort groups before everything else.
    if self_type == 'group':
      return -1
    return 1

  def CompareRootGroup(self, other):
    # This function should be used only to compare direct children of the
    # containing PBXProject's mainGroup.  These groups should appear in the
    # listed order.
    # TODO(mark): "Build" is used by gyp.generator.xcode, perhaps the
    # generator should have a way of influencing this list rather than having
    # to hardcode for the generator here.
    order = ['Source', 'Intermediates', 'Projects', 'Frameworks', 'Products',
             'Build']

    # If the groups aren't in the listed order, do a name comparison.
    # Otherwise, groups in the listed order should come before those that
    # aren't.
    self_name = self.Name()
    other_name = other.Name()
    self_in = isinstance(self, PBXGroup) and self_name in order
    # NOTE(review): isinstance(self, PBXGroup) below looks like it should be
    # isinstance(other, PBXGroup) — as written, other_in never inspects
    # other's type.  Confirm against upstream before changing.
    other_in = isinstance(self, PBXGroup) and other_name in order
    if not self_in and not other_in:
      return self.Compare(other)
    if self_name in order and not other_name in order:
      return -1
    if other_name in order and not self_name in order:
      return 1

    # If both groups are in the listed order, go by the defined order.
    self_index = order.index(self_name)
    other_index = order.index(other_name)
    if self_index < other_index:
      return -1
    if self_index > other_index:
      return 1
    return 0

  def PathFromSourceTreeAndPath(self):
    # Turn the object's sourceTree and path properties into a single flat
    # string of a form comparable to the path parameter.  If there's a
    # sourceTree property other than "<group>", wrap it in $(...) for the
    # comparison.
    components = []
    if self._properties['sourceTree'] != '<group>':
      components.append('$(' + self._properties['sourceTree'] + ')')
    if 'path' in self._properties:
      components.append(self._properties['path'])

    if len(components) > 0:
      return posixpath.join(*components)

    return None

  def FullPath(self):
    # Returns a full path to self relative to the project file, or relative
    # to some other source tree.  Start with self, and walk up the chain of
    # parents prepending their paths, if any, until no more parents are
    # available (project-relative path) or until a path relative to some
    # source tree is found.
    xche = self
    path = None
    while isinstance(xche, XCHierarchicalElement) and \
          (path is None or \
           (not path.startswith('/') and not path.startswith('$'))):
      this_path = xche.PathFromSourceTreeAndPath()
      if this_path != None and path != None:
        path = posixpath.join(this_path, path)
      elif this_path != None:
        path = this_path
      xche = xche.parent

    return path


class PBXGroup(XCHierarchicalElement):
  """
  Attributes:
    _children_by_path: Maps pathnames of children of this PBXGroup to the
      actual child XCHierarchicalElement objects.
    _variant_children_by_name_and_path: Maps (name, path) tuples of
      PBXVariantGroup children to the actual child PBXVariantGroup objects.
  """

  _schema = XCHierarchicalElement._schema.copy()
  _schema.update({
    'children': [1, XCHierarchicalElement, 1, 1, []],
    'name':     [0, str,                   0, 0],
    'path':     [0, str,                   0, 0],
  })

  def __init__(self, properties=None, id=None, parent=None):
    # super
    XCHierarchicalElement.__init__(self, properties, id, parent)
    self._children_by_path = {}
    self._variant_children_by_name_and_path = {}
    for child in self._properties.get('children', []):
      self._AddChildToDicts(child)

  def Hashables(self):
    # super
    hashables = XCHierarchicalElement.Hashables(self)

    # It is not sufficient to just rely on name and parent to build a unique
    # hashable : a node could have two child PBXGroup sharing a common name.
    # To add entropy the hashable is enhanced with the names of all its
    # children.
    for child in self._properties.get('children', []):
      child_name = child.Name()
      if child_name != None:
        hashables.append(child_name)
    return hashables

  def HashablesForChild(self):
    # To avoid a circular reference the hashables used to compute a child id do
    # not include the child names.
    return XCHierarchicalElement.Hashables(self)

  def _AddChildToDicts(self, child):
    # Sets up this PBXGroup object's dicts to reference the child properly.
    child_path = child.PathFromSourceTreeAndPath()
    if child_path:
      if child_path in self._children_by_path:
        raise ValueError, 'Found multiple children with path ' + child_path
      self._children_by_path[child_path] = child

    if isinstance(child, PBXVariantGroup):
      child_name = child._properties.get('name', None)
      key = (child_name, child_path)
      if key in self._variant_children_by_name_and_path:
        raise ValueError, 'Found multiple PBXVariantGroup children with ' + \
                          'name ' + str(child_name) + ' and path ' + \
                          str(child_path)
      self._variant_children_by_name_and_path[key] = child

  def AppendChild(self, child):
    # Callers should use this instead of calling
    # AppendProperty('children', child) directly because this function
    # maintains the group's dicts.
    self.AppendProperty('children', child)
    self._AddChildToDicts(child)

  def GetChildByName(self, name):
    # This is not currently optimized with a dict as GetChildByPath is because
    # it has few callers.  Most callers probably want GetChildByPath.  This
    # function is only useful to get children that have names but no paths,
    # which is rare.  The children of the main group ("Source", "Products",
    # etc.) is pretty much the only case where this likely to come up.
    #
    # TODO(mark): Maybe this should raise an error if more than one child is
    # present with the same name.
    if not 'children' in self._properties:
      return None

    for child in self._properties['children']:
      if child.Name() == name:
        return child

    return None

  def GetChildByPath(self, path):
    """Returns the child whose flattened path is path, or None."""
    if not path:
      return None

    if path in self._children_by_path:
      return self._children_by_path[path]

    return None

  def GetChildByRemoteObject(self, remote_object):
    # This method is a little bit esoteric.  Given a remote_object, which
    # should be a PBXFileReference in another project file, this method will
    # return this group's PBXReferenceProxy object serving as a local proxy
    # for the remote PBXFileReference.
    #
    # This function might benefit from a dict optimization as GetChildByPath
    # for some workloads, but profiling shows that it's not currently a
    # problem.
    if not 'children' in self._properties:
      return None

    for child in self._properties['children']:
      if not isinstance(child, PBXReferenceProxy):
        continue

      container_proxy = child._properties['remoteRef']
      if container_proxy._properties['remoteGlobalIDString'] == remote_object:
        return child

    return None

  def AddOrGetFileByPath(self, path, hierarchical):
    """Returns an existing or new file reference corresponding to path.

    If hierarchical is True, this method will create or use the necessary
    hierarchical group structure corresponding to path.  Otherwise, it will
    look in and create an item in the current group only.

    If an existing matching reference is found, it is returned, otherwise, a
    new one will be created, added to the correct group, and returned.

    If path identifies a directory by virtue of carrying a trailing slash,
    this method returns a PBXFileReference of "folder" type.  If path
    identifies a variant, by virtue of it identifying a file inside a
    directory with an ".lproj" extension, this method returns a
    PBXVariantGroup containing the variant named by path, and possibly other
    variants.  For all other paths, a "normal" PBXFileReference will be
    returned.
    """

    # Adding or getting a directory?  Directories end with a trailing slash.
    is_dir = False
    if path.endswith('/'):
      is_dir = True
    path = posixpath.normpath(path)
    if is_dir:
      path = path + '/'

    # Adding or getting a variant?  Variants are files inside directories
    # with an ".lproj" extension.  Xcode uses variants for localization.  For
    # a variant path/to/Language.lproj/MainMenu.nib, put a variant group named
    # MainMenu.nib inside path/to, and give it a variant named Language.  In
    # this example, grandparent would be set to path/to and parent_root would
    # be set to Language.
    variant_name = None
    parent = posixpath.dirname(path)
    grandparent = posixpath.dirname(parent)
    parent_basename = posixpath.basename(parent)
    (parent_root, parent_ext) = posixpath.splitext(parent_basename)
    if parent_ext == '.lproj':
      variant_name = parent_root
    if grandparent == '':
      grandparent = None

    # Putting a directory inside a variant group is not currently supported.
    assert not is_dir or variant_name is None

    path_split = path.split(posixpath.sep)
    if len(path_split) == 1 or \
       ((is_dir or variant_name != None) and len(path_split) == 2) or \
       not hierarchical:
      # The PBXFileReference or PBXVariantGroup will be added to or gotten
      # from this PBXGroup, no recursion necessary.
      if variant_name is None:
        # Add or get a PBXFileReference.
        file_ref = self.GetChildByPath(path)
        if file_ref != None:
          assert file_ref.__class__ == PBXFileReference
        else:
          file_ref = PBXFileReference({'path': path})
          self.AppendChild(file_ref)
      else:
        # Add or get a PBXVariantGroup.  The variant group name is the same
        # as the basename (MainMenu.nib in the example above).  grandparent
        # specifies the path to the variant group itself, and path_split[-2:]
        # is the path of the specific variant relative to its group.
        variant_group_name = posixpath.basename(path)
        variant_group_ref = self.AddOrGetVariantGroupByNameAndPath(
            variant_group_name, grandparent)
        variant_path = posixpath.sep.join(path_split[-2:])
        variant_ref = variant_group_ref.GetChildByPath(variant_path)
        if variant_ref != None:
          assert variant_ref.__class__ == PBXFileReference
        else:
          variant_ref = PBXFileReference({'name': variant_name,
                                          'path': variant_path})
          variant_group_ref.AppendChild(variant_ref)

        # The caller is interested in the variant group, not the specific
        # variant file.
        file_ref = variant_group_ref
      return file_ref
    else:
      # Hierarchical recursion.  Add or get a PBXGroup corresponding to the
      # outermost path component, and then recurse into it, chopping off that
      # path component.
      next_dir = path_split[0]
      group_ref = self.GetChildByPath(next_dir)
      if group_ref != None:
        assert group_ref.__class__ == PBXGroup
      else:
        group_ref = PBXGroup({'path': next_dir})
        self.AppendChild(group_ref)
      return group_ref.AddOrGetFileByPath(posixpath.sep.join(path_split[1:]),
                                          hierarchical)

  def AddOrGetVariantGroupByNameAndPath(self, name, path):
    """Returns an existing or new PBXVariantGroup for name and path.

    If a PBXVariantGroup identified by the name and path arguments is already
    present as a child of this object, it is returned.  Otherwise, a new
    PBXVariantGroup with the correct properties is created, added as a child,
    and returned.

    This method will generally be called by AddOrGetFileByPath, which knows
    when to create a variant group based on the structure of the pathnames
    passed to it.
    """

    key = (name, path)
    if key in self._variant_children_by_name_and_path:
      variant_group_ref = self._variant_children_by_name_and_path[key]
      assert variant_group_ref.__class__ == PBXVariantGroup
      return variant_group_ref

    variant_group_properties = {'name': name}
    if path != None:
      variant_group_properties['path'] = path
    variant_group_ref = PBXVariantGroup(variant_group_properties)
    self.AppendChild(variant_group_ref)

    return variant_group_ref

  def TakeOverOnlyChild(self, recurse=False):
    """If this PBXGroup has only one child and it's also a PBXGroup, take it
    over by making all of its children this object's children.

    This function will continue to take over only children when those children
    are groups.  If there are three PBXGroups representing a, b, and c, with
    c inside b and b inside a, and a and b have no other children, this will
    result in a taking over both b and c, forming a PBXGroup for a/b/c.

    If recurse is True, this function will recurse into children and ask them
    to collapse themselves by taking over only children as well.  Assuming an
    example hierarchy with files at a/b/c/d1, a/b/c/d2, and a/b/c/d3/e/f
    (d1, d2, and f are files, the rest are groups), recursion will result in
    a group for a/b/c containing a group for d3/e.
    """

    # At this stage, check that child class types are PBXGroup exactly,
    # instead of using isinstance.  The only subclass of PBXGroup,
    # PBXVariantGroup, should not participate in reparenting in the same way:
    # reparenting by merging different object types would be wrong.
    while len(self._properties['children']) == 1 and \
          self._properties['children'][0].__class__ == PBXGroup:
      # Loop to take over the innermost only-child group possible.

      child = self._properties['children'][0]

      # Assume the child's properties, including its children.  Save a copy
      # of this object's old properties, because they'll still be needed.
      # This object retains its existing id and parent attributes.
      old_properties = self._properties
      self._properties = child._properties
      self._children_by_path = child._children_by_path

      if not 'sourceTree' in self._properties or \
         self._properties['sourceTree'] == '<group>':
        # The child was relative to its parent.  Fix up the path.  Note that
        # children with a sourceTree other than "<group>" are not relative to
        # their parents, so no path fix-up is needed in that case.
        if 'path' in old_properties:
          if 'path' in self._properties:
            # Both the original parent and child have paths set.
            self._properties['path'] = posixpath.join(old_properties['path'],
                                                      self._properties['path'])
          else:
            # Only the original parent has a path, use it.
            self._properties['path'] = old_properties['path']
        if 'sourceTree' in old_properties:
          # The original parent had a sourceTree set, use it.
          self._properties['sourceTree'] = old_properties['sourceTree']

      # If the original parent had a name set, keep using it.  If the original
      # parent didn't have a name but the child did, let the child's name
      # live on.  If the name attribute seems unnecessary now, get rid of it.
      if 'name' in old_properties and old_properties['name'] != None and \
         old_properties['name'] != self.Name():
        self._properties['name'] = old_properties['name']
      if 'name' in self._properties and 'path' in self._properties and \
         self._properties['name'] == self._properties['path']:
        del self._properties['name']

      # Notify all children of their new parent.
      for child in self._properties['children']:
        child.parent = self

    # If asked to recurse, recurse.
    if recurse:
      for child in self._properties['children']:
        if child.__class__ == PBXGroup:
          child.TakeOverOnlyChild(recurse)

  def SortGroup(self):
    """Sorts children in place with Compare, then recurses into subgroups."""
    self._properties['children'] = \
        sorted(self._properties['children'], cmp=lambda x,y: x.Compare(y))

    # Recurse.
    for child in self._properties['children']:
      if isinstance(child, PBXGroup):
        child.SortGroup()


class XCFileLikeElement(XCHierarchicalElement):
  # Abstract base for objects that can be used as the fileRef property of
  # PBXBuildFile.

  def PathHashables(self):
    # A PBXBuildFile that refers to this object will call this method to
    # obtain additional hashables specific to this XCFileLikeElement.  Don't
    # just use this object's hashables, they're not specific and unique enough
    # on their own (without access to the parent hashables.)  Instead, provide
    # hashables that identify this object by path by getting its hashables as
    # well as the hashables of ancestor XCHierarchicalElement objects.

    hashables = []
    xche = self
    while xche != None and isinstance(xche, XCHierarchicalElement):
      xche_hashables = xche.Hashables()
      for index in xrange(0, len(xche_hashables)):
        hashables.insert(index, xche_hashables[index])
      xche = xche.parent
    return hashables


class XCContainerPortal(XCObject):
  # Abstract base for objects that can be used as the containerPortal property
  # of PBXContainerItemProxy.
  pass


class XCRemoteObject(XCObject):
  # Abstract base for objects that can be used as the remoteGlobalIDString
  # property of PBXContainerItemProxy.
  pass


class PBXFileReference(XCFileLikeElement, XCContainerPortal, XCRemoteObject):
  _schema = XCFileLikeElement._schema.copy()
  _schema.update({
    'explicitFileType':  [0, str, 0, 0],
    'lastKnownFileType': [0, str, 0, 0],
    'name':              [0, str, 0, 0],
    'path':              [0, str, 0, 1],
  })

  # Weird output rules for PBXFileReference.
  _should_print_single_line = True
  # super
  _encode_transforms = XCFileLikeElement._alternate_encode_transforms

  def __init__(self, properties=None, id=None, parent=None):
    # super
    XCFileLikeElement.__init__(self, properties, id, parent)
    if 'path' in self._properties and self._properties['path'].endswith('/'):
      self._properties['path'] = self._properties['path'][:-1]
      is_dir = True
    else:
      is_dir = False

    if 'path' in self._properties and \
       not 'lastKnownFileType' in self._properties and \
       not 'explicitFileType' in self._properties:
      # TODO(mark): This is the replacement for a replacement for a quick
      # hack.  It is no longer incredibly sucky, but this list needs to be
      # extended.
      extension_map = {
        'a':           'archive.ar',
        'app':         'wrapper.application',
        'bdic':        'file',
        'bundle':      'wrapper.cfbundle',
        'c':           'sourcecode.c.c',
        'cc':          'sourcecode.cpp.cpp',
        'cpp':         'sourcecode.cpp.cpp',
        'css':         'text.css',
        'cxx':         'sourcecode.cpp.cpp',
        'dart':        'sourcecode',
        'dylib':       'compiled.mach-o.dylib',
        'framework':   'wrapper.framework',
        'gyp':         'sourcecode',
        'gypi':        'sourcecode',
        'h':           'sourcecode.c.h',
        'hxx':         'sourcecode.cpp.h',
        'icns':        'image.icns',
        'java':        'sourcecode.java',
        'js':          'sourcecode.javascript',
        'm':           'sourcecode.c.objc',
        'mm':          'sourcecode.cpp.objcpp',
        'nib':         'wrapper.nib',
        'o':           'compiled.mach-o.objfile',
        'pdf':         'image.pdf',
        'pl':          'text.script.perl',
        'plist':       'text.plist.xml',
        'pm':          'text.script.perl',
        'png':         'image.png',
        'py':          'text.script.python',
        'r':           'sourcecode.rez',
        'rez':         'sourcecode.rez',
        's':           'sourcecode.asm',
        'storyboard':  'file.storyboard',
        'strings':     'text.plist.strings',
        'ttf':         'file',
        'xcconfig':    'text.xcconfig',
        'xcdatamodel': 'wrapper.xcdatamodel',
        'xib':         'file.xib',
        'y':           'sourcecode.yacc',
      }

      prop_map = {
        'dart':        'explicitFileType',
        'gyp':         'explicitFileType',
        'gypi':        'explicitFileType',
      }

      if is_dir:
        file_type = 'folder'
        prop_name = 'lastKnownFileType'
      else:
        basename = posixpath.basename(self._properties['path'])
        (root, ext) = posixpath.splitext(basename)
        # Check the map using a lowercase extension.
        # TODO(mark): Maybe it should try with the original case first and
        # fall back to lowercase, in case there are any instances where case
        # matters.  There currently aren't.
        if ext != '':
          ext = ext[1:].lower()

        # TODO(mark): "text" is the default value, but "file" is appropriate
        # for unrecognized files not containing text.  Xcode seems to choose
        # based on content.
        file_type = extension_map.get(ext, 'text')
        prop_name = prop_map.get(ext, 'lastKnownFileType')

      self._properties[prop_name] = file_type


class PBXVariantGroup(PBXGroup, XCFileLikeElement):
  """PBXVariantGroup is used by Xcode to represent localizations."""
  # No additions to the schema relative to PBXGroup.
  pass


# PBXReferenceProxy is also an XCFileLikeElement subclass.  It is defined
# below because it uses PBXContainerItemProxy, defined below.


class XCBuildConfiguration(XCObject):
  _schema = XCObject._schema.copy()
  _schema.update({
    'baseConfigurationReference': [0, PBXFileReference, 0, 0],
    'buildSettings':              [0, dict, 0, 1, {}],
    'name':                       [0, str,  0, 1],
  })

  def HasBuildSetting(self, key):
    """Returns True if key is present in the buildSettings dict."""
    return key in self._properties['buildSettings']

  def GetBuildSetting(self, key):
    """Returns the value of build setting key; raises KeyError if unset."""
    return self._properties['buildSettings'][key]

  def SetBuildSetting(self, key, value):
    # TODO(mark): If a list, copy?
    self._properties['buildSettings'][key] = value

  def AppendBuildSetting(self, key, value):
    # Treats the setting as a list, creating it on first append.
    if not key in self._properties['buildSettings']:
      self._properties['buildSettings'][key] = []
    self._properties['buildSettings'][key].append(value)

  def DelBuildSetting(self, key):
    if key in self._properties['buildSettings']:
      del self._properties['buildSettings'][key]

  def SetBaseConfiguration(self, value):
    self._properties['baseConfigurationReference'] = value


class XCConfigurationList(XCObject):
  # _configs is the default list of configurations.
_configs = [ XCBuildConfiguration({'name': 'Debug'}), XCBuildConfiguration({'name': 'Release'}) ] _schema = XCObject._schema.copy() _schema.update({ 'buildConfigurations': [1, XCBuildConfiguration, 1, 1, _configs], 'defaultConfigurationIsVisible': [0, int, 0, 1, 1], 'defaultConfigurationName': [0, str, 0, 1, 'Release'], }) def Name(self): return 'Build configuration list for ' + \ self.parent.__class__.__name__ + ' "' + self.parent.Name() + '"' def ConfigurationNamed(self, name): """Convenience accessor to obtain an XCBuildConfiguration by name.""" for configuration in self._properties['buildConfigurations']: if configuration._properties['name'] == name: return configuration raise KeyError, name def DefaultConfiguration(self): """Convenience accessor to obtain the default XCBuildConfiguration.""" return self.ConfigurationNamed(self._properties['defaultConfigurationName']) def HasBuildSetting(self, key): """Determines the state of a build setting in all XCBuildConfiguration child objects. If all child objects have key in their build settings, and the value is the same in all child objects, returns 1. If no child objects have the key in their build settings, returns 0. If some, but not all, child objects have the key in their build settings, or if any children have different values for the key, returns -1. """ has = None value = None for configuration in self._properties['buildConfigurations']: configuration_has = configuration.HasBuildSetting(key) if has is None: has = configuration_has elif has != configuration_has: return -1 if configuration_has: configuration_value = configuration.GetBuildSetting(key) if value is None: value = configuration_value elif value != configuration_value: return -1 if not has: return 0 return 1 def GetBuildSetting(self, key): """Gets the build setting for key. All child XCConfiguration objects must have the same value set for the setting, or a ValueError will be raised. """ # TODO(mark): This is wrong for build settings that are lists. 
The list # contents should be compared (and a list copy returned?) value = None for configuration in self._properties['buildConfigurations']: configuration_value = configuration.GetBuildSetting(key) if value is None: value = configuration_value else: if value != configuration_value: raise ValueError, 'Variant values for ' + key return value def SetBuildSetting(self, key, value): """Sets the build setting for key to value in all child XCBuildConfiguration objects. """ for configuration in self._properties['buildConfigurations']: configuration.SetBuildSetting(key, value) def AppendBuildSetting(self, key, value): """Appends value to the build setting for key, which is treated as a list, in all child XCBuildConfiguration objects. """ for configuration in self._properties['buildConfigurations']: configuration.AppendBuildSetting(key, value) def DelBuildSetting(self, key): """Deletes the build setting key from all child XCBuildConfiguration objects. """ for configuration in self._properties['buildConfigurations']: configuration.DelBuildSetting(key) def SetBaseConfiguration(self, value): """Sets the build configuration in all child XCBuildConfiguration objects. """ for configuration in self._properties['buildConfigurations']: configuration.SetBaseConfiguration(value) class PBXBuildFile(XCObject): _schema = XCObject._schema.copy() _schema.update({ 'fileRef': [0, XCFileLikeElement, 0, 1], 'settings': [0, str, 0, 0], # hack, it's a dict }) # Weird output rules for PBXBuildFile. _should_print_single_line = True _encode_transforms = XCObject._alternate_encode_transforms def Name(self): # Example: "main.cc in Sources" return self._properties['fileRef'].Name() + ' in ' + self.parent.Name() def Hashables(self): # super hashables = XCObject.Hashables(self) # It is not sufficient to just rely on Name() to get the # XCFileLikeElement's name, because that is not a complete pathname. 
# PathHashables returns hashables unique enough that no two # PBXBuildFiles should wind up with the same set of hashables, unless # someone adds the same file multiple times to the same target. That # would be considered invalid anyway. hashables.extend(self._properties['fileRef'].PathHashables()) return hashables class XCBuildPhase(XCObject): """Abstract base for build phase classes. Not represented in a project file. Attributes: _files_by_path: A dict mapping each path of a child in the files list by path (keys) to the corresponding PBXBuildFile children (values). _files_by_xcfilelikeelement: A dict mapping each XCFileLikeElement (keys) to the corresponding PBXBuildFile children (values). """ # TODO(mark): Some build phase types, like PBXShellScriptBuildPhase, don't # actually have a "files" list. XCBuildPhase should not have "files" but # another abstract subclass of it should provide this, and concrete build # phase types that do have "files" lists should be derived from that new # abstract subclass. XCBuildPhase should only provide buildActionMask and # runOnlyForDeploymentPostprocessing, and not files or the various # file-related methods and attributes. _schema = XCObject._schema.copy() _schema.update({ 'buildActionMask': [0, int, 0, 1, 0x7fffffff], 'files': [1, PBXBuildFile, 1, 1, []], 'runOnlyForDeploymentPostprocessing': [0, int, 0, 1, 0], }) def __init__(self, properties=None, id=None, parent=None): # super XCObject.__init__(self, properties, id, parent) self._files_by_path = {} self._files_by_xcfilelikeelement = {} for pbxbuildfile in self._properties.get('files', []): self._AddBuildFileToDicts(pbxbuildfile) def FileGroup(self, path): # Subclasses must override this by returning a two-element tuple. The # first item in the tuple should be the PBXGroup to which "path" should be # added, either as a child or deeper descendant. 
The second item should # be a boolean indicating whether files should be added into hierarchical # groups or one single flat group. raise NotImplementedError, \ self.__class__.__name__ + ' must implement FileGroup' def _AddPathToDict(self, pbxbuildfile, path): """Adds path to the dict tracking paths belonging to this build phase. If the path is already a member of this build phase, raises an exception. """ if path in self._files_by_path: raise ValueError, 'Found multiple build files with path ' + path self._files_by_path[path] = pbxbuildfile def _AddBuildFileToDicts(self, pbxbuildfile, path=None): """Maintains the _files_by_path and _files_by_xcfilelikeelement dicts. If path is specified, then it is the path that is being added to the phase, and pbxbuildfile must contain either a PBXFileReference directly referencing that path, or it must contain a PBXVariantGroup that itself contains a PBXFileReference referencing the path. If path is not specified, either the PBXFileReference's path or the paths of all children of the PBXVariantGroup are taken as being added to the phase. If the path is already present in the phase, raises an exception. If the PBXFileReference or PBXVariantGroup referenced by pbxbuildfile are already present in the phase, referenced by a different PBXBuildFile object, raises an exception. This does not raise an exception when a PBXFileReference or PBXVariantGroup reappear and are referenced by the same PBXBuildFile that has already introduced them, because in the case of PBXVariantGroup objects, they may correspond to multiple paths that are not all added simultaneously. When this situation occurs, the path needs to be added to _files_by_path, but nothing needs to change in _files_by_xcfilelikeelement, and the caller should have avoided adding the PBXBuildFile if it is already present in the list of children. """ xcfilelikeelement = pbxbuildfile._properties['fileRef'] paths = [] if path != None: # It's best when the caller provides the path. 
if isinstance(xcfilelikeelement, PBXVariantGroup): paths.append(path) else: # If the caller didn't provide a path, there can be either multiple # paths (PBXVariantGroup) or one. if isinstance(xcfilelikeelement, PBXVariantGroup): for variant in xcfilelikeelement._properties['children']: paths.append(variant.FullPath()) else: paths.append(xcfilelikeelement.FullPath()) # Add the paths first, because if something's going to raise, the # messages provided by _AddPathToDict are more useful owing to its # having access to a real pathname and not just an object's Name(). for a_path in paths: self._AddPathToDict(pbxbuildfile, a_path) # If another PBXBuildFile references this XCFileLikeElement, there's a # problem. if xcfilelikeelement in self._files_by_xcfilelikeelement and \ self._files_by_xcfilelikeelement[xcfilelikeelement] != pbxbuildfile: raise ValueError, 'Found multiple build files for ' + \ xcfilelikeelement.Name() self._files_by_xcfilelikeelement[xcfilelikeelement] = pbxbuildfile def AppendBuildFile(self, pbxbuildfile, path=None): # Callers should use this instead of calling # AppendProperty('files', pbxbuildfile) directly because this function # maintains the object's dicts. Better yet, callers can just call AddFile # with a pathname and not worry about building their own PBXBuildFile # objects. self.AppendProperty('files', pbxbuildfile) self._AddBuildFileToDicts(pbxbuildfile, path) def AddFile(self, path, settings=None): (file_group, hierarchical) = self.FileGroup(path) file_ref = file_group.AddOrGetFileByPath(path, hierarchical) if file_ref in self._files_by_xcfilelikeelement and \ isinstance(file_ref, PBXVariantGroup): # There's already a PBXBuildFile in this phase corresponding to the # PBXVariantGroup. path just provides a new variant that belongs to # the group. Add the path to the dict. pbxbuildfile = self._files_by_xcfilelikeelement[file_ref] self._AddBuildFileToDicts(pbxbuildfile, path) else: # Add a new PBXBuildFile to get file_ref into the phase. 
      if settings is None:
        pbxbuildfile = PBXBuildFile({'fileRef': file_ref})
      else:
        pbxbuildfile = PBXBuildFile({'fileRef': file_ref,
                                     'settings': settings})
      self.AppendBuildFile(pbxbuildfile, path)


class PBXHeadersBuildPhase(XCBuildPhase):
  # No additions to the schema relative to XCBuildPhase.

  def Name(self):
    return 'Headers'

  def FileGroup(self, path):
    # Headers live wherever RootGroupForPath decides the path belongs.
    return self.PBXProjectAncestor().RootGroupForPath(path)


class PBXResourcesBuildPhase(XCBuildPhase):
  # No additions to the schema relative to XCBuildPhase.

  def Name(self):
    return 'Resources'

  def FileGroup(self, path):
    return self.PBXProjectAncestor().RootGroupForPath(path)


class PBXSourcesBuildPhase(XCBuildPhase):
  # No additions to the schema relative to XCBuildPhase.

  def Name(self):
    return 'Sources'

  def FileGroup(self, path):
    return self.PBXProjectAncestor().RootGroupForPath(path)


class PBXFrameworksBuildPhase(XCBuildPhase):
  # No additions to the schema relative to XCBuildPhase.

  def Name(self):
    return 'Frameworks'

  def FileGroup(self, path):
    (root, ext) = posixpath.splitext(path)
    if ext != '':
      ext = ext[1:].lower()
    if ext == 'o':
      # .o files are added to Xcode Frameworks phases, but conceptually
      # aren't frameworks, they're more like sources or intermediates.
      # Redirect them to show up in one of those other groups.
return self.PBXProjectAncestor().RootGroupForPath(path) else: return (self.PBXProjectAncestor().FrameworksGroup(), False) class PBXShellScriptBuildPhase(XCBuildPhase): _schema = XCBuildPhase._schema.copy() _schema.update({ 'inputPaths': [1, str, 0, 1, []], 'name': [0, str, 0, 0], 'outputPaths': [1, str, 0, 1, []], 'shellPath': [0, str, 0, 1, '/bin/sh'], 'shellScript': [0, str, 0, 1], 'showEnvVarsInLog': [0, int, 0, 0], }) def Name(self): if 'name' in self._properties: return self._properties['name'] return 'ShellScript' class PBXCopyFilesBuildPhase(XCBuildPhase): _schema = XCBuildPhase._schema.copy() _schema.update({ 'dstPath': [0, str, 0, 1], 'dstSubfolderSpec': [0, int, 0, 1], 'name': [0, str, 0, 0], }) # path_tree_re matches "$(DIR)/path" or just "$(DIR)". Match group 1 is # "DIR", match group 3 is "path" or None. path_tree_re = re.compile('^\\$\\((.*)\\)(/(.*)|)$') # path_tree_to_subfolder maps names of Xcode variables to the associated # dstSubfolderSpec property value used in a PBXCopyFilesBuildPhase object. path_tree_to_subfolder = { 'BUILT_PRODUCTS_DIR': 16, # Products Directory # Other types that can be chosen via the Xcode UI. # TODO(mark): Map Xcode variable names to these. # : 1, # Wrapper # : 6, # Executables: 6 # : 7, # Resources # : 15, # Java Resources # : 10, # Frameworks # : 11, # Shared Frameworks # : 12, # Shared Support # : 13, # PlugIns } def Name(self): if 'name' in self._properties: return self._properties['name'] return 'CopyFiles' def FileGroup(self, path): return self.PBXProjectAncestor().RootGroupForPath(path) def SetDestination(self, path): """Set the dstSubfolderSpec and dstPath properties from path. path may be specified in the same notation used for XCHierarchicalElements, specifically, "$(DIR)/path". """ path_tree_match = self.path_tree_re.search(path) if path_tree_match: # Everything else needs to be relative to an Xcode variable. 
path_tree = path_tree_match.group(1) relative_path = path_tree_match.group(3) if path_tree in self.path_tree_to_subfolder: subfolder = self.path_tree_to_subfolder[path_tree] if relative_path is None: relative_path = '' else: # The path starts with an unrecognized Xcode variable # name like $(SRCROOT). Xcode will still handle this # as an "absolute path" that starts with the variable. subfolder = 0 relative_path = path elif path.startswith('/'): # Special case. Absolute paths are in dstSubfolderSpec 0. subfolder = 0 relative_path = path[1:] else: raise ValueError, 'Can\'t use path %s in a %s' % \ (path, self.__class__.__name__) self._properties['dstPath'] = relative_path self._properties['dstSubfolderSpec'] = subfolder class PBXBuildRule(XCObject): _schema = XCObject._schema.copy() _schema.update({ 'compilerSpec': [0, str, 0, 1], 'filePatterns': [0, str, 0, 0], 'fileType': [0, str, 0, 1], 'isEditable': [0, int, 0, 1, 1], 'outputFiles': [1, str, 0, 1, []], 'script': [0, str, 0, 0], }) def Name(self): # Not very inspired, but it's what Xcode uses. return self.__class__.__name__ def Hashables(self): # super hashables = XCObject.Hashables(self) # Use the hashables of the weak objects that this object refers to. hashables.append(self._properties['fileType']) if 'filePatterns' in self._properties: hashables.append(self._properties['filePatterns']) return hashables class PBXContainerItemProxy(XCObject): # When referencing an item in this project file, containerPortal is the # PBXProject root object of this project file. When referencing an item in # another project file, containerPortal is a PBXFileReference identifying # the other project file. # # When serving as a proxy to an XCTarget (in this project file or another), # proxyType is 1. When serving as a proxy to a PBXFileReference (in another # project file), proxyType is 2. Type 2 is used for references to the # producs of the other project file's targets. # # Xcode is weird about remoteGlobalIDString. 
  # Usually, it's printed without
  # a comment, indicating that it's tracked internally simply as a string,
  # but sometimes it's printed with a comment (usually when the object is
  # initially created), indicating that it's tracked as a project file object
  # at least sometimes.  This module always tracks it as an object, but
  # contains a hack to prevent it from printing the comment in the project
  # file output.  See _XCKVPrint.

  _schema = XCObject._schema.copy()
  _schema.update({
    'containerPortal':      [0, XCContainerPortal, 0, 1],
    'proxyType':            [0, int, 0, 1],
    'remoteGlobalIDString': [0, XCRemoteObject, 0, 1],
    'remoteInfo':           [0, str, 0, 1],
  })

  def __repr__(self):
    props = self._properties
    name = '%s.gyp:%s' % (props['containerPortal'].Name(), props['remoteInfo'])
    return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))

  def Name(self):
    # Admittedly not the best name, but it's what Xcode uses.
    return self.__class__.__name__

  def Hashables(self):
    # super
    hashables = XCObject.Hashables(self)

    # Use the hashables of the weak objects that this object refers to.
    hashables.extend(self._properties['containerPortal'].Hashables())
    hashables.extend(self._properties['remoteGlobalIDString'].Hashables())
    return hashables


class PBXTargetDependency(XCObject):
  # The "target" property accepts an XCTarget object, and obviously not
  # NoneType.  But XCTarget is defined below, so it can't be put into the
  # schema yet.  The definition of PBXTargetDependency can't be moved below
  # XCTarget because XCTarget's own schema references PBXTargetDependency.
  # Python doesn't deal well with this circular relationship, and doesn't
  # have a real way to do forward declarations.  To work around, the type of
  # the "target" property is reset below, after XCTarget is defined.
  #
  # At least one of "name" and "target" is required.
  _schema = XCObject._schema.copy()
  _schema.update({
    'name':        [0, str, 0, 0],
    'target':      [0, None.__class__, 0, 0],
    'targetProxy': [0, PBXContainerItemProxy, 1, 1],
  })

  def __repr__(self):
    name = self._properties.get('name') or self._properties['target'].Name()
    return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))

  def Name(self):
    # Admittedly not the best name, but it's what Xcode uses.
    return self.__class__.__name__

  def Hashables(self):
    # super
    hashables = XCObject.Hashables(self)

    # Use the hashables of the weak objects that this object refers to.
    hashables.extend(self._properties['targetProxy'].Hashables())
    return hashables


class PBXReferenceProxy(XCFileLikeElement):
  _schema = XCFileLikeElement._schema.copy()
  _schema.update({
    'fileType':  [0, str, 0, 1],
    'path':      [0, str, 0, 1],
    'remoteRef': [0, PBXContainerItemProxy, 1, 1],
  })


class XCTarget(XCRemoteObject):
  # An XCTarget is really just an XCObject, the XCRemoteObject thing is just
  # to allow PBXProject to be used in the remoteGlobalIDString property of
  # PBXContainerItemProxy.
  #
  # Setting a "name" property at instantiation may also affect "productName",
  # which may in turn affect the "PRODUCT_NAME" build setting in children of
  # "buildConfigurationList".  See __init__ below.

  _schema = XCRemoteObject._schema.copy()
  _schema.update({
    'buildConfigurationList': [0, XCConfigurationList, 1, 1,
                               XCConfigurationList()],
    'buildPhases':            [1, XCBuildPhase, 1, 1, []],
    'dependencies':           [1, PBXTargetDependency, 1, 1, []],
    'name':                   [0, str, 0, 1],
    'productName':            [0, str, 0, 1],
  })

  def __init__(self, properties=None, id=None, parent=None,
               force_outdir=None, force_prefix=None, force_extension=None):
    # super
    XCRemoteObject.__init__(self, properties, id, parent)

    # Set up additional defaults not expressed in the schema.  If a "name"
    # property was supplied, set "productName" if it is not present.  Also
    # set the "PRODUCT_NAME" build setting in each configuration, but only if
    # the setting is not present in any build configuration.
    if 'name' in self._properties:
      if not 'productName' in self._properties:
        self.SetProperty('productName', self._properties['name'])

    if 'productName' in self._properties:
      if 'buildConfigurationList' in self._properties:
        configs = self._properties['buildConfigurationList']
        # HasBuildSetting returns 0 only when no child configuration has the
        # setting at all; -1 (mixed) and 1 (uniform) leave it untouched.
        if configs.HasBuildSetting('PRODUCT_NAME') == 0:
          configs.SetBuildSetting('PRODUCT_NAME',
                                  self._properties['productName'])

  def AddDependency(self, other):
    # Adds a dependency edge from this target to "other", building the proxy
    # machinery Xcode requires for both same-project and cross-project
    # dependencies.
    pbxproject = self.PBXProjectAncestor()
    other_pbxproject = other.PBXProjectAncestor()
    if pbxproject == other_pbxproject:
      # Add a dependency to another target in the same project file.
      container = PBXContainerItemProxy({'containerPortal':      pbxproject,
                                         'proxyType':            1,
                                         'remoteGlobalIDString': other,
                                         'remoteInfo':           other.Name()})
      dependency = PBXTargetDependency({'target':      other,
                                        'targetProxy': container})
      self.AppendProperty('dependencies', dependency)
    else:
      # Add a dependency to a target in a different project file.
      other_project_ref = \
          pbxproject.AddOrGetProjectReference(other_pbxproject)[1]
      container = PBXContainerItemProxy({
            'containerPortal':      other_project_ref,
            'proxyType':            1,
            'remoteGlobalIDString': other,
            'remoteInfo':           other.Name(),
          })
      dependency = PBXTargetDependency({'name':        other.Name(),
                                        'targetProxy': container})
      self.AppendProperty('dependencies', dependency)

  # Proxy all of these through to the build configuration list.
def ConfigurationNamed(self, name): return self._properties['buildConfigurationList'].ConfigurationNamed(name) def DefaultConfiguration(self): return self._properties['buildConfigurationList'].DefaultConfiguration() def HasBuildSetting(self, key): return self._properties['buildConfigurationList'].HasBuildSetting(key) def GetBuildSetting(self, key): return self._properties['buildConfigurationList'].GetBuildSetting(key) def SetBuildSetting(self, key, value): return self._properties['buildConfigurationList'].SetBuildSetting(key, \ value) def AppendBuildSetting(self, key, value): return self._properties['buildConfigurationList'].AppendBuildSetting(key, \ value) def DelBuildSetting(self, key): return self._properties['buildConfigurationList'].DelBuildSetting(key) # Redefine the type of the "target" property. See PBXTargetDependency._schema # above. PBXTargetDependency._schema['target'][1] = XCTarget class PBXNativeTarget(XCTarget): # buildPhases is overridden in the schema to be able to set defaults. # # NOTE: Contrary to most objects, it is advisable to set parent when # constructing PBXNativeTarget. A parent of an XCTarget must be a PBXProject # object. A parent reference is required for a PBXNativeTarget during # construction to be able to set up the target defaults for productReference, # because a PBXBuildFile object must be created for the target and it must # be added to the PBXProject's mainGroup hierarchy. _schema = XCTarget._schema.copy() _schema.update({ 'buildPhases': [1, XCBuildPhase, 1, 1, [PBXSourcesBuildPhase(), PBXFrameworksBuildPhase()]], 'buildRules': [1, PBXBuildRule, 1, 1, []], 'productReference': [0, PBXFileReference, 0, 1], 'productType': [0, str, 0, 1], }) # Mapping from Xcode product-types to settings. 
The settings are: # filetype : used for explicitFileType in the project file # prefix : the prefix for the file name # suffix : the suffix for the filen ame _product_filetypes = { 'com.apple.product-type.application': ['wrapper.application', '', '.app'], 'com.apple.product-type.bundle': ['wrapper.cfbundle', '', '.bundle'], 'com.apple.product-type.framework': ['wrapper.framework', '', '.framework'], 'com.apple.product-type.library.dynamic': ['compiled.mach-o.dylib', 'lib', '.dylib'], 'com.apple.product-type.library.static': ['archive.ar', 'lib', '.a'], 'com.apple.product-type.tool': ['compiled.mach-o.executable', '', ''], 'com.apple.product-type.bundle.unit-test': ['wrapper.cfbundle', '', '.xctest'], 'com.googlecode.gyp.xcode.bundle': ['compiled.mach-o.dylib', '', '.so'], } def __init__(self, properties=None, id=None, parent=None, force_outdir=None, force_prefix=None, force_extension=None): # super XCTarget.__init__(self, properties, id, parent) if 'productName' in self._properties and \ 'productType' in self._properties and \ not 'productReference' in self._properties and \ self._properties['productType'] in self._product_filetypes: products_group = None pbxproject = self.PBXProjectAncestor() if pbxproject != None: products_group = pbxproject.ProductsGroup() if products_group != None: (filetype, prefix, suffix) = \ self._product_filetypes[self._properties['productType']] # Xcode does not have a distinct type for loadable modules that are # pure BSD targets (not in a bundle wrapper). GYP allows such modules # to be specified by setting a target type to loadable_module without # having mac_bundle set. These are mapped to the pseudo-product type # com.googlecode.gyp.xcode.bundle. # # By picking up this special type and converting it to a dynamic # library (com.apple.product-type.library.dynamic) with fix-ups, # single-file loadable modules can be produced. # # MACH_O_TYPE is changed to mh_bundle to produce the proper file type # (as opposed to mh_dylib). 
In order for linking to succeed, # DYLIB_CURRENT_VERSION and DYLIB_COMPATIBILITY_VERSION must be # cleared. They are meaningless for type mh_bundle. # # Finally, the .so extension is forcibly applied over the default # (.dylib), unless another forced extension is already selected. # .dylib is plainly wrong, and .bundle is used by loadable_modules in # bundle wrappers (com.apple.product-type.bundle). .so seems an odd # choice because it's used as the extension on many other systems that # don't distinguish between linkable shared libraries and non-linkable # loadable modules, but there's precedent: Python loadable modules on # Mac OS X use an .so extension. if self._properties['productType'] == 'com.googlecode.gyp.xcode.bundle': self._properties['productType'] = \ 'com.apple.product-type.library.dynamic' self.SetBuildSetting('MACH_O_TYPE', 'mh_bundle') self.SetBuildSetting('DYLIB_CURRENT_VERSION', '') self.SetBuildSetting('DYLIB_COMPATIBILITY_VERSION', '') if force_extension is None: force_extension = suffix[1:] if self._properties['productType'] == \ 'com.apple.product-type-bundle.unit.test': if force_extension is None: force_extension = suffix[1:] if force_extension is not None: # If it's a wrapper (bundle), set WRAPPER_EXTENSION. if filetype.startswith('wrapper.'): self.SetBuildSetting('WRAPPER_EXTENSION', force_extension) else: # Extension override. suffix = '.' + force_extension self.SetBuildSetting('EXECUTABLE_EXTENSION', force_extension) if filetype.startswith('compiled.mach-o.executable'): product_name = self._properties['productName'] product_name += suffix suffix = '' self.SetProperty('productName', product_name) self.SetBuildSetting('PRODUCT_NAME', product_name) # Xcode handles most prefixes based on the target type, however there # are exceptions. If a "BSD Dynamic Library" target is added in the # Xcode UI, Xcode sets EXECUTABLE_PREFIX. This check duplicates that # behavior. 
        if force_prefix is not None:
          prefix = force_prefix
        if filetype.startswith('wrapper.'):
          self.SetBuildSetting('WRAPPER_PREFIX', prefix)
        else:
          self.SetBuildSetting('EXECUTABLE_PREFIX', prefix)

        if force_outdir is not None:
          self.SetBuildSetting('TARGET_BUILD_DIR', force_outdir)

        # TODO(tvl): Remove the below hack.
        #    http://code.google.com/p/gyp/issues/detail?id=122
        # Some targets include the prefix in the target_name.  These targets
        # really should just add a product_name setting that doesn't include
        # the prefix.  For example:
        #    target_name = 'libevent', product_name = 'event'
        # This check cleans up for them.
        product_name = self._properties['productName']
        prefix_len = len(prefix)
        if prefix_len and (product_name[:prefix_len] == prefix):
          product_name = product_name[prefix_len:]
          self.SetProperty('productName', product_name)
          self.SetBuildSetting('PRODUCT_NAME', product_name)

        ref_props = {
          'explicitFileType': filetype,
          'includeInIndex':   0,
          'path':             prefix + product_name + suffix,
          'sourceTree':       'BUILT_PRODUCTS_DIR',
        }
        file_ref = PBXFileReference(ref_props)
        products_group.AppendChild(file_ref)
        self.SetProperty('productReference', file_ref)

  def GetBuildPhaseByType(self, type):
    # Returns the single build phase of the given class, or None if there is
    # no such phase.
    if not 'buildPhases' in self._properties:
      return None

    the_phase = None
    for phase in self._properties['buildPhases']:
      if isinstance(phase, type):
        # Some phases may be present in multiples in a well-formed project
        # file, but phases like PBXSourcesBuildPhase may only be present
        # singly, and this function is intended as an aid to
        # GetBuildPhaseByType.  Loop over the entire list of phases and
        # assert if more than one of the desired type is found.
        assert the_phase is None
        the_phase = phase

    return the_phase

  def HeadersPhase(self):
    # Returns the PBXHeadersBuildPhase, creating and inserting one if needed.
    headers_phase = self.GetBuildPhaseByType(PBXHeadersBuildPhase)
    if headers_phase is None:
      headers_phase = PBXHeadersBuildPhase()

      # The headers phase should come before the resources, sources, and
      # frameworks phases, if any.
      insert_at = len(self._properties['buildPhases'])
      for index in xrange(0, len(self._properties['buildPhases'])):
        phase = self._properties['buildPhases'][index]
        if isinstance(phase, PBXResourcesBuildPhase) or \
           isinstance(phase, PBXSourcesBuildPhase) or \
           isinstance(phase, PBXFrameworksBuildPhase):
          insert_at = index
          break

      self._properties['buildPhases'].insert(insert_at, headers_phase)
      headers_phase.parent = self

    return headers_phase

  def ResourcesPhase(self):
    # Returns the PBXResourcesBuildPhase, creating and inserting one if
    # needed.
    resources_phase = self.GetBuildPhaseByType(PBXResourcesBuildPhase)
    if resources_phase is None:
      resources_phase = PBXResourcesBuildPhase()

      # The resources phase should come before the sources and frameworks
      # phases, if any.
      insert_at = len(self._properties['buildPhases'])
      for index in xrange(0, len(self._properties['buildPhases'])):
        phase = self._properties['buildPhases'][index]
        if isinstance(phase, PBXSourcesBuildPhase) or \
           isinstance(phase, PBXFrameworksBuildPhase):
          insert_at = index
          break

      self._properties['buildPhases'].insert(insert_at, resources_phase)
      resources_phase.parent = self

    return resources_phase

  def SourcesPhase(self):
    # Returns the PBXSourcesBuildPhase, creating and appending one if needed.
    sources_phase = self.GetBuildPhaseByType(PBXSourcesBuildPhase)
    if sources_phase is None:
      sources_phase = PBXSourcesBuildPhase()
      self.AppendProperty('buildPhases', sources_phase)

    return sources_phase

  def FrameworksPhase(self):
    # Returns the PBXFrameworksBuildPhase, creating and appending one if
    # needed.
    frameworks_phase = self.GetBuildPhaseByType(PBXFrameworksBuildPhase)
    if frameworks_phase is None:
      frameworks_phase = PBXFrameworksBuildPhase()
      self.AppendProperty('buildPhases', frameworks_phase)

    return frameworks_phase

  def AddDependency(self, other):
    # super
    XCTarget.AddDependency(self, other)

    # If "other" produces something linkable (a static library, or a
    # non-mh_bundle dynamic library or framework), also add its product to
    # this target's Frameworks phase so it gets linked.
    static_library_type = 'com.apple.product-type.library.static'
    shared_library_type = 'com.apple.product-type.library.dynamic'
    framework_type = 'com.apple.product-type.framework'
    if isinstance(other, PBXNativeTarget) and \
       'productType' in self._properties and \
       self._properties['productType'] != static_library_type and \
       'productType' in other._properties and \
       (other._properties['productType'] == static_library_type or \
        ((other._properties['productType'] == shared_library_type or \
          other._properties['productType'] == framework_type) and \
         ((not other.HasBuildSetting('MACH_O_TYPE')) or
          other.GetBuildSetting('MACH_O_TYPE') != 'mh_bundle'))):

      file_ref = other.GetProperty('productReference')

      pbxproject = self.PBXProjectAncestor()
      other_pbxproject = other.PBXProjectAncestor()
      if pbxproject != other_pbxproject:
        # Cross-project dependency: link against the reference proxy in this
        # project's group for the other project, not the foreign file
        # reference itself.
        other_project_product_group = \
            pbxproject.AddOrGetProjectReference(other_pbxproject)[0]
        file_ref = other_project_product_group.GetChildByRemoteObject(file_ref)

      self.FrameworksPhase().AppendProperty('files',
                                            PBXBuildFile({'fileRef':
                                                          file_ref}))


class PBXAggregateTarget(XCTarget):
  # An aggregate target builds nothing itself; it exists only to collect
  # dependencies.  No additions to the XCTarget schema.
  pass


class PBXProject(XCContainerPortal):
  # A PBXProject is really just an XCObject, the XCContainerPortal thing is
  # just to allow PBXProject to be used in the containerPortal property of
  # PBXContainerItemProxy.
  """

  Attributes:
    path: "sample.xcodeproj".  TODO(mark) Document me!
    _other_pbxprojects: A dictionary, keyed by other PBXProject objects.
                        Each value is a reference to the dict in the
                        projectReferences list associated with the keyed
                        PBXProject.
  """

  _schema = XCContainerPortal._schema.copy()
  _schema.update({
    'attributes':             [0, dict, 0, 0],
    'buildConfigurationList': [0, XCConfigurationList, 1, 1,
                               XCConfigurationList()],
    'compatibilityVersion':   [0, str, 0, 1, 'Xcode 3.2'],
    'hasScannedForEncodings': [0, int, 0, 1, 1],
    'mainGroup':              [0, PBXGroup, 1, 1, PBXGroup()],
    'projectDirPath':         [0, str, 0, 1, ''],
    'projectReferences':      [1, dict, 0, 0],
    'projectRoot':            [0, str, 0, 1, ''],
    'targets':                [1, XCTarget, 1, 1, []],
  })

  def __init__(self, properties=None, id=None, parent=None, path=None):
    self.path = path
    self._other_pbxprojects = {}
    # super
    return XCContainerPortal.__init__(self, properties, id, parent)

  def Name(self):
    # Project name is the .xcodeproj bundle's basename without the extension.
    name = self.path
    if name[-10:] == '.xcodeproj':
      name = name[:-10]
    return posixpath.basename(name)

  def Path(self):
    return self.path

  def Comment(self):
    return 'Project object'

  def Children(self):
    # super
    children = XCContainerPortal.Children(self)

    # Add children that the schema doesn't know about.  Maybe there's a more
    # elegant way around this, but this is the only case where we need to own
    # objects in a dictionary (that is itself in a list), and three lines for
    # a one-off isn't that big a deal.
    if 'projectReferences' in self._properties:
      for reference in self._properties['projectReferences']:
        children.append(reference['ProductGroup'])

    return children

  def PBXProjectAncestor(self):
    return self

  def _GroupByName(self, name):
    # Returns the named direct child group of mainGroup, creating both the
    # mainGroup and the child group if necessary.
    if not 'mainGroup' in self._properties:
      self.SetProperty('mainGroup', PBXGroup())

    main_group = self._properties['mainGroup']
    group = main_group.GetChildByName(name)
    if group is None:
      group = PBXGroup({'name': name})
      main_group.AppendChild(group)

    return group

  # SourceGroup and ProductsGroup are created by default in Xcode's own
  # templates.
def SourceGroup(self):
  return self._GroupByName('Source')

def ProductsGroup(self):
  return self._GroupByName('Products')

# IntermediatesGroup is used to collect source-like files that are generated
# by rules or script phases and are placed in intermediate directories such
# as DerivedSources.
def IntermediatesGroup(self):
  return self._GroupByName('Intermediates')

# FrameworksGroup and ProjectsGroup are top-level groups used to collect
# frameworks and projects.
def FrameworksGroup(self):
  return self._GroupByName('Frameworks')

def ProjectsGroup(self):
  return self._GroupByName('Projects')

def RootGroupForPath(self, path):
  """Returns a PBXGroup child of this object to which path should be added.

  This method is intended to choose between SourceGroup and
  IntermediatesGroup on the basis of whether path is present in a source
  directory or an intermediates directory.  For the purposes of this
  determination, any path located within a derived file directory such as
  PROJECT_DERIVED_FILE_DIR is treated as being in an intermediates
  directory.

  The returned value is a two-element tuple.  The first element is the
  PBXGroup, and the second element specifies whether that group should be
  organized hierarchically (True) or as a single flat list (False).
  """
  # TODO(mark): make this a class variable and bind to self on call?
  # Also, this list is nowhere near exhaustive.
  # INTERMEDIATE_DIR and SHARED_INTERMEDIATE_DIR are used by
  # gyp.generator.xcode.  There should probably be some way for that module
  # to push the names in, rather than having to hard-code them here.
  source_tree_groups = {
    'DERIVED_FILE_DIR':         (self.IntermediatesGroup, True),
    'INTERMEDIATE_DIR':         (self.IntermediatesGroup, True),
    'PROJECT_DERIVED_FILE_DIR': (self.IntermediatesGroup, True),
    'SHARED_INTERMEDIATE_DIR':  (self.IntermediatesGroup, True),
  }

  (source_tree, path) = SourceTreeAndPathFromPath(path)
  if source_tree != None and source_tree in source_tree_groups:
    (group_func, hierarchical) = source_tree_groups[source_tree]
    group = group_func()
    return (group, hierarchical)

  # TODO(mark): make additional choices based on file extension.

  # Default: anything not rooted in a known intermediates source tree is
  # treated as ordinary source and organized hierarchically.
  return (self.SourceGroup(), True)

def AddOrGetFileInRootGroup(self, path):
  """Returns a PBXFileReference corresponding to path in the correct group
  according to RootGroupForPath's heuristics.

  If an existing PBXFileReference for path exists, it will be returned.
  Otherwise, one will be created and returned.
  """
  (group, hierarchical) = self.RootGroupForPath(path)
  return group.AddOrGetFileByPath(path, hierarchical)

def RootGroupsTakeOverOnlyChildren(self, recurse=False):
  """Calls TakeOverOnlyChild for all groups in the main group."""
  for group in self._properties['mainGroup']._properties['children']:
    if isinstance(group, PBXGroup):
      group.TakeOverOnlyChild(recurse)

def SortGroups(self):
  # Sort the children of the mainGroup (like "Source" and "Products")
  # according to their defined order.
  # NOTE: sorted(..., cmp=...) is Python 2-only syntax.
  self._properties['mainGroup']._properties['children'] = \
      sorted(self._properties['mainGroup']._properties['children'],
             cmp=lambda x,y: x.CompareRootGroup(y))

  # Sort everything else by putting group before files, and going
  # alphabetically by name within sections of groups and files.  SortGroup
  # is recursive.
  for group in self._properties['mainGroup']._properties['children']:
    if not isinstance(group, PBXGroup):
      continue

    if group.Name() == 'Products':
      # The Products group is a special case.  Instead of sorting
      # alphabetically, sort things in the order of the targets that
      # produce the products.
To do this, just build up a new list of # products based on the targets. products = [] for target in self._properties['targets']: if not isinstance(target, PBXNativeTarget): continue product = target._properties['productReference'] # Make sure that the product is already in the products group. assert product in group._properties['children'] products.append(product) # Make sure that this process doesn't miss anything that was already # in the products group. assert len(products) == len(group._properties['children']) group._properties['children'] = products else: group.SortGroup() def AddOrGetProjectReference(self, other_pbxproject): """Add a reference to another project file (via PBXProject object) to this one. Returns [ProductGroup, ProjectRef]. ProductGroup is a PBXGroup object in this project file that contains a PBXReferenceProxy object for each product of each PBXNativeTarget in the other project file. ProjectRef is a PBXFileReference to the other project file. If this project file already references the other project file, the existing ProductGroup and ProjectRef are returned. The ProductGroup will still be updated if necessary. """ if not 'projectReferences' in self._properties: self._properties['projectReferences'] = [] product_group = None project_ref = None if not other_pbxproject in self._other_pbxprojects: # This project file isn't yet linked to the other one. Establish the # link. product_group = PBXGroup({'name': 'Products'}) # ProductGroup is strong. product_group.parent = self # There's nothing unique about this PBXGroup, and if left alone, it will # wind up with the same set of hashables as all other PBXGroup objects # owned by the projectReferences list. Add the hashables of the # remote PBXProject that it's related to. product_group._hashables.extend(other_pbxproject.Hashables()) # The other project reports its path as relative to the same directory # that this project's path is relative to. 
The other project's path # is not necessarily already relative to this project. Figure out the # pathname that this project needs to use to refer to the other one. this_path = posixpath.dirname(self.Path()) projectDirPath = self.GetProperty('projectDirPath') if projectDirPath: if posixpath.isabs(projectDirPath[0]): this_path = projectDirPath else: this_path = posixpath.join(this_path, projectDirPath) other_path = gyp.common.RelativePath(other_pbxproject.Path(), this_path) # ProjectRef is weak (it's owned by the mainGroup hierarchy). project_ref = PBXFileReference({ 'lastKnownFileType': 'wrapper.pb-project', 'path': other_path, 'sourceTree': 'SOURCE_ROOT', }) self.ProjectsGroup().AppendChild(project_ref) ref_dict = {'ProductGroup': product_group, 'ProjectRef': project_ref} self._other_pbxprojects[other_pbxproject] = ref_dict self.AppendProperty('projectReferences', ref_dict) # Xcode seems to sort this list case-insensitively self._properties['projectReferences'] = \ sorted(self._properties['projectReferences'], cmp=lambda x,y: cmp(x['ProjectRef'].Name().lower(), y['ProjectRef'].Name().lower())) else: # The link already exists. Pull out the relevnt data. project_ref_dict = self._other_pbxprojects[other_pbxproject] product_group = project_ref_dict['ProductGroup'] project_ref = project_ref_dict['ProjectRef'] self._SetUpProductReferences(other_pbxproject, product_group, project_ref) return [product_group, project_ref] def _SetUpProductReferences(self, other_pbxproject, product_group, project_ref): # TODO(mark): This only adds references to products in other_pbxproject # when they don't exist in this pbxproject. Perhaps it should also # remove references from this pbxproject that are no longer present in # other_pbxproject. Perhaps it should update various properties if they # change. 
for target in other_pbxproject._properties['targets']: if not isinstance(target, PBXNativeTarget): continue other_fileref = target._properties['productReference'] if product_group.GetChildByRemoteObject(other_fileref) is None: # Xcode sets remoteInfo to the name of the target and not the name # of its product, despite this proxy being a reference to the product. container_item = PBXContainerItemProxy({ 'containerPortal': project_ref, 'proxyType': 2, 'remoteGlobalIDString': other_fileref, 'remoteInfo': target.Name() }) # TODO(mark): Does sourceTree get copied straight over from the other # project? Can the other project ever have lastKnownFileType here # instead of explicitFileType? (Use it if so?) Can path ever be # unset? (I don't think so.) Can other_fileref have name set, and # does it impact the PBXReferenceProxy if so? These are the questions # that perhaps will be answered one day. reference_proxy = PBXReferenceProxy({ 'fileType': other_fileref._properties['explicitFileType'], 'path': other_fileref._properties['path'], 'sourceTree': other_fileref._properties['sourceTree'], 'remoteRef': container_item, }) product_group.AppendChild(reference_proxy) def SortRemoteProductReferences(self): # For each remote project file, sort the associated ProductGroup in the # same order that the targets are sorted in the remote project file. This # is the sort order used by Xcode. def CompareProducts(x, y, remote_products): # x and y are PBXReferenceProxy objects. Go through their associated # PBXContainerItem to get the remote PBXFileReference, which will be # present in the remote_products list. x_remote = x._properties['remoteRef']._properties['remoteGlobalIDString'] y_remote = y._properties['remoteRef']._properties['remoteGlobalIDString'] x_index = remote_products.index(x_remote) y_index = remote_products.index(y_remote) # Use the order of each remote PBXFileReference in remote_products to # determine the sort order. 
return cmp(x_index, y_index) for other_pbxproject, ref_dict in self._other_pbxprojects.iteritems(): # Build up a list of products in the remote project file, ordered the # same as the targets that produce them. remote_products = [] for target in other_pbxproject._properties['targets']: if not isinstance(target, PBXNativeTarget): continue remote_products.append(target._properties['productReference']) # Sort the PBXReferenceProxy children according to the list of remote # products. product_group = ref_dict['ProductGroup'] product_group._properties['children'] = sorted( product_group._properties['children'], cmp=lambda x, y: CompareProducts(x, y, remote_products)) class XCProjectFile(XCObject): _schema = XCObject._schema.copy() _schema.update({ 'archiveVersion': [0, int, 0, 1, 1], 'classes': [0, dict, 0, 1, {}], 'objectVersion': [0, int, 0, 1, 45], 'rootObject': [0, PBXProject, 1, 1], }) def SetXcodeVersion(self, version): version_to_object_version = { '2.4': 45, '3.0': 45, '3.1': 45, '3.2': 46, } if not version in version_to_object_version: supported_str = ', '.join(sorted(version_to_object_version.keys())) raise Exception( 'Unsupported Xcode version %s (supported: %s)' % ( version, supported_str ) ) compatibility_version = 'Xcode %s' % version self._properties['rootObject'].SetProperty('compatibilityVersion', compatibility_version) self.SetProperty('objectVersion', version_to_object_version[version]); def ComputeIDs(self, recursive=True, overwrite=True, hash=None): # Although XCProjectFile is implemented here as an XCObject, it's not a # proper object in the Xcode sense, and it certainly doesn't have its own # ID. Pass through an attempt to update IDs to the real root object. if recursive: self._properties['rootObject'].ComputeIDs(recursive, overwrite, hash) def Print(self, file=sys.stdout): self.VerifyHasRequiredProperties() # Add the special "objects" property, which will be caught and handled # separately during printing. 
This structure allows a fairly standard # loop do the normal printing. self._properties['objects'] = {} self._XCPrint(file, 0, '// !$*UTF8*$!\n') if self._should_print_single_line: self._XCPrint(file, 0, '{ ') else: self._XCPrint(file, 0, '{\n') for property, value in sorted(self._properties.iteritems(), cmp=lambda x, y: cmp(x, y)): if property == 'objects': self._PrintObjects(file) else: self._XCKVPrint(file, 1, property, value) self._XCPrint(file, 0, '}\n') del self._properties['objects'] def _PrintObjects(self, file): if self._should_print_single_line: self._XCPrint(file, 0, 'objects = {') else: self._XCPrint(file, 1, 'objects = {\n') objects_by_class = {} for object in self.Descendants(): if object == self: continue class_name = object.__class__.__name__ if not class_name in objects_by_class: objects_by_class[class_name] = [] objects_by_class[class_name].append(object) for class_name in sorted(objects_by_class): self._XCPrint(file, 0, '\n') self._XCPrint(file, 0, '/* Begin ' + class_name + ' section */\n') for object in sorted(objects_by_class[class_name], cmp=lambda x, y: cmp(x.id, y.id)): object.Print(file) self._XCPrint(file, 0, '/* End ' + class_name + ' section */\n') if self._should_print_single_line: self._XCPrint(file, 0, '}; ') else: self._XCPrint(file, 1, '};\n')
dagwieers/ansible
refs/heads/devel
contrib/inventory/linode.py
15
#!/usr/bin/env python ''' Linode external inventory script ================================= Generates inventory that Ansible can understand by making API request to Linode using the Chube library. NOTE: This script assumes Ansible is being executed where Chube is already installed and has a valid config at ~/.chube. If not, run: pip install chube echo -e "---\napi_key: <YOUR API KEY GOES HERE>" > ~/.chube For more details, see: https://github.com/exosite/chube NOTE: By default, this script also assumes that the Linodes in your account all have labels that correspond to hostnames that are in your resolver search path. Your resolver search path resides in /etc/hosts. Optionally, if you would like to use the hosts public IP instead of it's label use the following setting in linode.ini: use_public_ip = true When run against a specific host, this script returns the following variables: - api_id - datacenter_id - datacenter_city (lowercase city name of data center, e.g. 'tokyo') - label - display_group - create_dt - total_hd - total_xfer - total_ram - status - public_ip (The first public IP found) - private_ip (The first private IP found, or empty string if none) - alert_cpu_enabled - alert_cpu_threshold - alert_diskio_enabled - alert_diskio_threshold - alert_bwin_enabled - alert_bwin_threshold - alert_bwout_enabled - alert_bwout_threshold - alert_bwquota_enabled - alert_bwquota_threshold - backup_weekly_daily - backup_window - watchdog Peter Sankauskas did most of the legwork here with his linode plugin; I just adapted that for Linode. ''' # (c) 2013, Dan Slimmon # # This file is part of Ansible, # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
# # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ###################################################################### # Standard imports import os import re import sys import argparse from time import time import json try: from chube import load_chube_config from chube import api as chube_api from chube.datacenter import Datacenter from chube.linode_obj import Linode except Exception: try: # remove local paths and other stuff that may # cause an import conflict, as chube is sensitive # to name collisions on importing old_path = sys.path sys.path = [d for d in sys.path if d not in ('', os.getcwd(), os.path.dirname(os.path.realpath(__file__)))] from chube import load_chube_config from chube import api as chube_api from chube.datacenter import Datacenter from chube.linode_obj import Linode sys.path = old_path except Exception as e: raise Exception("could not import chube") load_chube_config() # Imports for ansible import ConfigParser class LinodeInventory(object): def _empty_inventory(self): return {"_meta": {"hostvars": {}}} def __init__(self): """Main execution path.""" # Inventory grouped by display group self.inventory = self._empty_inventory() # Index of label to Linode ID self.index = {} # Local cache of Datacenter objects populated by populate_datacenter_cache() self._datacenter_cache = None # Read settings and parse CLI arguments self.read_settings() self.parse_cli_args() # Cache if self.args.refresh_cache: self.do_api_calls_update_cache() elif not self.is_cache_valid(): self.do_api_calls_update_cache() # Data to print if self.args.host: data_to_print = self.get_host_info() elif self.args.list: # Display list of nodes for inventory if 
len(self.inventory) == 1: data_to_print = self.get_inventory_from_cache() else: data_to_print = self.json_format_dict(self.inventory, True) print(data_to_print) def is_cache_valid(self): """Determines if the cache file has expired, or if it is still valid.""" if os.path.isfile(self.cache_path_cache): mod_time = os.path.getmtime(self.cache_path_cache) current_time = time() if (mod_time + self.cache_max_age) > current_time: if os.path.isfile(self.cache_path_index): return True return False def read_settings(self): """Reads the settings from the .ini file.""" config = ConfigParser.SafeConfigParser() config.read(os.path.dirname(os.path.realpath(__file__)) + '/linode.ini') # Cache related cache_path = config.get('linode', 'cache_path') self.cache_path_cache = cache_path + "/ansible-linode.cache" self.cache_path_index = cache_path + "/ansible-linode.index" self.cache_max_age = config.getint('linode', 'cache_max_age') self.use_public_ip = config.getboolean('linode', 'use_public_ip') def parse_cli_args(self): """Command line argument processing""" parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Linode') parser.add_argument('--list', action='store_true', default=True, help='List nodes (default: True)') parser.add_argument('--host', action='store', help='Get all the variables about a specific node') parser.add_argument('--refresh-cache', action='store_true', default=False, help='Force refresh of cache by making API requests to Linode (default: False - use cache files)') self.args = parser.parse_args() def do_api_calls_update_cache(self): """Do API calls, and save data in cache files.""" self.get_nodes() self.write_to_cache(self.inventory, self.cache_path_cache) self.write_to_cache(self.index, self.cache_path_index) def get_nodes(self): """Makes an Linode API call to get the list of nodes.""" try: for node in Linode.search(status=Linode.STATUS_RUNNING): self.add_node(node) except chube_api.linode_api.ApiError as e: sys.exit("Looks like 
Linode's API is down:\n %s" % e) def get_node(self, linode_id): """Gets details about a specific node.""" try: return Linode.find(api_id=linode_id) except chube_api.linode_api.ApiError as e: sys.exit("Looks like Linode's API is down:\n%s" % e) def populate_datacenter_cache(self): """Creates self._datacenter_cache, containing all Datacenters indexed by ID.""" self._datacenter_cache = {} dcs = Datacenter.search() for dc in dcs: self._datacenter_cache[dc.api_id] = dc def get_datacenter_city(self, node): """Returns a the lowercase city name of the node's data center.""" if self._datacenter_cache is None: self.populate_datacenter_cache() location = self._datacenter_cache[node.datacenter_id].location location = location.lower() location = location.split(",")[0] return location def add_node(self, node): """Adds an node to the inventory and index.""" if self.use_public_ip: dest = self.get_node_public_ip(node) else: dest = node.label # Add to index self.index[dest] = node.api_id # Inventory: Group by node ID (always a group of 1) self.inventory[node.api_id] = [dest] # Inventory: Group by datacenter city self.push(self.inventory, self.get_datacenter_city(node), dest) # Inventory: Group by display group self.push(self.inventory, node.display_group, dest) # Inventory: Add a "linode" global tag group self.push(self.inventory, "linode", dest) # Add host info to hostvars self.inventory["_meta"]["hostvars"][dest] = self._get_host_info(node) def get_node_public_ip(self, node): """Returns a the public IP address of the node""" return [addr.address for addr in node.ipaddresses if addr.is_public][0] def get_host_info(self): """Get variables about a specific host.""" if len(self.index) == 0: # Need to load index from cache self.load_index_from_cache() if self.args.host not in self.index: # try updating the cache self.do_api_calls_update_cache() if self.args.host not in self.index: # host might not exist anymore return self.json_format_dict({}, True) node_id = self.index[self.args.host] 
node = self.get_node(node_id)
return self.json_format_dict(self._get_host_info(node), True)

def _get_host_info(self, node):
    """Builds the hostvars dict for a single Linode node.

    Copies a fixed set of attributes straight off the chube Linode object,
    then adds derived values (datacenter city, public/private IPs and the
    ansible_host aliases).
    """
    node_vars = {}
    # Attributes copied verbatim from the chube Linode object.
    for direct_attr in [
        "api_id",
        "datacenter_id",
        "label",
        "display_group",
        "create_dt",
        "total_hd",
        "total_xfer",
        "total_ram",
        "status",
        "alert_cpu_enabled",
        "alert_cpu_threshold",
        "alert_diskio_enabled",
        "alert_diskio_threshold",
        "alert_bwin_enabled",
        "alert_bwin_threshold",
        "alert_bwout_enabled",
        "alert_bwout_threshold",
        "alert_bwquota_enabled",
        "alert_bwquota_threshold",
        "backup_weekly_daily",
        "backup_window",
        "watchdog"
    ]:
        node_vars[direct_attr] = getattr(node, direct_attr)

    node_vars["datacenter_city"] = self.get_datacenter_city(node)
    node_vars["public_ip"] = self.get_node_public_ip(node)

    # Set the SSH host information, so these inventory items can be used if
    # their labels aren't FQDNs
    node_vars['ansible_ssh_host'] = node_vars["public_ip"]
    node_vars['ansible_host'] = node_vars["public_ip"]

    # Only the first private address is exposed; nodes without one simply
    # omit the "private_ip" key.
    private_ips = [addr.address for addr in node.ipaddresses if not addr.is_public]
    if private_ips:
        node_vars["private_ip"] = private_ips[0]

    return node_vars

def push(self, my_dict, key, element):
    """Pushes an element onto an array that may not have been defined in the dict."""
    if key in my_dict:
        my_dict[key].append(element)
    else:
        my_dict[key] = [element]

def get_inventory_from_cache(self):
    """Reads the inventory from the cache file and returns it as a JSON object."""
    # NOTE(review): file handle is never closed — consider a with-block.
    cache = open(self.cache_path_cache, 'r')
    json_inventory = cache.read()
    return json_inventory

def load_index_from_cache(self):
    """Reads the index from the cache file and sets self.index."""
    # NOTE(review): file handle is never closed — consider a with-block.
    cache = open(self.cache_path_index, 'r')
    json_index = cache.read()
    self.index = json.loads(json_index)

def write_to_cache(self, data, filename):
    """Writes data in JSON format to a file."""
    json_data = self.json_format_dict(data, True)
    cache = open(filename, 'w')
    cache.write(json_data)
    cache.close()

def to_safe(self, word):
    """Escapes any characters that would be invalid in an ansible group name."""
    return re.sub(r"[^A-Za-z0-9\-]", "_", word)

def json_format_dict(self, data, pretty=False):
    """Converts a dict to a JSON object and dumps it as a formatted string."""
    if pretty:
        return json.dumps(data, sort_keys=True, indent=2)
    else:
        return json.dumps(data)

# Running the script constructs the inventory and prints it (see __init__).
LinodeInventory()
m0ose/node-firefly2
refs/heads/master
node_modules/node-gyp/gyp/pylib/gyp/generator/gypd.py
912
# Copyright (c) 2011 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """gypd output module This module produces gyp input as its output. Output files are given the .gypd extension to avoid overwriting the .gyp files that they are generated from. Internal references to .gyp files (such as those found in "dependencies" sections) are not adjusted to point to .gypd files instead; unlike other paths, which are relative to the .gyp or .gypd file, such paths are relative to the directory from which gyp was run to create the .gypd file. This generator module is intended to be a sample and a debugging aid, hence the "d" for "debug" in .gypd. It is useful to inspect the results of the various merges, expansions, and conditional evaluations performed by gyp and to see a representation of what would be fed to a generator module. It's not advisable to rename .gypd files produced by this module to .gyp, because they will have all merges, expansions, and evaluations already performed and the relevant constructs not present in the output; paths to dependencies may be wrong; and various sections that do not belong in .gyp files such as such as "included_files" and "*_excluded" will be present. Output will also be stripped of comments. This is not intended to be a general-purpose gyp pretty-printer; for that, you probably just want to run "pprint.pprint(eval(open('source.gyp').read()))", which will still strip comments but won't do all of the other things done to this module's output. The specific formatting of the output generated by this module is subject to change. """ import gyp.common import errno import os import pprint # These variables should just be spit back out as variable references. 
_generator_identity_variables = [
  'EXECUTABLE_PREFIX',
  'EXECUTABLE_SUFFIX',
  'INTERMEDIATE_DIR',
  'PRODUCT_DIR',
  'RULE_INPUT_ROOT',
  'RULE_INPUT_DIRNAME',
  'RULE_INPUT_EXT',
  'RULE_INPUT_NAME',
  'RULE_INPUT_PATH',
  'SHARED_INTERMEDIATE_DIR',
]

# gypd doesn't define a default value for OS like many other generator
# modules.  Specify "-D OS=whatever" on the command line to provide a value.
generator_default_variables = {
}

# gypd supports multiple toolsets
generator_supports_multiple_toolsets = True

# TODO(mark): This always uses <, which isn't right.  The input module should
# notify the generator to tell it which phase it is operating in, and this
# module should use < for the early phase and then switch to > for the late
# phase.  Bonus points for carrying @ back into the output too.
for v in _generator_identity_variables:
  generator_default_variables[v] = '<(%s)' % v


def GenerateOutput(target_list, target_dicts, data, params):
  """Writes one pretty-printed .gypd file per input .gyp file.

  Args:
    target_list: list of "path/to/file.gyp:target" qualified target strings.
    target_dicts: per-target dicts (unused; data holds the merged input).
    data: dict mapping each input .gyp path to its fully merged content.
    params: generator params; params['options'].suffix is inserted before
        the ".gypd" extension of each output filename.
  """
  # Map each output filename to its input filename; multiple targets from
  # the same .gyp collapse onto one output file.
  output_files = {}
  for qualified_target in target_list:
    [input_file, target] = \
        gyp.common.ParseQualifiedTarget(qualified_target)[0:2]

    # Only .gyp inputs produce .gypd output; anything else is skipped.
    if input_file[-4:] != '.gyp':
      continue
    input_file_stem = input_file[:-4]
    output_file = input_file_stem + params['options'].suffix + '.gypd'

    if output_file not in output_files:
      output_files[output_file] = input_file

  # items() instead of the Python 2-only iteritems(); the with-statement
  # guarantees the file is closed even if pprint raises.
  for output_file, input_file in output_files.items():
    with open(output_file, 'w') as output:
      pprint.pprint(data[input_file], output)
asmateus/event_manager
refs/heads/master
server/manager/events/views.py
1
from django.utils.decorators import method_decorator  # NOTE(review): currently unused
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import status

from . import services


def heartbeat(request):
    # TODO: stub — currently returns None; implement or remove.
    pass


class ServerStatusView(APIView):
    """
    Server status, data timestamps, etc.
    """

    def get(self, request, format=None):
        # TODO: stub
        pass

    def post(self, request, format=None):
        # TODO: stub
        pass


class PersonView(APIView):
    """
    Handles all Person related requests
    """

    def get(self, request, format=None):
        # TODO: stub
        pass

    def post(self, request, format=None):
        # TODO: stub
        pass


class LoginView(APIView):
    """
    User login and initial token retrieval
    """

    def get(self, request, format=None):
        # NOTE(review): debug print and empty payload — placeholder handler.
        print(request.data)
        return Response({}, status=status.HTTP_200_OK)

    def post(self, request, format=None):
        # NOTE(review): returns a hard-coded token — placeholder handler.
        print(request.data)
        return Response(
            {'token': 'hello, token here!'},
            status=status.HTTP_200_OK
        )


class EventFormMetadata(APIView):
    """
    Returns metadata for filling the event creation form.
    """

    def get(self, request, format=None):
        metadata = services.extract_form_metadata()
        return Response(metadata, status=status.HTTP_200_OK)


class ClientListView(APIView):
    """
    Returns list of clients in the system.
    """

    def get(self, request, format=None):
        clients = services.get_client_list()
        return Response(clients, status=status.HTTP_200_OK)


class EventCreateView(APIView):
    """
    Create an event in the system.

    GET lists events, POST creates one, PATCH updates one; all business
    logic is delegated to the services module.
    """

    def get(self, request, format=None):
        events = services.get_events()
        return Response(events, status=status.HTTP_200_OK)

    def post(self, request, format=None):
        # Expects the event payload under the "event" key.
        created_event = services.create_event(request.data['event'])
        return Response({'event': created_event}, status=status.HTTP_200_OK)

    def patch(self, request, format=None):
        updated_event = services.update_event(request.data['event'])
        return Response({'event': updated_event}, status=status.HTTP_200_OK)


class PayEventView(APIView):
    """
    Pay an event.
    """

    def post(self, request, format=None):
        # Records a payment against an event; returns the updated payments.
        event_id = request.data['event']
        payer_name = request.data['name']
        amount = request.data['amount']
        payments = services.pay_event(event_id, payer_name, amount)
        return Response({'payments': payments}, status=status.HTTP_200_OK)

    def get(self, request, format=None):
        # Lists payments for the event given in the "event" query parameter.
        payments = services.get_payments(request.query_params['event'])
        return Response({'payments': payments}, status=status.HTTP_200_OK)


class SearchInCalendarView(APIView):
    """
    Search for events in calendar. Match with client name, client
    identification, or date.
    """

    def get(self, request, format=None):
        term = request.query_params['term']
        # The service also reports how it interpreted the term (type + cast).
        events, term_type, cast_term = services.get_events_by_term(term)
        return Response(
            {
                'events': events,
                'term': term,
                'cast_term': cast_term,
                'term_type': term_type
            },
            status=status.HTTP_200_OK
        )
jonathonwalz/ansible
refs/heads/devel
lib/ansible/plugins/connection/libvirt_lxc.py
44
# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com> # Based on chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com> # (c) 2013, Michael Scherer <misc@zarb.org> # (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import (absolute_import, division, print_function) __metaclass__ = type import distutils.spawn import os import os.path import subprocess import traceback from ansible import constants as C from ansible.errors import AnsibleError from ansible.module_utils.six.moves import shlex_quote from ansible.module_utils._text import to_bytes from ansible.plugins.connection import ConnectionBase, BUFSIZE try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() class Connection(ConnectionBase): ''' Local lxc based connections ''' transport = 'libvirt_lxc' has_pipelining = True # su currently has an undiagnosed issue with calculating the file # checksums (so copy, for instance, doesn't work right) # Have to look into that before re-enabling this become_methods = frozenset(C.BECOME_METHODS).difference(('su',)) def __init__(self, play_context, new_stdin, *args, **kwargs): super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs) self.lxc = self._play_context.remote_addr self.virsh = self._search_executable('virsh') 
self._check_domain(self.lxc) def _search_executable(self, executable): cmd = distutils.spawn.find_executable(executable) if not cmd: raise AnsibleError("%s command not found in PATH") % executable return cmd def _check_domain(self, domain): p = subprocess.Popen([self.virsh, '-q', '-c', 'lxc:///', 'dominfo', to_bytes(domain)], stdout=subprocess.PIPE, stderr=subprocess.PIPE) p.communicate() if p.returncode: raise AnsibleError("%s is not a lxc defined in libvirt" % domain) def _connect(self): ''' connect to the lxc; nothing to do here ''' super(Connection, self)._connect() if not self._connected: display.vvv("THIS IS A LOCAL LXC DIR", host=self.lxc) self._connected = True def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE): ''' run a command on the chroot. This is only needed for implementing put_file() get_file() so that we don't have to read the whole file into memory. compared to exec_command() it looses some niceties like being able to return the process's exit code immediately. ''' executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else '/bin/sh' local_cmd = [self.virsh, '-q', '-c', 'lxc:///', 'lxc-enter-namespace'] if C.DEFAULT_LIBVIRT_LXC_NOSECLABEL: local_cmd += ['--noseclabel'] local_cmd += [self.lxc, '--', executable, '-c', cmd] display.vvv("EXEC %s" % (local_cmd,), host=self.lxc) local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] p = subprocess.Popen(local_cmd, shell=False, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE) return p def exec_command(self, cmd, in_data=None, sudoable=False): ''' run a command on the chroot ''' super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) p = self._buffered_exec_command(cmd) stdout, stderr = p.communicate(in_data) return (p.returncode, stdout, stderr) def _prefix_login_path(self, remote_path): ''' Make sure that we put files into a standard path If a path is relative, then we need to choose where to put it. 
ssh chooses $HOME but we aren't guaranteed that a home dir will exist in any given chroot. So for now we're choosing "/" instead. This also happens to be the former default. Can revisit using $HOME instead if it's a problem ''' if not remote_path.startswith(os.path.sep): remote_path = os.path.join(os.path.sep, remote_path) return os.path.normpath(remote_path) def put_file(self, in_path, out_path): ''' transfer a file from local to lxc ''' super(Connection, self).put_file(in_path, out_path) display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.lxc) out_path = shlex_quote(self._prefix_login_path(out_path)) try: with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file: try: p = self._buffered_exec_command('dd of=%s bs=%s' % (out_path, BUFSIZE), stdin=in_file) except OSError: raise AnsibleError("chroot connection requires dd command in the chroot") try: stdout, stderr = p.communicate() except: traceback.print_exc() raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path)) if p.returncode != 0: raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr)) except IOError: raise AnsibleError("file or module does not exist at: %s" % in_path) def fetch_file(self, in_path, out_path): ''' fetch a file from lxc to local ''' super(Connection, self).fetch_file(in_path, out_path) display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.lxc) in_path = shlex_quote(self._prefix_login_path(in_path)) try: p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE)) except OSError: raise AnsibleError("chroot connection requires dd command in the chroot") with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb+') as out_file: try: chunk = p.stdout.read(BUFSIZE) while chunk: out_file.write(chunk) chunk = p.stdout.read(BUFSIZE) except: traceback.print_exc() raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path)) stdout, stderr = p.communicate() if p.returncode != 
0: raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr)) def close(self): ''' terminate the connection; nothing to do here ''' super(Connection, self).close() self._connected = False
HaebinShin/tensorflow
refs/heads/master
tensorflow/contrib/learn/python/learn/coordinated_session.py
1
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrapper for a Session object that handles threads and recovery."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib.learn.python.learn.wrapped_session import WrappedSession


class CoordinatedSession(WrappedSession):
  """A wrapped session that cooperates with a `tf.Coordinator`.

  `run()` calls are forwarded to the wrapped session.  Any exception raised
  by a forwarded call is handed to the coordinator via `request_stop()`
  rather than propagated directly.  Whenever the coordinator reports that a
  stop was requested, this session joins the coordinated threads given at
  construction time; if the stop was requested with an exception, joining
  re-raises that exception from `run()`.
  """

  def __init__(self, sess, coord, coordinated_threads_to_join):
    """Create a new `CoordinatedSession`.

    Args:
      sess: A `tf.Session` object.  The wrapped session.
      coord: A `tf.train.Coordinator` object.
      coordinated_threads_to_join: A list of threads.
    """
    WrappedSession.__init__(self, sess)
    self._coord = coord
    self._coordinated_threads_to_join = coordinated_threads_to_join

  def _check_stop(self):
    # Defer the stop decision entirely to the coordinator.
    return self._coord.should_stop()

  def run(self, *args, **kwargs):
    try:
      return self._sess.run(*args, **kwargs)
    except Exception as run_error:  # pylint: disable=broad-except
      # Report the failure to the coordinator instead of raising here.
      self._coord.request_stop(run_error)
    # Only reached when the forwarded run() raised: clean up by joining the
    # coordinated threads, which re-raises the recorded exception if any.
    if self._coord.should_stop():
      self._coord.join(self._coordinated_threads_to_join)

# TODO(touts): Add a close() method that also joins the coordinator
# but does not raise exceptions.  This can only be done reliably when the
# Coordinator keeps a pointer to the coordinated threads, otherwise we do not
# know which threads to join.
Pirata-Repository/Pirata
refs/heads/master
plugin.video.1channel/utils.py
3
"""
    1Channel XBMC Addon
    Copyright (C) 2014 Bstrdsmkr, tknorris

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""
import os
import re
import sys
import time
import datetime
import json
import _strptime  # fix bug in python import
import xbmc
import xbmcgui
import xbmcplugin
from addon.common.addon import Addon

# Day-of-week tables: numeric codes '0'-'6' map positionally onto the
# letter codes used in the subscription "days" setting strings.
DAY_NUMS = list('0123456')
DAY_CODES = ['M', 'T', 'W', 'H', 'F', 'Sa', 'Su']
_1CH = Addon('plugin.video.1channel')


def enum(**enums):
    # Poor-man's enum for Python 2: builds a class whose attributes are the
    # given keyword arguments.
    return type('Enum', (), enums)

# Plugin routing modes: each value is the `mode=` query parameter dispatched
# by default.py.
MODES = enum(SAVE_FAV='SaveFav', DEL_FAV='DeleteFav', GET_SOURCES='GetSources',
             PLAY_SOURCE='PlaySource', CH_WATCH='ChangeWatched',
             PLAY_TRAILER='PlayTrailer', SEARCH_QUERY='GetSearchQuery',
             DESC_QUERY='GetSearchQueryDesc', ADV_QUERY='GetSearchQueryAdvanced',
             SEARCH='Search', SEARCH_DESC='SearchDesc', SEARCH_ADV='SearchAdvanced',
             REMOTE_SEARCH='7000', MAIN='main', LIST_MENU='BrowseListMenu',
             AZ_MENU='BrowseAlphabetMenu', GENRE_MENU='BrowseByGenreMenu',
             FILTER_RESULTS='GetFilteredResults', SEASON_LIST='TVShowSeasonList',
             EPISODE_LIST='TVShowEpisodeList', BROWSE_FAVS='browse_favorites',
             BROWSE_FAVS_WEB='browse_favorites_website', MIG_FAVS='migrateFavs',
             FAV2LIB='fav2Library', BROWSE_W_WEB='browse_watched_website',
             ADD2LIB='add_to_library', ADD_SUB='add_subscription',
             CANCEL_SUB='cancel_subscription',
             MAN_UPD_SUBS='manual_update_subscriptions',
             UPD_SUBS='update_subscriptions',
             MAN_CLEAN_SUBS='manual_clean_up_subscriptions',
             CLEAN_SUBS='clean_up_subscriptions',
             MANAGE_SUBS='manage_subscriptions', PAGE_SELECT='PageSelect',
             FAV_PAGE_SELECT='FavPageSelect',
             WATCH_PAGE_SELECT='WatchedPageSelect',
             SEARCH_PAGE_SELECT='SearchPageSelect', EXPORT_DB='export_db',
             IMPORT_DB='import_db', BACKUP_DB='backup_db', EDIT_DAYS='edit_days',
             HELP='Help', FLUSH_CACHE='flush_cache',
             INSTALL_META='install_metapack',
             INSTALL_LOCAL_META='install_local_metapack',
             MOVIE_UPDATE='movie_update', SELECT_SOURCES='SelectSources',
             REFRESH_META='refresh_meta', META_SETTINGS='9988',
             RES_SETTINGS='ResolverSettings', TOGGLE_X_FAVS='toggle_xbmc_fav',
             PLAYLISTS_MENU='playlists_menu', BROWSE_PLAYLISTS='get_playlists',
             SHOW_PLAYLIST='show_playlist', PL_PAGE_SELECT='PLPageSelect',
             RM_FROM_PL='remove_from_playlist', ADD2PL='add_to_playlist',
             BROWSE_TW_WEB='browse_towatch_website',
             CH_TOWATCH_WEB='change_towatch_website',
             CH_WATCH_WEB='change_watched_website',
             MAN_UPD_TOWATCH='man_update_towatch', RESET_DB='reset_db',
             INSTALL_THEMES='install_themes', SHOW_SCHEDULE='show_schedule')

SUB_TYPES = enum(PW_PL=0)

# Per-task lists of allowed interval hours; the settings dialog stores the
# chosen index into these lists (see get_next_run()).
hours_list = {}
hours_list[MODES.UPD_SUBS] = [2, 2] + range(2, 25)  # avoid accidental runaway subscription updates
hours_list[MODES.MOVIE_UPDATE] = [2, 5, 10, 15, 24]
hours_list[MODES.BACKUP_DB] = [12, 24, 168, 720]


def get_days_string_from_days(days):
    # Convert a numeric days string (e.g. '024') into display letter codes
    # (e.g. 'MWF'), rotated so the configured first day of week comes first.
    # Returns 'ALL' when every day is selected.
    if days is None: days = ''
    days_string = ''
    fdow = int(_1CH.get_setting('first-dow'))
    adj_day_nums = DAY_NUMS[fdow:] + DAY_NUMS[:fdow]
    adj_day_codes = DAY_CODES[fdow:] + DAY_CODES[:fdow]
    all_days = ''.join(adj_day_codes)
    for i, day_num in enumerate(adj_day_nums):
        if day_num in days:
            days_string += adj_day_codes[i]
    if days_string == all_days: days_string = 'ALL'
    return days_string


def get_days_from_days_string(days_string):
    # Inverse of get_days_string_from_days(): letter codes -> numeric string.
    if days_string is None: days_string = ''
    days_string = days_string.upper()
    days = ''
    if days_string == 'ALL':
        days = '0123456'
    else:
        for i, day in enumerate(DAY_CODES):
            if day.upper() in days_string:
                days += DAY_NUMS[i]
    return days


def get_default_days():
    # Default subscription days per the 'sub-days' setting index:
    # 0=every day, 1=none, 2=Mon/Wed/Fri/Sun, 3=today only, 4=today+tomorrow.
    def_days = ['0123456', '', '0246']
    dow = datetime.datetime.now().weekday()
    def_days.append(str(dow))
    def_days.append(str(dow) + str((dow + 1) % 7))
    return def_days[int(_1CH.get_setting('sub-days'))]


def format_label_tvshow(info):
    # Render a TV-show list label from the user's format template; template
    # placeholders: {t}=title {y}=year {ft}/{fy}=formatted title/year.
    if 'premiered' in info:
        year = info['premiered'][:4]
    else:
        year = ''
    title = info['title']
    label = _1CH.get_setting('format-tvshow')
    label = re.sub('\{t\}', title, label)
    label = re.sub('\{y\}', year, label)
    label = re.sub('\{ft\}', format_tvshow_title(title), label)
    label = re.sub('\{fy\}', format_tvshow_year(year), label)
    return label


def format_tvshow_title(title):
    title_format = _1CH.get_setting('format-tvshow-title')
    label = re.sub('\{t\}', title, title_format)
    return label


def format_tvshow_year(year):
    if not year: return ''
    year_format = _1CH.get_setting('format-tvshow-year')
    label = re.sub('\{y\}', year, year_format)
    return label


def format_tvshow_episode(info):
    # Placeholders: {s}=season {e}=episode {t}=episode title {st}=show title.
    episode_format = _1CH.get_setting('format-tvshow-episode')
    label = re.sub('\{s\}', str(info['season']), episode_format)
    label = re.sub('\{e\}', str(info['episode']), label)
    label = re.sub('\{t\}', info['title'], label)
    label = re.sub('\{st\}', info['TVShowTitle'], label)
    return label


def format_label_sub(info):
    # Wrap the normal tvshow label in the subscription template ({L}=label).
    sub_format = _1CH.get_setting('format-tvshow-sub')
    label = format_label_tvshow(info)
    formatted_label = re.sub('\{L\}', label, sub_format)
    return formatted_label


def format_label_movie(info):
    # Movie counterpart of format_label_tvshow(); same placeholders.
    if 'premiered' in info:
        year = info['premiered'][:4]
    else:
        year = ''
    label = _1CH.get_setting('format-movie')
    title = info['title']
    label = re.sub('\{t\}', title, label)
    label = re.sub('\{y\}', year, label)
    label = re.sub('\{ft\}', format_movie_title(title), label)
    label = re.sub('\{fy\}', format_movie_year(year), label)
    return label


def format_movie_title(title):
    title_format = _1CH.get_setting('format-movie-title')
    label = re.sub('\{t\}', title, title_format)
    return label


def format_movie_year(year):
    if not year: return ''
    year_format = _1CH.get_setting('format-movie-year')
    label = re.sub('\{y\}', year, year_format)
    return label


def format_label_source(info):
    # Source list label; placeholders: {q}=quality {h}=host {v}=views
    # {p}=multipart marker.  Verified sources get the extra verified wrapper.
    label = _1CH.get_setting('format-source')
    label = re.sub('\{q\}', info['quality'], label)
    label = re.sub('\{h\}', info['host'], label)
    label = re.sub('\{v\}', str(info['views']), label)
    if info['multi-part']:
        parts = 'part 1'
    else:
        parts = ''
    label = re.sub('\{p\}', parts, label)
    if info['verified']: label = format_label_source_verified(label)
    return label


def format_label_source_verified(label):
    ver_format = _1CH.get_setting('format-source-verified')
    formatted_label = re.sub('\{L\}', label, ver_format)
    return formatted_label


def format_label_source_parts(info, part_num):
    # Same as format_label_source() but with an explicit part number.
    label = _1CH.get_setting('format-source-parts')
    label = re.sub('\{q\}', info['quality'], label)
    label = re.sub('\{h\}', info['host'], label)
    label = re.sub('\{v\}', str(info['views']), label)
    parts = 'part %s' % part_num
    label = re.sub('\{p\}', parts, label)
    if info['verified']: label = format_label_source_verified(label)
    return label


def has_upgraded():
    # Compare the stored version with the current addon version,
    # octet by octet; True when any current octet is greater.
    old_version = _1CH.get_setting('old_version').split('.')
    new_version = _1CH.get_version().split('.')
    current_oct = 0
    for octant in old_version:
        if int(new_version[current_oct]) > int(octant):
            log('New version found')
            return True
        current_oct += 1
    return False


def filename_from_title(title, video_type):
    # Build a .strm filename; for tvshows the S/E placeholders are kept as
    # literal '%s' so the caller can fill them in later.
    if video_type == 'tvshow':
        filename = '%s S%sE%s.strm'
        filename = filename % (title, '%s', '%s')
    else:
        filename = '%s.strm' % title
    filename = re.sub(r'(?!%s)[^\w\-_\. ]', '_', filename)
    # NOTE(review): makeLegalFilename returns the sanitized name, but its
    # return value is discarded here — looks unintentional; confirm.
    xbmc.makeLegalFilename(filename)
    return filename


class TextBox:
    # Thin wrapper around XBMC's built-in text viewer window, used to
    # display the bundled help.faq file.
    # constants
    WINDOW = 10147
    CONTROL_LABEL = 1
    CONTROL_TEXTBOX = 5

    def __init__(self, *args, **kwargs):
        # activate the text viewer window
        xbmc.executebuiltin("ActivateWindow(%d)" % (self.WINDOW, ))
        # get window
        self.win = xbmcgui.Window(self.WINDOW)
        # give window time to initialize
        xbmc.sleep(1000)
        self.setControls()

    def setControls(self):
        # set heading
        heading = "PrimeWire v%s" % (_1CH.get_version())
        self.win.getControl(self.CONTROL_LABEL).setLabel(heading)
        # set text
        root = _1CH.get_path()
        faq_path = os.path.join(root, 'help.faq')
        f = open(faq_path)
        text = f.read()
        self.win.getControl(self.CONTROL_TEXTBOX).setText(text)


def website_is_integrated():
    # True when website integration is enabled and credentials are set.
    # NOTE(review): the settings key 'usename' looks like a typo for
    # 'username' — verify against settings.xml before changing.
    enabled = _1CH.get_setting('site_enabled') == 'true'
    user = _1CH.get_setting('usename') is not None
    passwd = _1CH.get_setting('passwd') is not None
    return enabled and user and passwd


def using_pl_subs():
    return (website_is_integrated() and _1CH.get_setting('playlist-sub'))


def get_subs_pl_url():
    return '/playlists.php?id=%s' % (_1CH.get_setting('playlist-sub'))


def rank_host(source):
    # Rank a source's host against the user's 'host-rank' tier list
    # (comma-separated tiers, '|'-separated hosts); unknown hosts rank 1000.
    host = source['host']
    ranking = _1CH.get_setting('host-rank').split(',')
    host = host.lower()
    for tier in ranking:
        tier = tier.lower()
        if host in tier.split('|'):
            return ranking.index(tier) + 1
    return 1000


def refresh_meta(video_type, old_title, imdb, alt_id, year, new_title=''):
    # Let the user re-match a title against TheTVDB (shows) or the movie
    # metadata search, then update the metahandlers cache.
    from metahandler import metahandlers
    __metaget__ = metahandlers.MetaData()
    search_title = new_title if new_title else old_title
    if video_type in ['tvshow', 'episode']:
        api = metahandlers.TheTVDB()
        results = api.get_matching_shows(search_title)
        search_meta = []
        for item in results:
            option = {'tvdb_id': item[0], 'title': item[1], 'imdb_id': item[2]}
            search_meta.append(option)
    else:
        search_meta = __metaget__.search_movies(search_title)
    log('search_meta: %s' % search_meta, xbmc.LOGDEBUG)
    # index 0 is always the manual-search escape hatch
    option_list = ['Manual Search...']
    if search_meta:
        for option in search_meta:
            if 'year' in option and option['year'] is not None:
                disptitle = '%s (%s)' % (option['title'], option['year'])
            else:
                disptitle = option['title']
            option_list.append(disptitle)
    dialog = xbmcgui.Dialog()
    index = dialog.select('Choose', option_list)
    if index == 0:
        refresh_meta_manual(video_type, old_title, imdb, alt_id, year)
    elif index > -1:
        # option_list is offset by one because of the manual-search entry
        new_imdb_id = search_meta[index - 1]['imdb_id']
        try:
            new_tmdb_id = search_meta[index - 1]['tmdb_id']
        except:
            new_tmdb_id = ''
        #Temporary workaround for metahandlers problem:
        #Error attempting to delete from cache table: no such column: year
        if video_type == 'tvshow':
            year = ''
        log(search_meta[index - 1], xbmc.LOGDEBUG)
        __metaget__.update_meta(video_type, old_title, imdb, year=year,
                                new_imdb_id=new_imdb_id, new_tmdb_id=new_tmdb_id)
        xbmc.executebuiltin('Container.Refresh')


def refresh_meta_manual(video_type, old_title, imdb, alt_id, year):
    # Prompt for a search string, then re-run refresh_meta() with it.
    keyboard = xbmc.Keyboard()
    if year:
        disptitle = '%s (%s)' % (old_title, year)
    else:
        disptitle = old_title
    keyboard.setHeading('Enter a title')
    keyboard.setDefault(disptitle)
    keyboard.doModal()
    if keyboard.isConfirmed():
        search_string = keyboard.getText()
        refresh_meta(video_type, old_title, imdb, alt_id, year, search_string)


def set_view(content, view_type):
    # set content type so library shows more views and info
    if content:
        xbmcplugin.setContent(int(sys.argv[1]), content)

    if _1CH.get_setting('auto-view') == 'true':
        view_mode = _1CH.get_setting(view_type)
        xbmc.executebuiltin("Container.SetViewMode(%s)" % view_mode)

    # set sort methods - probably we don't need all of them
    xbmcplugin.addSortMethod(handle=int(sys.argv[1]), sortMethod=xbmcplugin.SORT_METHOD_UNSORTED)
    xbmcplugin.addSortMethod(handle=int(sys.argv[1]), sortMethod=xbmcplugin.SORT_METHOD_LABEL)
    xbmcplugin.addSortMethod(handle=int(sys.argv[1]), sortMethod=xbmcplugin.SORT_METHOD_VIDEO_RATING)
    xbmcplugin.addSortMethod(handle=int(sys.argv[1]), sortMethod=xbmcplugin.SORT_METHOD_DATE)
    xbmcplugin.addSortMethod(handle=int(sys.argv[1]), sortMethod=xbmcplugin.SORT_METHOD_PROGRAM_COUNT)
    xbmcplugin.addSortMethod(handle=int(sys.argv[1]), sortMethod=xbmcplugin.SORT_METHOD_VIDEO_RUNTIME)
    xbmcplugin.addSortMethod(handle=int(sys.argv[1]), sortMethod=xbmcplugin.SORT_METHOD_GENRE)


def get_dir_size(start_path):
    # Recursively total the byte size of every file under start_path.
    print 'Calculating size of %s' % start_path
    total_size = 0
    for dirpath, dirnames, filenames in os.walk(start_path):
        for each_file in filenames:
            fpath = os.path.join(dirpath, each_file)
            total_size += os.path.getsize(fpath)
    print 'Calculated: %s' % total_size
    return total_size


def format_eta(seconds):
    # "ETA: MM:SS" or "ETA: HH:MM:SS" depending on magnitude.
    minutes, seconds = divmod(seconds, 60)
    if minutes > 60:
        hours, minutes = divmod(minutes, 60)
        return "ETA: %02d:%02d:%02d " % (hours, minutes, seconds)
    else:
        return "ETA: %02d:%02d " % (minutes, seconds)


def format_time(seconds):
    # "MM:SS" or "HH:MM:SS" depending on magnitude.
    minutes, seconds = divmod(seconds, 60)
    if minutes > 60:
        hours, minutes = divmod(minutes, 60)
        return "%02d:%02d:%02d" % (hours, minutes, seconds)
    else:
        return "%02d:%02d" % (minutes, seconds)


def filename_filter_out_year(name=''):
    # Strip a trailing " (YYYY)" and XBMC colour/bold markup from a title.
    try:
        years = re.compile(' \((\d+)\)').findall('__' + name + '__')
        for year in years:
            name = name.replace(' (' + year + ')', '')
        name = name.replace('[B]', '').replace('[/B]', '').replace('[/COLOR]', '').replace('[COLOR green]', '')
        name = name.strip()
        return name
    except:
        # NOTE(review): str.strip() is not in-place; this call is a no-op and
        # the unstripped name is returned on the error path — confirm intent.
        name.strip();
        return name


def unpack_query(query):
    # Decode an advanced-search JSON query, filling in any missing criteria
    # keys with empty strings so callers can index unconditionally.
    expected_keys = ('title', 'tag', 'country', 'genre', 'actor', 'director', 'year', 'month', 'decade')
    criteria = json.loads(query)
    for key in expected_keys:
        if key not in criteria:
            criteria[key] = ''
    return criteria


def get_xbmc_fav_urls():
    # Collect the plugin URLs of all XBMC favourites (media favourites use
    # 'path', window favourites use 'windowparameter').
    xbmc_favs = get_xbmc_favs()
    fav_urls = []
    for fav in xbmc_favs:
        if 'path' in fav:
            fav_url = fav['path']
        elif 'windowparameter' in fav:
            fav_url = fav['windowparameter']
        else:
            continue
        fav_urls.append(fav_url)
    return fav_urls


def in_xbmc_favs(url, fav_urls, ignore_dialog=True):
    # Membership test against favourite urls, optionally ignoring the
    # &dialog=True/False suffix so equivalent urls still match.
    if ignore_dialog:
        fav_urls = (fav_url.replace('&dialog=True', '').replace('&dialog=False', '') for fav_url in fav_urls)
    if url in fav_urls:
        return True
    else:
        return False


def get_xbmc_favs():
    # Fetch all XBMC favourites via JSON-RPC; returns [] and logs on error.
    favs = []
    cmd = '{"jsonrpc": "2.0", "method": "Favourites.GetFavourites", "params": {"type": null, "properties": ["path", "windowparameter"]}, "id": 1}'
    result = xbmc.executeJSONRPC(cmd)
    result = json.loads(result)
    if 'error' not in result:
        if result['result']['favourites'] is not None:
            for fav in result['result']['favourites']:
                favs.append(fav)
    else:
        log('Failed to get XBMC Favourites: %s' % (result['error']['message']), xbmc.LOGERROR)
    return favs


# Run a task on startup. Settings and mode values must match task name
def do_startup_task(task):
    run_on_startup = _1CH.get_setting('auto-%s' % task) == 'true' and _1CH.get_setting('%s-during-startup' % task) == 'true'
    if run_on_startup and not xbmc.abortRequested:
        log('Service: Running startup task [%s]' % (task))
        now = datetime.datetime.now()
        xbmc.executebuiltin('RunPlugin(plugin://plugin.video.1channel/?mode=%s)' % (task))
        _1CH.set_setting('%s-last_run' % (task), now.strftime("%Y-%m-%d %H:%M:%S.%f"))


# Run a recurring scheduled task. Settings and mode values must match task name
def do_scheduled_task(task, isPlaying):
    now = datetime.datetime.now()
    if _1CH.get_setting('auto-%s' % task) == 'true':
        next_run = get_next_run(task)
        #log("Update Status on [%s]: Currently: %s Will Run: %s" % (task, now, next_run))
        if now >= next_run:
            # don't compete with a library scan, and optionally not with playback
            is_scanning = xbmc.getCondVisibility('Library.IsScanningVideo')
            if not is_scanning:
                during_playback = _1CH.get_setting('%s-during-playback' % (task)) == 'true'
                if during_playback or not isPlaying:
                    log('Service: Running Scheduled Task: [%s]' % (task))
                    builtin = 'RunPlugin(plugin://plugin.video.1channel/?mode=%s)' % (task)
                    xbmc.executebuiltin(builtin)
                    _1CH.set_setting('%s-last_run' % task, now.strftime("%Y-%m-%d %H:%M:%S.%f"))
                else:
                    log('Service: Playing... Busy... Postponing [%s]' % (task), xbmc.LOGDEBUG)
            else:
                log('Service: Scanning... Busy... Postponing [%s]' % (task), xbmc.LOGDEBUG)


def get_next_run(task):
    # strptime mysteriously fails sometimes with TypeError; this is a hacky workaround
    # note, they aren't 100% equal as time.strptime loses fractional seconds but they are close enough
    try:
        last_run = datetime.datetime.strptime(_1CH.get_setting(task + '-last_run'), "%Y-%m-%d %H:%M:%S.%f")
    except TypeError:
        last_run = datetime.datetime(*(time.strptime(_1CH.get_setting(task + '-last_run'), '%Y-%m-%d %H:%M:%S.%f')[0:6]))
    # NOTE(review): the interval table is always indexed with MODES.UPD_SUBS,
    # even when `task` is a different mode — looks unintentional; confirm
    # before relying on per-task interval lists.
    interval = datetime.timedelta(hours=hours_list[MODES.UPD_SUBS][int(_1CH.get_setting(task + '-interval'))])
    return (last_run + interval)


def log(msg, level=xbmc.LOGNOTICE):
    # override message level to force logging when addon logging turned on
    if _1CH.get_setting('addon_debug') == 'true' and level == xbmc.LOGDEBUG:
        level = xbmc.LOGNOTICE

    try:
        _1CH.log(msg, level)
    except:
        try:
            xbmc.log('Logging Failure', level)
        except:
            pass  # just give up
robovm/robovm-studio
refs/heads/master
python/testData/resolve/DocStringClass.py
83
class Foo: "Docstring of class Foo" pass Foo._<ref>_doc__
rohitwaghchaure/erpnext-receipher
refs/heads/master
erpnext/patches/v4_2/repost_stock_reconciliation.py
120
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # License: GNU General Public License v3. See license.txt from __future__ import unicode_literals import frappe import json def execute(): existing_allow_negative_stock = frappe.db.get_value("Stock Settings", None, "allow_negative_stock") frappe.db.set_value("Stock Settings", None, "allow_negative_stock", 1) head_row = ["Item Code", "Warehouse", "Quantity", "Valuation Rate"] stock_reco_to_be_reposted = [] for d in frappe.db.sql("""select name, reconciliation_json from `tabStock Reconciliation` where docstatus=1 and creation > '2014-03-01'""", as_dict=1): data = json.loads(d.reconciliation_json) for row in data[data.index(head_row)+1:]: if row[3] in ["", None]: stock_reco_to_be_reposted.append(d.name) break for dn in stock_reco_to_be_reposted: reco = frappe.get_doc("Stock Reconciliation", dn) reco.docstatus = 2 reco.on_cancel() reco.docstatus = 1 reco.validate() reco.on_submit() frappe.db.set_value("Stock Settings", None, "allow_negative_stock", existing_allow_negative_stock)
arjan/starpy
refs/heads/master
starpy/utilapplication.py
1
"""Class providing utility applications with common support code""" from basicproperty import common, propertied, basic, weak from ConfigParser import ConfigParser from starpy import fastagi, manager from twisted.internet import defer, reactor import logging,os log = logging.getLogger( 'app' ) class UtilApplication( propertied.Propertied ): """Utility class providing simple application-level operations FastAGI entry points are waitForCallOn and handleCallsFor, which allow for one-shot and permanant handling of calls for an extension (respectively), and agiSpecifier, which is loaded from configuration file (as specified in self.configFiles). """ amiSpecifier = basic.BasicProperty( "amiSpecifier", """AMI connection specifier for the application see AMISpecifier""", defaultFunction = lambda prop,client: AMISpecifier() ) agiSpecifier = basic.BasicProperty( "agiSpecifier", """FastAGI server specifier for the application see AGISpecifier""", defaultFunction = lambda prop,client: AGISpecifier() ) extensionWaiters = common.DictionaryProperty( "extensionWaiters", """Set of deferreds waiting for incoming extensions""", ) extensionHandlers = common.DictionaryProperty( "extensionHandlers", """Set of permanant callbacks waiting for incoming extensions""", ) configFiles = configFiles=('starpy.conf','~/.starpy.conf') def __init__( self ): """Initialise the application from options in configFile""" self.loadConfigurations() def loadConfigurations( self ): parser = self._loadConfigFiles( self.configFiles ) self._copyPropertiesFrom( parser, 'AMI', self.amiSpecifier ) self._copyPropertiesFrom( parser, 'FastAGI', self.agiSpecifier ) return parser def _loadConfigFiles( self, configFiles ): """Load options from configuration files given (if present)""" parser = ConfigParser( ) filenames = [ os.path.abspath( os.path.expandvars( os.path.expanduser( file ) )) for file in configFiles ] log.info( "Possible configuration files:\n\t%s", "\n\t".join(filenames) or None) filenames = [ file for 
file in filenames if os.path.isfile(file) ] log.info( "Actual configuration files:\n\t%s", "\n\t".join(filenames) or None) parser.read( filenames ) return parser def _copyPropertiesFrom( self, parser, section, client, properties=None ): """Copy properties from the config-parser's given section into client""" if properties is None: properties = client.getProperties() for property in properties: if parser.has_option( section, property.name ): try: value = parser.get( section, property.name ) setattr( client, property.name, value ) except (TypeError,ValueError,AttributeError,NameError), err: log( """Unable to set property %r of %r to config-file value %r: %s"""%( property.name, client, parser.get( section, property.name, 1), err, )) return client def dispatchIncomingCall( self, agi ): """Handle an incoming call (dispatch to the appropriate registered handler)""" extension = agi.variables['agi_extension'] log.info( """AGI connection with extension: %r""", extension ) try: df = self.extensionWaiters.pop( extension ) except KeyError, err: try: callback = self.extensionHandlers[ extension ] except KeyError, err: try: callback = self.extensionHandlers[ None ] except KeyError, err: log.warn( """Unexpected connection to extension %r: %s""", extension, agi.variables ) agi.finish() return try: return callback( agi ) except Exception, err: log.error( """Failure during callback %s for agi %s: %s""", callback, agi.variables, err ) # XXX return a -1 here else: if not df.called: df.callback( agi ) def waitForCallOn( self, extension, timeout=15 ): """Wait for an AGI call on extension given extension -- string extension for which to wait timeout -- duration in seconds to wait before defer.TimeoutError is returned to the deferred. Note that waiting callback overrides any registered handler; that is, if you register one callback with waitForCallOn and another with handleCallsFor, the first incoming call will trigger the waitForCallOn handler. 
returns deferred returning connected FastAGIProtocol or an error """ extension = str(extension) log.info( 'Waiting for extension %r for %s seconds', extension, timeout ) df = defer.Deferred( ) self.extensionWaiters[ extension ] = df def onTimeout( ): if not df.called: df.errback( defer.TimeoutError( """Timeout waiting for call on extension: %r"""%(extension,) )) reactor.callLater( timeout, onTimeout ) return df def handleCallsFor( self, extension, callback ): """Register permanant handler for given extension extension -- string extension for which to wait or None to define a default handler (that chosen if there is not explicit handler or waiter) callback -- callback function to be called for each incoming channel to the given extension. Note that waiting callback overrides any registered handler; that is, if you register one callback with waitForCallOn and another with handleCallsFor, the first incoming call will trigger the waitForCallOn handler. returns None """ if extension is not None: extension = str(extension) self.extensionHandlers[ extension ] = callback class AMISpecifier( propertied.Propertied ): """Manager interface setup/specifier""" username = common.StringLocaleProperty( "username", """Login username for the manager interface""", ) secret = common.StringLocaleProperty( "secret", """Login secret for the manager interface""", ) password = secret server = common.StringLocaleProperty( "server", """Server IP address to which to connect""", defaultValue = '127.0.0.1', ) port = common.IntegerProperty( "port", """Server IP port to which to connect""", defaultValue = 5038, ) timeout = common.FloatProperty( "timeout", """Timeout in seconds for an AMI connection timeout""", defaultValue = 5.0, ) def login( self ): """Login to the specified manager via the AMI""" theManager = manager.AMIFactory(self.username, self.secret) return theManager.login(self.server, self.port, timeout=self.timeout) class AGISpecifier( propertied.Propertied ): """Specifier of where we 
send the user to connect to our AGI""" port = common.IntegerProperty( "port", """IP port on which to listen""", defaultValue = 4573, ) interface = common.StringLocaleProperty( "interface", """IP interface on which to listen (local only by default)""", defaultValue = '127.0.0.1', ) context = common.StringLocaleProperty( "context", """Asterisk context to which to connect incoming calls""", defaultValue = 'survey', ) def run( self, mainFunction ): """Start up the AGI server with the given mainFunction""" f = fastagi.FastAGIFactory(mainFunction) return reactor.listenTCP(self.port, f, 50, self.interface)
RichardLitt/wyrd-django-dev
refs/heads/master
django/db/models/base.py
4
from __future__ import unicode_literals import copy import sys from functools import update_wrapper from django.utils.six.moves import zip import django.db.models.manager # Imported to register signal handler. from django.conf import settings from django.core.exceptions import (ObjectDoesNotExist, MultipleObjectsReturned, FieldError, ValidationError, NON_FIELD_ERRORS) from django.core import validators from django.db.models.fields import AutoField, FieldDoesNotExist from django.db.models.fields.related import (ManyToOneRel, OneToOneField, add_lazy_relation) from django.db import (router, transaction, DatabaseError, DEFAULT_DB_ALIAS) from django.db.models.query import Q from django.db.models.query_utils import DeferredAttribute, deferred_class_factory from django.db.models.deletion import Collector from django.db.models.options import Options from django.db.models import signals from django.db.models.loading import register_models, get_model from django.utils.translation import ugettext_lazy as _ from django.utils.functional import curry from django.utils.encoding import force_str, force_text from django.utils import six from django.utils.text import get_text_list, capfirst def subclass_exception(name, parents, module, attached_to=None): """ Create exception subclass. Used by ModelBase below. If 'attached_to' is supplied, the exception will be created in a way that allows it to be pickled, assuming the returned exception class will be added as an attribute to the 'attached_to' class. """ class_dict = {'__module__': module} if attached_to is not None: def __reduce__(self): # Exceptions are special - they've got state that isn't # in self.__dict__. We assume it is all in self.args. return (unpickle_inner_exception, (attached_to, name), self.args) def __setstate__(self, args): self.args = args class_dict['__reduce__'] = __reduce__ class_dict['__setstate__'] = __setstate__ return type(name, parents, class_dict) class ModelBase(type): """ Metaclass for all models. 
""" def __new__(cls, name, bases, attrs): super_new = super(ModelBase, cls).__new__ # six.with_metaclass() inserts an extra class called 'NewBase' in the # inheritance tree: Model -> NewBase -> object. Ignore this class. parents = [b for b in bases if isinstance(b, ModelBase) and not (b.__name__ == 'NewBase' and b.__mro__ == (b, object))] if not parents: # If this isn't a subclass of Model, don't do anything special. return super_new(cls, name, bases, attrs) # Create the class. module = attrs.pop('__module__') new_class = super_new(cls, name, bases, {'__module__': module}) attr_meta = attrs.pop('Meta', None) abstract = getattr(attr_meta, 'abstract', False) if not attr_meta: meta = getattr(new_class, 'Meta', None) else: meta = attr_meta base_meta = getattr(new_class, '_meta', None) if getattr(meta, 'app_label', None) is None: # Figure out the app_label by looking one level up. # For 'django.contrib.sites.models', this would be 'sites'. model_module = sys.modules[new_class.__module__] kwargs = {"app_label": model_module.__name__.split('.')[-2]} else: kwargs = {} new_class.add_to_class('_meta', Options(meta, **kwargs)) if not abstract: new_class.add_to_class('DoesNotExist', subclass_exception(str('DoesNotExist'), tuple(x.DoesNotExist for x in parents if hasattr(x, '_meta') and not x._meta.abstract) or (ObjectDoesNotExist,), module, attached_to=new_class)) new_class.add_to_class('MultipleObjectsReturned', subclass_exception(str('MultipleObjectsReturned'), tuple(x.MultipleObjectsReturned for x in parents if hasattr(x, '_meta') and not x._meta.abstract) or (MultipleObjectsReturned,), module, attached_to=new_class)) if base_meta and not base_meta.abstract: # Non-abstract child classes inherit some attributes from their # non-abstract parent (unless an ABC comes before it in the # method resolution order). 
if not hasattr(meta, 'ordering'): new_class._meta.ordering = base_meta.ordering if not hasattr(meta, 'get_latest_by'): new_class._meta.get_latest_by = base_meta.get_latest_by is_proxy = new_class._meta.proxy # If the model is a proxy, ensure that the base class # hasn't been swapped out. if is_proxy and base_meta and base_meta.swapped: raise TypeError("%s cannot proxy the swapped model '%s'." % (name, base_meta.swapped)) if getattr(new_class, '_default_manager', None): if not is_proxy: # Multi-table inheritance doesn't inherit default manager from # parents. new_class._default_manager = None new_class._base_manager = None else: # Proxy classes do inherit parent's default manager, if none is # set explicitly. new_class._default_manager = new_class._default_manager._copy_to_model(new_class) new_class._base_manager = new_class._base_manager._copy_to_model(new_class) # Bail out early if we have already created this class. m = get_model(new_class._meta.app_label, name, seed_cache=False, only_installed=False) if m is not None: return m # Add all attributes to the class. for obj_name, obj in attrs.items(): new_class.add_to_class(obj_name, obj) # All the fields of any type declared on this model new_fields = new_class._meta.local_fields + \ new_class._meta.local_many_to_many + \ new_class._meta.virtual_fields field_names = set([f.name for f in new_fields]) # Basic setup for proxy models. if is_proxy: base = None for parent in [cls for cls in parents if hasattr(cls, '_meta')]: if parent._meta.abstract: if parent._meta.fields: raise TypeError("Abstract base class containing model fields not permitted for proxy model '%s'." % name) else: continue if base is not None: raise TypeError("Proxy model '%s' has more than one non-abstract model base class." % name) else: base = parent if base is None: raise TypeError("Proxy model '%s' has no non-abstract model base class." 
% name) if (new_class._meta.local_fields or new_class._meta.local_many_to_many): raise FieldError("Proxy model '%s' contains model fields." % name) new_class._meta.setup_proxy(base) new_class._meta.concrete_model = base._meta.concrete_model else: new_class._meta.concrete_model = new_class # Do the appropriate setup for any model parents. o2o_map = dict([(f.rel.to, f) for f in new_class._meta.local_fields if isinstance(f, OneToOneField)]) for base in parents: original_base = base if not hasattr(base, '_meta'): # Things without _meta aren't functional models, so they're # uninteresting parents. continue parent_fields = base._meta.local_fields + base._meta.local_many_to_many # Check for clashes between locally declared fields and those # on the base classes (we cannot handle shadowed fields at the # moment). for field in parent_fields: if field.name in field_names: raise FieldError('Local field %r in class %r clashes ' 'with field of similar name from ' 'base class %r' % (field.name, name, base.__name__)) if not base._meta.abstract: # Concrete classes... base = base._meta.concrete_model if base in o2o_map: field = o2o_map[base] elif not is_proxy: attr_name = '%s_ptr' % base._meta.module_name field = OneToOneField(base, name=attr_name, auto_created=True, parent_link=True) new_class.add_to_class(attr_name, field) else: field = None new_class._meta.parents[base] = field else: # .. and abstract ones. for field in parent_fields: new_class.add_to_class(field.name, copy.deepcopy(field)) # Pass any non-abstract parent classes onto child. new_class._meta.parents.update(base._meta.parents) # Inherit managers from the abstract base classes. new_class.copy_managers(base._meta.abstract_managers) # Proxy models inherit the non-abstract managers from their base, # unless they have redefined any of them. 
if is_proxy: new_class.copy_managers(original_base._meta.concrete_managers) # Inherit virtual fields (like GenericForeignKey) from the parent # class for field in base._meta.virtual_fields: if base._meta.abstract and field.name in field_names: raise FieldError('Local field %r in class %r clashes '\ 'with field of similar name from '\ 'abstract base class %r' % \ (field.name, name, base.__name__)) new_class.add_to_class(field.name, copy.deepcopy(field)) if abstract: # Abstract base models can't be instantiated and don't appear in # the list of models for an app. We do the final setup for them a # little differently from normal models. attr_meta.abstract = False new_class.Meta = attr_meta return new_class new_class._prepare() register_models(new_class._meta.app_label, new_class) # Because of the way imports happen (recursively), we may or may not be # the first time this model tries to register with the framework. There # should only be one class for each model, so we always return the # registered version. return get_model(new_class._meta.app_label, name, seed_cache=False, only_installed=False) def copy_managers(cls, base_managers): # This is in-place sorting of an Options attribute, but that's fine. base_managers.sort() for _, mgr_name, manager in base_managers: val = getattr(cls, mgr_name, None) if not val or val is manager: new_manager = manager._copy_to_model(cls) cls.add_to_class(mgr_name, new_manager) def add_to_class(cls, name, value): if hasattr(value, 'contribute_to_class'): value.contribute_to_class(cls, name) else: setattr(cls, name, value) def _prepare(cls): """ Creates some methods once self._meta has been populated. 
""" opts = cls._meta opts._prepare(cls) if opts.order_with_respect_to: cls.get_next_in_order = curry(cls._get_next_or_previous_in_order, is_next=True) cls.get_previous_in_order = curry(cls._get_next_or_previous_in_order, is_next=False) # defer creating accessors on the foreign class until we are # certain it has been created def make_foreign_order_accessors(field, model, cls): setattr( field.rel.to, 'get_%s_order' % cls.__name__.lower(), curry(method_get_order, cls) ) setattr( field.rel.to, 'set_%s_order' % cls.__name__.lower(), curry(method_set_order, cls) ) add_lazy_relation( cls, opts.order_with_respect_to, opts.order_with_respect_to.rel.to, make_foreign_order_accessors ) # Give the class a docstring -- its definition. if cls.__doc__ is None: cls.__doc__ = "%s(%s)" % (cls.__name__, ", ".join([f.attname for f in opts.fields])) if hasattr(cls, 'get_absolute_url'): cls.get_absolute_url = update_wrapper(curry(get_absolute_url, opts, cls.get_absolute_url), cls.get_absolute_url) signals.class_prepared.send(sender=cls) class ModelState(object): """ A class for storing instance state """ def __init__(self, db=None): self.db = db # If true, uniqueness validation checks will consider this a new, as-yet-unsaved object. # Necessary for correct validation of new instances of objects with explicit (non-auto) PKs. # This impacts validation only; it has no effect on the actual save. self.adding = True class Model(six.with_metaclass(ModelBase, object)): _deferred = False def __init__(self, *args, **kwargs): signals.pre_init.send(sender=self.__class__, args=args, kwargs=kwargs) # Set up the storage for instance state self._state = ModelState() # There is a rather weird disparity here; if kwargs, it's set, then args # overrides it. It should be one or the other; don't duplicate the work # The reason for the kwargs check is that standard iterator passes in by # args, and instantiation for iteration is 33% faster. 
args_len = len(args) if args_len > len(self._meta.fields): # Daft, but matches old exception sans the err msg. raise IndexError("Number of args exceeds number of fields") fields_iter = iter(self._meta.fields) if not kwargs: # The ordering of the zip calls matter - zip throws StopIteration # when an iter throws it. So if the first iter throws it, the second # is *not* consumed. We rely on this, so don't change the order # without changing the logic. for val, field in zip(args, fields_iter): setattr(self, field.attname, val) else: # Slower, kwargs-ready version. for val, field in zip(args, fields_iter): setattr(self, field.attname, val) kwargs.pop(field.name, None) # Maintain compatibility with existing calls. if isinstance(field.rel, ManyToOneRel): kwargs.pop(field.attname, None) # Now we're left with the unprocessed fields that *must* come from # keywords, or default. for field in fields_iter: is_related_object = False # This slightly odd construct is so that we can access any # data-descriptor object (DeferredAttribute) without triggering its # __get__ method. if (field.attname not in kwargs and isinstance(self.__class__.__dict__.get(field.attname), DeferredAttribute)): # This field will be populated on request. continue if kwargs: if isinstance(field.rel, ManyToOneRel): try: # Assume object instance was passed in. rel_obj = kwargs.pop(field.name) is_related_object = True except KeyError: try: # Object instance wasn't passed in -- must be an ID. val = kwargs.pop(field.attname) except KeyError: val = field.get_default() else: # Object instance was passed in. Special case: You can # pass in "None" for related objects if it's allowed. if rel_obj is None and field.null: val = None else: try: val = kwargs.pop(field.attname) except KeyError: # This is done with an exception rather than the # default argument on pop because we don't want # get_default() to be evaluated, and then not used. # Refs #12057. 
val = field.get_default() else: val = field.get_default() if is_related_object: # If we are passed a related instance, set it using the # field.name instead of field.attname (e.g. "user" instead of # "user_id") so that the object gets properly cached (and type # checked) by the RelatedObjectDescriptor. setattr(self, field.name, rel_obj) else: setattr(self, field.attname, val) if kwargs: for prop in list(kwargs): try: if isinstance(getattr(self.__class__, prop), property): setattr(self, prop, kwargs.pop(prop)) except AttributeError: pass if kwargs: raise TypeError("'%s' is an invalid keyword argument for this function" % list(kwargs)[0]) super(Model, self).__init__() signals.post_init.send(sender=self.__class__, instance=self) def __repr__(self): try: u = six.text_type(self) except (UnicodeEncodeError, UnicodeDecodeError): u = '[Bad Unicode data]' return force_str('<%s: %s>' % (self.__class__.__name__, u)) def __str__(self): if not six.PY3 and hasattr(self, '__unicode__'): return force_text(self).encode('utf-8') return '%s object' % self.__class__.__name__ def __eq__(self, other): return isinstance(other, self.__class__) and self._get_pk_val() == other._get_pk_val() def __ne__(self, other): return not self.__eq__(other) def __hash__(self): return hash(self._get_pk_val()) def __reduce__(self): """ Provides pickling support. Normally, this just dispatches to Python's standard handling. However, for models with deferred field loading, we need to do things manually, as they're dynamically created classes and only module-level classes can be pickled by the default path. 
""" if not self._deferred: return super(Model, self).__reduce__() data = self.__dict__ defers = [] for field in self._meta.fields: if isinstance(self.__class__.__dict__.get(field.attname), DeferredAttribute): defers.append(field.attname) model = self._meta.proxy_for_model return (model_unpickle, (model, defers), data) def _get_pk_val(self, meta=None): if not meta: meta = self._meta return getattr(self, meta.pk.attname) def _set_pk_val(self, value): return setattr(self, self._meta.pk.attname, value) pk = property(_get_pk_val, _set_pk_val) def serializable_value(self, field_name): """ Returns the value of the field name for this instance. If the field is a foreign key, returns the id value, instead of the object. If there's no Field object with this name on the model, the model attribute's value is returned directly. Used to serialize a field's value (in the serializer, or form output, for example). Normally, you would just access the attribute directly and not use this method. """ try: field = self._meta.get_field_by_name(field_name)[0] except FieldDoesNotExist: return getattr(self, field_name) return getattr(self, field.attname) def save(self, force_insert=False, force_update=False, using=None, update_fields=None): """ Saves the current instance. Override this in a subclass if you want to control the saving process. The 'force_insert' and 'force_update' parameters can be used to insist that the "save" must be an SQL insert or update (or equivalent for non-SQL backends), respectively. Normally, they should not be set. """ using = using or router.db_for_write(self.__class__, instance=self) if force_insert and (force_update or update_fields): raise ValueError("Cannot force both insert and updating in model saving.") if update_fields is not None: # If update_fields is empty, skip the save. We do also check for # no-op saves later on for inheritance cases. This bailout is # still needed for skipping signal sending. 
if len(update_fields) == 0: return update_fields = frozenset(update_fields) field_names = set() for field in self._meta.fields: if not field.primary_key: field_names.add(field.name) if field.name != field.attname: field_names.add(field.attname) non_model_fields = update_fields.difference(field_names) if non_model_fields: raise ValueError("The following fields do not exist in this " "model or are m2m fields: %s" % ', '.join(non_model_fields)) # If saving to the same database, and this model is deferred, then # automatically do a "update_fields" save on the loaded fields. elif not force_insert and self._deferred and using == self._state.db: field_names = set() for field in self._meta.fields: if not field.primary_key and not hasattr(field, 'through'): field_names.add(field.attname) deferred_fields = [ f.attname for f in self._meta.fields if f.attname not in self.__dict__ and isinstance(self.__class__.__dict__[f.attname], DeferredAttribute)] loaded_fields = field_names.difference(deferred_fields) if loaded_fields: update_fields = frozenset(loaded_fields) self.save_base(using=using, force_insert=force_insert, force_update=force_update, update_fields=update_fields) save.alters_data = True def save_base(self, raw=False, cls=None, origin=None, force_insert=False, force_update=False, using=None, update_fields=None): """ Does the heavy-lifting involved in saving. Subclasses shouldn't need to override this method. It's separate from save() in order to hide the need for overrides of save() to pass around internal-only parameters ('raw', 'cls', and 'origin'). 
""" using = using or router.db_for_write(self.__class__, instance=self) assert not (force_insert and (force_update or update_fields)) assert update_fields is None or len(update_fields) > 0 if cls is None: cls = self.__class__ meta = cls._meta if not meta.proxy: origin = cls else: meta = cls._meta if origin and not meta.auto_created: signals.pre_save.send(sender=origin, instance=self, raw=raw, using=using, update_fields=update_fields) # If we are in a raw save, save the object exactly as presented. # That means that we don't try to be smart about saving attributes # that might have come from the parent class - we just save the # attributes we have been given to the class we have been given. # We also go through this process to defer the save of proxy objects # to their actual underlying model. if not raw or meta.proxy: if meta.proxy: org = cls else: org = None for parent, field in meta.parents.items(): # At this point, parent's primary key field may be unknown # (for example, from administration form which doesn't fill # this field). If so, fill it. if field and getattr(self, parent._meta.pk.attname) is None and getattr(self, field.attname) is not None: setattr(self, parent._meta.pk.attname, getattr(self, field.attname)) self.save_base(cls=parent, origin=org, using=using, update_fields=update_fields) if field: setattr(self, field.attname, self._get_pk_val(parent._meta)) if meta.proxy: return if not meta.proxy: non_pks = [f for f in meta.local_fields if not f.primary_key] if update_fields: non_pks = [f for f in non_pks if f.name in update_fields or f.attname in update_fields] # First, try an UPDATE. If that doesn't update anything, do an INSERT. 
pk_val = self._get_pk_val(meta) pk_set = pk_val is not None record_exists = True manager = cls._base_manager if pk_set: # Determine if we should do an update (pk already exists, forced update, # no force_insert) if ((force_update or update_fields) or (not force_insert and manager.using(using).filter(pk=pk_val).exists())): if force_update or non_pks: values = [(f, None, (raw and getattr(self, f.attname) or f.pre_save(self, False))) for f in non_pks] if values: rows = manager.using(using).filter(pk=pk_val)._update(values) if force_update and not rows: raise DatabaseError("Forced update did not affect any rows.") if update_fields and not rows: raise DatabaseError("Save with update_fields did not affect any rows.") else: record_exists = False if not pk_set or not record_exists: if meta.order_with_respect_to: # If this is a model with an order_with_respect_to # autopopulate the _order field field = meta.order_with_respect_to order_value = manager.using(using).filter(**{field.name: getattr(self, field.attname)}).count() self._order = order_value fields = meta.local_fields if not pk_set: if force_update or update_fields: raise ValueError("Cannot force an update in save() with no primary key.") fields = [f for f in fields if not isinstance(f, AutoField)] record_exists = False update_pk = bool(meta.has_auto_field and not pk_set) result = manager._insert([self], fields=fields, return_id=update_pk, using=using, raw=raw) if update_pk: setattr(self, meta.pk.attname, result) transaction.commit_unless_managed(using=using) # Store the database on which the object was saved self._state.db = using # Once saved, this is no longer a to-be-added instance. 
self._state.adding = False # Signal that the save is complete if origin and not meta.auto_created: signals.post_save.send(sender=origin, instance=self, created=(not record_exists), update_fields=update_fields, raw=raw, using=using) save_base.alters_data = True def delete(self, using=None): using = using or router.db_for_write(self.__class__, instance=self) assert self._get_pk_val() is not None, "%s object can't be deleted because its %s attribute is set to None." % (self._meta.object_name, self._meta.pk.attname) collector = Collector(using=using) collector.collect([self]) collector.delete() delete.alters_data = True def _get_FIELD_display(self, field): value = getattr(self, field.attname) return force_text(dict(field.flatchoices).get(value, value), strings_only=True) def _get_next_or_previous_by_FIELD(self, field, is_next, **kwargs): if not self.pk: raise ValueError("get_next/get_previous cannot be used on unsaved objects.") op = is_next and 'gt' or 'lt' order = not is_next and '-' or '' param = force_text(getattr(self, field.attname)) q = Q(**{'%s__%s' % (field.name, op): param}) q = q | Q(**{field.name: param, 'pk__%s' % op: self.pk}) qs = self.__class__._default_manager.using(self._state.db).filter(**kwargs).filter(q).order_by('%s%s' % (order, field.name), '%spk' % order) try: return qs[0] except IndexError: raise self.DoesNotExist("%s matching query does not exist." 
% self.__class__._meta.object_name) def _get_next_or_previous_in_order(self, is_next): cachename = "__%s_order_cache" % is_next if not hasattr(self, cachename): op = is_next and 'gt' or 'lt' order = not is_next and '-_order' or '_order' order_field = self._meta.order_with_respect_to obj = self._default_manager.filter(**{ order_field.name: getattr(self, order_field.attname) }).filter(**{ '_order__%s' % op: self._default_manager.values('_order').filter(**{ self._meta.pk.name: self.pk }) }).order_by(order)[:1].get() setattr(self, cachename, obj) return getattr(self, cachename) def prepare_database_save(self, unused): return self.pk def clean(self): """ Hook for doing any extra model-wide validation after clean() has been called on every field by self.clean_fields. Any ValidationError raised by this method will not be associated with a particular field; it will have a special-case association with the field defined by NON_FIELD_ERRORS. """ pass def validate_unique(self, exclude=None): """ Checks unique constraints on the model and raises ``ValidationError`` if any failed. """ unique_checks, date_checks = self._get_unique_checks(exclude=exclude) errors = self._perform_unique_checks(unique_checks) date_errors = self._perform_date_checks(date_checks) for k, v in date_errors.items(): errors.setdefault(k, []).extend(v) if errors: raise ValidationError(errors) def _get_unique_checks(self, exclude=None): """ Gather a list of checks to perform. Since validate_unique could be called from a ModelForm, some fields may have been excluded; we can't perform a unique check on a model that is missing fields involved in that check. Fields that did not validate should also be excluded, but they need to be passed in via the exclude argument. 
""" if exclude is None: exclude = [] unique_checks = [] unique_togethers = [(self.__class__, self._meta.unique_together)] for parent_class in self._meta.parents.keys(): if parent_class._meta.unique_together: unique_togethers.append((parent_class, parent_class._meta.unique_together)) for model_class, unique_together in unique_togethers: for check in unique_together: for name in check: # If this is an excluded field, don't add this check. if name in exclude: break else: unique_checks.append((model_class, tuple(check))) # These are checks for the unique_for_<date/year/month>. date_checks = [] # Gather a list of checks for fields declared as unique and add them to # the list of checks. fields_with_class = [(self.__class__, self._meta.local_fields)] for parent_class in self._meta.parents.keys(): fields_with_class.append((parent_class, parent_class._meta.local_fields)) for model_class, fields in fields_with_class: for f in fields: name = f.name if name in exclude: continue if f.unique: unique_checks.append((model_class, (name,))) if f.unique_for_date and f.unique_for_date not in exclude: date_checks.append((model_class, 'date', name, f.unique_for_date)) if f.unique_for_year and f.unique_for_year not in exclude: date_checks.append((model_class, 'year', name, f.unique_for_year)) if f.unique_for_month and f.unique_for_month not in exclude: date_checks.append((model_class, 'month', name, f.unique_for_month)) return unique_checks, date_checks def _perform_unique_checks(self, unique_checks): errors = {} for model_class, unique_check in unique_checks: # Try to look up an existing object with the same values as this # object's values for all the unique field. 
lookup_kwargs = {} for field_name in unique_check: f = self._meta.get_field(field_name) lookup_value = getattr(self, f.attname) if lookup_value is None: # no value, skip the lookup continue if f.primary_key and not self._state.adding: # no need to check for unique primary key when editing continue lookup_kwargs[str(field_name)] = lookup_value # some fields were skipped, no reason to do the check if len(unique_check) != len(lookup_kwargs): continue qs = model_class._default_manager.filter(**lookup_kwargs) # Exclude the current object from the query if we are editing an # instance (as opposed to creating a new one) # Note that we need to use the pk as defined by model_class, not # self.pk. These can be different fields because model inheritance # allows single model to have effectively multiple primary keys. # Refs #17615. model_class_pk = self._get_pk_val(model_class._meta) if not self._state.adding and model_class_pk is not None: qs = qs.exclude(pk=model_class_pk) if qs.exists(): if len(unique_check) == 1: key = unique_check[0] else: key = NON_FIELD_ERRORS errors.setdefault(key, []).append(self.unique_error_message(model_class, unique_check)) return errors def _perform_date_checks(self, date_checks): errors = {} for model_class, lookup_type, field, unique_for in date_checks: lookup_kwargs = {} # there's a ticket to add a date lookup, we can remove this special # case if that makes it's way in date = getattr(self, unique_for) if date is None: continue if lookup_type == 'date': lookup_kwargs['%s__day' % unique_for] = date.day lookup_kwargs['%s__month' % unique_for] = date.month lookup_kwargs['%s__year' % unique_for] = date.year else: lookup_kwargs['%s__%s' % (unique_for, lookup_type)] = getattr(date, lookup_type) lookup_kwargs[field] = getattr(self, field) qs = model_class._default_manager.filter(**lookup_kwargs) # Exclude the current object from the query if we are editing an # instance (as opposed to creating a new one) if not self._state.adding and self.pk is not 
None: qs = qs.exclude(pk=self.pk) if qs.exists(): errors.setdefault(field, []).append( self.date_error_message(lookup_type, field, unique_for) ) return errors def date_error_message(self, lookup_type, field, unique_for): opts = self._meta return _("%(field_name)s must be unique for %(date_field)s %(lookup)s.") % { 'field_name': six.text_type(capfirst(opts.get_field(field).verbose_name)), 'date_field': six.text_type(capfirst(opts.get_field(unique_for).verbose_name)), 'lookup': lookup_type, } def unique_error_message(self, model_class, unique_check): opts = model_class._meta model_name = capfirst(opts.verbose_name) # A unique field if len(unique_check) == 1: field_name = unique_check[0] field = opts.get_field(field_name) field_label = capfirst(field.verbose_name) # Insert the error into the error dict, very sneaky return field.error_messages['unique'] % { 'model_name': six.text_type(model_name), 'field_label': six.text_type(field_label) } # unique_together else: field_labels = [capfirst(opts.get_field(f).verbose_name) for f in unique_check] field_labels = get_text_list(field_labels, _('and')) return _("%(model_name)s with this %(field_label)s already exists.") % { 'model_name': six.text_type(model_name), 'field_label': six.text_type(field_labels) } def full_clean(self, exclude=None): """ Calls clean_fields, clean, and validate_unique, on the model, and raises a ``ValidationError`` for any errors that occured. """ errors = {} if exclude is None: exclude = [] try: self.clean_fields(exclude=exclude) except ValidationError as e: errors = e.update_error_dict(errors) # Form.clean() is run even if other validation fails, so do the # same with Model.clean() for consistency. try: self.clean() except ValidationError as e: errors = e.update_error_dict(errors) # Run unique checks, but only for fields that passed validation. 
for name in errors.keys(): if name != NON_FIELD_ERRORS and name not in exclude: exclude.append(name) try: self.validate_unique(exclude=exclude) except ValidationError as e: errors = e.update_error_dict(errors) if errors: raise ValidationError(errors) def clean_fields(self, exclude=None): """ Cleans all fields and raises a ValidationError containing message_dict of all validation errors if any occur. """ if exclude is None: exclude = [] errors = {} for f in self._meta.fields: if f.name in exclude: continue # Skip validation for empty fields with blank=True. The developer # is responsible for making sure they have a valid value. raw_value = getattr(self, f.attname) if f.blank and raw_value in validators.EMPTY_VALUES: continue try: setattr(self, f.attname, f.clean(raw_value, self)) except ValidationError as e: errors[f.name] = e.messages if errors: raise ValidationError(errors) ############################################ # HELPER FUNCTIONS (CURRIED MODEL METHODS) # ############################################ # ORDERING METHODS ######################### def method_set_order(ordered_obj, self, id_list, using=None): if using is None: using = DEFAULT_DB_ALIAS rel_val = getattr(self, ordered_obj._meta.order_with_respect_to.rel.field_name) order_name = ordered_obj._meta.order_with_respect_to.name # FIXME: It would be nice if there was an "update many" version of update # for situations like this. 
for i, j in enumerate(id_list): ordered_obj.objects.filter(**{'pk': j, order_name: rel_val}).update(_order=i) transaction.commit_unless_managed(using=using) def method_get_order(ordered_obj, self): rel_val = getattr(self, ordered_obj._meta.order_with_respect_to.rel.field_name) order_name = ordered_obj._meta.order_with_respect_to.name pk_name = ordered_obj._meta.pk.name return [r[pk_name] for r in ordered_obj.objects.filter(**{order_name: rel_val}).values(pk_name)] ############################################## # HELPER FUNCTIONS (CURRIED MODEL FUNCTIONS) # ############################################## def get_absolute_url(opts, func, self, *args, **kwargs): return settings.ABSOLUTE_URL_OVERRIDES.get('%s.%s' % (opts.app_label, opts.module_name), func)(self, *args, **kwargs) ######## # MISC # ######## class Empty(object): pass def model_unpickle(model, attrs): """ Used to unpickle Model subclasses with deferred fields. """ cls = deferred_class_factory(model, attrs) return cls.__new__(cls) model_unpickle.__safe_for_unpickle__ = True def unpickle_inner_exception(klass, exception_name): # Get the exception class from the class it is attached to: exception = getattr(klass, exception_name) return exception.__new__(exception)
Atari007/master
refs/heads/master
dep/ACE_wrappers/bin/make_release.py
94
#!/usr/bin/python # -*- coding: utf-8 -*- # @file make_release.py # @author William R. Otte <wotte@dre.vanderbilt.edu> # # Packaging script for ACE/TAO/CIAO from __future__ import with_statement from time import strftime import pysvn import re import tempfile import shutil import subprocess import shlex ################################################## #### Global variables ################################################## """ Options from the command line """ opts=None """ Arguments from the command line """ args=None """ Absolute path from the SVN workspace to be used for the release""" doc_root=None """ Full name of person performing release, obtained from the environment""" signature=None """ Full email address of person performing release. """ mailid = None """ A dict containing version information used for the release. This dict contains entries of the form COMPONENT_version COMPONENT_beta COMPONENT_minor COMPONENT_major """ comp_versions = dict () release_date = strftime (# ie: Mon Jan 23 00:35:37 CST 2006 "%a %b %d %H:%M:%S %Z %Y") # Packaging configuration """ This is a regex that detects files that SHOULD NOT have line endings converted to CRLF when being put into a ZIP file """ bin_regex = re.compile ("\.(mak|mdp|ide|exe|ico|gz|zip|xls|sxd|gif|vcp|vcproj|vcw|sln|dfm|jpg|png|vsd|bz2|pdf|ppt|graffle|pptx|odt)$") ################################################## #### SVN Client Hooks ################################################## svn_auth_info = None def svn_login_callback (realm, username, may_save): """ Callback used by the SVN library to obtain login credentials""" global svn_auth_info if svn_auth_info is None: print "Please enter your Subversion login credentials. They will be saved for the duration of this script." 
username = raw_input ("Username: ") password = raw_input ("Password: ") svn_auth_info = (True, username, password, False) return svn_autn_info def svn_log_message_callback (): """ Callback used by the svn library to generate log messages for operations such as copy """ return (True, "ChangeLogTag: %s %s <%s>" % (release_date, signature, mailid)) svn_client = pysvn.Client () svn_client.callback_get_login = svn_login_callback svn_client.callback_get_log_message = svn_log_message_callback ################################################## #### Utility Methods ################################################## def parse_args (): from optparse import OptionParser parser = OptionParser ("usage %prog [options]") parser.add_option ("--major", dest="release_type", action="store_const", help="Create a major release.", default=None, const="major") parser.add_option ("--minor", dest="release_type", action="store_const", help="Create a minor release.", default=None, const="minor") parser.add_option ("--beta", dest="release_type", action="store_const", help="Create a beta release.", default=None, const="beta") parser.add_option ("--tag", dest="action", action="store_const", help="Tag the release. DO NOT USE WITH --kit", default=None, const="tag") parser.add_option ("--update", dest="update", action="store_true", help="Update the version numbers, only used with --tag", default=False) parser.add_option ("--kit", dest="action", action="store_const", help="Create kits. 
DO NOT USE WITH --tag", default=None, const="kit") parser.add_option ("--dest", dest="package_dir", action="store", help="Specify destination for the created packages.", default=None) parser.add_option ("--root", dest="repo_root", action="store", help="Specify an alternate repository root", default=None) # By default get repo root from working copy # default="https://svn.dre.vanderbilt.edu/DOC/") parser.add_option ("--mpc_root", dest="mpc_root", action="store", help="Specify an alternate MPC repository root", default=None) # By default get repo root from MPC root in working copy parser.add_option ("-n", dest="take_action", action="store_false", help="Take no action", default=True) parser.add_option ("--verbose", dest="verbose", action="store_true", help="Print out actions as they are being performed", default=False) (options, arguments) = parser.parse_args () if options.action is None: parser.error ("Must specify an action, ie --tag or --kit") if options.action == "tag": if options.release_type is None: parser.error ("When tagging, must specify a release type") if options.update is False: print "Warning: You are tagging a release, but not requesting a version increment" return (options, arguments) def ex (command): from os import system global opts vprint ("Executing " + command) if not opts.take_action: print "Executing " + command return status = system(command) if status != 0: print "ERROR: Nonzero retrun value from " + command raise Exception ### # Checks that the users environment is sane. # def check_environment (): from os import getenv global doc_root, signature, mailid, opts doc_root = getenv ("DOC_ROOT") if (doc_root is None): print "ERROR: Environment DOC_ROOT must be defined." return False signature = getenv ("SIGNATURE") if (signature is None): print "ERROR: Must define SIGNATURE environment variable to your full name, used in changelogs." 
return False mailid = getenv ("MAILID") if (mailid is None): print "ERROR: Must define MAILID environment to your email address for changelogs." return False return True def vprint (string): """ Prints the supplied message if verbose is enabled""" global opts if opts.verbose: print string ################################################## #### Tagging methods ################################################## def commit (files): """ Commits the supplied list of files to the repository. """ vprint ("Committing the following files: " + " ".join (files)) if opts.take_action: rev = svn_client.checkin (files, "ChangeLogTag:%s %s <%s>" % (release_date, signature, mailid)) print "Checked in files, resuling in revision ", rev.number def check_workspace (): """ Checks that the DOC and MPC repositories are up to date. """ global opts, doc_root, svn_client # @@TODO: Replace with a svn library try: rev = svn_client.update (doc_root) print "Successfully updated ACE/TAO/CIAO working copy to revision " except: print "Unable to update ACE/TAO/CIAO workspace at " + doc_root raise try: rev = svn_client.update (doc_root + "/ACE/MPC") print "Successfully updated MPC working copy to revision " except: print "Unable to update the MPC workspace at " + doc_root + "/ACE/MPC" raise # By default retrieve repo root from working copy if opts.repo_root is None: info = svn_client.info2 (doc_root + "/ACE")[0] opts.repo_root = info[1]["repos_root_URL"] # By default retrieve MPC root from working copy if opts.mpc_root is None: info = svn_client.info2 (doc_root + "/ACE/MPC")[0] opts.mpc_root = info[1]["repos_root_URL"] vprint ("Repos root URL = " + opts.repo_root + "\n") vprint ("Repos MPC root URL = " + opts.mpc_root + "\n") def update_version_files (component): """ Updates the version files for a given component. 
This includes Version.h, the PRF, and the VERSION file.""" global comp_versions, opts, release_date vprint ("Updating version files for " + component) import re retval = list () ## Update component/VERSION with open (component + "/VERSION", "r+") as version_file: new_version = re.sub (component + " version .*", "%s version %s, released %s" % (component, comp_versions[component + "_version"], release_date), version_file.read ()) if opts.take_action: version_file.seek (0) version_file.truncate (0) version_file.write (new_version) else: print "New version file for " + component print new_version vprint ("Updating Version.h for " + component) retval += [component + "/VERSION"] ## Update component/component/Version.h version_header = """ // -*- C++ -*- // $Id: make_release.py 92148 2010-10-04 19:57:24Z wotte $ // This is file was automatically generated by \$ACE_ROOT/bin/make_release.py #define %s_MAJOR_VERSION %s #define %s_MINOR_VERSION %s #define %s_BETA_VERSION %s #define %s_VERSION \"%s\" """ % (component, comp_versions[component + "_major"], component, comp_versions[component + "_minor"], component, comp_versions[component + "_beta"], component, comp_versions[component + "_version"]) if opts.take_action: with open (component + '/' + component.lower () + "/Version.h", 'r+') as version_h: version_h.write (version_header) else: print "New Version.h for " + component print version_header retval += [component + '/' + component.lower () + "/Version.h"] # Update component/PROBLEM-REPORT-FORM vprint ("Updating PRF for " + component) version_string = re.compile ("^\s*(\w+) +VERSION ?:") with open (component + "/PROBLEM-REPORT-FORM", 'r+') as prf: new_prf = "" for line in prf.readlines (): match = None match = version_string.search (line) if match is not None: vprint ("Found PRF Version for " + match.group (1)) line = re.sub ("(\d\.)+\d?", comp_versions[match.group(1) + "_version"], line) new_prf += line if opts.take_action: prf.seek (0) prf.truncate (0) prf.writelines 
(new_prf) else: print "New PRF for " + component print "".join (new_prf) retval += [component + "/PROBLEM-REPORT-FORM"] return retval def update_debianbuild (): """ Updates ACE_ROOT/debian directory. - renames all files with version nrs in name to new scheme. - updates version nrs in file debian/control Currently ONLY ACE & TAO stuff is handled here """ global comp_versions import glob import re from os.path import basename from os.path import dirname from os.path import join files = list () prev_ace_ver = None prev_tao_ver = None # rename files mask = re.compile ("(libace|libkokyu|libtao)(.*)(\d+\.\d+\.\d+)(.*)") tao = re.compile ("tao", re.IGNORECASE) for fname in glob.iglob(doc_root + '/ACE/debian/*'): print "Considering " + fname match = None fbase = basename (fname) match = mask.search (fbase) fnewname = None if match is not None: if tao.search (fbase) is not None: fnewname = join (dirname (fname), match.group (1) + match.group (2) + comp_versions["TAO_version"] + match.group (4)) prev_tao_ver = match.group (3) else: fnewname = join (dirname (fname), match.group (1) + match.group (2) + comp_versions["ACE_version"] + match.group (4)) prev_ace_ver = match.group (3) print prev_ace_ver # print prev_tao_var if fnewname is not None: if opts.take_action: svn_client.move (fname, fnewname) else: print "Rename: " + fname + " to " + fnewname + "\n" files.append (fname) files.append (fnewname) print "Appending " + fname + " and " + fnewname # update debianbuild/control def update_ver (match): if match.group (1) == 'libtao': return match.group (1) + match.group (2) + comp_versions["TAO_version"] + match.group (4) else: return match.group (1) + match.group (2) + comp_versions["ACE_version"] + match.group (4) with open (doc_root + "/ACE/debian/debian.control", 'r+') as control_file: new_ctrl = "" for line in control_file.readlines (): if re.search ("^(Package|Depends|Suggests):", line) is not None: line = mask.sub (update_ver, line) elif re.search ('^Replaces:', line) is not 
None: print comp_versions["ACE_version"] line = line.replace (prev_ace_ver, comp_versions["ACE_version"]) new_ctrl += line if opts.take_action: control_file.seek (0) control_file.truncate (0) control_file.writelines (new_ctrl) else: print "New control file:" print "".join (new_ctrl) files.append (doc_root + "/ACE/debian/debian.control") # rewrite debian/dsc dsc_lines = """# Format: 1.0 Source: ACE+TAO+CIAO-src-%s Version: %s Binary: ace Maintainer: Johnny Willemsen <jwillemsen@remedy.nl> Architecture: any Build-Depends: gcc, make, g++, debhelper (>= 5), libssl-dev (>= 0.9.7d), dpatch (>= 2.0.10), libxt-dev (>= 4.3.0), libfltk1.1-dev (>= 1.1.4), libqt4-dev (>= 4.4~rc1-4), tk-dev (>= 8.4.7), zlib1g-dev, docbook-to-man, bzip2, autoconf, automake, libtool, autotools-dev, doxygen, graphviz Files: 65b34001c9605f056713a7e146b052d1 46346654 ACE+TAO+CIAO-src-%s.tar.gz """ % (comp_versions["ACE_version"], comp_versions["TAO_version"], comp_versions["ACE_version"]) if opts.take_action: with open (doc_root + "/ACE/debian/ace.dsc", 'r+') as dsc_file: dsc_file.seek (0) dsc_file.truncate (0) dsc_file.writelines (dsc_lines) else: print "New dsc file:\n" print dsc_lines files.append (doc_root + "/ACE/debian/ace.dsc") return files def get_and_update_versions (): """ Gets current version information for each component, updates the version files, creates changelog entries, and commit the changes into the repository.""" try: get_comp_versions ("ACE") get_comp_versions ("TAO") get_comp_versions ("CIAO") get_comp_versions ("DAnCE") files = list () files += update_version_files ("ACE") files += update_version_files ("TAO") files += update_version_files ("CIAO") files += update_version_files ("DAnCE") files += create_changelog ("ACE") files += create_changelog ("TAO") files += create_changelog ("CIAO") files += create_changelog ("DAnCE") files += update_debianbuild () print "Committing " + str(files) commit (files) except: print "Fatal error in get_and_update_versions." 
raise def create_changelog (component): """ Creates a changelog entry for the supplied component that includes the version number being released""" vprint ("Creating ChangeLog entry for " + component) global comp_versions, opts # generate our changelog entry changelog_entry = """%s %s <%s> \t* %s version %s released. """ % (release_date, signature, mailid, component, comp_versions[component + "_version"]) vprint ("Changelog Entry for " + component + "\n" + changelog_entry) with open ("%s/ChangeLog" % (component), 'r+') as changelog: changelog_entry += changelog.read () if opts.take_action: changelog.seek (0) changelog.truncate (0) changelog.write (changelog_entry) return ["%s/ChangeLog" % (component)] def get_comp_versions (component): """ Extracts the current version number from the VERSION file and increments it appropriately for the release type requested.""" vprint ("Detecting current version for" + component) import re global comp_versions, opts beta = re.compile ("version (\d+)\.(\d+)\.(\d+)") minor = re.compile ("version (\d+)\.(\d+)[^\.]") major = re.compile ("version (\d+)[^\.]") with open (component + "/VERSION") as version_file: for line in version_file: match = None match = beta.search (line) if match is not None: vprint ("Detected beta version %s.%s.%s" % (match.group (1), match.group (2), match.group (3))) comp_versions[component + "_major"] = int (match.group (1)) comp_versions[component + "_minor"] = int (match.group (2)) comp_versions[component + "_beta"] = int (match.group (3)) break match = minor.search (line) if match is not None: vprint ("Detected minor version %s.%s" % (match.group (1), match.group (2))) comp_versions[component + "_major"] = int (match.group (1)) comp_versions[component + "_minor"] = int (match.group (2)) comp_versions[component + "_beta"] = 0 break match = major.search (line) if match is not None: vprint ("Detected major version " + match.group (1) + ".0") comp_versions[component + "_major"] = int (match.group (1)) 
comp_versions[component + "_minor"] = 0 comp_versions[component + "_beta"] = 0 break print "FATAL ERROR: Unable to locate current version for " + component raise Exception if opts.update: if opts.release_type == "major": comp_versions[component + "_major"] += 1 comp_versions[component + "_minor"] = 0 comp_versions[component + "_beta"] = 0 elif opts.release_type == "minor": comp_versions[component + "_minor"] += 1 comp_versions[component + "_beta"] = 0 elif opts.release_type == "beta": comp_versions[component + "_beta"] += 1 #if opts.release_type == "beta": comp_versions [component + "_version"] = \ str (comp_versions[component + "_major"]) + '.' + \ str (comp_versions[component + "_minor"]) + '.' + \ str (comp_versions[component + "_beta"]) # else: # comp_versions [component + "_version"] = \ # str (comp_versions[component + "_major"]) + '.' + \ # str (comp_versions[component + "_minor"]) def update_latest_tag (which, branch): """ Update one of the Latest_* tags externals to point the new release """ global opts root_anon = re.sub ("^https:", "svn:", opts.repo_root) propval = """ACE_wrappers %s/tags/%s/ACE ACE_wrappers/TAO %s/tags/%s/TAO ACE_wrappers/TAO/CIAO %s/tags/%s/CIAO ACE_wrappers/TAO/DAnCE %s/tags/%s/DAnCE """ % ((root_anon, branch) * 4) tagname = "Latest_" + which temp = tempfile.gettempdir () + "/" + tagname svn_client.checkout (opts.repo_root + "/tags/" + tagname, temp, False) svn_client.propset ("svn:externals", propval, temp) svn_client.checkin (temp, "Updating for release " + branch) shutil.rmtree (temp, True) def tag (): """ Tags the DOC and MPC repositories for the version """ global comp_versions, opts branch = "ACE+TAO+CIAO-%d_%d_%d" % (comp_versions["ACE_major"], comp_versions["ACE_minor"], comp_versions["ACE_beta"]) if opts.take_action: # Tag middleware svn_client.copy (opts.repo_root + "/trunk", opts.repo_root + "/tags/" + branch) # Tag MPC svn_client.copy (opts.mpc_root + "/trunk", opts.mpc_root + "/tags/" + branch) # Update latest tag # 
mcorino@remedy.nl - subversion does not seem to support propset directly # on URLs (except for some strange reason through propedit) #if opts.release_type == "major": #update_latest_tag ("Major", branch) #elif opts.release_type == "minor": #update_latest_tag ("Minor", branch) #elif opts.release_type == "beta": #update_latest_tag ("Beta", branch) #update_latest_tag ("Micro", branch) #if comp_versions["ACE_beta"] == 1: #update_latest_tag ("BFO", branch) else: print "Creating tags:\n" print opts.repo_root + "/trunk -> " + opts.repo_root + "/tags/" + branch + "\n" print opts.mpc_root + "/trunk -> " + opts.mpc_root + "/tags/" + branch + "\n" ################################################## #### Packaging methods ################################################## def export_wc (stage_dir): global doc_root # Export our working copy print ("Exporting ACE") svn_client.export (doc_root + "/ACE", stage_dir + "/ACE_wrappers") print ("Exporting MPC") svn_client.export (doc_root + "/ACE/MPC", stage_dir + "/ACE_wrappers/MPC") print ("Exporting TAO") svn_client.export (doc_root + "/TAO", stage_dir + "/ACE_wrappers/TAO") print ("Exporting CIAO") svn_client.export (doc_root + "/CIAO", stage_dir + "/ACE_wrappers/TAO/CIAO") print ("Exporting DAnCE") svn_client.export (doc_root + "/DAnCE", stage_dir + "/ACE_wrappers/TAO/DAnCE") def update_packages (text_files, bin_files, stage_dir, package_dir): import os print "Updating packages...." os.chdir (stage_dir) # -g appends, -q for quiet operation zip_base_args = " -gqu " # -l causes line ending conversion for windows zip_text_args = " -l " zip_file = stage_dir + "/zip-archive.zip" # -r appends, -f specifies file. tar_args = "-uf " tar_file = stage_dir + "/tar-archive.tar" # Zip binary files print "\tAdding binary files to zip...." 
p = subprocess.Popen (shlex.split ("xargs zip " + zip_base_args + zip_file), stdin=subprocess.PIPE, stdout=subprocess.PIPE, close_fds=True) instream, outstream = (p.stdin, p.stdout) instream.write (bin_files) instream.close () outstream.close () # Need to wait for zip process spawned by popen2 to complete # before proceeding. os.wait () print "\tAdding text files to zip....." p = subprocess.Popen (shlex.split ("xargs zip " + zip_base_args + zip_text_args + zip_file), stdin=subprocess.PIPE, stdout=subprocess.PIPE, close_fds=True) instream, outstream = (p.stdin, p.stdout) instream.write (text_files) instream.close () outstream.close () # Need to wait for zip process spawned by popen2 to complete # before proceeding. os.wait () # Tar files print "\tAdding to tar file...." if (not os.path.exists (tar_file)): open(tar_file, 'w').close () p = subprocess.Popen (shlex.split ("xargs tar " + tar_args + tar_file), stdin=subprocess.PIPE, stdout=subprocess.PIPE, close_fds=True) instream, outstream = (p.stdin, p.stdout) instream.write (' ' + bin_files + ' ' + text_files) instream.close () print outstream.read () outstream.close () os.wait () def move_packages (name, stage_dir, package_dir): """ Copies the temporary files from the stage_dir to the package_dir. Renames them to name.tar and name.zip, respectively, and compresses the tarfile with gzip and bzip2. """ import shutil, os from os.path import join print "Storing packages for ", name # Take care of the zip file print "\tZip file..." target_file = join (package_dir, name + ".zip") shutil.copy (join (stage_dir, "zip-archive.zip"), target_file) ex ("md5sum " + target_file + " > " + target_file + ".md5") tar_file = join (stage_dir, "tar-archive.tar") target_file = join (package_dir, name + ".tar") # bzip print "\tBzip2 file....." shutil.copy (tar_file, target_file) ex ("bzip2 " + target_file) ex ("md5sum " + target_file + ".bz2 > " + target_file + ".bz2.md5") print "\tgzip file....." 
shutil.copy (tar_file, target_file) ex ("gzip " + target_file) ex ("md5sum " + target_file + ".gz > " + target_file + ".gz.md5") def create_file_lists (base_dir, prefix, exclude): """ Creates two lists of files: files that need CR->CRLF conversions (useful for zip files) and those that don't, excluding filies/directories found in exclude. """ import os text_files = list () bin_files = list () for root, dirs, files in os.walk (base_dir, topdown=True): # print "root", root relroot = root.replace (base_dir, "") # print "relroot", relroot if len(relroot) and relroot[0] == '/': relroot = relroot [1:] excluded = False for item in exclude: dir_item = item + '/' if relroot.startswith (dir_item) or relroot.startswith (item): # print "excluding", relroot excluded = True # else: # print relroot, "does not start with", dir_item, "or", item if excluded: continue # Remove dirs from our exclude pattern for item in dirs: # print "item", item # Remove our excludes if (item) in exclude: # print "Removing " + item + " from consideration...." dirs.remove (item) for item in files: fullitem = os.path.join (relroot, item) if fullitem in exclude or item in exclude: # print "Removing " + fullitem + " from consideration...." files.remove (item) continue else: if bin_regex.search (fullitem) is not None: bin_files.append ('"' + os.path.join (prefix, fullitem) + '"') else: text_files.append ('"' + os.path.join (prefix, fullitem) + '"') return (text_files, bin_files) def write_file_lists (comp, text, bin): outfile = open (comp + ".files", 'w') outfile.write ("\n".join (text)) outfile.write (".............\nbin files\n.............\n") outfile.write ("\n".join (bin)) outfile.close () def package (stage_dir, package_dir, decorator): """ Packages ACE, ACE+TAO, and ACE+TAO+CIAO releases of current staged tree, with decorator appended to the name of the archive. 
""" from os.path import join from os import remove from os import chdir chdir (stage_dir) text_files = list () bin_files = list () # Erase our old temp files try: # print "removing files", join (stage_dir, "zip-archive.zip"), join (stage_dir, "tar-archive.tar") remove (join (stage_dir, "zip-archive.zip")) remove (join (stage_dir, "tar-archive.tar")) except: print "error removing files", join (stage_dir, "zip-archive.zip"), join (stage_dir, "tar-archive.tar") pass # swallow any errors text_files, bin_files = create_file_lists (join (stage_dir, "ACE_wrappers"), "ACE_wrappers", ["TAO", "autom4te.cache"]) # write_file_lists ("fACE" + decorator, text_files, bin_files) update_packages ("\n".join (text_files), "\n".join (bin_files), stage_dir, package_dir) move_packages ("ACE" + decorator, stage_dir, package_dir) text_files = list () bin_files = list () # for TAO: text_files, bin_files = create_file_lists (join (stage_dir, "ACE_wrappers/TAO"), "ACE_wrappers/TAO", ["CIAO", "DAnCE", "autom4te.cache"]) # write_file_lists ("fTAO" + decorator, text_files, bin_files) update_packages ("\n".join (text_files), "\n".join (bin_files), stage_dir, package_dir) move_packages ("ACE+TAO" + decorator, stage_dir, package_dir) text_files = list () bin_files = list () # for DAnCE: text_files, bin_files = create_file_lists (join (stage_dir, "ACE_wrappers/TAO/DAnCE"), "ACE_wrappers/TAO/DAnCE", ["CIAO", "autom4te.cache"]) # write_file_lists ("fTAO" + decorator, text_files, bin_files) update_packages ("\n".join (text_files), "\n".join (bin_files), stage_dir, package_dir) move_packages ("ACE+TAO+DAnCE" + decorator, stage_dir, package_dir) text_files = list () bin_files = list () # for CIAO: text_files, bin_files = create_file_lists (join (stage_dir, "ACE_wrappers/TAO/CIAO"), "ACE_wrappers/TAO/CIAO", ["DAnCE", "autom4te.cache"]) # write_file_lists ("fCIAO" + decorator, text_files, bin_files) update_packages ("\n".join (text_files), "\n".join (bin_files), stage_dir, package_dir) move_packages 
("ACE+TAO+CIAO" + decorator, stage_dir, package_dir) def generate_workspaces (stage_dir): """ Generates workspaces in the given stage_dir """ print "Generating workspaces..." global opts import os # Make sure we are in the right directory... os.chdir (os.path.join (stage_dir, "ACE_wrappers")) # Set up our environment os.putenv ("ACE_ROOT", os.path.join (stage_dir, "ACE_wrappers")) os.putenv ("MPC_ROOT", os.path.join (stage_dir, "ACE_wrappers", "MPC")) os.putenv ("TAO_ROOT", os.path.join (stage_dir, "ACE_wrappers", "TAO")) os.putenv ("CIAO_ROOT", os.path.join (stage_dir, "ACE_wrappers", "TAO", "CIAO")) os.putenv ("DANCE_ROOT", os.path.join (stage_dir, "ACE_wrappers", "TAO", "DAnCE")) # Create option strings mpc_command = os.path.join (stage_dir, "ACE_wrappers", "bin", "mwc.pl") exclude_option = ' -exclude TAO/TAO_*.mwc,TAO/CIAO/CIAO_*.mwc ' mpc_option = ' -recurse -hierarchy -relative ACE_ROOT=' + stage_dir + '/ACE_wrappers ' mpc_option += ' -relative TAO_ROOT=' + stage_dir + '/ACE_wrappers/TAO ' mpc_option += ' -relative CIAO_ROOT=' + stage_dir + '/ACE_wrappers/TAO/CIAO ' mpc_option += ' -relative DANCE_ROOT=' + stage_dir + '/ACE_wrappers/TAO/DAnCE ' vc10_option = ' -name_modifier *_vc10 ' vc9_option = ' -name_modifier *_vc9 ' vc8_option = ' -name_modifier *_vc8 ' redirect_option = str () if not opts.verbose: redirect_option = " >> ../mpc.log 2>&1" # Generate GNUmakefiles print "\tBootstrapping autotools support" ex ("bin/bootstrap " + redirect_option) print "\tGenerating GNUmakefiles...." ex (mpc_command + " -type gnuace " + exclude_option + mpc_option + redirect_option) print "\tGenerating VC10 solutions..." ex (mpc_command + " -type vc10 " + mpc_option + vc10_option + redirect_option) print "\tGenerating VC9 solutions..." ex (mpc_command + " -type vc9 " + mpc_option + vc9_option + redirect_option) print "\tGenerating VC8 solutions..." 
ex (mpc_command + " -type vc8 " + mpc_option + vc8_option + redirect_option) print "\tCorrecting permissions for all generated files..." ex ("find ./ -name '*.vc[p,w]' -or -name '*.bmak' -or -name '*.vcproj' -or -name '*.sln' -or -name '*.vcxproj' -or -name '*.filters' -or -name 'GNUmake*' | xargs chmod 0644") def create_kit (): """ Creates kits """ import os from os.path import join # Get version numbers for this working copy, note this will # not update the numbers. print "Getting current version information...." get_comp_versions ("ACE") get_comp_versions ("TAO") get_comp_versions ("CIAO") get_comp_versions ("DAnCE") print "Creating working directories...." stage_dir, package_dir = make_working_directories () print "Exporting working copy..." export_wc (stage_dir) ### make source only packages package (stage_dir, package_dir, "-src") generate_workspaces (stage_dir) ### create standard packages. package (stage_dir, package_dir, "") def make_working_directories (): """ Creates directories that we will be working in. In particular, we will have DOC_ROOT/stage-PID and DOC_ROOT/packages-PID """ global doc_root import os.path, os stage_dir = os.path.join (doc_root, "stage-" + str (os.getpid ())) package_dir = os.path.join (doc_root, "package-" + str (os.getpid ())) os.mkdir (stage_dir) os.mkdir (package_dir) return (stage_dir, package_dir) def main (): global opts if opts.action == "tag": print "Tagging a " + opts.release_type + " release." raw_input ("Press enter to continue") check_workspace () get_and_update_versions () tag () else: print "Creating a kit." raw_input ("Press enter to continue") create_kit () if __name__ == "__main__": (opts, args) = parse_args () if check_environment() is not True: exit (1) main ()
KurtJacobson/hazzy
refs/heads/master
hazzy/utilities/constants.py
2
#!/usr/bin/env python # Copyright (c) 2017 Kurt Jacobson # <kurtcjacobson@gmail.com> # # This file is part of Hazzy. # # Hazzy is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # Hazzy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Hazzy. If not, see <http://www.gnu.org/licenses/>. # Description: # Collection of constants. # Needs to be cleaned up. Get rid on INI reading, do that with ini_info.py import os from linuxcnc import ini class Units(enumerate): IN = 1 MM = 2 CM = 3 class MessageType(enumerate): INFO = 0 WARNING = 1 ERROR = 2 class Paths(enumerate): # Hazzy Paths HAZZYDIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) MAINDIR = os.path.dirname(HAZZYDIR) UIDIR = os.path.join(HAZZYDIR, 'gui', 'ui') MODULEDIR = os.path.join(HAZZYDIR, 'modules') STYLEDIR = os.path.join(HAZZYDIR, 'themes') # LinuxCNC Paths INI_FILE = os.environ['INI_FILE_NAME'] CONFIGDIR = os.environ['CONFIG_DIR'] NC_FILE_DIR = os.environ['LINUXCNC_NCFILES_DIR'] TCLPATH = os.environ['LINUXCNC_TCL_DIR']
UWPCE-PythonCert/IntroPython2016
refs/heads/master
students/crobison/session05/exceptions_lab.py
3
#!/usr/bin/env python3
# Charles Robison
# 2016.11.08
# Exceptions Lab


def none(prompt_fn=input):
    """Ask who the next President will be and echo the answer.

    Returns None in every case: after a successful answer, and when the
    prompt is aborted (Ctrl-C or end-of-input) instead of letting the
    interpreter die with a traceback.

    prompt_fn -- callable used to read the answer; defaults to the
    built-in input(). Parameterized so the prompt can be driven
    programmatically (e.g. from tests) without touching stdin.
    """
    try:
        answer = prompt_fn('Who will be the next President? ')
        print('Really? You think ' + answer + "?!")
    except (KeyboardInterrupt, EOFError):
        # Original only caught KeyboardInterrupt; a closed/empty stdin
        # raises EOFError and crashed the script. Both aborts are an
        # expected outcome here, not an error.
        return None


if __name__ == '__main__':
    # Guarded so importing this module no longer triggers an interactive
    # prompt as a side effect.
    none()
bala4901/odoo
refs/heads/master
openerp/addons/base/workflow/workflow.py
33
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Business Applications
#    Copyright (C) 2004-2014 OpenERP S.A. (<http://openerp.com>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.workflow


class workflow(osv.osv):
    # Workflow definition model, mapped onto the legacy "wkf" table.
    _name = "workflow"
    _table = "wkf"
    _order = "name"
    _columns = {
        'name': fields.char('Name', size=64, required=True),
        # 'osv' holds the model name this workflow is attached to.
        'osv': fields.char('Resource Object', size=64, required=True,select=True),
        # When set, an instance is spawned automatically on record creation.
        'on_create': fields.boolean('On Create', select=True),
        'activities': fields.one2many('workflow.activity', 'wkf_id', 'Activities'),
    }
    _defaults = {
        'on_create': lambda *a: True
    }

    def write(self, cr, user, ids, vals, context=None):
        # Any change to a workflow definition invalidates the in-memory
        # workflow cache before delegating to the standard write.
        if not context:
            context={}
        openerp.workflow.clear_cache(cr, user)
        return super(workflow, self).write(cr, user, ids, vals, context=context)

    def get_active_workitems(self, cr, uid, res, res_id, context=None):
        """Return the workflow definition row for model `res` plus a map of
        act_id -> workitem count for the first instance on record `res_id`.

        Raw-cursor queries against the wkf / wkf_instance / wkf_workitem
        tables; returns {'wkf': <dict or None>, 'workitems': {...}}.
        """
        cr.execute('select * from wkf where osv=%s limit 1',(res,))
        wkfinfo = cr.dictfetchone()
        workitems = []
        if wkfinfo:
            cr.execute('SELECT id FROM wkf_instance WHERE res_id=%s AND wkf_id=%s ORDER BY state LIMIT 1',
                       (res_id, wkfinfo['id']))
            inst_id = cr.fetchone()
            # NOTE(review): inst_id is the whole fetchone() row (a tuple),
            # passed as the inst_id parameter below -- presumably the DB
            # adapter unwraps the 1-tuple; confirm before refactoring.
            cr.execute('select act_id,count(*) from wkf_workitem where inst_id=%s group by act_id', (inst_id,))
            workitems = dict(cr.fetchall())
        return {'wkf': wkfinfo, 'workitems': workitems}

    def create(self, cr, user, vals, context=None):
        # Creating a new workflow definition also invalidates the cache.
        if not context:
            context={}
        openerp.workflow.clear_cache(cr, user)
        return super(workflow, self).create(cr, user, vals, context=context)
workflow()


class wkf_activity(osv.osv):
    # A node (activity) inside a workflow graph.
    _name = "workflow.activity"
    _table = "wkf_activity"
    _order = "name"
    _columns = {
        'name': fields.char('Name', size=64, required=True),
        'wkf_id': fields.many2one('workflow', 'Workflow', required=True, select=True, ondelete='cascade'),
        # How outgoing / incoming transitions are combined.
        'split_mode': fields.selection([('XOR', 'Xor'), ('OR','Or'), ('AND','And')], 'Split Mode', size=3, required=True),
        'join_mode': fields.selection([('XOR', 'Xor'), ('AND', 'And')], 'Join Mode', size=3, required=True),
        # What the activity does when reached.
        'kind': fields.selection([('dummy', 'Dummy'), ('function', 'Function'), ('subflow', 'Subflow'), ('stopall', 'Stop All')], 'Kind', size=64, required=True),
        'action': fields.text('Python Action'),
        'action_id': fields.many2one('ir.actions.server', 'Server Action', ondelete='set null'),
        'flow_start': fields.boolean('Flow Start'),
        'flow_stop': fields.boolean('Flow Stop'),
        'subflow_id': fields.many2one('workflow', 'Subflow'),
        'signal_send': fields.char('Signal (subflow.*)', size=32),
        'out_transitions': fields.one2many('workflow.transition', 'act_from', 'Outgoing Transitions'),
        'in_transitions': fields.one2many('workflow.transition', 'act_to', 'Incoming Transitions'),
    }
    _defaults = {
        'kind': lambda *a: 'dummy',
        'join_mode': lambda *a: 'XOR',
        'split_mode': lambda *a: 'XOR',
    }

    def unlink(self, cr, uid, ids, context=None):
        # Refuse deletion while live workitems still reference any of these
        # activities; '_force_unlink' in context bypasses the guard.
        if context is None:
            context = {}
        if not context.get('_force_unlink') and self.pool.get('workflow.workitem').search(cr, uid, [('act_id', 'in', ids)]):
            raise osv.except_osv(_('Operation Forbidden'),
                                 _('Please make sure no workitems refer to an activity before deleting it!'))
        super(wkf_activity, self).unlink(cr, uid, ids, context=context)
wkf_activity()
class wkf_transition(osv.osv):
    """Workflow edge (``wkf_transition`` table) between two activities.

    A transition is followed when its source activity is done, its
    ``condition`` evaluates to true and — when set — its ``signal`` has been
    received by a user belonging to ``group_id``.
    """
    _table = "wkf_transition"
    _name = "workflow.transition"
    _rec_name = 'signal'
    _columns = {
        'trigger_model': fields.char('Trigger Object', size=128),
        'trigger_expr_id': fields.char('Trigger Expression', size=128),
        'signal': fields.char('Signal (Button Name)', size=64,
                              help="When the operation of transition comes from a button pressed in the client form, "\
                              "signal tests the name of the pressed button. If signal is NULL, no button is necessary to validate this transition."),
        'group_id': fields.many2one('res.groups', 'Group Required',
                                    help="The group that a user must have to be authorized to validate this transition."),
        'condition': fields.char('Condition', required=True, size=128,
                                 help="Expression to be satisfied if we want the transition done."),
        'act_from': fields.many2one('workflow.activity', 'Source Activity', required=True, select=True, ondelete='cascade',
                                    help="Source activity. When this activity is over, the condition is tested to determine if we can start the ACT_TO activity."),
        'act_to': fields.many2one('workflow.activity', 'Destination Activity', required=True, select=True, ondelete='cascade',
                                  help="The destination activity."),
        'wkf_id': fields.related('act_from','wkf_id', type='many2one', relation='workflow', string='Workflow', select=True),
    }
    _defaults = {
        'condition': lambda *a: 'True',
    }

    def name_get(self, cr, uid, ids, context=None):
        """Display a transition as its signal name, or as
        '<source>+<destination>' when no signal is set."""
        names = []
        for transition in self.browse(cr, uid, ids, context=context):
            if transition.signal == False:
                label = (transition.act_from.name) + '+' + (transition.act_to.name)
            else:
                label = transition.signal
            names.append((transition.id, label))
        return names

    def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=100):
        """Search transitions by source or destination activity name."""
        if args is None:
            args = []
        if not name:
            return super(wkf_transition, self).name_search(cr, user, name, args=args, operator=operator, context=context, limit=limit)
        domain = ['|', ('act_from', operator, name), ('act_to', operator, name)]
        matching_ids = self.search(cr, user, domain + args, limit=limit)
        return self.name_get(cr, user, matching_ids, context=context)
wkf_transition()

class wkf_instance(osv.osv):
    """Runtime workflow instance (``wkf_instance`` table): one per record
    for which a workflow has been started."""
    _table = "wkf_instance"
    _name = "workflow.instance"
    _rec_name = 'res_type'
    # Engine-managed table: skip the create/write audit columns.
    _log_access = False
    _columns = {
        'uid': fields.integer('User'), # FIXME no constraint??
        'wkf_id': fields.many2one('workflow', 'Workflow', ondelete='cascade', select=True),
        # Id of the driven record, within the model named by res_type.
        'res_id': fields.integer('Resource ID'),
        'res_type': fields.char('Resource Object', size=64),
        'state': fields.char('Status', size=32),
        'transition_ids': fields.many2many('workflow.transition', 'wkf_witm_trans', 'inst_id', 'trans_id'),
    }

    def _auto_init(self, cr, context=None):
        # After the ORM creates/updates the table, add the two composite
        # indexes the workflow engine queries on (the ORM only creates
        # single-column indexes via select=True).
        super(wkf_instance, self)._auto_init(cr, context)
        cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'wkf_instance_res_type_res_id_state_index\'')
        if not cr.fetchone():
            cr.execute('CREATE INDEX wkf_instance_res_type_res_id_state_index ON wkf_instance (res_type, res_id, state)')
        cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'wkf_instance_res_id_wkf_id_index\'')
        if not cr.fetchone():
            cr.execute('CREATE INDEX wkf_instance_res_id_wkf_id_index ON wkf_instance (res_id, wkf_id)')

wkf_instance()

class wkf_workitem(osv.osv):
    """Token of a running instance (``wkf_workitem``): marks the activity an
    instance currently sits in."""
    _table = "wkf_workitem"
    _name = "workflow.workitem"
    _log_access = False
    _rec_name = 'state'
    _columns = {
        'act_id': fields.many2one('workflow.activity', 'Activity', required=True, ondelete="cascade", select=True),
        'wkf_id': fields.related('act_id','wkf_id', type='many2one', relation='workflow', string='Workflow'),
        # Child instance, set when the activity kind is 'subflow'.
        'subflow_id': fields.many2one('workflow.instance', 'Subflow', ondelete="cascade", select=True),
        'inst_id': fields.many2one('workflow.instance', 'Instance', required=True, ondelete="cascade", select=True),
        'state': fields.char('Status', size=64, select=True),
    }
wkf_workitem()

class wkf_triggers(osv.osv):
    """Cross-record trigger (``wkf_triggers``): requests re-evaluation of a
    workitem when another record (model/res_id pair) changes."""
    _table = "wkf_triggers"
    _name = "workflow.triggers"
    _log_access = False
    _columns = {
        'res_id': fields.integer('Resource ID', size=128),
        'model': fields.char('Object', size=128),
        'instance_id': fields.many2one('workflow.instance', 'Destination Instance', ondelete="cascade"),
        'workitem_id': fields.many2one('workflow.workitem', 'Workitem', required=True, ondelete="cascade"),
    }

    def _auto_init(self, cr, context=None):
        # Composite (res_id, model) index used when looking up triggers;
        # created manually for the same reason as in wkf_instance.
        super(wkf_triggers, self)._auto_init(cr, context)
        cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'wkf_triggers_res_id_model_index\'')
        if not cr.fetchone():
            cr.execute('CREATE INDEX wkf_triggers_res_id_model_index ON wkf_triggers (res_id, model)')
wkf_triggers()

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
ppmt/Crust
refs/heads/master
flask/lib/python2.7/site-packages/pip/_vendor/distlib/util.py
203
# # Copyright (C) 2012-2014 The Python Software Foundation. # See LICENSE.txt and CONTRIBUTORS.txt. # import codecs from collections import deque import contextlib import csv from glob import iglob as std_iglob import io import json import logging import os import py_compile import re import shutil import socket import ssl import subprocess import sys import tarfile import tempfile try: import threading except ImportError: import dummy_threading as threading import time from . import DistlibException from .compat import (string_types, text_type, shutil, raw_input, StringIO, cache_from_source, urlopen, httplib, xmlrpclib, splittype, HTTPHandler, HTTPSHandler as BaseHTTPSHandler, BaseConfigurator, valid_ident, Container, configparser, URLError, match_hostname, CertificateError, ZipFile) logger = logging.getLogger(__name__) # # Requirement parsing code for name + optional constraints + optional extras # # e.g. 'foo >= 1.2, < 2.0 [bar, baz]' # # The regex can seem a bit hairy, so we build it up out of smaller pieces # which are manageable. # COMMA = r'\s*,\s*' COMMA_RE = re.compile(COMMA) IDENT = r'(\w|[.-])+' EXTRA_IDENT = r'(\*|:(\*|\w+):|' + IDENT + ')' VERSPEC = IDENT + r'\*?' 
RELOP = '([<>=!~]=)|[<>]'
#
# The first relop is optional - if absent, will be taken as '~='
#
BARE_CONSTRAINTS = ('(' + RELOP + r')?\s*(' + VERSPEC + ')(' + COMMA + '(' +
                    RELOP + r')\s*(' + VERSPEC + '))*')

# Raw string: '\s' is a regex token here, not a Python escape.  The non-raw
# form produced the same value but relies on an invalid escape sequence,
# which newer Pythons warn about (and will eventually reject).
DIRECT_REF = r'(from\s+(?P<diref>.*))'

#
# Either the bare constraints or the bare constraints in parentheses
#
CONSTRAINTS = (r'\(\s*(?P<c1>' + BARE_CONSTRAINTS + '|' + DIRECT_REF +
               r')\s*\)|(?P<c2>' + BARE_CONSTRAINTS + r'\s*)')

EXTRA_LIST = EXTRA_IDENT + '(' + COMMA + EXTRA_IDENT + ')*'
EXTRAS = r'\[\s*(?P<ex>' + EXTRA_LIST + r')?\s*\]'
REQUIREMENT = ('(?P<dn>' + IDENT + r')\s*(' + EXTRAS + r'\s*)?(\s*' +
               CONSTRAINTS + ')?$')
REQUIREMENT_RE = re.compile(REQUIREMENT)

#
# Used to scan through the constraints
#
RELOP_IDENT = '(?P<op>' + RELOP + r')\s*(?P<vn>' + VERSPEC + ')'
RELOP_IDENT_RE = re.compile(RELOP_IDENT)


def parse_requirement(s):
    """Parse a requirement like ``'foo >= 1.2, < 2.0 [bar, baz]'``.

    :param s: The requirement string.
    :return: A ``Container`` with ``name``, ``constraints`` (list of
             (op, version) tuples or None), ``extras`` (list or None),
             ``requirement`` (normalised form), ``source`` (the input) and
             ``url`` (for direct references), or ``None`` when *s* does not
             match the requirement grammar.
    """
    def get_constraint(m):
        # One '(op, version)' pair from a RELOP_IDENT match.
        d = m.groupdict()
        return d['op'], d['vn']

    result = None
    m = REQUIREMENT_RE.match(s)
    if m:
        d = m.groupdict()
        name = d['dn']
        cons = d['c1'] or d['c2']
        if not d['diref']:
            url = None
        else:
            # direct reference: 'name (from <url>)' carries no constraints
            cons = None
            url = d['diref'].strip()
        if not cons:
            cons = None
            rs = d['dn']
        else:
            # A bare leading version means '~=' (compatible release).
            if cons[0] not in '<>!=':
                cons = '~=' + cons
            iterator = RELOP_IDENT_RE.finditer(cons)
            cons = [get_constraint(m) for m in iterator]
            rs = '%s (%s)' % (name, ', '.join(['%s %s' % con for con in cons]))
        if not d['ex']:
            extras = None
        else:
            extras = COMMA_RE.split(d['ex'])
        result = Container(name=name, constraints=cons, extras=extras,
                           requirement=rs, source=s, url=url)
    return result


def get_resources_dests(resources_root, rules):
    """Find destinations for resources files"""
    def get_rel_path(base, path):
        # normalizes and returns a lstripped-/-separated path
        base = base.replace(os.path.sep, '/')
        path = path.replace(os.path.sep, '/')
        assert path.startswith(base)
        return path[len(base):].lstrip('/')

    destinations = {}
    for base, suffix, dest in rules:
        prefix = os.path.join(resources_root, base)
        for abs_base in iglob(prefix):
            abs_glob = os.path.join(abs_base, suffix)
            for abs_path in iglob(abs_glob):
                resource_file = get_rel_path(resources_root, abs_path)
                if dest is None:
                    # a None dest removes any previously-recorded entry
                    destinations.pop(resource_file, None)
                else:
                    rel_path = get_rel_path(abs_base, abs_path)
                    rel_dest = dest.replace(os.path.sep, '/').rstrip('/')
                    destinations[resource_file] = rel_dest + '/' + rel_path
    return destinations


def in_venv():
    """Return True when running inside a virtual environment (either a
    virtualenv, via sys.real_prefix, or a PEP 405 venv)."""
    if hasattr(sys, 'real_prefix'):
        # virtualenv venvs
        result = True
    else:
        # PEP 405 venvs
        result = sys.prefix != getattr(sys, 'base_prefix', sys.prefix)
    return result


def get_executable():
    """Return the path of the running Python interpreter."""
    # The __PYVENV_LAUNCHER__ dance is apparently no longer needed, as
    # changes to the stub launcher mean that sys.executable always points
    # to the stub on OS X
    #    if sys.platform == 'darwin' and ('__PYVENV_LAUNCHER__'
    #                                     in os.environ):
    #        result = os.environ['__PYVENV_LAUNCHER__']
    #    else:
    #        result = sys.executable
    #    return result
    return sys.executable


def proceed(prompt, allowed_chars, error_prompt=None, default=None):
    """Prompt until the first character of the reply is in *allowed_chars*;
    return that character, lower-cased.  Empty input uses *default*."""
    p = prompt
    while True:
        s = raw_input(p)
        p = prompt
        if not s and default:
            s = default
        if s:
            c = s[0].lower()
            if c in allowed_chars:
                break
            if error_prompt:
                # re-prompt with an explanation of the rejected character
                p = '%c: %s\n%s' % (c, error_prompt, prompt)
    return c


def extract_by_key(d, keys):
    """Return the sub-dict of *d* restricted to *keys* (an iterable, or a
    whitespace-separated string)."""
    if isinstance(keys, string_types):
        keys = keys.split()
    result = {}
    for key in keys:
        if key in d:
            result[key] = d[key]
    return result

def read_exports(stream):
    if sys.version_info[0] >= 3:
        # needs to be a text stream
        stream = codecs.getreader('utf-8')(stream)
    # Try to load as JSON, falling back on legacy format
    data = stream.read()
    stream = StringIO(data)
    try:
        data = json.load(stream)
        result = data['extensions']['python.exports']['exports']
        for group, entries in result.items():
            for k, v in entries.items():
                s = '%s = %s' % (k, v)
                entry = get_export_entry(s)
                assert entry is not None
                entries[k] = entry
        return result
    except Exception:
        stream.seek(0, 0)
        cp = configparser.ConfigParser()
        if hasattr(cp, 'read_file'):
            cp.read_file(stream)
else: cp.readfp(stream) result = {} for key in cp.sections(): result[key] = entries = {} for name, value in cp.items(key): s = '%s = %s' % (name, value) entry = get_export_entry(s) assert entry is not None #entry.dist = self entries[name] = entry return result def write_exports(exports, stream): if sys.version_info[0] >= 3: # needs to be a text stream stream = codecs.getwriter('utf-8')(stream) cp = configparser.ConfigParser() for k, v in exports.items(): # TODO check k, v for valid values cp.add_section(k) for entry in v.values(): if entry.suffix is None: s = entry.prefix else: s = '%s:%s' % (entry.prefix, entry.suffix) if entry.flags: s = '%s [%s]' % (s, ', '.join(entry.flags)) cp.set(k, entry.name, s) cp.write(stream) @contextlib.contextmanager def tempdir(): td = tempfile.mkdtemp() try: yield td finally: shutil.rmtree(td) @contextlib.contextmanager def chdir(d): cwd = os.getcwd() try: os.chdir(d) yield finally: os.chdir(cwd) @contextlib.contextmanager def socket_timeout(seconds=15): cto = socket.getdefaulttimeout() try: socket.setdefaulttimeout(seconds) yield finally: socket.setdefaulttimeout(cto) class cached_property(object): def __init__(self, func): self.func = func #for attr in ('__name__', '__module__', '__doc__'): # setattr(self, attr, getattr(func, attr, None)) def __get__(self, obj, cls=None): if obj is None: return self value = self.func(obj) object.__setattr__(obj, self.func.__name__, value) #obj.__dict__[self.func.__name__] = value = self.func(obj) return value def convert_path(pathname): """Return 'pathname' as a name that will work on the native filesystem. The path is split on '/' and put back together again using the current directory separator. Needed because filenames in the setup script are always supplied in Unix style, and have to be converted to the local convention before we can actually use them in the filesystem. Raises ValueError on non-Unix-ish systems if 'pathname' either starts or ends with a slash. 
""" if os.sep == '/': return pathname if not pathname: return pathname if pathname[0] == '/': raise ValueError("path '%s' cannot be absolute" % pathname) if pathname[-1] == '/': raise ValueError("path '%s' cannot end with '/'" % pathname) paths = pathname.split('/') while os.curdir in paths: paths.remove(os.curdir) if not paths: return os.curdir return os.path.join(*paths) class FileOperator(object): def __init__(self, dry_run=False): self.dry_run = dry_run self.ensured = set() self._init_record() def _init_record(self): self.record = False self.files_written = set() self.dirs_created = set() def record_as_written(self, path): if self.record: self.files_written.add(path) def newer(self, source, target): """Tell if the target is newer than the source. Returns true if 'source' exists and is more recently modified than 'target', or if 'source' exists and 'target' doesn't. Returns false if both exist and 'target' is the same age or younger than 'source'. Raise PackagingFileError if 'source' does not exist. Note that this test is not very accurate: files created in the same second will have the same "age". """ if not os.path.exists(source): raise DistlibException("file '%r' does not exist" % os.path.abspath(source)) if not os.path.exists(target): return True return os.stat(source).st_mtime > os.stat(target).st_mtime def copy_file(self, infile, outfile, check=True): """Copy a file respecting dry-run and force flags. 
""" self.ensure_dir(os.path.dirname(outfile)) logger.info('Copying %s to %s', infile, outfile) if not self.dry_run: msg = None if check: if os.path.islink(outfile): msg = '%s is a symlink' % outfile elif os.path.exists(outfile) and not os.path.isfile(outfile): msg = '%s is a non-regular file' % outfile if msg: raise ValueError(msg + ' which would be overwritten') shutil.copyfile(infile, outfile) self.record_as_written(outfile) def copy_stream(self, instream, outfile, encoding=None): assert not os.path.isdir(outfile) self.ensure_dir(os.path.dirname(outfile)) logger.info('Copying stream %s to %s', instream, outfile) if not self.dry_run: if encoding is None: outstream = open(outfile, 'wb') else: outstream = codecs.open(outfile, 'w', encoding=encoding) try: shutil.copyfileobj(instream, outstream) finally: outstream.close() self.record_as_written(outfile) def write_binary_file(self, path, data): self.ensure_dir(os.path.dirname(path)) if not self.dry_run: with open(path, 'wb') as f: f.write(data) self.record_as_written(path) def write_text_file(self, path, data, encoding): self.ensure_dir(os.path.dirname(path)) if not self.dry_run: with open(path, 'wb') as f: f.write(data.encode(encoding)) self.record_as_written(path) def set_mode(self, bits, mask, files): if os.name == 'posix': # Set the executable bits (owner, group, and world) on # all the files specified. 
for f in files: if self.dry_run: logger.info("changing mode of %s", f) else: mode = (os.stat(f).st_mode | bits) & mask logger.info("changing mode of %s to %o", f, mode) os.chmod(f, mode) set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f) def ensure_dir(self, path): path = os.path.abspath(path) if path not in self.ensured and not os.path.exists(path): self.ensured.add(path) d, f = os.path.split(path) self.ensure_dir(d) logger.info('Creating %s' % path) if not self.dry_run: os.mkdir(path) if self.record: self.dirs_created.add(path) def byte_compile(self, path, optimize=False, force=False, prefix=None): dpath = cache_from_source(path, not optimize) logger.info('Byte-compiling %s to %s', path, dpath) if not self.dry_run: if force or self.newer(path, dpath): if not prefix: diagpath = None else: assert path.startswith(prefix) diagpath = path[len(prefix):] py_compile.compile(path, dpath, diagpath, True) # raise error self.record_as_written(dpath) return dpath def ensure_removed(self, path): if os.path.exists(path): if os.path.isdir(path) and not os.path.islink(path): logger.debug('Removing directory tree at %s', path) if not self.dry_run: shutil.rmtree(path) if self.record: if path in self.dirs_created: self.dirs_created.remove(path) else: if os.path.islink(path): s = 'link' else: s = 'file' logger.debug('Removing %s %s', s, path) if not self.dry_run: os.remove(path) if self.record: if path in self.files_written: self.files_written.remove(path) def is_writable(self, path): result = False while not result: if os.path.exists(path): result = os.access(path, os.W_OK) break parent = os.path.dirname(path) if parent == path: break path = parent return result def commit(self): """ Commit recorded changes, turn off recording, return changes. 
""" assert self.record result = self.files_written, self.dirs_created self._init_record() return result def rollback(self): if not self.dry_run: for f in list(self.files_written): if os.path.exists(f): os.remove(f) # dirs should all be empty now, except perhaps for # __pycache__ subdirs # reverse so that subdirs appear before their parents dirs = sorted(self.dirs_created, reverse=True) for d in dirs: flist = os.listdir(d) if flist: assert flist == ['__pycache__'] sd = os.path.join(d, flist[0]) os.rmdir(sd) os.rmdir(d) # should fail if non-empty self._init_record() def resolve(module_name, dotted_path): if module_name in sys.modules: mod = sys.modules[module_name] else: mod = __import__(module_name) if dotted_path is None: result = mod else: parts = dotted_path.split('.') result = getattr(mod, parts.pop(0)) for p in parts: result = getattr(result, p) return result class ExportEntry(object): def __init__(self, name, prefix, suffix, flags): self.name = name self.prefix = prefix self.suffix = suffix self.flags = flags @cached_property def value(self): return resolve(self.prefix, self.suffix) def __repr__(self): return '<ExportEntry %s = %s:%s %s>' % (self.name, self.prefix, self.suffix, self.flags) def __eq__(self, other): if not isinstance(other, ExportEntry): result = False else: result = (self.name == other.name and self.prefix == other.prefix and self.suffix == other.suffix and self.flags == other.flags) return result __hash__ = object.__hash__ ENTRY_RE = re.compile(r'''(?P<name>(\w|[-.])+) \s*=\s*(?P<callable>(\w+)([:\.]\w+)*) \s*(\[\s*(?P<flags>\w+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])? 
''', re.VERBOSE) def get_export_entry(specification): m = ENTRY_RE.search(specification) if not m: result = None if '[' in specification or ']' in specification: raise DistlibException('Invalid specification ' '%r' % specification) else: d = m.groupdict() name = d['name'] path = d['callable'] colons = path.count(':') if colons == 0: prefix, suffix = path, None else: if colons != 1: raise DistlibException('Invalid specification ' '%r' % specification) prefix, suffix = path.split(':') flags = d['flags'] if flags is None: if '[' in specification or ']' in specification: raise DistlibException('Invalid specification ' '%r' % specification) flags = [] else: flags = [f.strip() for f in flags.split(',')] result = ExportEntry(name, prefix, suffix, flags) return result def get_cache_base(suffix=None): """ Return the default base location for distlib caches. If the directory does not exist, it is created. Use the suffix provided for the base directory, and default to '.distlib' if it isn't provided. On Windows, if LOCALAPPDATA is defined in the environment, then it is assumed to be a directory, and will be the parent directory of the result. On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home directory - using os.expanduser('~') - will be the parent directory of the result. The result is just the directory '.distlib' in the parent directory as determined above, or with the name specified with ``suffix``. 
""" if suffix is None: suffix = '.distlib' if os.name == 'nt' and 'LOCALAPPDATA' in os.environ: result = os.path.expandvars('$localappdata') else: # Assume posix, or old Windows result = os.path.expanduser('~') # we use 'isdir' instead of 'exists', because we want to # fail if there's a file with that name if os.path.isdir(result): usable = os.access(result, os.W_OK) if not usable: logger.warning('Directory exists but is not writable: %s', result) else: try: os.makedirs(result) usable = True except OSError: logger.warning('Unable to create %s', result, exc_info=True) usable = False if not usable: result = tempfile.mkdtemp() logger.warning('Default location unusable, using %s', result) return os.path.join(result, suffix) def path_to_cache_dir(path): """ Convert an absolute path to a directory name for use in a cache. The algorithm used is: #. On Windows, any ``':'`` in the drive is replaced with ``'---'``. #. Any occurrence of ``os.sep`` is replaced with ``'--'``. #. ``'.cache'`` is appended. 
    """
    d, p = os.path.splitdrive(os.path.abspath(path))
    if d:
        d = d.replace(':', '---')
    p = p.replace(os.sep, '--')
    return d + p + '.cache'


def ensure_slash(s):
    # Idempotently terminate a URL/path fragment with '/'.
    if not s.endswith('/'):
        return s + '/'
    return s


def parse_credentials(netloc):
    """Split 'user:pass@host' into (username, password, netloc-without-auth).

    Missing components are returned as None.
    """
    username = password = None
    if '@' in netloc:
        prefix, netloc = netloc.split('@', 1)
        if ':' not in prefix:
            username = prefix
        else:
            username, password = prefix.split(':', 1)
    return username, password, netloc


def get_process_umask():
    # The umask can only be read by setting it, so set-and-restore.
    result = os.umask(0o22)
    os.umask(result)
    return result

def is_string_sequence(seq):
    """Return True if every element of *seq* is a string.

    Note: asserts that seq is non-empty (i stays None for an empty input).
    """
    result = True
    i = None
    for i, s in enumerate(seq):
        if not isinstance(s, string_types):
            result = False
            break
    assert i is not None
    return result

# 'name-version' splitter for dist filenames, e.g. 'foo-1.0' (case-insensitive).
PROJECT_NAME_AND_VERSION = re.compile('([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-'
                                      '([a-z0-9_.+-]+)', re.I)
# Trailing '-pyX.Y' marker in a dist filename.
PYTHON_VERSION = re.compile(r'-py(\d\.?\d?)')


def split_filename(filename, project_name=None):
    """
    Extract name, version, python version from a filename (no extension)

    Return name, version, pyver or None
    """
    result = None
    pyver = None
    # Strip a '-pyX.Y' suffix first, remembering the version it names.
    m = PYTHON_VERSION.search(filename)
    if m:
        pyver = m.group(1)
        filename = filename[:m.start()]
    # Prefer an exact project-name prefix when the caller supplies one.
    if project_name and len(filename) > len(project_name) + 1:
        m = re.match(re.escape(project_name) + r'\b', filename)
        if m:
            n = m.end()
            result = filename[:n], filename[n + 1:], pyver
    if result is None:
        # Fall back to the generic name-version pattern.
        m = PROJECT_NAME_AND_VERSION.match(filename)
        if m:
            result = m.group(1), m.group(3), pyver
    return result

# Allow spaces in name because of legacy dists like "Twisted Core"
NAME_VERSION_RE = re.compile(r'(?P<name>[\w .-]+)\s*'
                             r'\(\s*(?P<ver>[^\s)]+)\)$')

def parse_name_and_version(p):
    """
    A utility method used to get name and version from a string.
    From e.g. a Provides-Dist value.

    :param p: A value in a form 'foo (1.0)'
    :return: The name and version as a tuple.
""" m = NAME_VERSION_RE.match(p) if not m: raise DistlibException('Ill-formed name/version string: \'%s\'' % p) d = m.groupdict() return d['name'].strip().lower(), d['ver'] def get_extras(requested, available): result = set() requested = set(requested or []) available = set(available or []) if '*' in requested: requested.remove('*') result |= available for r in requested: if r == '-': result.add(r) elif r.startswith('-'): unwanted = r[1:] if unwanted not in available: logger.warning('undeclared extra: %s' % unwanted) if unwanted in result: result.remove(unwanted) else: if r not in available: logger.warning('undeclared extra: %s' % r) result.add(r) return result # # Extended metadata functionality # def _get_external_data(url): result = {} try: # urlopen might fail if it runs into redirections, # because of Python issue #13696. Fixed in locators # using a custom redirect handler. resp = urlopen(url) headers = resp.info() if headers.get('Content-Type') != 'application/json': logger.debug('Unexpected response for JSON request') else: reader = codecs.getreader('utf-8')(resp) #data = reader.read().decode('utf-8') #result = json.loads(data) result = json.load(reader) except Exception as e: logger.exception('Failed to get external data for %s: %s', url, e) return result def get_project_data(name): url = ('https://www.red-dove.com/pypi/projects/' '%s/%s/project.json' % (name[0].upper(), name)) result = _get_external_data(url) return result def get_package_data(name, version): url = ('https://www.red-dove.com/pypi/projects/' '%s/%s/package-%s.json' % (name[0].upper(), name, version)) return _get_external_data(url) class Cache(object): """ A class implementing a cache for resources that need to live in the file system e.g. shared libraries. This class was moved from resources to here because it could be used by other modules, e.g. the wheel module. """ def __init__(self, base): """ Initialise an instance. :param base: The base directory where the cache should be located. 
""" # we use 'isdir' instead of 'exists', because we want to # fail if there's a file with that name if not os.path.isdir(base): os.makedirs(base) if (os.stat(base).st_mode & 0o77) != 0: logger.warning('Directory \'%s\' is not private', base) self.base = os.path.abspath(os.path.normpath(base)) def prefix_to_dir(self, prefix): """ Converts a resource prefix to a directory name in the cache. """ return path_to_cache_dir(prefix) def clear(self): """ Clear the cache. """ not_removed = [] for fn in os.listdir(self.base): fn = os.path.join(self.base, fn) try: if os.path.islink(fn) or os.path.isfile(fn): os.remove(fn) elif os.path.isdir(fn): shutil.rmtree(fn) except Exception: not_removed.append(fn) return not_removed class EventMixin(object): """ A very simple publish/subscribe system. """ def __init__(self): self._subscribers = {} def add(self, event, subscriber, append=True): """ Add a subscriber for an event. :param event: The name of an event. :param subscriber: The subscriber to be added (and called when the event is published). :param append: Whether to append or prepend the subscriber to an existing subscriber list for the event. """ subs = self._subscribers if event not in subs: subs[event] = deque([subscriber]) else: sq = subs[event] if append: sq.append(subscriber) else: sq.appendleft(subscriber) def remove(self, event, subscriber): """ Remove a subscriber for an event. :param event: The name of an event. :param subscriber: The subscriber to be removed. """ subs = self._subscribers if event not in subs: raise ValueError('No subscribers: %r' % event) subs[event].remove(subscriber) def get_subscribers(self, event): """ Return an iterator for the subscribers for an event. :param event: The event to return subscribers for. """ return iter(self._subscribers.get(event, ())) def publish(self, event, *args, **kwargs): """ Publish a event and return a list of values returned by its subscribers. :param event: The event to publish. 
:param args: The positional arguments to pass to the event's subscribers. :param kwargs: The keyword arguments to pass to the event's subscribers. """ result = [] for subscriber in self.get_subscribers(event): try: value = subscriber(event, *args, **kwargs) except Exception: logger.exception('Exception during event publication') value = None result.append(value) logger.debug('publish %s: args = %s, kwargs = %s, result = %s', event, args, kwargs, result) return result # # Simple sequencing # class Sequencer(object): def __init__(self): self._preds = {} self._succs = {} self._nodes = set() # nodes with no preds/succs def add_node(self, node): self._nodes.add(node) def remove_node(self, node, edges=False): if node in self._nodes: self._nodes.remove(node) if edges: for p in set(self._preds.get(node, ())): self.remove(p, node) for s in set(self._succs.get(node, ())): self.remove(node, s) # Remove empties for k, v in list(self._preds.items()): if not v: del self._preds[k] for k, v in list(self._succs.items()): if not v: del self._succs[k] def add(self, pred, succ): assert pred != succ self._preds.setdefault(succ, set()).add(pred) self._succs.setdefault(pred, set()).add(succ) def remove(self, pred, succ): assert pred != succ try: preds = self._preds[succ] succs = self._succs[pred] except KeyError: raise ValueError('%r not a successor of anything' % succ) try: preds.remove(pred) succs.remove(succ) except KeyError: raise ValueError('%r not a successor of %r' % (succ, pred)) def is_step(self, step): return (step in self._preds or step in self._succs or step in self._nodes) def get_steps(self, final): if not self.is_step(final): raise ValueError('Unknown: %r' % final) result = [] todo = [] seen = set() todo.append(final) while todo: step = todo.pop(0) if step in seen: # if a step was already seen, # move it to the end (so it will appear earlier # when reversed on return) ... 
but not for the # final step, as that would be confusing for # users if step != final: result.remove(step) result.append(step) else: seen.add(step) result.append(step) preds = self._preds.get(step, ()) todo.extend(preds) return reversed(result) @property def strong_connections(self): #http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm index_counter = [0] stack = [] lowlinks = {} index = {} result = [] graph = self._succs def strongconnect(node): # set the depth index for this node to the smallest unused index index[node] = index_counter[0] lowlinks[node] = index_counter[0] index_counter[0] += 1 stack.append(node) # Consider successors try: successors = graph[node] except Exception: successors = [] for successor in successors: if successor not in lowlinks: # Successor has not yet been visited strongconnect(successor) lowlinks[node] = min(lowlinks[node],lowlinks[successor]) elif successor in stack: # the successor is in the stack and hence in the current # strongly connected component (SCC) lowlinks[node] = min(lowlinks[node],index[successor]) # If `node` is a root node, pop the stack and generate an SCC if lowlinks[node] == index[node]: connected_component = [] while True: successor = stack.pop() connected_component.append(successor) if successor == node: break component = tuple(connected_component) # storing the result result.append(component) for node in graph: if node not in lowlinks: strongconnect(node) return result @property def dot(self): result = ['digraph G {'] for succ in self._preds: preds = self._preds[succ] for pred in preds: result.append(' %s -> %s;' % (pred, succ)) for node in self._nodes: result.append(' %s;' % node) result.append('}') return '\n'.join(result) # # Unarchiving functionality for zip, tar, tgz, tbz, whl # ARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', '.zip', '.tgz', '.tbz', '.whl') def unarchive(archive_filename, dest_dir, format=None, check=True): def check_path(path): if not isinstance(path, 
text_type): path = path.decode('utf-8') p = os.path.abspath(os.path.join(dest_dir, path)) if not p.startswith(dest_dir) or p[plen] != os.sep: raise ValueError('path outside destination: %r' % p) dest_dir = os.path.abspath(dest_dir) plen = len(dest_dir) archive = None if format is None: if archive_filename.endswith(('.zip', '.whl')): format = 'zip' elif archive_filename.endswith(('.tar.gz', '.tgz')): format = 'tgz' mode = 'r:gz' elif archive_filename.endswith(('.tar.bz2', '.tbz')): format = 'tbz' mode = 'r:bz2' elif archive_filename.endswith('.tar'): format = 'tar' mode = 'r' else: raise ValueError('Unknown format for %r' % archive_filename) try: if format == 'zip': archive = ZipFile(archive_filename, 'r') if check: names = archive.namelist() for name in names: check_path(name) else: archive = tarfile.open(archive_filename, mode) if check: names = archive.getnames() for name in names: check_path(name) if format != 'zip' and sys.version_info[0] < 3: # See Python issue 17153. If the dest path contains Unicode, # tarfile extraction fails on Python 2.x if a member path name # contains non-ASCII characters - it leads to an implicit # bytes -> unicode conversion using ASCII to decode. 
for tarinfo in archive.getmembers(): if not isinstance(tarinfo.name, text_type): tarinfo.name = tarinfo.name.decode('utf-8') archive.extractall(dest_dir) finally: if archive: archive.close() def zip_dir(directory): """zip a directory tree into a BytesIO object""" result = io.BytesIO() dlen = len(directory) with ZipFile(result, "w") as zf: for root, dirs, files in os.walk(directory): for name in files: full = os.path.join(root, name) rel = root[dlen:] dest = os.path.join(rel, name) zf.write(full, dest) return result # # Simple progress bar # UNITS = ('', 'K', 'M', 'G','T','P') class Progress(object): unknown = 'UNKNOWN' def __init__(self, minval=0, maxval=100): assert maxval is None or maxval >= minval self.min = self.cur = minval self.max = maxval self.started = None self.elapsed = 0 self.done = False def update(self, curval): assert self.min <= curval assert self.max is None or curval <= self.max self.cur = curval now = time.time() if self.started is None: self.started = now else: self.elapsed = now - self.started def increment(self, incr): assert incr >= 0 self.update(self.cur + incr) def start(self): self.update(self.min) return self def stop(self): if self.max is not None: self.update(self.max) self.done = True @property def maximum(self): return self.unknown if self.max is None else self.max @property def percentage(self): if self.done: result = '100 %' elif self.max is None: result = ' ?? %' else: v = 100.0 * (self.cur - self.min) / (self.max - self.min) result = '%3d %%' % v return result def format_duration(self, duration): if (duration <= 0) and self.max is None or self.cur == self.min: result = '??:??:??' 
#elif duration < 1: # result = '--:--:--' else: result = time.strftime('%H:%M:%S', time.gmtime(duration)) return result @property def ETA(self): if self.done: prefix = 'Done' t = self.elapsed #import pdb; pdb.set_trace() else: prefix = 'ETA ' if self.max is None: t = -1 elif self.elapsed == 0 or (self.cur == self.min): t = 0 else: #import pdb; pdb.set_trace() t = float(self.max - self.min) t /= self.cur - self.min t = (t - 1) * self.elapsed return '%s: %s' % (prefix, self.format_duration(t)) @property def speed(self): if self.elapsed == 0: result = 0.0 else: result = (self.cur - self.min) / self.elapsed for unit in UNITS: if result < 1000: break result /= 1000.0 return '%d %sB/s' % (result, unit) # # Glob functionality # RICH_GLOB = re.compile(r'\{([^}]*)\}') _CHECK_RECURSIVE_GLOB = re.compile(r'[^/\\,{]\*\*|\*\*[^/\\,}]') _CHECK_MISMATCH_SET = re.compile(r'^[^{]*\}|\{[^}]*$') def iglob(path_glob): """Extended globbing function that supports ** and {opt1,opt2,opt3}.""" if _CHECK_RECURSIVE_GLOB.search(path_glob): msg = """invalid glob %r: recursive glob "**" must be used alone""" raise ValueError(msg % path_glob) if _CHECK_MISMATCH_SET.search(path_glob): msg = """invalid glob %r: mismatching set marker '{' or '}'""" raise ValueError(msg % path_glob) return _iglob(path_glob) def _iglob(path_glob): rich_path_glob = RICH_GLOB.split(path_glob, 1) if len(rich_path_glob) > 1: assert len(rich_path_glob) == 3, rich_path_glob prefix, set, suffix = rich_path_glob for item in set.split(','): for path in _iglob(''.join((prefix, item, suffix))): yield path else: if '**' not in path_glob: for item in std_iglob(path_glob): yield item else: prefix, radical = path_glob.split('**', 1) if prefix == '': prefix = '.' 
if radical == '': radical = '*' else: # we support both radical = radical.lstrip('/') radical = radical.lstrip('\\') for path, dir, files in os.walk(prefix): path = os.path.normpath(path) for fn in _iglob(os.path.join(path, radical)): yield fn # # HTTPSConnection which verifies certificates/matches domains # class HTTPSConnection(httplib.HTTPSConnection): ca_certs = None # set this to the path to the certs file (.pem) check_domain = True # only used if ca_certs is not None # noinspection PyPropertyAccess def connect(self): sock = socket.create_connection((self.host, self.port), self.timeout) if getattr(self, '_tunnel_host', False): self.sock = sock self._tunnel() if not hasattr(ssl, 'SSLContext'): # For 2.x if self.ca_certs: cert_reqs = ssl.CERT_REQUIRED else: cert_reqs = ssl.CERT_NONE self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, cert_reqs=cert_reqs, ssl_version=ssl.PROTOCOL_SSLv23, ca_certs=self.ca_certs) else: context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) context.options |= ssl.OP_NO_SSLv2 if self.cert_file: context.load_cert_chain(self.cert_file, self.key_file) kwargs = {} if self.ca_certs: context.verify_mode = ssl.CERT_REQUIRED context.load_verify_locations(cafile=self.ca_certs) if getattr(ssl, 'HAS_SNI', False): kwargs['server_hostname'] = self.host self.sock = context.wrap_socket(sock, **kwargs) if self.ca_certs and self.check_domain: try: match_hostname(self.sock.getpeercert(), self.host) logger.debug('Host verified: %s', self.host) except CertificateError: self.sock.shutdown(socket.SHUT_RDWR) self.sock.close() raise class HTTPSHandler(BaseHTTPSHandler): def __init__(self, ca_certs, check_domain=True): BaseHTTPSHandler.__init__(self) self.ca_certs = ca_certs self.check_domain = check_domain def _conn_maker(self, *args, **kwargs): """ This is called to create a connection instance. Normally you'd pass a connection class to do_open, but it doesn't actually check for a class, and just expects a callable. 
As long as we behave just as a constructor would have, we should be OK. If it ever changes so that we *must* pass a class, we'll create an UnsafeHTTPSConnection class which just sets check_domain to False in the class definition, and choose which one to pass to do_open. """ result = HTTPSConnection(*args, **kwargs) if self.ca_certs: result.ca_certs = self.ca_certs result.check_domain = self.check_domain return result def https_open(self, req): try: return self.do_open(self._conn_maker, req) except URLError as e: if 'certificate verify failed' in str(e.reason): raise CertificateError('Unable to verify server certificate ' 'for %s' % req.host) else: raise # # To prevent against mixing HTTP traffic with HTTPS (examples: A Man-In-The- # Middle proxy using HTTP listens on port 443, or an index mistakenly serves # HTML containing a http://xyz link when it should be https://xyz), # you can use the following handler class, which does not allow HTTP traffic. # # It works by inheriting from HTTPHandler - so build_opener won't add a # handler for HTTP itself. 
#
class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler):
    """Handler which refuses to open plain-HTTP connections.

    Because it inherits from HTTPHandler, build_opener won't install a
    separate handler for HTTP, so any http:// URL reaching this opener is
    rejected instead of silently downgrading a supposedly secure exchange.
    """
    def http_open(self, req):
        raise URLError('Unexpected HTTP request on what should be a secure '
                       'connection: %s' % req)

#
# XML-RPC with timeouts
#
_ver_info = sys.version_info[:2]

if _ver_info == (2, 6):
    # Python 2.6's httplib connection wrappers cannot forward a timeout to
    # the underlying connection class, so provide shims that pass **kwargs
    # (including timeout) straight through.
    class HTTP(httplib.HTTP):
        def __init__(self, host='', port=None, **kwargs):
            if port == 0:   # 0 means use port 0, not the default port
                port = None
            self._setup(self._connection_class(host, port, **kwargs))


    class HTTPS(httplib.HTTPS):
        def __init__(self, host='', port=None, **kwargs):
            if port == 0:   # 0 means use port 0, not the default port
                port = None
            self._setup(self._connection_class(host, port, **kwargs))


class Transport(xmlrpclib.Transport):
    """xmlrpclib HTTP transport that carries a per-instance timeout."""

    def __init__(self, timeout, use_datetime=0):
        self.timeout = timeout
        xmlrpclib.Transport.__init__(self, use_datetime)

    def make_connection(self, host):
        h, eh, x509 = self.get_host_info(host)
        if _ver_info == (2, 6):
            result = HTTP(h, timeout=self.timeout)
        else:
            # Reuse the cached connection when it is for the same host.
            # NOTE(review): on this branch self.timeout is not passed to
            # HTTPConnection — confirm whether that is intentional.
            if not self._connection or host != self._connection[0]:
                self._extra_headers = eh
                self._connection = host, httplib.HTTPConnection(h)
            result = self._connection[1]
        return result

class SafeTransport(xmlrpclib.SafeTransport):
    """xmlrpclib HTTPS transport that carries a per-instance timeout."""

    def __init__(self, timeout, use_datetime=0):
        self.timeout = timeout
        xmlrpclib.SafeTransport.__init__(self, use_datetime)

    def make_connection(self, host):
        h, eh, kwargs = self.get_host_info(host)
        if not kwargs:
            kwargs = {}
        kwargs['timeout'] = self.timeout
        if _ver_info == (2, 6):
            # NOTE(review): passes the raw `host` (not the parsed `h`) —
            # confirm this matches httplib.HTTPS expectations.
            result = HTTPS(host, None, **kwargs)
        else:
            if not self._connection or host != self._connection[0]:
                self._extra_headers = eh
                self._connection = host, httplib.HTTPSConnection(h, None,
                                                                 **kwargs)
            result = self._connection[1]
        return result


class ServerProxy(xmlrpclib.ServerProxy):
    """ServerProxy accepting a `timeout` keyword, routed through the
    timeout-aware transports above."""

    def __init__(self, uri, **kwargs):
        self.timeout = timeout = kwargs.pop('timeout', None)
        # The above classes only come into play if a timeout
        # is specified
        if timeout is not None:
            scheme, _ = splittype(uri)
            use_datetime = kwargs.get('use_datetime', 0)
            if scheme == 'https':
                tcls = SafeTransport
            else:
                tcls = Transport
            kwargs['transport'] = t = tcls(timeout,
                                           use_datetime=use_datetime)
            self.transport = t
        xmlrpclib.ServerProxy.__init__(self, uri, **kwargs)

#
# CSV functionality. This is provided because on 2.x, the csv module can't
# handle Unicode. However, we need to deal with Unicode in e.g. RECORD files.
#

def _csv_open(fn, mode, **kwargs):
    # 2.x csv wants binary streams; 3.x wants text with newline=''.
    if sys.version_info[0] < 3:
        mode += 'b'
    else:
        kwargs['newline'] = ''
    return open(fn, mode, **kwargs)


class CSVBase(object):
    """Shared dialect options and context-manager plumbing for the
    reader/writer below."""

    defaults = {
        'delimiter': str(','),      # The strs are used because we need native
        'quotechar': str('"'),      # str in the csv API (2.x won't take
        'lineterminator': str('\n') # Unicode)
    }

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        self.stream.close()


class CSVReader(CSVBase):
    """CSV reader yielding text rows on both 2.x and 3.x.

    Accepts either `stream=` (a binary stream, wrapped in a UTF-8 text
    reader on 3.x) or `path=` (opened via _csv_open).
    """

    def __init__(self, **kwargs):
        if 'stream' in kwargs:
            stream = kwargs['stream']
            if sys.version_info[0] >= 3:
                # needs to be a text stream
                stream = codecs.getreader('utf-8')(stream)
            self.stream = stream
        else:
            self.stream = _csv_open(kwargs['path'], 'r')
        self.reader = csv.reader(self.stream, **self.defaults)

    def __iter__(self):
        return self

    def next(self):
        result = next(self.reader)
        if sys.version_info[0] < 3:
            # 2.x csv returns bytes; decode each cell to unicode.
            for i, item in enumerate(result):
                if not isinstance(item, text_type):
                    result[i] = item.decode('utf-8')
        return result

    __next__ = next


class CSVWriter(CSVBase):
    """CSV writer accepting text rows on both 2.x and 3.x."""

    def __init__(self, fn, **kwargs):
        self.stream = _csv_open(fn, 'w')
        self.writer = csv.writer(self.stream, **self.defaults)

    def writerow(self, row):
        if sys.version_info[0] < 3:
            # 2.x csv wants bytes; encode unicode cells first.
            r = []
            for item in row:
                if isinstance(item, text_type):
                    item = item.encode('utf-8')
                r.append(item)
            row = r
        self.writer.writerow(row)

#
# Configurator functionality
#

class Configurator(BaseConfigurator):
    """BaseConfigurator subclass adding the inc:// converter and custom
    object construction via the '()' / '[]' / '.' config keys."""

    value_converters = dict(BaseConfigurator.value_converters)
    value_converters['inc'] = 'inc_convert'

    def __init__(self, config, base=None):
        super(Configurator, self).__init__(config)
        # Base directory used to resolve relative inc:// paths.
        self.base = base or os.getcwd()

    def configure_custom(self, config):
        """Instantiate an object described by `config`:
        '()' = factory, '[]' = positional args, '.' = post-construction
        attributes, remaining valid-identifier keys = keyword args.
        """
        def convert(o):
            # Recursively convert containers, nested custom objects and
            # protocol-prefixed scalar values.
            if isinstance(o, (list, tuple)):
                result = type(o)([convert(i) for i in o])
            elif isinstance(o, dict):
                if '()' in o:
                    result = self.configure_custom(o)
                else:
                    result = {}
                    for k in o:
                        result[k] = convert(o[k])
            else:
                result = self.convert(o)
            return result

        c = config.pop('()')
        if not callable(c):
            c = self.resolve(c)
        props = config.pop('.', None)
        # Check for valid identifiers
        args = config.pop('[]', ())
        if args:
            args = tuple([convert(o) for o in args])
        items = [(k, convert(config[k])) for k in config if valid_ident(k)]
        kwargs = dict(items)
        result = c(*args, **kwargs)
        if props:
            for n, v in props.items():
                setattr(result, n, convert(v))
        return result

    def __getitem__(self, key):
        result = self.config[key]
        # Lazily build (and cache) custom objects on first access.
        if isinstance(result, dict) and '()' in result:
            self.config[key] = result = self.configure_custom(result)
        return result

    def inc_convert(self, value):
        """Default converter for the inc:// protocol."""
        if not os.path.isabs(value):
            value = os.path.join(self.base, value)
        with codecs.open(value, 'r', encoding='utf-8') as f:
            result = json.load(f)
        return result

#
# Mixin for running subprocesses and capturing their output
#

class SubprocessMixin(object):
    """Mixin running a command and streaming its stdout/stderr through
    reader threads to a progress callable or sys.stderr."""

    def __init__(self, verbose=False, progress=None):
        self.verbose = verbose
        self.progress = progress

    def reader(self, stream, context):
        """
        Read lines from a subprocess' output stream and either pass to a
        progress callable (if specified) or write progress information to
        sys.stderr.
        """
        progress = self.progress
        verbose = self.verbose
        while True:
            s = stream.readline()
            if not s:
                break
            if progress is not None:
                progress(s, context)
            else:
                if not verbose:
                    sys.stderr.write('.')
                else:
                    sys.stderr.write(s.decode('utf-8'))
                sys.stderr.flush()
        stream.close()

    def run_command(self, cmd, **kwargs):
        # One reader thread per pipe avoids deadlock when either pipe's
        # buffer fills while the other is being read.
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE, **kwargs)
        t1 = threading.Thread(target=self.reader, args=(p.stdout, 'stdout'))
        t1.start()
        t2 = threading.Thread(target=self.reader, args=(p.stderr, 'stderr'))
        t2.start()
        p.wait()
        t1.join()
        t2.join()
        if self.progress is not None:
            self.progress('done.', 'main')
        elif self.verbose:
            sys.stderr.write('done.\n')
        return p
zofuthan/edx-platform
refs/heads/master
lms/djangoapps/mobile_api/social_facebook/courses/serializers.py
128
"""
Serializer for courses API
"""

from rest_framework import serializers


class CoursesWithFriendsSerializer(serializers.Serializer):
    """
    Serializes oauth token for facebook groups request
    """
    # Facebook OAuth access token supplied by the client; required so the
    # API can query the user's Facebook groups on their behalf.
    oauth_token = serializers.CharField(required=True)
IMCG/iMapReduce
refs/heads/master
src/examples/python/WordCount.py
123
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from org.apache.hadoop.fs import Path from org.apache.hadoop.io import * from org.apache.hadoop.mapred import * import sys import getopt class WordCountMap(Mapper, MapReduceBase): one = IntWritable(1) def map(self, key, value, output, reporter): for w in value.toString().split(): output.collect(Text(w), self.one) class Summer(Reducer, MapReduceBase): def reduce(self, key, values, output, reporter): sum = 0 while values.hasNext(): sum += values.next().get() output.collect(key, IntWritable(sum)) def printUsage(code): print "wordcount [-m <maps>] [-r <reduces>] <input> <output>" sys.exit(code) def main(args): conf = JobConf(WordCountMap); conf.setJobName("wordcount"); conf.setOutputKeyClass(Text); conf.setOutputValueClass(IntWritable); conf.setMapperClass(WordCountMap); conf.setCombinerClass(Summer); conf.setReducerClass(Summer); try: flags, other_args = getopt.getopt(args[1:], "m:r:") except getopt.GetoptError: printUsage(1) if len(other_args) != 2: printUsage(1) for f,v in flags: if f == "-m": conf.setNumMapTasks(int(v)) elif f == "-r": conf.setNumReduceTasks(int(v)) conf.setInputPath(Path(other_args[0])) conf.setOutputPath(Path(other_args[1])) JobClient.runJob(conf); if __name__ == "__main__": 
main(sys.argv)
bubae/gazeAssistRecognize
refs/heads/master
calibration.py
1
import sys
import init_path
import gaze_algorithm as GA
import numpy as np
import cv2, os
import wx
import time
import csv


class CameraObject():
    """Grabs frames from the scene and eye cameras and accumulates
    calibration samples (scene target point, pupil center, LED glints)
    labelled with the on-screen target index."""

    def __init__(self, gazeObject):
        self.capScene = cv2.VideoCapture(0)
        if not self.capScene.isOpened():
            raise NameError("Scene Camera don`t connected")
        self.capEye = cv2.VideoCapture(1)
        if not self.capEye.isOpened():
            raise NameError("Eye Camera don`t connected")
        ret, self.sceneIm = self.capScene.read()
        self.sceneThresh = None
        ret, self.eyeIm = self.capEye.read()
        self.calibPoints = []
        self.calibPointsLabel = []
        self.pupilCenters = []
        self.LED_centroids = []
        self.gazeObject = gazeObject

    def update(self, drawLabel):
        """Grab one frame from each camera and store a sample when both the
        scene target and the pupil were detected."""
        sceneIm, points = self.readFrameScene()
        eyeIm, pupilCenter, LED_centroid = self.readFrameEye()
        if pupilCenter is None or points is None:
            return
        self.calibPoints.append(points)
        self.calibPointsLabel.append(drawLabel)
        self.pupilCenters.append(pupilCenter)
        self.LED_centroids.append(LED_centroid)
        return

    def readFrameScene(self):
        """Return (frame, target circle or None) from the scene camera."""
        scene_threshold = 232
        ret, frame = self.capScene.read()
        if not ret:
            return frame, None
        self.sceneIm = frame
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        ret, thresh = cv2.threshold(gray, scene_threshold, 255, 0)
        self.sceneThresh = thresh
        points = self.imProcessingScene(thresh)
        return frame, points

    def imProcessingScene(self, threshIm):
        """Detect the bright calibration target in the thresholded scene
        image; return the first detected circle (x, y, r) or None."""
        circles = cv2.HoughCircles(threshIm, cv2.cv.CV_HOUGH_GRADIENT, 1.1,
                                   2000, param1=30, param2=5,
                                   minRadius=5, maxRadius=8)
        if circles is None:
            return None
        return circles[0, 0]

    def readFrameEye(self):
        """Return (frame, pupil_center, LED_centroids) from the eye camera;
        the last two are None when detection failed."""
        ret, frame = self.capEye.read()
        if not ret:
            return None, None, None
        self.eyeIm = frame
        pupilCenter, LED_centroids = self.imProcessingEye(frame)
        # BUGFIX: was `pupilCenter == None | len(LED_centroids) < 2` --
        # `|` binds tighter than `<`, so `None | len(...)` was evaluated
        # first and raised a TypeError.
        if pupilCenter is None or len(LED_centroids) < 2:
            return frame, None, None
        return frame, pupilCenter, LED_centroids

    def clearObject(self):
        # Release both cameras and tear down any OpenCV debug windows.
        self.capScene.release()
        self.capEye.release()
        cv2.destroyAllWindows()

    def imProcessingEye(self, im):
        """Locate the pupil center and the two LED glints in an eye frame.

        Returns (pupilCenter, LED_centroids); (None, None) when no pupil
        was found.
        """
        gazeObject = self.gazeObject
        grayIm = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
        pupilCenter = gazeObject.GetPupilBoundaryPoints(grayIm)
        if pupilCenter is None:
            # BUGFIX: previously returned a bare None, which broke the
            # two-value unpack in readFrameEye with a TypeError.
            return None, None
        LED_centroids = gazeObject.FindLEDCentroids(grayIm)
        # Draw detections for visual feedback.
        im2 = cv2.circle(im, tuple(LED_centroids[0]), 10, (0, 0, 255), 2)
        im2 = cv2.circle(im2, tuple(LED_centroids[1]), 10, (0, 0, 255), 2)
        im2 = cv2.circle(im2, tuple(pupilCenter), 10, (0, 0, 255), 2)
        # cv2.imshow('frame', im)
        # cv2.waitKey(1)
        return pupilCenter, LED_centroids

    def saveData(self):
        """Dump the collected calibration samples to calibration.csv:
        one (target, label) row per sample, then the pupil centers and the
        LED centroids."""
        with open("calibration.csv", "wb") as csvFile:
            writer = csv.writer(csvFile, delimiter=',')
            for i in xrange(0, len(self.calibPoints)):
                writer.writerow(np.append(self.calibPoints[i],
                                          self.calibPointsLabel[i]))
            writer.writerow(self.pupilCenters)
            writer.writerow(self.LED_centroids)

    def optimize(self):
        # BUGFIX: was `self.gazeObject()` -- gazeObject is an instance, not
        # a callable; calling it raised a TypeError.
        gazeObject = self.gazeObject
        # gazeObject.optimizeGaze(self.calibPoints, self.pupilCenters, self.LED_centroids);
        return


class pixelPoint:
    """A circle to draw: position, radius and wx colour name."""

    def __init__(self, x, y, radius, color):
        self.x = x
        self.y = y
        self.radius = radius
        self.color = color


class AnimationPanel(wx.Panel):
    """Full-screen black panel showing one calibration target at a time
    while the cameras are polled in the background."""

    def __init__(self, parent):
        wx.Panel.__init__(self, parent)
        self.DataObj = None   # CalibrationData; assigned by main()
        self.camera = None    # CameraObject; assigned by main()
        self.pointList = []
        self.drawNum = 0
        self.SetBackgroundColour(wx.BLACK)
        self.Bind(wx.EVT_PAINT, self.OnPaint)
        self.timer = wx.Timer(self)
        self.cameraTimer = wx.Timer(self)
        self.Bind(wx.EVT_TIMER, self.OnTimer, self.timer)
        self.Bind(wx.EVT_TIMER, self.OnCameraTimer, self.cameraTimer)
        # Advance to the next target every 2 s; sample cameras every 50 ms.
        self.timer.Start(2000)
        self.cameraTimer.Start(50)

    def setPoint(self, point, radius, color):
        _point = pixelPoint(point[0], point[1], radius, color)
        self.pointList.append(_point)

    def listClear(self):
        self.pointList = []

    def OnPaint(self, event):
        self.dc = wx.PaintDC(self)
        self.listClear()
        self.setDrawData(self.drawNum)
        self.drawPoint()

    def OnTimer(self, event):
        self.drawNum += 1
        if self.drawNum > 9:
            # All targets shown: persist the samples and close the window.
            self.camera.saveData()
            self.GetParent().Close()
        self.Refresh()

    def OnCameraTimer(self, event):
        # Tag each camera sample with the index of the target on screen.
        self.camera.update(self.drawNum)

    def drawPoint(self):
        for point in self.pointList:
            self.dc.SetBrush(wx.Brush(point.color, wx.SOLID))
            self.dc.DrawCircle(point.x, point.y, point.radius)

    def setDrawData(self, i):
        target = self.DataObj.calTargetTrain[i]
        self.setPoint(target, 20, "white")


class CalibrationFrame(wx.Frame):
    """Borderless frame hosting the calibration animation panel."""

    def __init__(self, parent):
        wx.Frame.__init__(self, parent, wx.ID_ANY, 'Test FullScreen')
        self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)
        self.panel = AnimationPanel(self)

    def OnCloseWindow(self, event):
        self.Destroy()

    def OnFullScreen(self):
        self.ShowFullScreen(not self.IsFullScreen(), 27)


class CalibrationData:
    """Generates the on-screen target positions: screen center, then a
    3x3 grid inset 100 px from the edges, then the center again."""

    def __init__(self, winSize):
        self.calTargetTrain = []
        self.windowSize = winSize
        self.setTrainPoint()

    def setTrainPoint(self):
        W = self.windowSize[0]
        H = self.windowSize[1]
        self.calTargetTrain.append([W / 2, H / 2])
        WList = [0, W / 2, W]
        HList = [0, H / 2, H]
        offset = [100, 0, -100]
        for i in xrange(0, 3):
            x = WList[i] + offset[i]
            for j in xrange(0, 3):
                y = HList[j] + offset[j]
                self.calTargetTrain.append([x, y])
        self.calTargetTrain.append([W / 2, H / 2])


def main():
    app = wx.App()
    gazeObject = GA.gazeObject()
    cameraObject = CameraObject(gazeObject)
    # Build the target list for the display the calibration runs on.
    DataObj = CalibrationData(wx.Display(1).GetGeometry().GetSize())
    frame = CalibrationFrame(None)
    # frame.Show(True)
    frame.OnFullScreen()
    frame.panel.DataObj = DataObj
    frame.panel.camera = cameraObject
    app.MainLoop()


if __name__ == "__main__":
    main()
jcoady9/beets
refs/heads/master
test/test_template.py
1
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.

"""Tests for template engine.
"""
from __future__ import division, absolute_import, print_function

import warnings
from test._common import unittest
from beets.util import functemplate
import six


def _normexpr(expr):
    """Normalize an Expression object's parts, collapsing multiple
    adjacent text blocks and removing empty text blocks. Generates a
    sequence of parts.
    """
    textbuf = []
    for part in expr.parts:
        if isinstance(part, six.string_types):
            textbuf.append(part)
        else:
            # Flush accumulated text before yielding a non-text part.
            if textbuf:
                text = u''.join(textbuf)
                if text:
                    yield text
                textbuf = []
            yield part
    # Flush any trailing text.
    if textbuf:
        text = u''.join(textbuf)
        if text:
            yield text


def _normparse(text):
    """Parse a template and then normalize the resulting Expression."""
    return _normexpr(functemplate._parse(text))


class ParseTest(unittest.TestCase):
    # Tests for the template parser: escaping, symbols ($foo / ${foo}) and
    # function calls (%func{args}).

    def test_empty_string(self):
        self.assertEqual(list(_normparse(u'')), [])

    def _assert_symbol(self, obj, ident):
        """Assert that an object is a Symbol with the given identifier.
        """
        self.assertTrue(isinstance(obj, functemplate.Symbol),
                        u"not a Symbol: %s" % repr(obj))
        self.assertEqual(obj.ident, ident,
                         u"wrong identifier: %s vs. %s" %
                         (repr(obj.ident), repr(ident)))

    def _assert_call(self, obj, ident, numargs):
        """Assert that an object is a Call with the given identifier and
        argument count.
        """
        self.assertTrue(isinstance(obj, functemplate.Call),
                        u"not a Call: %s" % repr(obj))
        self.assertEqual(obj.ident, ident,
                         u"wrong identifier: %s vs. %s" %
                         (repr(obj.ident), repr(ident)))
        self.assertEqual(len(obj.args), numargs,
                         u"wrong argument count in %s: %i vs. %i" %
                         (repr(obj.ident), len(obj.args), numargs))

    def test_plain_text(self):
        self.assertEqual(list(_normparse(u'hello world')), [u'hello world'])

    def test_escaped_character_only(self):
        self.assertEqual(list(_normparse(u'$$')), [u'$'])

    def test_escaped_character_in_text(self):
        self.assertEqual(list(_normparse(u'a $$ b')), [u'a $ b'])

    def test_escaped_character_at_start(self):
        self.assertEqual(list(_normparse(u'$$ hello')), [u'$ hello'])

    def test_escaped_character_at_end(self):
        self.assertEqual(list(_normparse(u'hello $$')), [u'hello $'])

    def test_escaped_function_delim(self):
        self.assertEqual(list(_normparse(u'a $% b')), [u'a % b'])

    def test_escaped_sep(self):
        self.assertEqual(list(_normparse(u'a $, b')), [u'a , b'])

    def test_escaped_close_brace(self):
        self.assertEqual(list(_normparse(u'a $} b')), [u'a } b'])

    def test_bare_value_delim_kept_intact(self):
        self.assertEqual(list(_normparse(u'a $ b')), [u'a $ b'])

    def test_bare_function_delim_kept_intact(self):
        self.assertEqual(list(_normparse(u'a % b')), [u'a % b'])

    def test_bare_opener_kept_intact(self):
        self.assertEqual(list(_normparse(u'a { b')), [u'a { b'])

    def test_bare_closer_kept_intact(self):
        self.assertEqual(list(_normparse(u'a } b')), [u'a } b'])

    def test_bare_sep_kept_intact(self):
        self.assertEqual(list(_normparse(u'a , b')), [u'a , b'])

    def test_symbol_alone(self):
        parts = list(_normparse(u'$foo'))
        self.assertEqual(len(parts), 1)
        self._assert_symbol(parts[0], u"foo")

    def test_symbol_in_text(self):
        parts = list(_normparse(u'hello $foo world'))
        self.assertEqual(len(parts), 3)
        self.assertEqual(parts[0], u'hello ')
        self._assert_symbol(parts[1], u"foo")
        self.assertEqual(parts[2], u' world')

    def test_symbol_with_braces(self):
        parts = list(_normparse(u'hello${foo}world'))
        self.assertEqual(len(parts), 3)
        self.assertEqual(parts[0], u'hello')
        self._assert_symbol(parts[1], u"foo")
        self.assertEqual(parts[2], u'world')

    def test_unclosed_braces_symbol(self):
        self.assertEqual(list(_normparse(u'a ${ b')), [u'a ${ b'])

    def test_empty_braces_symbol(self):
        self.assertEqual(list(_normparse(u'a ${} b')), [u'a ${} b'])

    def test_call_without_args_at_end(self):
        self.assertEqual(list(_normparse(u'foo %bar')), [u'foo %bar'])

    def test_call_without_args(self):
        self.assertEqual(list(_normparse(u'foo %bar baz')), [u'foo %bar baz'])

    def test_call_with_unclosed_args(self):
        self.assertEqual(list(_normparse(u'foo %bar{ baz')),
                         [u'foo %bar{ baz'])

    def test_call_with_unclosed_multiple_args(self):
        self.assertEqual(list(_normparse(u'foo %bar{bar,bar baz')),
                         [u'foo %bar{bar,bar baz'])

    def test_call_empty_arg(self):
        parts = list(_normparse(u'%foo{}'))
        self.assertEqual(len(parts), 1)
        self._assert_call(parts[0], u"foo", 1)
        self.assertEqual(list(_normexpr(parts[0].args[0])), [])

    def test_call_single_arg(self):
        parts = list(_normparse(u'%foo{bar}'))
        self.assertEqual(len(parts), 1)
        self._assert_call(parts[0], u"foo", 1)
        self.assertEqual(list(_normexpr(parts[0].args[0])), [u'bar'])

    def test_call_two_args(self):
        parts = list(_normparse(u'%foo{bar,baz}'))
        self.assertEqual(len(parts), 1)
        self._assert_call(parts[0], u"foo", 2)
        self.assertEqual(list(_normexpr(parts[0].args[0])), [u'bar'])
        self.assertEqual(list(_normexpr(parts[0].args[1])), [u'baz'])

    def test_call_with_escaped_sep(self):
        parts = list(_normparse(u'%foo{bar$,baz}'))
        self.assertEqual(len(parts), 1)
        self._assert_call(parts[0], u"foo", 1)
        self.assertEqual(list(_normexpr(parts[0].args[0])), [u'bar,baz'])

    def test_call_with_escaped_close(self):
        parts = list(_normparse(u'%foo{bar$}baz}'))
        self.assertEqual(len(parts), 1)
        self._assert_call(parts[0], u"foo", 1)
        self.assertEqual(list(_normexpr(parts[0].args[0])), [u'bar}baz'])

    def test_call_with_symbol_argument(self):
        parts = list(_normparse(u'%foo{$bar,baz}'))
        self.assertEqual(len(parts), 1)
        self._assert_call(parts[0], u"foo", 2)
        arg_parts = list(_normexpr(parts[0].args[0]))
        self.assertEqual(len(arg_parts), 1)
        self._assert_symbol(arg_parts[0], u"bar")
        self.assertEqual(list(_normexpr(parts[0].args[1])), [u"baz"])

    def test_call_with_nested_call_argument(self):
        parts = list(_normparse(u'%foo{%bar{},baz}'))
        self.assertEqual(len(parts), 1)
        self._assert_call(parts[0], u"foo", 2)
        arg_parts = list(_normexpr(parts[0].args[0]))
        self.assertEqual(len(arg_parts), 1)
        self._assert_call(arg_parts[0], u"bar", 1)
        self.assertEqual(list(_normexpr(parts[0].args[1])), [u"baz"])

    def test_nested_call_with_argument(self):
        parts = list(_normparse(u'%foo{%bar{baz}}'))
        self.assertEqual(len(parts), 1)
        self._assert_call(parts[0], u"foo", 1)
        arg_parts = list(_normexpr(parts[0].args[0]))
        self.assertEqual(len(arg_parts), 1)
        self._assert_call(arg_parts[0], u"bar", 1)
        self.assertEqual(list(_normexpr(arg_parts[0].args[0])), [u'baz'])

    def test_fail_on_utf8(self):
        # The parser only accepts text; raw UTF-8 bytes must be rejected.
        parts = u'é'.encode('utf8')
        warnings.simplefilter("ignore")
        with self.assertRaises(UnicodeDecodeError):
            functemplate._parse(parts)
        warnings.simplefilter("default")


class EvalTest(unittest.TestCase):
    # Tests for template evaluation against a fixed set of values and
    # functions.

    def _eval(self, template):
        # Shared fixture: two symbol values and two template functions.
        values = {
            u'foo': u'bar',
            u'baz': u'BaR',
        }
        functions = {
            u'lower': six.text_type.lower,
            u'len': len,
        }
        return functemplate.Template(template).substitute(values, functions)

    def test_plain_text(self):
        self.assertEqual(self._eval(u"foo"), u"foo")

    def test_subtitute_value(self):
        self.assertEqual(self._eval(u"$foo"), u"bar")

    def test_subtitute_value_in_text(self):
        self.assertEqual(self._eval(u"hello $foo world"), u"hello bar world")

    def test_not_subtitute_undefined_value(self):
        self.assertEqual(self._eval(u"$bar"), u"$bar")

    def test_function_call(self):
        self.assertEqual(self._eval(u"%lower{FOO}"), u"foo")

    def test_function_call_with_text(self):
        self.assertEqual(self._eval(u"A %lower{FOO} B"), u"A foo B")

    def test_nested_function_call(self):
        self.assertEqual(self._eval(u"%lower{%lower{FOO}}"), u"foo")

    def test_symbol_in_argument(self):
        self.assertEqual(self._eval(u"%lower{$baz}"), u"bar")

    def test_function_call_exception(self):
        # A function raising (wrong arity here) must still yield a string.
        res = self._eval(u"%lower{a,b,c,d,e}")
        self.assertTrue(isinstance(res, six.string_types))

    def test_function_returning_integer(self):
        self.assertEqual(self._eval(u"%len{foo}"), u"3")

    def test_not_subtitute_undefined_func(self):
        self.assertEqual(self._eval(u"%bar{}"), u"%bar{}")

    def test_not_subtitute_func_with_no_args(self):
        self.assertEqual(self._eval(u"%lower"), u"%lower")

    def test_function_call_with_empty_arg(self):
        self.assertEqual(self._eval(u"%len{}"), u"0")


def suite():
    return unittest.TestLoader().loadTestsFromName(__name__)


if __name__ == '__main__':
    unittest.main(defaultTest='suite')
agoose77/hivesystem
refs/heads/master
tutorial/layers/layer5/workers/action3_play_animation.py
7
import bee from bee.segments import * import libcontext from libcontext.socketclasses import * from libcontext.pluginclasses import * class action3_play_animation(bee.worker): def set_playfunc(self, playfunc): self.play = playfunc @modifier def do_play(self): self.play(self.v_inp) inp = antenna('push', 'id') v_inp = variable('id') trigger(v_inp, do_play) connect(inp, v_inp) def place(self): s = socket_single_required(self.set_playfunc) libcontext.socket(("animation", "play"), s)
r-mart/scikit-learn
refs/heads/master
examples/decomposition/plot_faces_decomposition.py
204
""" ============================ Faces dataset decompositions ============================ This example applies to :ref:`olivetti_faces` different unsupervised matrix decomposition (dimension reduction) methods from the module :py:mod:`sklearn.decomposition` (see the documentation chapter :ref:`decompositions`) . """ print(__doc__) # Authors: Vlad Niculae, Alexandre Gramfort # License: BSD 3 clause import logging from time import time from numpy.random import RandomState import matplotlib.pyplot as plt from sklearn.datasets import fetch_olivetti_faces from sklearn.cluster import MiniBatchKMeans from sklearn import decomposition # Display progress logs on stdout logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s') n_row, n_col = 2, 3 n_components = n_row * n_col image_shape = (64, 64) rng = RandomState(0) ############################################################################### # Load faces data dataset = fetch_olivetti_faces(shuffle=True, random_state=rng) faces = dataset.data n_samples, n_features = faces.shape # global centering faces_centered = faces - faces.mean(axis=0) # local centering faces_centered -= faces_centered.mean(axis=1).reshape(n_samples, -1) print("Dataset consists of %d faces" % n_samples) ############################################################################### def plot_gallery(title, images, n_col=n_col, n_row=n_row): plt.figure(figsize=(2. * n_col, 2.26 * n_row)) plt.suptitle(title, size=16) for i, comp in enumerate(images): plt.subplot(n_row, n_col, i + 1) vmax = max(comp.max(), -comp.min()) plt.imshow(comp.reshape(image_shape), cmap=plt.cm.gray, interpolation='nearest', vmin=-vmax, vmax=vmax) plt.xticks(()) plt.yticks(()) plt.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.) ############################################################################### # List of the different estimators, whether to center and transpose the # problem, and whether the transformer uses the clustering API. 
estimators = [ ('Eigenfaces - RandomizedPCA', decomposition.RandomizedPCA(n_components=n_components, whiten=True), True), ('Non-negative components - NMF', decomposition.NMF(n_components=n_components, init='nndsvda', beta=5.0, tol=5e-3, sparseness='components'), False), ('Independent components - FastICA', decomposition.FastICA(n_components=n_components, whiten=True), True), ('Sparse comp. - MiniBatchSparsePCA', decomposition.MiniBatchSparsePCA(n_components=n_components, alpha=0.8, n_iter=100, batch_size=3, random_state=rng), True), ('MiniBatchDictionaryLearning', decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1, n_iter=50, batch_size=3, random_state=rng), True), ('Cluster centers - MiniBatchKMeans', MiniBatchKMeans(n_clusters=n_components, tol=1e-3, batch_size=20, max_iter=50, random_state=rng), True), ('Factor Analysis components - FA', decomposition.FactorAnalysis(n_components=n_components, max_iter=2), True), ] ############################################################################### # Plot a sample of the input data plot_gallery("First centered Olivetti faces", faces_centered[:n_components]) ############################################################################### # Do the estimation and plot it for name, estimator, center in estimators: print("Extracting the top %d %s..." % (n_components, name)) t0 = time() data = faces if center: data = faces_centered estimator.fit(data) train_time = (time() - t0) print("done in %0.3fs" % train_time) if hasattr(estimator, 'cluster_centers_'): components_ = estimator.cluster_centers_ else: components_ = estimator.components_ if hasattr(estimator, 'noise_variance_'): plot_gallery("Pixelwise variance", estimator.noise_variance_.reshape(1, -1), n_col=1, n_row=1) plot_gallery('%s - Train time %.1fs' % (name, train_time), components_[:n_components]) plt.show()
KSanthanam/rethinkdb
refs/heads/next
test/rql_test/connections/http_support/httpbin/helpers.py
49
# -*- coding: utf-8 -*- """ httpbin.helpers ~~~~~~~~~~~~~~~ This module provides helper functions for httpbin. """ import json import base64 from hashlib import md5 from werkzeug.http import parse_authorization_header from flask import request, make_response try: from urlparse import urlparse, urlunparse except ImportError: from urllib.parse import urlparse, urlunparse from .structures import CaseInsensitiveDict ASCII_ART = """ -=[ teapot ]=- _...._ .' _ _ `. | ."` ^ `". _, \_;`"---"`|// | ;/ \_ _/ `\"\"\"` """ REDIRECT_LOCATION = '/redirect/1' ENV_HEADERS = ( 'X-Varnish', 'X-Request-Start', 'X-Heroku-Queue-Depth', 'X-Real-Ip', 'X-Forwarded-Proto', 'X-Heroku-Queue-Wait-Time', 'X-Forwarded-For', 'X-Heroku-Dynos-In-Use', 'X-Forwarded-For', 'X-Forwarded-Protocol', 'X-Forwarded-Port' ) ROBOT_TXT = """User-agent: * Disallow: /deny """ ANGRY_ASCII =""" .-''''''-. .' _ _ '. / O O \\ : : | | : __ : \ .-"` `"-. / '. .' '-......-' YOU SHOUDN'T BE HERE """ def json_safe(string, content_type='application/octet-stream'): """Returns JSON-safe version of `string`. If `string` is a Unicode string or a valid UTF-8, it is returned unmodified, as it can safely be encoded to JSON string. If `string` contains raw/binary data, it is Base64-encoded, formatted and returned according to "data" URL scheme (RFC2397). Since JSON is not suitable for binary data, some additional encoding was necessary; "data" URL scheme was chosen for its simplicity. 
""" try: string = string.decode('utf-8') _encoded = json.dumps(string) return string except (ValueError, TypeError): return b''.join([ b'data:', content_type.encode('utf-8'), b';base64,', base64.b64encode(string) ]).decode('utf-8') def get_files(): """Returns files dict from request context.""" files = dict() for k, v in request.files.items(): content_type = request.files[k].content_type or 'application/octet-stream' val = json_safe(v.read(), content_type) if files.get(k): if not isinstance(files[k], list): files[k] = [files[k]] files[k].append(val) else: files[k] = val return files def get_headers(hide_env=True): """Returns headers dict from request context.""" headers = dict(request.headers.items()) if hide_env and ('show_env' not in request.args): for key in ENV_HEADERS: try: del headers[key] except KeyError: pass return CaseInsensitiveDict(headers.items()) def semiflatten(multi): """Convert a MutiDict into a regular dict. If there are more than one value for a key, the result will have a list of values for the key. Otherwise it will have the plain value.""" if multi: result = multi.to_dict(flat=False) for k, v in result.items(): if len(v) == 1: result[k] = v[0] return result else: return multi def get_url(request): """ Since we might be hosted behind a proxy, we need to check the X-Forwarded-Proto header to find out what protocol was used to access us. 
""" if 'X-Forwarded-Proto' not in request.headers: return request.url url = list(urlparse(request.url)) url[0] = request.headers.get('X-Forwarded-Proto') return urlunparse(url) def get_dict(*keys, **extras): """Returns request dict of given keys.""" _keys = ('url', 'args', 'form', 'data', 'origin', 'headers', 'files', 'json') assert all(map(_keys.__contains__, keys)) data = request.data form = request.form form = semiflatten(request.form) try: _json = json.loads(data.decode('utf-8')) except (ValueError, TypeError): _json = None d = dict( url=get_url(request), args=semiflatten(request.args), form=form, data=json_safe(data), origin=request.headers.get('X-Forwarded-For', request.remote_addr), headers=get_headers(), files=get_files(), json=_json ) out_d = dict() for key in keys: out_d[key] = d.get(key) out_d.update(extras) return out_d def status_code(code): """Returns response object of given status code.""" redirect = dict(headers=dict(location=REDIRECT_LOCATION)) code_map = { 301: redirect, 302: redirect, 303: redirect, 304: dict(data=''), 305: redirect, 307: redirect, 401: dict(headers={'WWW-Authenticate': 'Basic realm="Fake Realm"'}), 402: dict( data='Fuck you, pay me!', headers={ 'x-more-info': 'http://vimeo.com/22053820' } ), 407: dict(headers={'Proxy-Authenticate': 'Basic realm="Fake Realm"'}), 418: dict( # I'm a teapot! 
data=ASCII_ART, headers={ 'x-more-info': 'http://tools.ietf.org/html/rfc2324' } ), } r = make_response() r.status_code = code if code in code_map: m = code_map[code] if 'data' in m: r.data = m['data'] if 'headers' in m: r.headers = m['headers'] return r def check_basic_auth(user, passwd): """Checks user authentication using HTTP Basic Auth.""" auth = request.authorization return auth and auth.username == user and auth.password == passwd # Digest auth helpers # qop is a quality of protection def H(data): return md5(data).hexdigest() def HA1(realm, username, password): """Create HA1 hash by realm, username, password HA1 = md5(A1) = MD5(username:realm:password) """ if not realm: realm = u'' return H(b":".join([username.encode('utf-8'), realm.encode('utf-8'), password.encode('utf-8')])) def HA2(credentails, request): """Create HA2 md5 hash If the qop directive's value is "auth" or is unspecified, then HA2: HA2 = md5(A2) = MD5(method:digestURI) If the qop directive's value is "auth-int" , then HA2 is HA2 = md5(A2) = MD5(method:digestURI:MD5(entityBody)) """ if credentails.get("qop") == "auth" or credentails.get('qop') is None: return H(b":".join([request['method'].encode('utf-8'), request['uri'].encode('utf-8')])) elif credentails.get("qop") == "auth-int": for k in 'method', 'uri', 'body': if k not in request: raise ValueError("%s required" % k) return H("%s:%s:%s" % (request['method'], request['uri'], H(request['body']))) raise ValueError def response(credentails, user, password, request): """Compile digest auth response If the qop directive's value is "auth" or "auth-int" , then compute the response as follows: RESPONSE = MD5(HA1:nonce:nonceCount:clienNonce:qop:HA2) Else if the qop directive is unspecified, then compute the response as follows: RESPONSE = MD5(HA1:nonce:HA2) Arguments: - `credentails`: credentails dict - `user`: request user name - `password`: request user password - `request`: request dict """ for key in 'nonce', 'realm': if key not in credentails: 
raise ValueError("%s required for response" % key) response = None HA1_value = HA1( credentails.get('realm'), user, password ) HA2_value = HA2(credentails, request) if credentails.get('qop') is None: response = H(b":".join([ HA1_value.encode('utf-8'), credentails.get('nonce').encode('utf-8'), HA2_value.encode('utf-8') ])) elif credentails.get('qop') == 'auth' or credentails.get('qop') == 'auth-int': for k in 'nonce', 'nc', 'cnonce', 'qop': if k not in credentails: raise ValueError("%s required for response H" % k) response = H(b":".join([HA1_value.encode('utf-8'), credentails.get('nonce').encode('utf-8'), credentails.get('nc').encode('utf-8'), credentails.get('cnonce').encode('utf-8'), credentails.get('qop').encode('utf-8'), HA2_value.encode('utf-8')])) else: raise ValueError("qop value are wrong") return response def check_digest_auth(user, passwd): """Check user authentication using HTTP Digest auth""" if request.headers.get('Authorization'): credentails = parse_authorization_header(request.headers.get('Authorization')) if not credentails: return False response_hash = response(credentails, user, passwd, dict(uri=request.path, body=request.data, method=request.method)) if credentails.get('response') == response_hash: return True return False def secure_cookie(): """Return true if cookie should have secure attribute""" return request.environ['wsgi.url_scheme'] == 'https'
utopiaprince/micropython
refs/heads/master
tests/basics/string_format_modulo.py
47
print("%%" % ()) print("=%s=" % 1) print("=%s=%s=" % (1, 2)) print("=%s=" % (1,)) print("=%s=" % [1, 2]) print("=%s=" % "str") print("=%r=" % "str") try: print("=%s=%s=" % 1) except TypeError: print("TypeError") try: print("=%s=%s=%s=" % (1, 2)) except TypeError: print("TypeError") try: print("=%s=" % (1, 2)) except TypeError: print("TypeError") print("%s" % True) print("%s" % 1) print("%.1s" % "ab") print("%r" % True) print("%r" % 1) print("%c" % 48) print("%c" % 'a') print("%10s" % 'abc') print("%-10s" % 'abc') print("%d" % 10) print("%+d" % 10) print("% d" % 10) print("%d" % -10) print("%d" % True) print("%i" % -10) print("%i" % True) print("%u" % -10) print("%u" % True) print("%x" % 18) print("%o" % 18) print("%X" % 18) print("%#x" % 18) print("%#X" % 18) print("%#6o" % 18) print("%#6x" % 18) print("%#06x" % 18) print("%*d" % (5, 10)) print("%*.*d" % (2, 2, 20)) print("%*.*d" % (5, 8, 20)) print(">%8.4d<" % -12) print(">% 8.4d<" % -12) print(">%+8.4d<" % 12) print(">%+8.4d<" % -12) print(">%08.4d<" % -12) print(">%08.4d<" % 12) print(">%-8.4d<" % -12) print(">%-08.4d<" % -12) print(">%-+08.4d<" % -12) print(">%-+08.4d<" % 12) # Cases when "*" used and there's not enough values total try: print("%*s" % 5) except TypeError: print("TypeError") try: print("%*.*s" % (1, 15)) except TypeError: print("TypeError") print("%(foo)s" % {"foo": "bar", "baz": False}) try: print("%(foo)s" % {}) except KeyError: print("KeyError") # Using in "*" with dict got to fail try: print("%(foo)*s" % {"foo": "bar"}) except TypeError: print("TypeError") try: '%(a' % {'a':1} except ValueError: print('ValueError') try: '%.*d %.*d' % (20, 5) except TypeError: print('TypeError') try: a = '%*' % 1 except (ValueError): print('ValueError') try: '%c' % 'aa' except TypeError: print('TypeError') try: '%l' % 1 except ValueError: print('ValueError') try: 'a%' % 1 except ValueError: print('ValueError')
mohamed--abdel-maksoud/chromium.src
refs/heads/nw12
tools/valgrind/common.py
184
# Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import logging import platform import os import signal import subprocess import sys import time class NotImplementedError(Exception): pass class TimeoutError(Exception): pass def RunSubprocessInBackground(proc): """Runs a subprocess in the background. Returns a handle to the process.""" logging.info("running %s in the background" % " ".join(proc)) return subprocess.Popen(proc) def RunSubprocess(proc, timeout=0): """ Runs a subprocess, until it finishes or |timeout| is exceeded and the process is killed with taskkill. A |timeout| <= 0 means no timeout. Args: proc: list of process components (exe + args) timeout: how long to wait before killing, <= 0 means wait forever """ logging.info("running %s, timeout %d sec" % (" ".join(proc), timeout)) sys.stdout.flush() sys.stderr.flush() # Manually read and print out stdout and stderr. # By default, the subprocess is supposed to inherit these from its parent, # however when run under buildbot, it seems unable to read data from a # grandchild process, so we have to read the child and print the data as if # it came from us for buildbot to read it. We're not sure why this is # necessary. # TODO(erikkay): should we buffer stderr and stdout separately? p = subprocess.Popen(proc, universal_newlines=True, bufsize=0, # unbuffered stdout=subprocess.PIPE, stderr=subprocess.STDOUT) logging.info("started subprocess") did_timeout = False if timeout > 0: wait_until = time.time() + timeout while p.poll() is None and not did_timeout: # Have to use readline rather than readlines() or "for line in p.stdout:", # otherwise we get buffered even with bufsize=0. 
line = p.stdout.readline() while line and not did_timeout: sys.stdout.write(line) sys.stdout.flush() line = p.stdout.readline() if timeout > 0: did_timeout = time.time() > wait_until if did_timeout: logging.info("process timed out") else: logging.info("process ended, did not time out") if did_timeout: if IsWindows(): subprocess.call(["taskkill", "/T", "/F", "/PID", str(p.pid)]) else: # Does this kill all children, too? os.kill(p.pid, signal.SIGINT) logging.error("KILLED %d" % p.pid) # Give the process a chance to actually die before continuing # so that cleanup can happen safely. time.sleep(1.0) logging.error("TIMEOUT waiting for %s" % proc[0]) raise TimeoutError(proc[0]) else: for line in p.stdout: sys.stdout.write(line) if not IsMac(): # stdout flush fails on Mac logging.info("flushing stdout") sys.stdout.flush() logging.info("collecting result code") result = p.poll() if result: logging.error("%s exited with non-zero result code %d" % (proc[0], result)) return result def IsLinux(): return sys.platform.startswith('linux') def IsMac(): return sys.platform.startswith('darwin') def IsWindows(): return sys.platform == 'cygwin' or sys.platform.startswith('win') def WindowsVersionName(): """Returns the name of the Windows version if it is known, or None. Possible return values are: xp, vista, 7, 8, or None """ if sys.platform == 'cygwin': # Windows version number is hiding in system name. Looks like: # CYGWIN_NT-6.1-WOW64 try: version_str = platform.uname()[0].split('-')[1] except: return None elif sys.platform.startswith('win'): # Normal Windows version string. Mine: 6.1.7601 version_str = platform.version() else: return None parts = version_str.split('.') try: major = int(parts[0]) minor = int(parts[1]) except: return None # Can't parse, unknown version. if major == 5: return 'xp' elif major == 6 and minor == 0: return 'vista' elif major == 6 and minor == 1: return '7' elif major == 6 and minor == 2: return '8' # Future proof. 
;) return None def PlatformNames(): """Return an array of string to be used in paths for the platform (e.g. suppressions, gtest filters, ignore files etc.) The first element of the array describes the 'main' platform """ if IsLinux(): return ['linux'] if IsMac(): return ['mac'] if IsWindows(): names = ['win32'] version_name = WindowsVersionName() if version_name is not None: names.append('win-%s' % version_name) return names raise NotImplementedError('Unknown platform "%s".' % sys.platform) def PutEnvAndLog(env_name, env_value): os.putenv(env_name, env_value) logging.info('export %s=%s', env_name, env_value) def BoringCallers(mangled, use_re_wildcards): """Return a list of 'boring' function names (optinally mangled) with */? wildcards (optionally .*/.). Boring = we drop off the bottom of stack traces below such functions. """ need_mangling = [ # Don't show our testing framework: ("testing::Test::Run", "_ZN7testing4Test3RunEv"), ("testing::TestInfo::Run", "_ZN7testing8TestInfo3RunEv"), ("testing::internal::Handle*ExceptionsInMethodIfSupported*", "_ZN7testing8internal3?Handle*ExceptionsInMethodIfSupported*"), # Depend on scheduling: ("MessageLoop::Run", "_ZN11MessageLoop3RunEv"), ("MessageLoop::RunTask", "_ZN11MessageLoop7RunTask*"), ("RunnableMethod*", "_ZN14RunnableMethod*"), ("DispatchToMethod*", "_Z*16DispatchToMethod*"), ("base::internal::Invoker*::DoInvoke*", "_ZN4base8internal8Invoker*DoInvoke*"), # Invoker{1,2,3} ("base::internal::RunnableAdapter*::Run*", "_ZN4base8internal15RunnableAdapter*Run*"), ] ret = [] for pair in need_mangling: ret.append(pair[1 if mangled else 0]) ret += [ # Also don't show the internals of libc/pthread. "start_thread", "main", "BaseThreadInitThunk", ] if use_re_wildcards: for i in range(0, len(ret)): ret[i] = ret[i].replace('*', '.*').replace('?', '.') return ret def NormalizeWindowsPath(path): """If we're using Cygwin Python, turn the path into a Windows path. 
Don't turn forward slashes into backslashes for easier copy-pasting and escaping. TODO(rnk): If we ever want to cut out the subprocess invocation, we can use _winreg to get the root Cygwin directory from the registry key: HKEY_LOCAL_MACHINE\SOFTWARE\Cygwin\setup\rootdir. """ if sys.platform.startswith("cygwin"): p = subprocess.Popen(["cygpath", "-m", path], stdout=subprocess.PIPE, stderr=subprocess.PIPE) (out, err) = p.communicate() if err: logging.warning("WARNING: cygpath error: %s", err) return out.strip() else: return path ############################ # Common output format code def PrintUsedSuppressionsList(suppcounts): """ Prints out the list of used suppressions in a format common to all the memory tools. If the list is empty, prints nothing and returns False, otherwise True. suppcounts: a dictionary of used suppression counts, Key -> name, Value -> count. """ if not suppcounts: return False print "-----------------------------------------------------" print "Suppressions used:" print " count name" for (name, count) in sorted(suppcounts.items(), key=lambda (k,v): (v,k)): print "%7d %s" % (count, name) print "-----------------------------------------------------" sys.stdout.flush() return True
cloudera/hue
refs/heads/master
desktop/core/ext-py/docutils-0.14/docutils/parsers/rst/languages/eo.py
128
# $Id: eo.py 7119 2011-09-02 13:00:23Z milde $ # Author: Marcelo Huerta San Martin <richieadler@users.sourceforge.net> # Copyright: This module has been placed in the public domain. # New language mappings are welcome. Before doing a new translation, please # read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be # translated for each language: one in docutils/languages, the other in # docutils/parsers/rst/languages. """ Esperanto-language mappings for language-dependent features of reStructuredText. """ __docformat__ = 'reStructuredText' directives = { # language-dependent: fixed u'atentu': 'attention', u'zorgu': 'caution', u'code (translation required)': 'code', u'dangxero': 'danger', u'dan\u011dero': 'danger', u'eraro': 'error', u'spuro': 'hint', u'grava': 'important', u'noto': 'note', u'helpeto': 'tip', u'averto': 'warning', u'admono': 'admonition', u'flankteksto': 'sidebar', u'temo': 'topic', u'linea-bloko': 'line-block', u'analizota-literalo': 'parsed-literal', u'rubriko': 'rubric', u'epigrafo': 'epigraph', u'elstarajxoj': 'highlights', u'elstara\u0135oj': 'highlights', u'ekstera-citajxo': 'pull-quote', u'ekstera-cita\u0135o': 'pull-quote', u'kombinajxo': 'compound', u'kombina\u0135o': 'compound', u'tekstingo': 'container', u'enhavilo': 'container', #'questions': 'questions', #'qa': 'questions', #'faq': 'questions', u'tabelo': 'table', u'tabelo-vdk': 'csv-table', # "valoroj disigitaj per komoj" u'tabelo-csv': 'csv-table', u'tabelo-lista': 'list-table', u'meta': 'meta', 'math (translation required)': 'math', #'imagemap': 'imagemap', u'bildo': 'image', u'figuro': 'figure', u'inkludi': 'include', u'senanaliza': 'raw', u'anstatauxi': 'replace', u'anstata\u016di': 'replace', u'unicode': 'unicode', u'dato': 'date', u'klaso': 'class', u'rolo': 'role', u'preterlasita-rolo': 'default-role', u'titolo': 'title', u'enhavo': 'contents', u'seknum': 'sectnum', u'sekcia-numerado': 'sectnum', u'kapsekcio': 'header', u'piedsekcio': 'footer', #'footnotes': 
'footnotes', #'citations': 'citations', u'celaj-notoj': 'target-notes', u'restructuredtext-test-directive': 'restructuredtext-test-directive'} """Esperanto name to registered (in directives/__init__.py) directive name mapping.""" roles = { # language-dependent: fixed u'mallongigo': 'abbreviation', u'mall': 'abbreviation', u'komenclitero': 'acronym', u'kl': 'acronym', u'code (translation required)': 'code', u'indekso': 'index', u'i': 'index', u'subskribo': 'subscript', u'sub': 'subscript', u'supraskribo': 'superscript', u'sup': 'superscript', u'titola-referenco': 'title-reference', u'titolo': 'title-reference', u't': 'title-reference', u'pep-referenco': 'pep-reference', u'pep': 'pep-reference', u'rfc-referenco': 'rfc-reference', u'rfc': 'rfc-reference', u'emfazo': 'emphasis', u'forta': 'strong', u'litera': 'literal', 'math (translation required)': 'math', u'nomita-referenco': 'named-reference', u'nenomita-referenco': 'anonymous-reference', u'piednota-referenco': 'footnote-reference', u'citajxo-referenco': 'citation-reference', u'cita\u0135o-referenco': 'citation-reference', u'anstatauxa-referenco': 'substitution-reference', u'anstata\u016da-referenco': 'substitution-reference', u'celo': 'target', u'uri-referenco': 'uri-reference', u'uri': 'uri-reference', u'url': 'uri-reference', u'senanaliza': 'raw', } """Mapping of Esperanto role names to canonical role names for interpreted text. """
jms6520/cf-python
refs/heads/master
vendor/pip-1.3.1/setup.py
37
import codecs import os import re import sys import textwrap from setuptools import setup here = os.path.abspath(os.path.dirname(__file__)) def read(*parts): # intentionally *not* adding an encoding option to open # see here: https://github.com/pypa/virtualenv/issues/201#issuecomment-3145690 return codecs.open(os.path.join(here, *parts), 'r').read() def find_version(*file_paths): version_file = read(*file_paths) version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M) if version_match: return version_match.group(1) raise RuntimeError("Unable to find version string.") long_description = "\n" + "\n".join([ read('PROJECT.txt'), read('docs', 'quickstart.txt'), read('CHANGES.txt')]) tests_require = ['nose', 'virtualenv>=1.7', 'scripttest>=1.1.1', 'mock'] setup(name="pip", version=find_version('pip', '__init__.py'), description="A tool for installing and managing Python packages.", long_description=long_description, classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Topic :: Software Development :: Build Tools', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.5', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.1', 'Programming Language :: Python :: 3.2', ], keywords='easy_install distutils setuptools egg virtualenv', author='The pip developers', author_email='python-virtualenv@groups.google.com', url='http://www.pip-installer.org', license='MIT', packages=['pip', 'pip.commands', 'pip.vcs', 'pip.backwardcompat'], package_data={'pip': ['*.pem']}, entry_points=dict(console_scripts=['pip=pip:main', 'pip-%s=pip:main' % sys.version[:3]]), test_suite='nose.collector', tests_require=tests_require, zip_safe=False, extras_require = { 'testing':tests_require, }, )
dudakov/ACE3
refs/heads/master
docs/tools/combine_events.py
21
#!/usr/bin/env python3 # Author: Jonpas, SilentSpike import os import sys import re ######## GLOBALS ######### WIKI_FRAMEWORK = "wiki/framework" ACE3WEB_WIKI_FRAMEWORK = "http://ace3mod.com/{}".format(WIKI_FRAMEWORK) EXCLUDE = ["events-framework.md"] TEMP_LISTENABLE = "temp\\combined_events_listenable.md" TEMP_CALLABLE = "temp\\combined_events_callable.md" ########################## def extract(type, data, sourceLink): # Extract data match = re.match(r"(`"".+""`)\s*\|.*\|\s*(`\[.+\]`)\s*\|\s*(\w+)", data) name = match.group(1) # Event Name params = match.group(2) # Passed Parameters locality = match.group(3) # Locality print("- {} Event Data:\n - Event Name: {}\n - Parameters: {}\n - Locality: {}\n".format(type, name, params, locality)) # Write to file with open(eval("TEMP_{}".format(type.upper())), "a") as file: file.write("{} | {} | {} | {}\n".format(name, sourceLink, params, locality)) def main(): print(""" ############################ # Combine ACE3 Events List # ############################ """) scriptpath = os.path.realpath(__file__) projectpath = os.path.dirname(os.path.dirname(scriptpath)) wikiframeworkpath = os.path.join(projectpath, WIKI_FRAMEWORK) # Prepare directory and files to write to if not os.path.exists("temp"): os.makedirs("temp") with open(TEMP_LISTENABLE, "w") as fileListenable: fileListenable.writelines([ "Event Name | Source | Passed Parameter(s) | Locality\n" "---------- | ------ | ------------------- | --------\n" ]) with open(TEMP_CALLABLE, "w") as fileCallable: fileCallable.writelines([ "Event Name | Owner | Passed Parameter(s) | Locality\n" "---------- | ------ | ------------------- | --------\n" ]) # Iterate through files in the framework directory for file in os.listdir(wikiframeworkpath): # Skip iteration if file in exclusion list if file in EXCLUDE: print("Excluding: {}\n".format(file)) continue with open(os.path.join(wikiframeworkpath, file)) as fileOpen: for line in fileOpen: matchSubSection = 
re.match(r"###\s+\d+\.?\d*\s+(Listenable|Callable)", line) # Find main section, prepare data if re.match(r"##\s+\d+\.?\d*\s+Events", line): print("Found Events: {}".format(file)) # Source module (cut out 13 characters at the end, stands for "-framework.md") source = file[:-13] print("- Source Module: {}".format(source)) # Website URL - remove 3 characters at the end (stands for ".md") and add ".html", format into URL webURL = "{}/{}.{}".format(ACE3WEB_WIKI_FRAMEWORK, file[:-3], "html") print("- Website URL: {}".format(webURL)) # Source with Website URL sourceLink = "[{}]({})".format(source, webURL) # Find sub-section, prepare data elif matchSubSection: # Skip 4 lines to get to table contents (sub-loop) for i in range(4): data = next(fileOpen) # Iterate in sub-loop and extract data until empty line is reached while data not in ["\n", "\r\n"]: extract(matchSubSection.group(1), data, sourceLink) # Move to next line, exit if EOF try: data = next(fileOpen) except StopIteration: break if __name__ == "__main__": main()
Grirrane/odoo
refs/heads/master
addons/im_livechat/report/__init__.py
13
import im_livechat_report