code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
if self._poll is None: raise RuntimeError('poll instance is closed') remove_callback(self, handle) if handle.extra & READABLE: self._readers -= 1 if handle.extra & WRITABLE: self._writers -= 1 self._sync()
def remove_callback(self, handle)
Remove a callback.
5.412226
5.239643
1.032938
if self._poll is None: raise RuntimeError('poll instance is closed') if not has_callback(self, handle): raise ValueError('no such callback') if events & ~(READABLE|WRITABLE): raise ValueError('illegal event mask: {}'.format(events)) if handle.extra == events: return if handle.extra & READABLE: self._readers -= 1 if handle.extra & WRITABLE: self._writers -= 1 if events & READABLE: self._readers += 1 if events & WRITABLE: self._writers += 1 handle.extra = events self._sync()
def update_callback(self, handle, events)
Update the event mask for a callback.
3.216679
3.021597
1.064563
if self._poll is None: return self._poll.close() self._poll = None self._readers = 0 self._writers = 0 self._events = 0 clear_callbacks(self)
def close(self)
Close the poll instance.
4.019728
3.09591
1.298399
if self._mpoll is None: raise RuntimeError('Poller instance is closed') try: mpoll = self._mpoll[fd] except KeyError: mpoll = self._mpoll[fd] = MultiPoll(self._loop, fd) handle = mpoll.add_callback(events, callback) return handle
def add_callback(self, fd, events, callback)
Add a new callback. The file descriptor *fd* will be watched for the events specified by the *events* parameter, which should be a bitwise OR of the constants ``READABLE`` and ``WRITABLE``. Whenever one or more of the specified events occur, *callback* will be called with a single integer argument containing the bitwise OR of the current events. The return value of this method is an opaque handle that can be used to remove the callback or to update the events associated with it.
4.066051
4.444027
0.914947
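The add_callback record above relies on READABLE and WRITABLE being single-bit flags, so event masks combine with | and are tested with &. A minimal sketch of that convention, using illustrative flag values that are not taken from the library:

```python
# Illustrative flag values; the real constants come from the poll library.
READABLE = 0x1
WRITABLE = 0x2

def describe(events):
    # Decode a mask the same way a callback would inspect its argument.
    names = []
    if events & READABLE:
        names.append('readable')
    if events & WRITABLE:
        names.append('writable')
    return '|'.join(names) or 'none'

print(describe(READABLE | WRITABLE))  # readable|writable
print(describe(0))                    # none -- e.g. a temporarily disabled callback
```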
if self._mpoll is None: raise RuntimeError('Poller instance is closed') mpoll = self._mpoll.get(fd) if mpoll is None: raise ValueError('not watching fd {}'.format(fd)) mpoll.remove_callback(handle)
def remove_callback(self, fd, handle)
Remove a callback added by :meth:`~Poller.add_callback`. If this is the last callback that is registered for the fd then this will deallocate the ``MultiPoll`` instance and close the libuv handle.
4.926264
4.182283
1.177889
if self._mpoll is None: raise RuntimeError('Poller instance is closed') mpoll = self._mpoll.get(fd) if mpoll is None: raise ValueError('not watching fd {}'.format(fd)) mpoll.update_callback(handle, events)
def update_callback(self, fd, handle, events)
Update the event mask associated with an existing callback. If you want to temporarily disable a callback then you can use this method with an *events* argument of ``0``. This is more efficient than removing the callback and adding it again later.
5.102634
5.451367
0.936028
if self._mpoll is None: return for mpoll in self._mpoll.values(): mpoll.close() self._mpoll.clear() self._mpoll = None
def close(self)
Close all active poll instances and remove all callbacks.
3.867625
2.796741
1.382904
if not self.authOrder: raise DBusAuthenticationFailed() self.authMech = self.authOrder.pop() if self.authMech == 'DBUS_COOKIE_SHA1': self.sendAuthMessage('AUTH ' + self.authMech + ' ' + hexlify(getpass.getuser())) elif self.authMech == 'ANONYMOUS': self.sendAuthMessage('AUTH ' + self.authMech + ' ' + hexlify("txdbus")) else: self.sendAuthMessage('AUTH ' + self.authMech)
def authTryNextMethod(self)
Tries the next authentication method or raises a failure if all mechanisms have been tried.
4.156387
3.990497
1.041571
# XXX Ensure we obtain the correct directory for the # authenticating user and that that user actually # owns the keyrings directory if self.cookie_dir is None: cookie_dir = os.path.expanduser('~/.dbus-keyrings') else: cookie_dir = self.cookie_dir dstat = os.stat(cookie_dir) if dstat.st_mode & 0x36: # 066 raise Exception('User keyrings directory is writeable by other users. Aborting authentication') import pwd if dstat.st_uid != pwd.getpwuid(os.geteuid()).pw_uid: raise Exception('Keyrings directory is not owned by the current user. Aborting authentication!') f = open(os.path.join(cookie_dir, cookie_context), 'r') try: for line in f: try: k_id, k_time, k_cookie_hex = line.split() if k_id == cookie_id: return k_cookie_hex except: pass finally: f.close()
def _authGetDBusCookie(self, cookie_context, cookie_id)
Reads the requested cookie_id from the cookie_context file
3.794742
3.754402
1.010745
callbacks = obj._callbacks node = Node(callback, args) # Store a single callback directly in _callbacks if callbacks is None: obj._callbacks = node return node # Otherwise use a dllist. if not isinstance(callbacks, dllist): obj._callbacks = dllist() obj._callbacks.insert(callbacks) callbacks = obj._callbacks callbacks.insert(node) return node
def add_callback(obj, callback, args=())
Add a callback to an object.
4.518449
4.327332
1.044165
callbacks = obj._callbacks if callbacks is handle: obj._callbacks = None elif isinstance(callbacks, dllist): callbacks.remove(handle) if not callbacks: obj._callbacks = None
def remove_callback(obj, handle)
Remove a callback from an object.
3.977632
3.828177
1.039041
callbacks = obj._callbacks if not callbacks: return False if isinstance(callbacks, Node): return handle is callbacks else: return handle in callbacks
def has_callback(obj, handle)
Return whether a callback is currently registered for an object.
4.888912
4.705249
1.039034
callbacks = obj._callbacks if not callbacks: return if isinstance(callbacks, Node): node = callbacks obj._callbacks = None else: node = callbacks.first callbacks.remove(node) if not callbacks: obj._callbacks = None return node.data, node.extra
def pop_callback(obj)
Pop a single callback.
3.836607
3.750497
1.02296
callbacks = obj._callbacks if isinstance(callbacks, dllist): # Help the garbage collector by clearing all links. callbacks.clear() obj._callbacks = None
def clear_callbacks(obj)
Remove all callbacks from an object.
8.230541
8.154208
1.009361
callbacks = obj._callbacks if isinstance(callbacks, Node): node = callbacks try: if not func(node.data, node.extra): obj._callbacks = None except Exception: if log is None: log = logging.get_logger() log.exception('uncaught exception in callback') elif isinstance(callbacks, dllist): for node in callbacks: try: if func(node.data, node.extra): continue callbacks.remove(node) except Exception: if log is None: log = logging.get_logger() log.exception('uncaught exception in callback') if not callbacks: obj._callbacks = None
def walk_callbacks(obj, func, log=None)
Call func(callback, args) for all callbacks and keep only those callbacks for which the function returns True.
2.637789
2.735944
0.964124
def run_callback(callback, args): return callback(*args) return walk_callbacks(obj, run_callback, log)
def run_callbacks(obj, log=None)
Run callbacks.
5.478526
5.731492
0.955864
class AutoConfigModelSerializer(ModelSerializer): class Meta(object): model = configuration_model fields = '__all__' def create(self, validated_data): if "changed_by_username" in self.context: model = get_user_model() validated_data['changed_by'] = model.objects.get(username=self.context["changed_by_username"]) return super(AutoConfigModelSerializer, self).create(validated_data) return AutoConfigModelSerializer
def get_serializer_class(configuration_model)
Returns a ConfigurationModel serializer class for the supplied configuration_model.
2.852315
2.684978
1.062323
parsed_json = JSONParser().parse(stream) serializer_class = get_serializer_class(apps.get_model(parsed_json["model"])) list_serializer = serializer_class(data=parsed_json["data"], context={"changed_by_username": username}, many=True) if list_serializer.is_valid(): model_class = serializer_class.Meta.model for data in reversed(list_serializer.validated_data): if model_class.equal_to_current(data): list_serializer.validated_data.remove(data) entries_created = len(list_serializer.validated_data) list_serializer.save() return entries_created else: raise Exception(list_serializer.error_messages)
def deserialize_json(stream, username)
Given a stream containing JSON, deserializes the JSON into ConfigurationModel instances. The stream is expected to be in the following format: { "model": "config_models.ExampleConfigurationModel", "data": [ { "enabled": True, "color": "black" ... }, { "enabled": False, "color": "yellow" ... }, ... ] } If the provided stream does not contain valid JSON for the ConfigurationModel specified, an Exception will be raised. Arguments: stream: The stream of JSON, as described above. username: The username of the user making the change. This must match an existing user. Returns: the number of created entries
3.261116
3.242826
1.00564
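A small illustration of the payload shape deserialize_json expects, using only the standard json module; the model label and field values are invented for the example (the real code parses the stream with DRF's JSONParser and feeds the data to a ModelSerializer):

```python
import json

# Invented example payload in the documented shape.
payload = '''
{
  "model": "config_models.ExampleConfigurationModel",
  "data": [
    {"enabled": true, "color": "black"},
    {"enabled": false, "color": "yellow"}
  ]
}
'''

parsed = json.loads(payload)
print(parsed["model"])      # which ConfigurationModel the entries belong to
print(len(parsed["data"]))  # number of candidate entries to create
```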
return [ f.name for f in self.model._meta.get_fields() if not f.one_to_many ]
def get_displayable_field_names(self)
Return all field names, excluding reverse foreign key relationships.
3.252331
2.413803
1.347389
if queryset.count() != 1: self.message_user(request, _("Please select a single configuration to revert to.")) return target = queryset[0] target.id = None self.save_model(request, target, None, False) self.message_user(request, _("Reverted configuration.")) return HttpResponseRedirect( reverse( 'admin:{}_{}_change'.format( self.model._meta.app_label, self.model._meta.model_name, ), args=(target.id,), ) )
def revert(self, request, queryset)
Admin action to revert a configuration back to the selected value
2.391793
2.248127
1.063904
show_all = self.used_parameters.get(self.parameter_name) == "1" return ( { 'display': _('Current Configuration'), 'selected': not show_all, 'query_string': changelist.get_query_string({}, [self.parameter_name]), }, { 'display': _('All (Show History)'), 'selected': show_all, 'query_string': changelist.get_query_string({self.parameter_name: "1"}, []), } )
def choices(self, changelist)
Returns choices ready to be output in the template.
3.314711
3.241578
1.022561
if request.GET.get(ShowHistoryFilter.parameter_name) == '1': queryset = self.model.objects.with_active_flag() else: # Show only the most recent row for each key. queryset = self.model.objects.current_set() ordering = self.get_ordering(request) if ordering: return queryset.order_by(*ordering) return queryset
def get_queryset(self, request)
Annotate the queryset with an 'is_active' property that's true iff that row is the most recently added row for that particular set of KEY_FIELDS values. Filter the queryset to show only is_active rows by default.
4.547584
3.455109
1.316191
if not inst.is_active: return u'--' update_url = reverse('admin:{}_{}_add'.format(self.model._meta.app_label, self.model._meta.model_name)) update_url += "?source={}".format(inst.pk) return u'<a href="{}">{}</a>'.format(update_url, _('Update'))
def edit_link(self, inst)
Edit link for the change view
2.957096
2.828411
1.045497
ctx = original_submit_row(context) if context.get('readonly', False): ctx.update({ 'show_delete_link': False, 'show_save_as_new': False, 'show_save_and_add_another': False, 'show_save_and_continue': False, 'show_save': False, }) return ctx
def submit_row(context)
Overrides 'django.contrib.admin.templatetags.admin_modify.submit_row'. Manipulates the context going into that function by hiding all of the buttons in the submit row if the key `readonly` is set in the context.
2.789362
2.246245
1.241789
return self.values(*self.model.KEY_FIELDS).annotate(max=models.Max('pk')).values('max')
def _current_ids_subquery(self)
Internal helper method to return a subquery (a values queryset usable with pk__in) that selects the IDs of all the current entries (i.e. the most recent entry for each unique set of key values). Only useful if KEY_FIELDS is set.
13.402846
6.803348
1.970037
assert self.model.KEY_FIELDS != (), "Just use model.current() if there are no KEY_FIELDS" return self.get_queryset().filter( pk__in=self._current_ids_subquery() ).annotate( is_active=models.Value(1, output_field=models.IntegerField()) )
def current_set(self)
A queryset for the active configuration entries only. Only useful if KEY_FIELDS is set. Active means the most recent entries for each unique combination of keys. It does not necessarily mean enabled.
8.319753
5.472334
1.52033
if self.model.KEY_FIELDS: return self.get_queryset().annotate( is_active=models.ExpressionWrapper( models.Q(pk__in=self._current_ids_subquery()), output_field=models.IntegerField(), ) ) return self.get_queryset().annotate( is_active=models.ExpressionWrapper( models.Q(pk=self.model.current().pk), output_field=models.IntegerField(), ) )
def with_active_flag(self)
A query set where each result is annotated with an 'is_active' field that indicates if it's the most recent entry for that combination of keys.
3.142613
2.608645
1.204692
# Always create a new entry, instead of updating an existing model self.pk = None # pylint: disable=invalid-name super(ConfigurationModel, self).save( force_insert, force_update, using, update_fields ) cache.delete(self.cache_key_name(*[getattr(self, key) for key in self.KEY_FIELDS])) if self.KEY_FIELDS: cache.delete(self.key_values_cache_key_name())
def save(self, force_insert=False, force_update=False, using=None, update_fields=None)
Clear the cached value when saving a new configuration entry
4.437711
4.077076
1.088454
if cls.KEY_FIELDS != (): if len(args) != len(cls.KEY_FIELDS): raise TypeError( "cache_key_name() takes exactly {} arguments ({} given)".format(len(cls.KEY_FIELDS), len(args)) ) # pylint: disable=unicode-builtin return 'configuration/{}/current/{}'.format(cls.__name__, ','.join(six.text_type(arg) for arg in args)) else: return 'configuration/{}/current'.format(cls.__name__)
def cache_key_name(cls, *args)
Return the name of the key to use to cache the current configuration
3.240886
3.050729
1.062331
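To make the key scheme concrete, here is a standalone re-implementation of cache_key_name's string format; the class name and key values are invented for the example:

```python
def cache_key_name(cls_name, *args):
    # Keyed models append the comma-joined key values; keyless models do not.
    if args:
        return 'configuration/{}/current/{}'.format(
            cls_name, ','.join(str(arg) for arg in args))
    return 'configuration/{}/current'.format(cls_name)

print(cache_key_name('ExampleConfig'))
# configuration/ExampleConfig/current
print(cache_key_name('CourseEmbargoConfig', 'IT', 'course-v1:SomeX+some+2015'))
# configuration/CourseEmbargoConfig/current/IT,course-v1:SomeX+some+2015
```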
cached = cache.get(cls.cache_key_name(*args)) if cached is not None: return cached key_dict = dict(zip(cls.KEY_FIELDS, args)) try: current = cls.objects.filter(**key_dict).order_by('-change_date')[0] except IndexError: current = cls(**key_dict) cache.set(cls.cache_key_name(*args), current, cls.cache_timeout) return current
def current(cls, *args)
Return the active configuration entry, either from cache, from the database, or by creating a new empty entry (which is not persisted).
2.659567
2.575112
1.032796
key_fields = key_fields or cls.KEY_FIELDS return 'configuration/{}/key_values/{}'.format(cls.__name__, ','.join(key_fields))
def key_values_cache_key_name(cls, *key_fields)
Key for fetching unique key values from the cache
5.082156
5.136251
0.989468
flat = kwargs.pop('flat', False) assert not kwargs, "'flat' is the only kwarg accepted" key_fields = key_fields or cls.KEY_FIELDS cache_key = cls.key_values_cache_key_name(*key_fields) cached = cache.get(cache_key) if cached is not None: return cached values = list(cls.objects.values_list(*key_fields, flat=flat).order_by().distinct()) cache.set(cache_key, values, cls.cache_timeout) return values
def key_values(cls, *key_fields, **kwargs)
Get the set of unique values in the configuration table for the given key[s]. Calling cls.current(*value) for each value in the resulting list should always produce an entry, though any such entry may have enabled=False. Arguments: key_fields: The positional arguments are the KEY_FIELDS to return. For example if you had a course embargo configuration where each entry was keyed on (country, course), then you might want to know "What countries have embargoes configured?" with cls.key_values('country'), or "Which courses have country restrictions?" with cls.key_values('course'). You can also leave this unspecified for the default, which returns the distinct combinations of all keys. flat: If you pass flat=True as a kwarg, it has the same effect as in Django's 'values_list' method: Instead of returning a list of lists, you'll get one list of values. This makes sense to use whenever there is only one key being queried. Return value: List of lists of each combination of keys found in the database. e.g. [("Italy", "course-v1:SomeX+some+2015"), ...] for the course embargo example
2.765166
2.767583
0.999127
for field in self._meta.get_fields(): if not field.many_to_many and field.name not in fields_to_ignore: if getattr(instance, field.name) != getattr(self, field.name): return False return True
def fields_equal(self, instance, fields_to_ignore=("id", "change_date", "changed_by"))
Compares this instance's fields to the supplied instance to test for equality. This will ignore any fields in `fields_to_ignore`. Note that this method ignores many-to-many fields. Args: instance: the model instance to compare fields_to_ignore: List of fields that should not be compared for equality. By default includes `id`, `change_date`, and `changed_by`. Returns: True if the checked fields are all equivalent, else False
2.1824
2.068392
1.055119
# Remove many-to-many relationships from json. # They require an instance to be already saved. info = model_meta.get_field_info(cls) for field_name, relation_info in info.relations.items(): if relation_info.to_many and (field_name in json): json.pop(field_name) new_instance = cls(**json) key_field_args = tuple(getattr(new_instance, key) for key in cls.KEY_FIELDS) current = cls.current(*key_field_args) # If current.id is None, no entry actually existed and the "current" method created it. if current.id is not None: return current.fields_equal(new_instance, fields_to_ignore) return False
def equal_to_current(cls, json, fields_to_ignore=("id", "change_date", "changed_by"))
Compares for equality this instance to a model instance constructed from the supplied JSON. This will ignore any fields in `fields_to_ignore`. Note that this method cannot handle fields with many-to-many associations, as those can only be set on a saved model instance (and saving the model instance will create a new entry). All many-to-many field entries will be removed before the equality comparison is done. Args: json: json representing an entry to compare fields_to_ignore: List of fields that should not be compared for equality. By default includes `id`, `change_date`, and `changed_by`. Returns: True if the checked fields are all equivalent, else False
4.851859
4.514251
1.074787
def _create_atomic_wrapper(*args, **kwargs): # When a view call fails due to a permissions error, it raises an exception. # An uncaught exception breaks the DB transaction for any following DB operations # unless it's wrapped in a atomic() decorator or context manager. with transaction.atomic(): return wrapped_func(*args, **kwargs) return _create_atomic_wrapper
def create_atomic_wrapper(cls, wrapped_func)
Returns a wrapped function.
7.557894
7.05335
1.071533
view = super(AtomicMixin, cls).as_view(**initkwargs) return cls.create_atomic_wrapper(view)
def as_view(cls, **initkwargs)
Overrides as_view to add atomic transaction.
5.628776
4.395269
1.280644
def _decorator(func): @wraps(func) def _inner(*args, **kwargs): if not config_model.current().enabled: return HttpResponseNotFound() return func(*args, **kwargs) return _inner return _decorator
def require_config(config_model)
View decorator that enables/disables a view based on configuration. Arguments: config_model (ConfigurationModel subclass): The class of the configuration model to check. Returns: HttpResponse: 404 if the configuration model is disabled, otherwise returns the response from the decorated view.
3.626687
3.329017
1.089417
''' Retrieve a list of OSciMap4 tile responses and merge them into one. get_tiles() retrieves data and performs basic integrity checks. ''' tile = VectorTile(extents) for layer in feature_layers: tile.addFeatures(layer['features'], layer['name']) tile.complete() data = tile.out.SerializeToString() file.write(struct.pack(">I", len(data))) file.write(data)
def merge(file, feature_layers)
Retrieve a list of OSciMap4 tile responses and merge them into one. get_tiles() retrieves data and performs basic integrity checks.
11.179223
4.244046
2.634096
if shape.type in ('Polygon', 'MultiPolygon') and not shape.is_valid: shape = shape.buffer(0) # return value from buffer is usually valid, but it's # not clear from the docs whether this is guaranteed, # so return None if not. if not shape.is_valid: return None return shape
def _make_valid_if_necessary(shape)
attempt to correct invalid shapes if necessary. After simplification, even when preserving topology, invalid shapes can be returned. This appears to only occur with polygon types. As an optimization, we only check if the polygon types are valid.
5.484289
4.694207
1.16831
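A quick demonstration of the buffer(0) repair trick used above on a self-intersecting "bowtie" ring, assuming shapely is installed (it is already used by the surrounding code):

```python
from shapely.geometry import Polygon

# A self-intersecting "bowtie" ring: the two diagonals cross at (1, 1).
bowtie = Polygon([(0, 0), (2, 2), (2, 0), (0, 2)])
print(bowtie.is_valid)       # False

repaired = bowtie.buffer(0)  # the same repair used by _make_valid_if_necessary
print(repaired.is_valid)     # True
print(repaired.geom_type)    # Polygon or MultiPolygon, depending on the GEOS version
```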
props_size = 0 if src_props: for k, v in src_props.items(): if v is not None: props_size += len(k) + _sizeof(v) dest_props[k] = v return props_size
def _accumulate_props(dest_props, src_props)
helper to accumulate a dict of properties. Mutates dest_props by adding the non-None src_props and returns the size added.
2.632172
2.434381
1.081249
from tilequeue.tile import coord_children_subrange from tilequeue.tile import metatile_zoom_from_size assert tile_size >= 256 assert tile_size <= 256 * (1 << metatile_zoom) assert _is_power_of_2(tile_size) # delta is how many zoom levels _lower_ we want the child tiles, based on # their tile size. 256px tiles are defined as being at nominal zoom, so # delta = 0 for them. delta = metatile_zoom_from_size(tile_size // 256) zoom = nominal_zoom - delta return list(coord_children_subrange(coord, zoom, zoom))
def metatile_children_with_size(coord, metatile_zoom, nominal_zoom, tile_size)
Return a list of all the coords which are children of the input metatile at `coord` with zoom `metatile_zoom` (i.e: 0 for a single tile metatile, 1 for 2x2, 2 for 4x4, etc...) with size `tile_size` corrected for the `nominal_zoom`. For example, in a single tile metatile, the `tile_size` must be 256 and the returned list contains only `coord`. For an 8x8 metatile (`metatile_zoom = 3`), requesting the 512px children would give a list of the 4x4 512px children at `coord.zoom + 2` with nominal zoom `nominal_zoom`. Correcting for nominal zoom means that some tiles may have coordinate zooms lower than they would otherwise be. For example, the 0/0/0 tile with metatile zoom 3 (8x8 256px tiles) would have 4x4 512px tiles at coordinate zoom 2 and nominal zoom 3. At nominal zoom 2, there would be 2x2 512px tiles at coordinate zoom 1.
5.165899
4.938378
1.046072
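The size-to-zoom arithmetic above treats 256px tiles as sitting at nominal zoom, with each doubling of tile size lowering the coordinate zoom by one. A stand-in for that calculation, written only to illustrate the arithmetic (the real helper is metatile_zoom_from_size, which is passed the ratio tile_size // 256):

```python
def zoom_delta_for_size(tile_size):
    # How many zoom levels below nominal zoom a tile of this size sits.
    assert tile_size % 256 == 0
    ratio = tile_size // 256
    assert ratio & (ratio - 1) == 0, 'tile size must be 256 * a power of two'
    return ratio.bit_length() - 1

for size in (256, 512, 1024):
    print(size, zoom_delta_for_size(size))
# 256 -> 0, 512 -> 1, 1024 -> 2
```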
from tilequeue.tile import metatile_zoom_from_size tile_size_by_zoom = {} nominal_zoom = coord.zoom + metatile_zoom # check that the tile sizes are correct and within range. for tile_size in cfg_tile_sizes: assert tile_size >= 256 assert tile_size <= 256 * (1 << metatile_zoom) assert _is_power_of_2(tile_size) if coord.zoom >= max_zoom: # all the tile_sizes down to 256 at the nominal zoom. tile_sizes = [] tile_sizes.extend(cfg_tile_sizes) lowest_tile_size = min(tile_sizes) while lowest_tile_size > 256: lowest_tile_size //= 2 tile_sizes.append(lowest_tile_size) tile_size_by_zoom[nominal_zoom] = tile_sizes elif coord.zoom <= 0: # the tile_sizes, plus max(tile_sizes) size at nominal zooms decreasing # down to 0 (or as close as we can get) tile_size_by_zoom[nominal_zoom] = cfg_tile_sizes max_tile_size = max(cfg_tile_sizes) max_tile_zoom = metatile_zoom_from_size(max_tile_size // 256) assert max_tile_zoom <= metatile_zoom for delta in range(0, metatile_zoom - max_tile_zoom): z = nominal_zoom - (delta + 1) tile_size_by_zoom[z] = [max_tile_size] else: # the tile_sizes at nominal zoom only. tile_size_by_zoom[nominal_zoom] = cfg_tile_sizes return tile_size_by_zoom
def calculate_sizes_by_zoom(coord, metatile_zoom, cfg_tile_sizes, max_zoom)
Returns a map of nominal zoom to the list of tile sizes to generate at that zoom. This is because we want to generate different metatile contents at different zoom levels. At the most detailed zoom level, we want to generate the smallest tiles possible, as this allows "overzooming" by simply extracting the smaller tiles. At the minimum zoom, we want to get as close as we can to zero nominal zoom by using any "unused" space in the metatile for larger tile sizes that we're not generating. For example, with 1x1 metatiles, the tile size is always 256px, and the function will return {coord.zoom: [256]} Note that max_zoom should be the maximum *coordinate* zoom, not nominal zoom.
2.771817
2.670427
1.037968
tile_sizes_by_zoom = calculate_sizes_by_zoom( coord, metatile_zoom, cfg_tile_sizes, max_zoom) cut_coords_by_zoom = {} for nominal_zoom, tile_sizes in tile_sizes_by_zoom.iteritems(): cut_coords = [] for tile_size in tile_sizes: cut_coords.extend(metatile_children_with_size( coord, metatile_zoom, nominal_zoom, tile_size)) cut_coords_by_zoom[nominal_zoom] = cut_coords return cut_coords_by_zoom
def calculate_cut_coords_by_zoom( coord, metatile_zoom, cfg_tile_sizes, max_zoom)
Returns a map of nominal zoom to the list of cut coordinates at that nominal zoom. Note that max_zoom should be the maximum coordinate zoom, not nominal zoom.
2.179627
1.994183
1.092992
''' Simple emulation of function `os.replace(..)` from modern version of Python. Implementation is not fully atomic, but enough for us. ''' orig_os_replace_func = getattr(os, 'replace', None) if orig_os_replace_func is not None: # not need for emulation: we using modern version of Python. # fully atomic for this case orig_os_replace_func(src, dst) return if os.name == 'posix': # POSIX requirement: `os.rename(..)` works as `os.replace(..)` # fully atomic for this case os.rename(src, dst) return # simple emulation for `os.name == 'nt'` and other marginal # operation systems. not fully atomic implementation for this # case try: # trying atomic `os.rename(..)` without `os.remove(..)` or # other operations os.rename(src, dst) error = None except OSError as e: error = e if error is None: return for i in range(5): # some number of tries may be failed # because we may be in concurrent environment with other # processes/threads try: os.remove(dst) except OSError: # destination was not exist # or concurrent process/thread is removing it in parallel with us pass try: os.rename(src, dst) error = None except OSError as e: error = e continue break if error is not None: raise_from(OSError('failed to replace'), error)
def os_replace(src, dst)
Simple emulation of function `os.replace(..)` from modern version of Python. Implementation is not fully atomic, but enough for us.
5.415154
4.31255
1.255673
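Typical use of a replace helper like the one above: write the new contents to a temporary file in the same directory, then replace the destination in one step. This sketch calls os.replace directly and so assumes Python 3.3+; on older interpreters the os_replace fallback above would be used instead.

```python
import os
import tempfile

def write_atomically(path, data):
    # Write to a temp file in the destination directory, then swap it in.
    dir_name = os.path.dirname(os.path.abspath(path))
    fd, tmp_path = tempfile.mkstemp(dir=dir_name)
    try:
        with os.fdopen(fd, 'wb') as f:
            f.write(data)
        os.replace(tmp_path, path)  # or os_replace(tmp_path, path)
    except Exception:
        os.remove(tmp_path)
        raise

write_atomically('example.txt', b'hello\n')
```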
if fmt and fmt == zip_format: return metatiles_are_equal(tile_data_1, tile_data_2) else: return tile_data_1 == tile_data_2
def tiles_are_equal(tile_data_1, tile_data_2, fmt)
Returns True if the tile data is equal in tile_data_1 and tile_data_2. For most formats, this is a simple byte-wise equality check. For zipped metatiles, we need to check the contents, as the zip format includes metadata such as timestamps and doesn't control file ordering.
3.810288
3.166131
1.203452
existing_data = store.read_tile(coord, format) if not existing_data or \ not tiles_are_equal(existing_data, tile_data, format): store.write_tile(tile_data, coord, format) return True else: return False
def write_tile_if_changed(store, tile_data, coord, format)
Only write tile data if different from existing. Try to read the tile data from the store first. If the existing data matches, don't write. Returns whether the tile was written.
2.720916
2.582234
1.053706
key = yamlkeys[0] rest = yamlkeys[1:] if len(rest) == 0: # no rest means we found the key to update. container[key] = value elif key in container: # still need to find the leaf in the tree, so recurse. _override_cfg(container[key], rest, value) else: # need to create a sub-tree down to the leaf to insert into. subtree = {} _override_cfg(subtree, rest, value) container[key] = subtree
def _override_cfg(container, yamlkeys, value)
Override a hierarchical key in the config, setting it to the value. Note that yamlkeys should be a non-empty list of strings.
3.587188
3.454906
1.038288
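A standalone demonstration of the override behaviour described above, with the recursion inlined so it runs on its own; the config keys are invented for the example:

```python
def override_cfg(container, keys, value):
    # Walk down the key path, creating sub-dicts as needed, and set the leaf.
    key, rest = keys[0], keys[1:]
    if not rest:
        container[key] = value
    elif key in container:
        override_cfg(container[key], rest, value)
    else:
        subtree = {}
        override_cfg(subtree, rest, value)
        container[key] = subtree

cfg = {'store': {'type': 's3'}}
override_cfg(cfg, ['store', 'name'], 'my-bucket')  # update an existing subtree
override_cfg(cfg, ['rawr', 'group-zoom'], 10)      # create a new subtree
print(cfg)
# {'store': {'type': 's3', 'name': 'my-bucket'}, 'rawr': {'group-zoom': 10}}
```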
if zoom_start <= coord.zoom: yield coord for child_coord in coord_children_range(coord, zoom_stop): if zoom_start <= child_coord.zoom: yield child_coord
def coord_pyramid(coord, zoom_start, zoom_stop)
generate full pyramid for coord. Generate the full pyramid for a single coordinate. Note that zoom_stop is exclusive.
3.784694
3.997567
0.946749
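A standalone sketch of the pyramid enumeration above using plain (z, x, y) tuples; the real code works on ModestMaps Coordinate objects via coord_children_range, and the starting tile here is arbitrary:

```python
def tile_children(z, x, y):
    # The four children of a tile, one zoom level down.
    return [(z + 1, 2 * x + dx, 2 * y + dy) for dy in (0, 1) for dx in (0, 1)]

def pyramid(tile, zoom_start, zoom_stop):
    # Yield the tile and all descendants with zoom_start <= zoom < zoom_stop.
    frontier = [tile]
    while frontier:
        next_frontier = []
        for (z, x, y) in frontier:
            if z >= zoom_start:
                yield (z, x, y)
            if z + 1 < zoom_stop:
                next_frontier.extend(tile_children(z, x, y))
        frontier = next_frontier

print(len(list(pyramid((10, 163, 395), 10, 13))))  # 1 + 4 + 16 = 21 tiles
```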
for coord in coords: for child in coord_pyramid(coord, zoom_start, zoom_stop): yield child
def coord_pyramids(coords, zoom_start, zoom_stop)
generate full pyramid for coords. Generate the full pyramid for the list of coords. Note that zoom_stop is exclusive.
4.218143
4.748399
0.888329
logger = make_logger(cfg, 'enqueue_tiles_of_interest') logger.info('Enqueueing tiles of interest') logger.info('Fetching tiles of interest ...') tiles_of_interest = peripherals.toi.fetch_tiles_of_interest() n_toi = len(tiles_of_interest) logger.info('Fetching tiles of interest ... done') rawr_yaml = cfg.yml.get('rawr') assert rawr_yaml, 'Missing rawr yaml' group_by_zoom = rawr_yaml.get('group-zoom') assert group_by_zoom, 'Missing rawr group-zoom' assert isinstance(group_by_zoom, int), 'Invalid rawr group-zoom' if args.zoom_start is None: zoom_start = group_by_zoom else: zoom_start = args.zoom_start if args.zoom_stop is None: zoom_stop = cfg.max_zoom + 1 # +1 because exclusive else: zoom_stop = args.zoom_stop assert zoom_start >= group_by_zoom assert zoom_stop > zoom_start ungrouped = [] coords_at_group_zoom = set() for coord_int in tiles_of_interest: coord = coord_unmarshall_int(coord_int) if coord.zoom < zoom_start: ungrouped.append(coord) if coord.zoom >= group_by_zoom: coord_at_group_zoom = coord.zoomTo(group_by_zoom).container() coords_at_group_zoom.add(coord_at_group_zoom) pyramids = coord_pyramids(coords_at_group_zoom, zoom_start, zoom_stop) coords_to_enqueue = chain(ungrouped, pyramids) queue_writer = peripherals.queue_writer n_queued, n_in_flight = queue_writer.enqueue_batch(coords_to_enqueue) logger.info('%d enqueued - %d in flight' % (n_queued, n_in_flight)) logger.info('%d tiles of interest processed' % n_toi)
def tilequeue_enqueue_full_pyramid_from_toi(cfg, peripherals, args)
enqueue a full pyramid from the z10 toi
2.823842
2.851995
0.990129
from tilequeue.stats import RawrTileEnqueueStatsHandler from tilequeue.rawr import make_rawr_enqueuer_from_cfg logger = make_logger(cfg, 'enqueue_random_pyramids') rawr_yaml = cfg.yml.get('rawr') assert rawr_yaml, 'Missing rawr yaml' group_by_zoom = rawr_yaml.get('group-zoom') assert group_by_zoom, 'Missing rawr group-zoom' assert isinstance(group_by_zoom, int), 'Invalid rawr group-zoom' if args.zoom_start is None: zoom_start = group_by_zoom else: zoom_start = args.zoom_start if args.zoom_stop is None: zoom_stop = cfg.max_zoom + 1 # +1 because exclusive else: zoom_stop = args.zoom_stop assert zoom_start >= group_by_zoom assert zoom_stop > zoom_start gridsize = args.gridsize total_samples = getattr(args, 'n-samples') samples_per_cell = total_samples / (gridsize * gridsize) tileset_dim = 2 ** group_by_zoom scale_factor = float(tileset_dim) / float(gridsize) stats = make_statsd_client_from_cfg(cfg) stats_handler = RawrTileEnqueueStatsHandler(stats) rawr_enqueuer = make_rawr_enqueuer_from_cfg( cfg, logger, stats_handler, peripherals.msg_marshaller) for grid_y in xrange(gridsize): tile_y_min = int(grid_y * scale_factor) tile_y_max = int((grid_y+1) * scale_factor) for grid_x in xrange(gridsize): tile_x_min = int(grid_x * scale_factor) tile_x_max = int((grid_x+1) * scale_factor) cell_samples = set() for i in xrange(samples_per_cell): while True: rand_x = randrange(tile_x_min, tile_x_max) rand_y = randrange(tile_y_min, tile_y_max) sample = rand_x, rand_y if sample in cell_samples: continue cell_samples.add(sample) break # enqueue a cell at a time # the queue mapper expects to be able to read the entirety of the # input into memory first for x, y in cell_samples: coord = Coordinate(zoom=group_by_zoom, column=x, row=y) pyramid = coord_pyramid(coord, zoom_start, zoom_stop) rawr_enqueuer(pyramid)
def tilequeue_enqueue_random_pyramids(cfg, peripherals, args)
enqueue random pyramids
2.79613
2.792126
1.001434
count_by_zoom = defaultdict(int) total = 0 for coord_int in toi_set: coord = coord_unmarshall_int(coord_int) count_by_zoom[coord.zoom] += 1 total += 1 peripherals.stats.gauge('tiles-of-interest.count', total) for zoom, count in count_by_zoom.items(): peripherals.stats.gauge( 'tiles-of-interest.by-zoom.z{:02d}'.format(zoom), count )
def emit_toi_stats(toi_set, peripherals)
Calculates new TOI stats and emits them via statsd.
3.758862
3.458613
1.086812
logger = make_logger(cfg, 'load_tiles_of_interest') toi_filename = "toi.txt" logger.info('Loading tiles of interest from %s ... ', toi_filename) with open(toi_filename, 'r') as f: new_toi = load_set_from_fp(f) logger.info('Loading tiles of interest from %s ... done', toi_filename) logger.info('Setting new TOI (with %s tiles) ... ', len(new_toi)) peripherals.toi.set_tiles_of_interest(new_toi) emit_toi_stats(new_toi, peripherals) logger.info('Setting new TOI (with %s tiles) ... done', len(new_toi)) logger.info('Loading tiles of interest ... done')
def tilequeue_load_tiles_of_interest(cfg, peripherals)
Given a newline-delimited file containing tile coordinates in `zoom/column/row` format, load those tiles into the tiles of interest.
2.958919
3.060934
0.966672
store = _make_store(cfg) format = lookup_format_by_extension('zip') layer = 'all' assert peripherals.toi, 'Missing toi' toi = peripherals.toi.fetch_tiles_of_interest() for coord in store.list_tiles(format, layer): coord_int = coord_marshall_int(coord) if coord_int not in toi: print serialize_coord(coord)
def tilequeue_stuck_tiles(cfg, peripherals)
Check which files exist on s3 but are not in toi.
9.486851
7.987507
1.187711
logger = make_logger(cfg, 'tile_status') # friendly warning to avoid confusion when this command outputs nothing # at all when called with no positional arguments. if not args.coords: logger.warning('No coordinates given on the command line.') return # pre-load TOI to avoid having to do it for each coordinate toi = None if peripherals.toi: toi = peripherals.toi.fetch_tiles_of_interest() # TODO: make these configurable! tile_format = lookup_format_by_extension('zip') store = _make_store(cfg) for coord_str in args.coords: coord = deserialize_coord(coord_str) # input checking! make sure that the coordinate is okay to use in # the rest of the code. if not coord: logger.warning('Could not deserialize %r as coordinate', coord_str) continue if not coord_is_valid(coord): logger.warning('Coordinate is not valid: %r (parsed from %r)', coord, coord_str) continue # now we think we probably have a valid coordinate. go look up # whether it exists in various places. logger.info("=== %s ===", coord_str) coord_int = coord_marshall_int(coord) if peripherals.inflight_mgr: is_inflight = peripherals.inflight_mgr.is_inflight(coord) logger.info('inflight: %r', is_inflight) if toi: in_toi = coord_int in toi logger.info('in TOI: %r' % (in_toi,)) data = store.read_tile(coord, tile_format) logger.info('tile in store: %r', bool(data))
def tilequeue_tile_status(cfg, peripherals, args)
Report the status of the given tiles in the store, queue and TOI.
5.141945
4.932639
1.042433
from tilequeue.stats import RawrTileEnqueueStatsHandler from tilequeue.rawr import make_rawr_enqueuer_from_cfg msg_marshall_yaml = cfg.yml.get('message-marshall') assert msg_marshall_yaml, 'Missing message-marshall config' msg_marshaller = make_message_marshaller(msg_marshall_yaml) logger = make_logger(cfg, 'rawr_enqueue') stats = make_statsd_client_from_cfg(cfg) stats_handler = RawrTileEnqueueStatsHandler(stats) rawr_enqueuer = make_rawr_enqueuer_from_cfg( cfg, logger, stats_handler, msg_marshaller) with open(args.expiry_path) as fh: coords = create_coords_generator_from_tiles_file(fh) rawr_enqueuer(coords)
def tilequeue_rawr_enqueue(cfg, args)
command to take tile expiry path and enqueue for rawr tile generation
3.870537
3.569071
1.084466
rawr_yaml = cfg.yml.get('rawr') assert rawr_yaml is not None, 'Missing rawr configuration in yaml' rawr_postgresql_yaml = rawr_yaml.get('postgresql') assert rawr_postgresql_yaml, 'Missing rawr postgresql config' from raw_tiles.formatter.msgpack import Msgpack from raw_tiles.gen import RawrGenerator from raw_tiles.source.conn import ConnectionContextManager from raw_tiles.source import parse_sources from raw_tiles.source import DEFAULT_SOURCES as DEFAULT_RAWR_SOURCES from tilequeue.rawr import RawrS3Sink from tilequeue.rawr import RawrStoreSink import boto3 # pass through the postgresql yaml config directly conn_ctx = ConnectionContextManager(rawr_postgresql_yaml) rawr_source_list = rawr_yaml.get('sources', DEFAULT_RAWR_SOURCES) assert isinstance(rawr_source_list, list), \ 'RAWR source list should be a list' assert len(rawr_source_list) > 0, \ 'RAWR source list should be non-empty' rawr_store = rawr_yaml.get('store') if rawr_store: store = make_store( rawr_store, credentials=cfg.subtree('aws credentials')) rawr_sink = RawrStoreSink(store) else: rawr_sink_yaml = rawr_yaml.get('sink') assert rawr_sink_yaml, 'Missing rawr sink config' sink_type = rawr_sink_yaml.get('type') assert sink_type, 'Missing rawr sink type' if sink_type == 's3': s3_cfg = rawr_sink_yaml.get('s3') assert s3_cfg, 'Missing s3 config' bucket = s3_cfg.get('bucket') assert bucket, 'Missing rawr sink bucket' sink_region = s3_cfg.get('region') assert sink_region, 'Missing rawr sink region' prefix = s3_cfg.get('prefix') assert prefix, 'Missing rawr sink prefix' extension = s3_cfg.get('extension') assert extension, 'Missing rawr sink extension' tags = s3_cfg.get('tags') from tilequeue.store import make_s3_tile_key_generator tile_key_gen = make_s3_tile_key_generator(s3_cfg) s3_client = boto3.client('s3', region_name=sink_region) rawr_sink = RawrS3Sink( s3_client, bucket, prefix, extension, tile_key_gen, tags) elif sink_type == 'none': from tilequeue.rawr import RawrNullSink rawr_sink = RawrNullSink() else: assert 0, 'Unknown rawr sink type %s' % sink_type rawr_source = parse_sources(rawr_source_list) rawr_formatter = Msgpack() rawr_gen = RawrGenerator(rawr_source, rawr_formatter, rawr_sink) return rawr_gen, conn_ctx
def _tilequeue_rawr_setup(cfg)
command to read from rawr queue and generate rawr tiles
2.354143
2.336058
1.007742
tiles_of_interest = peripherals.toi.fetch_tiles_of_interest() coords = map(coord_unmarshall_int, tiles_of_interest) _tilequeue_rawr_seed(cfg, peripherals, coords)
def tilequeue_rawr_seed_toi(cfg, peripherals)
command to read the toi and enqueue the corresponding rawr tiles
5.822131
5.856933
0.994058
rawr_yaml = cfg.yml.get('rawr') assert rawr_yaml is not None, 'Missing rawr configuration in yaml' group_by_zoom = rawr_yaml.get('group-zoom') assert group_by_zoom is not None, 'Missing group-zoom rawr config' max_coord = 2 ** group_by_zoom # creating the list of all coordinates here might be a lot of memory, but # if we handle the TOI okay then we should be okay with z10. if the group # by zoom is much larger, then it might start running into problems. coords = [] for x in xrange(0, max_coord): for y in xrange(0, max_coord): coords.append(Coordinate(zoom=group_by_zoom, column=x, row=y)) _tilequeue_rawr_seed(cfg, peripherals, coords)
def tilequeue_rawr_seed_all(cfg, peripherals)
command to enqueue all the tiles at the group-by zoom
5.456368
4.896401
1.114363
precision = precision_for_zoom(zoom) fs = create_layer_feature_collection(features, precision) json.dump(fs, out)
def encode_single_layer(out, features, zoom)
Encode a list of (WKB|shapely, property dict, id) features into a GeoJSON stream. If no id is available, pass in None. Geometries in the features list are assumed to be lon, lats.
9.871971
9.983368
0.988842
precision = precision_for_zoom(zoom) geojson = {} for layer_name, features in features_by_layer.items(): fs = create_layer_feature_collection(features, precision) geojson[layer_name] = fs json.dump(geojson, out)
def encode_multiple_layers(out, features_by_layer, zoom)
features_by_layer should be a dict: layer_name -> feature tuples
4.0434
3.634582
1.11248
if geometry['type'] in ('Point', 'MultiPoint'): return elif geometry['type'] == 'LineString': for arc_index, old_arc in enumerate(geometry['arcs']): geometry['arcs'][arc_index] = len(merged_arcs) merged_arcs.append(old_arcs[old_arc]) elif geometry['type'] == 'Polygon': for ring in geometry['arcs']: for arc_index, old_arc in enumerate(ring): ring[arc_index] = len(merged_arcs) merged_arcs.append(old_arcs[old_arc]) elif geometry['type'] == 'MultiLineString': for part in geometry['arcs']: for arc_index, old_arc in enumerate(part): part[arc_index] = len(merged_arcs) merged_arcs.append(old_arcs[old_arc]) elif geometry['type'] == 'MultiPolygon': for part in geometry['arcs']: for ring in part: for arc_index, old_arc in enumerate(ring): ring[arc_index] = len(merged_arcs) merged_arcs.append(old_arcs[old_arc]) else: raise NotImplementedError("Can't do %s geometries" % geometry['type'])
def update_arc_indexes(geometry, merged_arcs, old_arcs)
Update geometry arc indexes, and add arcs to merged_arcs along the way. Arguments are modified in-place, and nothing is returned.
1.538767
1.547889
0.994107
tx, ty = bounds[0], bounds[1] sx, sy = (bounds[2] - bounds[0]) / size, (bounds[3] - bounds[1]) / size def forward(lon, lat): return int(round((lon - tx) / sx)), int(round((lat - ty) / sy)) return dict(translate=(tx, ty), scale=(sx, sy)), forward
def get_transform(bounds, size=4096)
Return a TopoJSON transform dictionary and a point-transforming function. Size is the tile size in pixels and sets the implicit output resolution.
2.874446
2.7725
1.036771
coords = [transform(x, y) for (x, y) in line.coords] pairs = zip(coords[:], coords[1:]) diffs = [(x2 - x1, y2 - y1) for ((x1, y1), (x2, y2)) in pairs] return coords[:1] + [(x, y) for (x, y) in diffs if (x, y) != (0, 0)]
def diff_encode(line, transform)
Differentially encode a shapely linestring or ring.
2.607384
2.555699
1.020223
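An illustration of the differential encoding above, applied to plain integer grid coordinates (i.e. after a forward() transform has already been applied):

```python
def diff_encode_points(points):
    # Keep the first point absolute, then store deltas, dropping zero moves.
    pairs = zip(points[:-1], points[1:])
    diffs = [(x2 - x1, y2 - y1) for ((x1, y1), (x2, y2)) in pairs]
    return points[:1] + [d for d in diffs if d != (0, 0)]

print(diff_encode_points([(10, 10), (13, 10), (13, 12), (13, 12)]))
# [(10, 10), (3, 0), (0, 2)] -- the duplicate final point encodes to (0, 0)
# and is dropped
```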
transform, forward = get_transform(bounds, size=size) arcs = [] geometries_by_layer = {} for layer, features in features_by_layer.iteritems(): geometries = [] for shape, props, fid in features: if shape.type == 'GeometryCollection': continue geometry = dict(properties=props) if fid is not None: geometry['id'] = fid elif shape.type == 'Point': geometry.update(dict( type='Point', coordinates=forward(shape.x, shape.y))) elif shape.type == 'LineString': geometry.update(dict(type='LineString', arcs=[len(arcs)])) arcs.append(diff_encode(shape, forward)) elif shape.type == 'Polygon': geometry.update(dict(type='Polygon', arcs=[])) rings = [shape.exterior] + list(shape.interiors) for ring in rings: geometry['arcs'].append([len(arcs)]) arcs.append(diff_encode(ring, forward)) elif shape.type == 'MultiPoint': geometry.update(dict(type='MultiPoint', coordinates=[])) for point in shape.geoms: geometry['coordinates'].append(forward(point.x, point.y)) elif shape.type == 'MultiLineString': geometry.update(dict(type='MultiLineString', arcs=[])) for line in shape.geoms: geometry['arcs'].append([len(arcs)]) arcs.append(diff_encode(line, forward)) elif shape.type == 'MultiPolygon': geometry.update(dict(type='MultiPolygon', arcs=[])) for polygon in shape.geoms: rings = [polygon.exterior] + list(polygon.interiors) polygon_arcs = [] for ring in rings: polygon_arcs.append([len(arcs)]) arcs.append(diff_encode(ring, forward)) geometry['arcs'].append(polygon_arcs) else: raise NotImplementedError("Can't do %s geometries" % shape.type) geometries.append(geometry) geometries_by_layer[layer] = dict( type='GeometryCollection', geometries=geometries, ) result = dict( type='Topology', transform=transform, objects=geometries_by_layer, arcs=arcs, ) json.dump(result, file)
def encode(file, features_by_layer, bounds, size=4096)
Encode a dict of layername: (shape, props, id) features into a TopoJSON stream. If no id is available, pass in None. Geometries in the features list are assumed to be unprojected lon, lats. Bounds are given in geographic coordinates as (xmin, ymin, xmax, ymax). Size is the number of integer coordinates which span the extent of the tile.
1.808148
1.806943
1.000667
min_point = 'ST_MakePoint(%.12f, %.12f)' % (bounds[0], bounds[1]) max_point = 'ST_MakePoint(%.12f, %.12f)' % (bounds[2], bounds[3]) bbox_no_srid = 'ST_MakeBox2D(%s, %s)' % (min_point, max_point) bbox = 'ST_SetSrid(%s, %d)' % (bbox_no_srid, srid) bbox_filter = \ '((%(col)s && %(bbox)s) AND (' \ ' st_overlaps(%(col)s, %(bbox)s) OR' \ ' st_contains(%(bbox)s, %(col)s)' \ '))' \ % dict(col=geometry_col_name, bbox=bbox) return bbox_filter
def jinja_filter_bbox_overlaps(bounds, geometry_col_name, srid=3857)
Check whether the boundary of the geometry intersects with the bounding box. Note that the usual meaning of "overlaps" in GIS terminology is that the boundaries of the box and polygon intersect, but not the interiors. This means that if the box or polygon is completely within the other, then st_overlaps will be false. However, that's not what we want. This is used for boundary testing, and while we don't want to pull out a whole country boundary if the bounding box is fully within it, we _do_ want to if the country boundary is within the bounding box. Therefore, this test has an extra "or st_contains" test to also pull in any boundaries which are completely within the bounding box.
2.209101
2.236792
0.987621
sources = parse_source_data(query_cfg) queries_generator = make_queries_generator( sources, template_path, reload_templates) return DataFetcher( postgresql_conn_info, queries_generator, io_pool)
def make_db_data_fetcher(postgresql_conn_info, template_path, reload_templates, query_cfg, io_pool)
Returns an object which is callable with the zoom and unpadded bounds and which returns a list of rows.
4.068848
4.529128
0.898373
assert parent is not None, \ "Parent tile must be provided and not None to make a metatile." if len(tiles) == 0: return [] if date_time is None: date_time = gmtime()[0:6] layer = tiles[0]['layer'] buf = StringIO.StringIO() with zipfile.ZipFile(buf, mode='w') as z: for tile in tiles: assert tile['layer'] == layer coord = tile['coord'] # change in zoom level from parent to coord. since parent should # be a parent, its zoom should always be equal or smaller to that # of coord. delta_z = coord.zoom - parent.zoom assert delta_z >= 0, "Coordinates must be descendents of parent" # change in row/col coordinates are relative to the upper left # coordinate at that zoom. both should be positive. delta_row = coord.row - (int(parent.row) << delta_z) delta_column = coord.column - (int(parent.column) << delta_z) assert delta_row >= 0, \ "Coordinates must be contained by their parent, but " + \ "row is not." assert delta_column >= 0, \ "Coordinates must be contained by their parent, but " + \ "column is not." tile_name = '%d/%d/%d.%s' % \ (delta_z, delta_column, delta_row, tile['format'].extension) tile_data = tile['tile'] info = zipfile.ZipInfo(tile_name, date_time) z.writestr(info, tile_data, zipfile.ZIP_DEFLATED) return [dict(tile=buf.getvalue(), format=zip_format, coord=parent, layer=layer)]
def make_multi_metatile(parent, tiles, date_time=None)
Make a metatile containing a list of tiles all having the same layer, with coordinates relative to the given parent. Set date_time to a 6-tuple of (year, month, day, hour, minute, second) to set the timestamp for members. Otherwise the current wall clock time is used.
3.424563
3.294612
1.039444
if a.zoom < b.zoom: b = b.zoomTo(a.zoom).container() elif a.zoom > b.zoom: a = a.zoomTo(b.zoom).container() while a.row != b.row or a.column != b.column: a = a.zoomBy(-1).container() b = b.zoomBy(-1).container() # by this point a == b. return a
def common_parent(a, b)
Find the common parent tile of both a and b. The common parent is the tile at the highest zoom which both a and b can be transformed into by lowering their zoom levels.
3.571378
2.987128
1.195589
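A sketch of the common-parent search with plain (z, x, y) tuples: bring both tiles to the same zoom, then step up one zoom at a time until they coincide. The real function does the same thing with ModestMaps zoomTo/zoomBy calls.

```python
def common_parent_zxy(a, b):
    (za, xa, ya), (zb, xb, yb) = a, b
    # First bring both tiles to the shallower of the two zooms.
    while za > zb:
        za, xa, ya = za - 1, xa // 2, ya // 2
    while zb > za:
        zb, xb, yb = zb - 1, xb // 2, yb // 2
    # Then walk up together until the column/row match.
    while (xa, ya) != (xb, yb):
        za, xa, ya = za - 1, xa // 2, ya // 2
        zb, xb, yb = zb - 1, xb // 2, yb // 2
    return (za, xa, ya)

print(common_parent_zxy((3, 5, 2), (3, 4, 3)))  # (2, 2, 1)
print(common_parent_zxy((4, 0, 0), (2, 3, 3)))  # (0, 0, 0)
```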
parent = None for t in tiles: if parent is None: parent = t else: parent = common_parent(parent, t) return parent
def _parent_tile(tiles)
Find the common parent tile for a sequence of tiles.
3.464434
2.654801
1.304969
groups = defaultdict(list) for tile in tiles: key = tile['layer'] groups[key].append(tile) metatiles = [] for group in groups.itervalues(): parent = _parent_tile(t['coord'] for t in group) metatiles.extend(make_multi_metatile(parent, group, date_time)) return metatiles
def make_metatiles(size, tiles, date_time=None)
Group by layers, and make metatiles out of all the tiles which share those properties relative to the "top level" tile which is parent of them all. Provide a 6-tuple date_time to set the timestamp on each tile within the metatile, or leave it as None to use the current time.
4.013924
3.420928
1.173344
ext = fmt.extension if offset is None: tile_name = '0/0/0.%s' % ext else: tile_name = '%d/%d/%d.%s' % (offset.zoom, offset.column, offset.row, ext) with zipfile.ZipFile(io, mode='r') as zf: if tile_name in zf.namelist(): return zf.read(tile_name) else: return None
def extract_metatile(io, fmt, offset=None)
Extract the tile at the given offset (defaults to 0/0/0) and format from the metatile in the file-like object io.
2.661887
2.440129
1.09088
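A round-trip sketch of the metatile member naming assumed above: files inside the zip are named '<dz>/<dx>/<dy>.<ext>' relative to the metatile's parent tile. The extension and payloads here are invented for the example.

```python
import io
import zipfile

# Build a tiny two-member "metatile" in memory.
buf = io.BytesIO()
with zipfile.ZipFile(buf, mode='w') as z:
    z.writestr('0/0/0.json', b'{"layer": "all"}')
    z.writestr('1/1/0.json', b'{"layer": "all"}')

# Read a member back by its offset-derived name, as extract_metatile does.
with zipfile.ZipFile(io.BytesIO(buf.getvalue()), mode='r') as z:
    print(z.namelist())          # ['0/0/0.json', '1/1/0.json']
    print(z.read('0/0/0.json'))  # the tile at the default 0/0/0 offset
```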
names_1 = set(zip_1.namelist()) names_2 = set(zip_2.namelist()) if names_1 != names_2: return False for n in names_1: bytes_1 = zip_1.read(n) bytes_2 = zip_2.read(n) if bytes_1 != bytes_2: return False return True
def _metatile_contents_equal(zip_1, zip_2)
Given two open zip files as arguments, this returns True if the zips both contain the same set of files, having the same names, and each file within the zip is byte-wise identical to the one with the same name in the other zip.
1.756386
1.655247
1.061102
try: buf_1 = StringIO.StringIO(tile_data_1) buf_2 = StringIO.StringIO(tile_data_2) with zipfile.ZipFile(buf_1, mode='r') as zip_1: with zipfile.ZipFile(buf_2, mode='r') as zip_2: return _metatile_contents_equal(zip_1, zip_2) except (StandardError, zipfile.BadZipFile, zipfile.LargeZipFile): # errors, such as files not being proper zip files, or missing # some attributes or contents that we expect, are treated as not # equal. pass return False
def metatiles_are_equal(tile_data_1, tile_data_2)
Return True if the two tiles are both zipped metatiles and contain the same set of files with the same contents. This ignores the timestamp of the individual files in the zip files, as well as their order or any other metadata.
3.175475
2.980545
1.065401
return dict( z=int_if_exact(coord.zoom), x=int_if_exact(coord.column), y=int_if_exact(coord.row), )
def make_coord_dict(coord)
helper function to make a dict from a coordinate for logging
4.02433
3.776985
1.065487
features_by_layer = {} for feature_layer in feature_layers: layer_name = feature_layer['name'] features = feature_layer['features'] features_by_layer[layer_name] = features return features_by_layer
def convert_feature_layers_to_dict(feature_layers)
takes a list of 'feature_layer' objects and converts to a dict keyed by the layer name
1.957736
1.876
1.043569
groups = [] for i in range(len(self.zoom_range_items)): groups.append([]) # first group the coordinates based on their queue for coord in coords: for i, zri in enumerate(self.zoom_range_items): toi_match = zri.in_toi is None or \ (coord in self.toi_set) == zri.in_toi if zri.start <= coord.zoom < zri.end and toi_match: groups[i].append(coord) break # now, we need to just verify that for each particular group, # should they be further grouped, eg by a particular zoom 10 # tile for i, zri in enumerate(self.zoom_range_items): group = groups[i] if not group: continue if zri.group_by_zoom is None: for coord in group: yield CoordGroup([coord], zri.queue_id) else: by_parent_coords = defaultdict(list) for coord in group: if coord.zoom >= zri.group_by_zoom: group_coord = coord.zoomTo(zri.group_by_zoom) group_key = coord_marshall_int(group_coord) by_parent_coords[group_key].append(coord) else: # this means that a coordinate belonged to a # particular queue but the zoom was lower than # the group by zoom # this probably shouldn't happen # should it be an assert instead? yield CoordGroup([coord], zri.queue_id) for group_key, coords in by_parent_coords.iteritems(): yield CoordGroup(coords, zri.queue_id)
def group(self, coords)
return CoordGroups that can be used to send to queues. Each CoordGroup represents a message that can be sent to a particular queue, stamped with the queue_id. The list of coords, which may contain just one element, is what should be used as the payload for each queue message.
4.560393
4.222362
1.080057
parent = None for coord in coords: assert parent_zoom <= coord.zoom coord_parent = coord.zoomTo(parent_zoom).container() if parent is None: parent = coord_parent else: assert parent == coord_parent assert parent is not None, 'No coords?' return parent
def common_parent(coords, parent_zoom)
Return the common parent for coords. Also check that all coords do indeed share the same parent coordinate.
4.003246
3.88588
1.030203
assert isinstance(coord, Coordinate) coord = coord.container() return Tile(int(coord.zoom), int(coord.column), int(coord.row))
def convert_coord_object(coord)
Convert ModestMaps.Core.Coordinate -> raw_tiles.tile.Tile
5.855038
3.866125
1.514446
assert isinstance(tile, Tile) return Coordinate(zoom=tile.z, column=tile.x, row=tile.y)
def unconvert_coord_object(tile)
Convert raw_tiles.tile.Tile -> ModestMaps.Core.Coordinate
4.155983
3.508317
1.184609
if date_time is None: date_time = gmtime()[0:6] buf = StringIO() with zipfile.ZipFile(buf, mode='w') as z: for fmt_data in rawr_tile.all_formatted_data: zip_info = zipfile.ZipInfo(fmt_data.name, date_time) z.writestr(zip_info, fmt_data.data, zipfile.ZIP_DEFLATED) return buf.getvalue()
def make_rawr_zip_payload(rawr_tile, date_time=None)
make a zip file from the rawr tile formatted data
2.60798
2.555312
1.020611
# the io we get from S3 is streaming, so we can't seek on it, but zipfile # seems to require that. so we buffer it all in memory. RAWR tiles are # generally up to around 100MB in size, which should be safe to store in # RAM. from tilequeue.query.common import Table from io import BytesIO zfh = zipfile.ZipFile(BytesIO(payload), 'r') def get_table(table_name): # need to extract the whole compressed file from zip reader, as it # doesn't support .tell() on the filelike, which gzip requires. data = zfh.open(table_name, 'r').read() unpacker = Unpacker(file_like=BytesIO(data)) source = table_sources[table_name] return Table(source, unpacker) return get_table
def unpack_rawr_zip_payload(table_sources, payload)
unpack a zipfile and turn it into a callable "tables" object.
7.877991
7.529075
1.046342
from time import sleep backoff_interval = 1 backoff_factor = 2 for try_counter in xrange(0, num_tries): failed_messages = self.send_without_retry(payloads) # success! if not failed_messages: payloads = [] break # output some information about the failures for debugging # purposes. we expect failures to be quite rare, so we can be # pretty verbose. if logger: for msg in failed_messages: logger.warning("Failed to send message on try %d: Id=%r, " "SenderFault=%r, Code=%r, Message=%r" % (try_counter, msg['Id'], msg.get('SenderFault'), msg.get('Code'), msg.get('Message'))) # wait a little while, in case the problem is that we're talking # too fast. sleep(backoff_interval) backoff_interval *= backoff_factor # filter out the failed payloads for retry retry_payloads = [] for msg in failed_messages: i = int(msg['Id']) retry_payloads.append(payloads[i]) payloads = retry_payloads if payloads: raise Exception('Messages failed to send to sqs after %d ' 'retries: %s' % (num_tries, len(payloads)))
def send(self, payloads, logger, num_tries=5)
Enqueue payloads to the SQS queue, retrying failed messages with exponential backoff.
4.017818
3.823894
1.050714
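The retry-with-exponential-backoff pattern used above, shown in isolation; the flaky operation here is a stand-in for the batched SQS send and returns the list of failed messages (empty on success):

```python
import time

def retry_with_backoff(operation, num_tries=5, backoff_interval=1,
                       backoff_factor=2):
    # Retry until the operation reports no failures, doubling the pause
    # between attempts; give up after num_tries.
    failed = None
    for attempt in range(num_tries):
        failed = operation()
        if not failed:
            return
        time.sleep(backoff_interval)
        backoff_interval *= backoff_factor
    raise Exception('still failing after %d tries: %r' % (num_tries, failed))

attempts = []
def flaky():
    attempts.append(1)
    return [] if len(attempts) >= 3 else ['message-0']  # fails twice, then succeeds

retry_with_backoff(flaky, backoff_interval=0.01)
print(len(attempts))  # 3
```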
resp = self.sqs_client.receive_message( QueueUrl=self.queue_url, MaxNumberOfMessages=1, AttributeNames=('SentTimestamp',), WaitTimeSeconds=self.recv_wait_time_seconds, ) if resp['ResponseMetadata']['HTTPStatusCode'] != 200: raise Exception('Invalid status code from sqs: %s' % resp['ResponseMetadata']['HTTPStatusCode']) msgs = resp.get('Messages') if not msgs: return None assert len(msgs) == 1 msg = msgs[0] payload = msg['Body'] handle = msg['ReceiptHandle'] timestamp = msg['Attributes']['SentTimestamp'] metadata = dict(timestamp=timestamp) msg_handle = MessageHandle(handle, payload, metadata) return msg_handle
def read(self)
read a single message from the queue
2.538161
2.439846
1.040295
self.sqs_client.delete_message( QueueUrl=self.queue_url, ReceiptHandle=msg_handle.handle, )
def done(self, msg_handle)
acknowledge completion of message
3.582879
3.361649
1.06581
# also return back whether the response was cached # useful for metrics is_cached = False get_options = dict( Bucket=self.bucket, Key=self.key, ) if self.etag: get_options['IfNoneMatch'] = self.etag try: resp = self.s3_client.get_object(**get_options) except Exception as e: # boto3 client treats 304 responses as exceptions if isinstance(e, ClientError): resp = getattr(e, 'response', None) assert resp else: raise e status_code = resp['ResponseMetadata']['HTTPStatusCode'] if status_code == 304: assert self.prev_toi toi = self.prev_toi is_cached = True elif status_code == 200: body = resp['Body'] try: gzip_payload = body.read() finally: try: body.close() except Exception: pass gzip_file_obj = StringIO(gzip_payload) toi = load_set_from_gzipped_fp(gzip_file_obj) self.prev_toi = toi self.etag = resp['ETag'] else: assert 0, 'Unknown status code from toi get: %s' % status_code return toi, is_cached
def tiles_of_interest(self)
conditionally get the tiles of interest (TOI) from s3, reusing the previously downloaded set when the stored ETag shows it has not changed
3.513704
3.194893
1.099788
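The caching logic above hinges on the If-None-Match/ETag round trip: a 304 response means the cached TOI can be reused. A toy sketch of that pattern with a fake fetcher in place of boto3; CachedFetcher and fake_fetch are invented names, not part of the real store class.

class CachedFetcher(object):
    def __init__(self, fetch):
        self.fetch = fetch       # fetch(etag) -> (status, etag, value)
        self.etag = None
        self.prev_value = None

    def get(self):
        status, etag, value = self.fetch(self.etag)
        if status == 304:
            return self.prev_value, True      # served from cache
        assert status == 200, 'unexpected status: %s' % status
        self.etag = etag
        self.prev_value = value
        return value, False

# fake backend: always the same content, so the second get() is a cache hit
def fake_fetch(etag):
    if etag == 'v1':
        return 304, etag, None
    return 200, 'v1', {'tile set'}

f = CachedFetcher(fake_fetch)
print(f.get())   # ({'tile set'}, False)
print(f.get())   # ({'tile set'}, True)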
# returns tuple of (handle, error), either of which can be None
track_result = msg_tracker.done(coord_handle)
queue_handle = track_result.queue_handle
if not queue_handle:
    return None, None

tile_queue = queue_mapper.get_queue(queue_handle.queue_id)
assert tile_queue, \
    'Missing tile_queue: %s' % queue_handle.queue_id

parent_tile = None
if track_result.all_done:
    parent_tile = track_result.parent_tile

try:
    tile_queue.job_done(queue_handle.handle)
except Exception as e:
    stacktrace = format_stacktrace_one_line()
    tile_proc_logger.error_job_done(
        'tile_queue.job_done', e, stacktrace, coord, parent_tile,
    )
    return queue_handle, e

if parent_tile is not None:
    # we completed a tile pyramid and should log appropriately
    start_time = timing_state['start']
    stop_time = convert_seconds_to_millis(time.time())
    tile_proc_logger.log_processed_pyramid(
        parent_tile, start_time, stop_time)
    stats_handler.processed_pyramid(
        parent_tile, start_time, stop_time)
else:
    try:
        tile_queue.job_progress(queue_handle.handle)
    except Exception as e:
        stacktrace = format_stacktrace_one_line()
        err_details = {"queue_handle": queue_handle.handle}
        if isinstance(e, JobProgressException):
            err_details = e.err_details
        tile_proc_logger.error_job_progress(
            'tile_queue.job_progress', e, stacktrace, coord, parent_tile,
            err_details,
        )
        return queue_handle, e

return queue_handle, None
def _ack_coord_handle(coord, coord_handle, queue_mapper, msg_tracker, timing_state, tile_proc_logger, stats_handler)
share code for acknowledging a coordinate
3.058302
3.045681
1.004144
rounded = round(num)
delta = abs(num - rounded)

if delta < eps:
    return int(rounded)
else:
    return int(resolution(num))
def _snapping_round(num, eps, resolution)
Return num snapped to the nearest integer when it is within eps of one, otherwise int(resolution(num)).
5.10112
2.963173
1.721506
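A quick, self-contained check of the snapping behaviour, using math.floor as the fallback resolution function. The function body is copied from the row above so the snippet runs on its own.

import math

def _snapping_round(num, eps, resolution):
    # copied from the row above for self-containedness
    rounded = round(num)
    delta = abs(num - rounded)
    if delta < eps:
        return int(rounded)
    else:
        return int(resolution(num))

assert _snapping_round(4.9999, 1e-3, math.floor) == 5   # within eps: snap to 5
assert _snapping_round(4.25, 1e-3, math.floor) == 4     # too far: floor to 4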
if shape.geom_type == 'LineString':
    return [shape]
elif shape.geom_type == 'MultiLineString':
    return shape.geoms
elif shape.geom_type == 'GeometryCollection':
    lines = []
    for geom in shape.geoms:
        lines.extend(_explode_lines(geom))
    return lines
return []
def _explode_lines(shape)
Return a list of LineStrings which make up the shape.
2.029773
1.649335
1.230662
lines = _explode_lines(shape)
if len(lines) == 1:
    return lines[0]
else:
    return MultiLineString(lines)
def _lines_only(shape)
Extract the lines (LineString, MultiLineString) from any geometry. We expect the input to be mostly lines, such as the result of an intersection between a line and a polygon. The main idea is to remove points, and any other geometry which might throw a wrench in the works.
3.845772
2.858893
1.345196
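A usage sketch of the lines-only filtering, assuming Shapely is installed. lines_only here is a simplified restatement of the two helpers above for the sake of a runnable snippet, not the original code.

from shapely.geometry import (GeometryCollection, LineString, MultiLineString,
                              Point)

def lines_only(shape):
    # simplified restatement: keep LineStrings, drop points and anything else
    if shape.geom_type == 'LineString':
        return shape
    lines = [g for g in getattr(shape, 'geoms', [])
             if g.geom_type == 'LineString']
    return lines[0] if len(lines) == 1 else MultiLineString(lines)

# a mixed collection, like a line/polygon intersection might produce
mixed = GeometryCollection([Point(0, 0), LineString([(0, 0), (1, 1)])])
print(lines_only(mixed).geom_type)  # LineString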
assert shape.geom_type in ('Polygon', 'MultiPolygon')

if shape.geom_type == 'Polygon':
    return orient(shape)
else:
    polys = []
    for geom in shape.geoms:
        polys.append(orient(geom))
    return MultiPolygon(polys)
def _orient(shape)
The Shapely version of the orient function appears to only work on Polygons, and fails on MultiPolygons. This is a quick wrapper to allow orienting of either.
2.580954
2.26767
1.138153
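A brief sketch, assuming Shapely, of why the wrapper above exists: shapely.geometry.polygon.orient() accepts a single Polygon, so a MultiPolygon has to be rebuilt from its oriented parts. The geometries below are invented.

from shapely.geometry import MultiPolygon, Polygon
from shapely.geometry.polygon import orient

# two clockwise (negatively oriented) squares
cw = [(0, 0), (0, 1), (1, 1), (1, 0)]
mp = MultiPolygon([Polygon(cw), Polygon([(x + 2, y) for x, y in cw])])

# orient each part, then reassemble the MultiPolygon
oriented = MultiPolygon([orient(p) for p in mp.geoms])
print(all(p.exterior.is_ccw for p in oriented.geoms))  # True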
self.coordinates = []
self.index = []
self.position = 0
self.lastX = 0
self.lastY = 0
self.isPoly = False
self.isPoint = True
self.dropped = 0
self.first = True

# Used for exception strings
self._current_string = geometry

reader = _ExtendedUnPacker(geometry)

# Start the parsing
self._dispatchNextType(reader)
def parseGeometry(self, geometry)
A factory method for creating objects of the correct OpenGIS type.
11.484156
11.513235
0.997474
# Need to check endianness here!
endianness = reader.unpack_byte()
if endianness == 0:
    reader.setEndianness('XDR')
elif endianness == 1:
    reader.setEndianness('NDR')
else:
    raise ExceptionWKBParser("Invalid endianness in WKB format.\n"
                             "The parser can only cope with XDR/big endian WKB format.\n"
                             "To force the WKB format to be in XDR use "
                             "AsBinary(<fieldname>,'XDR')")

geotype = reader.unpack_uint32()

mask = geotype & 0x80000000  # This is used to mask off the dimension flag.
srid = geotype & 0x20000000

# ignore srid ...
if srid != 0:
    reader.unpack_uint32()

dimensions = 2
if mask == 0:
    dimensions = 2
else:
    dimensions = 3

geotype = geotype & 0x1FFFFFFF

# Dispatch to a method on the type id.
if self._typemap.has_key(geotype):
    self._typemap[geotype](reader, dimensions)
else:
    raise ExceptionWKBParser('Error type to dispatch with geotype = %s\n'
                             'Invalid geometry in WKB string: %s'
                             % (str(geotype), str(self._current_string),))
def _dispatchNextType(self,reader)
Read a type id from the binary stream (reader) and call the correct method to parse it.
5.958612
5.880015
1.013367
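The header layout parsed above (one byte-order byte, then a uint32 geometry type whose high bits carry the dimension and SRID flags) can be decoded with the stdlib struct module. An illustrative sketch only; read_wkb_header is an invented helper, not part of the real parser.

import struct

def read_wkb_header(buf):
    # first byte: 0 = XDR/big endian, 1 = NDR/little endian
    byte_order = buf[0] if isinstance(buf[0], int) else ord(buf[0])
    fmt = '>I' if byte_order == 0 else '<I'
    (geotype,) = struct.unpack(fmt, buf[1:5])
    has_z = bool(geotype & 0x80000000)      # dimension flag
    has_srid = bool(geotype & 0x20000000)   # SRID flag
    return geotype & 0x1FFFFFFF, has_z, has_srid

# little-endian header for a plain 2D Point (type 1), no SRID
header = b'\x01' + struct.pack('<I', 1)
print(read_wkb_header(header))  # (1, False, False)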
if not buffer_cfg:
    return bounds

format_buffer_cfg = buffer_cfg.get(format.extension)
if format_buffer_cfg is None:
    return bounds

geometry_type = normalize_geometry_type(geometry_type)

per_layer_cfg = format_buffer_cfg.get('layer', {}).get(layer_name)
if per_layer_cfg is not None:
    layer_geom_pixels = per_layer_cfg.get(geometry_type)
    if layer_geom_pixels is not None:
        assert isinstance(layer_geom_pixels, Number)
        result = bounds_buffer(
            bounds, meters_per_pixel_dim * layer_geom_pixels)
        return result

by_geometry_pixels = format_buffer_cfg.get('geometry', {}).get(
    geometry_type)
if by_geometry_pixels is not None:
    assert isinstance(by_geometry_pixels, Number)
    result = bounds_buffer(
        bounds, meters_per_pixel_dim * by_geometry_pixels)
    return result

return bounds
def calc_buffered_bounds(format, bounds, meters_per_pixel_dim, layer_name, geometry_type, buffer_cfg)
Calculate the buffered bounds per format per layer based on config.
2.147363
2.068911
1.03792
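The lookup order above (per-layer override first, then the per-geometry default, then no buffering) can be illustrated with a made-up config. All names and values below are invented, and bounds_buffer is re-sketched rather than imported.

from numbers import Number

def bounds_buffer(bounds, meters):
    # expand a (minx, miny, maxx, maxy) box outward by `meters`
    minx, miny, maxx, maxy = bounds
    return (minx - meters, miny - meters, maxx + meters, maxy + meters)

buffer_cfg_for_format = {
    'layer': {'water': {'polygon': 2}},   # per-layer override wins ...
    'geometry': {'polygon': 1},           # ... over the per-geometry default
}

def buffered(bounds, meters_per_pixel_dim, layer_name, geometry_type):
    pixels = (buffer_cfg_for_format.get('layer', {})
              .get(layer_name, {})
              .get(geometry_type))
    if pixels is None:
        pixels = buffer_cfg_for_format.get('geometry', {}).get(geometry_type)
    if pixels is None:
        return bounds
    assert isinstance(pixels, Number)
    return bounds_buffer(bounds, meters_per_pixel_dim * pixels)

print(buffered((0, 0, 100, 100), 10, 'water', 'polygon'))  # 2 pixels -> 20m
print(buffered((0, 0, 100, 100), 10, 'roads', 'polygon'))  # falls back to 1 pixel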
polys = []
for poly in shape.geoms:
    if tile_bounds.intersects(poly):
        if not clip_bounds.contains(poly):
            poly = clip_bounds.intersection(poly)

            # the intersection operation can make the resulting polygon
            # invalid. including it in a MultiPolygon would make that
            # invalid too. instead, we skip it, and hope it wasn't too
            # important.
            if not poly.is_valid:
                continue

        if poly.type == 'Polygon':
            polys.append(poly)
        elif poly.type == 'MultiPolygon':
            polys.extend(poly.geoms)

return geometry.MultiPolygon(polys)
def _intersect_multipolygon(shape, tile_bounds, clip_bounds)
Return the parts of the MultiPolygon shape which overlap the tile_bounds, each clipped to the clip_bounds. This can be used to extract only the parts of a multipolygon which are actually visible in the tile, while keeping those parts which extend beyond the tile clipped to avoid huge polygons.
3.632454
3.547003
1.024091
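A short Shapely sketch of the clip-to-padded-bounds idea above, with invented bounds and geometries: keep only the parts of a MultiPolygon that touch the tile, clipped to a slightly larger box.

from shapely import geometry

tile_bounds = geometry.box(0, 0, 10, 10)
clip_bounds = geometry.box(-1, -1, 11, 11)   # padded a little beyond the tile

shape = geometry.MultiPolygon([
    geometry.box(5, 5, 20, 20),    # overlaps the tile: kept, but clipped
    geometry.box(50, 50, 60, 60),  # far away: dropped entirely
])

polys = []
for poly in shape.geoms:
    if tile_bounds.intersects(poly):
        if not clip_bounds.contains(poly):
            poly = clip_bounds.intersection(poly)
        if poly.is_valid and poly.geom_type == 'Polygon':
            polys.append(poly)

clipped = geometry.MultiPolygon(polys)
print(clipped.bounds)   # (5.0, 5.0, 11.0, 11.0)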
shape_buf_bounds = geometry.box(*buffer_padded_bounds)

if not shape_buf_bounds.intersects(shape):
    return None

if is_clipped:
    # now we know that we should include the geometry, but
    # if the geometry should be clipped, we'll clip to the
    # layer-specific padded bounds
    layer_padded_bounds = calculate_padded_bounds(
        clip_factor, buffer_padded_bounds)

    if shape.type == 'MultiPolygon':
        shape = _intersect_multipolygon(
            shape, shape_buf_bounds, layer_padded_bounds)
    else:
        try:
            shape = shape.intersection(layer_padded_bounds)
        except shapely.errors.TopologicalError:
            return None

return shape
def _clip_shape(shape, buffer_padded_bounds, is_clipped, clip_factor)
Return the shape clipped to a clip_factor expansion of buffer_padded_bounds if is_clipped is True. Otherwise return the original shape, or None if the shape does not intersect buffer_padded_bounds at all. This is used to reduce the size of the geometries which are encoded in the tiles by removing things which aren't in the tile, and clipping those which are to the clip_factor expanded bounding box.
3.758218
3.566989
1.053611
if self._now is None:
    # Compute the current time only once per instance
    self._now = datetime.utcnow()

return self._now
def now(self)
Capture the current UTC time once per instance and reuse it on subsequent calls.
5.407541
5.200904
1.039731
expiration = getattr(settings, 'OAUTH_ID_TOKEN_EXPIRATION', 30)
expires = self.now + timedelta(seconds=expiration)
return timegm(expires.utctimetuple())
def claim_exp(self, data)
Required expiration time.
5.006607
4.746584
1.054781
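A self-contained sketch of the exp-claim arithmetic above: add the configured lifetime to a fixed "now" and express it as a Unix timestamp suitable for a JWT "exp" claim. The 30-second default mirrors the fallback used above; the settings lookup is replaced here by a plain argument.

from calendar import timegm
from datetime import datetime, timedelta

def claim_exp(now, expiration_seconds=30):
    # expiry is `expiration_seconds` after `now`, as seconds since the epoch
    expires = now + timedelta(seconds=expiration_seconds)
    return timegm(expires.utctimetuple())

now = datetime(2024, 1, 1, 0, 0, 0)
print(claim_exp(now))   # 1704067230, i.e. 30 seconds after midnight UTC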
# Validate URLs
for url_to_validate in (url, redirect_uri):
    try:
        URLValidator()(url_to_validate)
    except ValidationError:
        raise CommandError("URLs provided are invalid. Please provide valid application and redirect URLs.")

# Validate and map client type to the appropriate django-oauth2-provider constant
client_type = client_type.lower()
client_type = {
    'confidential': CONFIDENTIAL,
    'public': PUBLIC
}.get(client_type)

if client_type is None:
    raise CommandError("Client type provided is invalid. Please use one of 'confidential' or 'public'.")

self.fields = {  # pylint: disable=attribute-defined-outside-init
    'url': url,
    'redirect_uri': redirect_uri,
    'client_type': client_type,
}
def _clean_required_args(self, url, redirect_uri, client_type)
Validate and clean the command's arguments.

Arguments:
    url (str): Client's application URL.
    redirect_uri (str): Client application's OAuth2 callback URI.
    client_type (str): Client's type, indicating whether the Client application
        is capable of maintaining the confidentiality of its credentials (e.g.,
        running on a secure server) or is incapable of doing so (e.g., running
        in a browser).

Raises:
    CommandError, if the URLs provided are invalid, or if the client type
    provided is invalid.
3.05426
2.856003
1.069418
for key in ('username', 'client_name', 'client_id', 'client_secret',
            'trusted', 'logout_uri'):
    value = options.get(key)
    if value is not None:
        self.fields[key] = value

username = self.fields.pop('username', None)
if username is not None:
    try:
        user_model = get_user_model()
        self.fields['user'] = user_model.objects.get(username=username)
    except user_model.DoesNotExist:
        raise CommandError("User matching the provided username does not exist.")

# The keyword argument 'name' conflicts with that of `call_command()`. We instead
# use 'client_name' up to this point, then swap it out for the expected field, 'name'.
client_name = self.fields.pop('client_name', None)
if client_name is not None:
    self.fields['name'] = client_name

logout_uri = self.fields.get('logout_uri')
if logout_uri:
    try:
        URLValidator()(logout_uri)
    except ValidationError:
        raise CommandError("The logout_uri is invalid.")
def _parse_options(self, options)
Parse the command's options.

Arguments:
    options (dict): Options with which the command was called.

Raises:
    CommandError, if a user matching the provided username does not exist.
3.211727
3.163229
1.015332