| _id | title | partition | text | language | meta_information |
|---|---|---|---|---|---|
q14100
|
ColorCode._init_hex
|
train
|
def _init_hex(self, hexval: str) -> None:
""" Initialize from a hex value string. """
self.hexval = hex2termhex(fix_hex(hexval))
self.code = hex2term(self.hexval)
self.rgb = hex2rgb(self.hexval)
|
python
|
{
"resource": ""
}
|
q14101
|
ColorCode._init_rgb
|
train
|
def _init_rgb(self, r: int, g: int, b: int) -> None:
""" Initialize from red, green, blue args. """
if self.rgb_mode:
self.rgb = (r, g, b)
self.hexval = rgb2hex(r, g, b)
else:
self.rgb = hex2rgb(rgb2termhex(r, g, b))
self.hexval = rgb2termhex(r, g, b)
self.code = hex2term(self.hexval)
|
python
|
{
"resource": ""
}
|
q14102
|
ColorCode.from_code
|
train
|
def from_code(cls, code: int) -> 'ColorCode':
""" Return a ColorCode from a terminal code. """
c = cls()
c._init_code(code)
return c
|
python
|
{
"resource": ""
}
|
q14103
|
ColorCode.from_hex
|
train
|
def from_hex(cls, hexval: str) -> 'ColorCode':
""" Return a ColorCode from a hex string. """
c = cls()
c._init_hex(hexval)
return c
|
python
|
{
"resource": ""
}
|
q14104
|
ColorCode.from_rgb
|
train
|
def from_rgb(cls, r: int, g: int, b: int) -> 'ColorCode':
""" Return a ColorCode from a RGB tuple. """
c = cls()
c._init_rgb(r, g, b)
return c
|
python
|
{
"resource": ""
}
|
q14105
|
commit
|
train
|
def commit(func):
'''Used as a decorator for automatically making session commits'''
def wrap(**kwarg):
with session_withcommit() as session:
a = func(**kwarg)
session.add(a)
return session.query(songs).order_by(
songs.song_id.desc()).first().song_id
return wrap
|
python
|
{
"resource": ""
}
|
q14106
|
get_hub
|
train
|
def get_hub():
"""Return the instance of the hub."""
try:
hub = _local.hub
except AttributeError:
# The Hub can only be instantiated from the root fiber. No other fibers
# can run until the Hub is there, so the root will always be the first
# one to call get_hub().
assert fibers.current().parent is None
hub = _local.hub = Hub()
return hub
|
python
|
{
"resource": ""
}
|
q14107
|
switch_back.switch
|
train
|
def switch(self, value=None):
"""Switch back to the origin fiber. The fiber is switch in next time
the event loop runs."""
if self._hub is None or not self._fiber.is_alive():
return
self._hub.run_callback(self._fiber.switch, value)
self._hub = self._fiber = None
|
python
|
{
"resource": ""
}
|
q14108
|
switch_back.throw
|
train
|
def throw(self, typ, val=None, tb=None):
"""Throw an exception into the origin fiber. The exception is thrown
the next time the event loop runs."""
# This might seem redundant with self._fiber.cancel(exc), but it isn't
# as self._fiber might be a "raw" fibers.Fiber() that doesn't have a
# cancel() method.
if self._hub is None or not self._fiber.is_alive():
return
self._hub.run_callback(self._fiber.throw, typ, val, tb)
self._hub = self._fiber = None
|
python
|
{
"resource": ""
}
|
q14109
|
Hub.close
|
train
|
def close(self):
"""Close the hub and wait for it to be closed.
This may only be called in the root fiber. After this call returns,
Gruvi cannot be used anymore in the current thread. The main use case
for calling this method is to clean up resources in a multi-threaded
program where you want to exit a thread but not yet the entire process.
"""
if self._loop is None:
return
if fibers.current().parent is not None:
raise RuntimeError('close() may only be called in the root fiber')
elif compat.get_thread_ident() != self._thread:
raise RuntimeError('cannot close() from a different thread')
self._closing = True
self._interrupt_loop()
# Note how we are switching to the Hub without a switchback condition
# being in place. This works because the hub is our child and upon
# a child fiber exit its parent is switched in.
self.switch()
|
python
|
{
"resource": ""
}
|
q14110
|
Hub.switch
|
train
|
def switch(self):
"""Switch to the hub.
This method pauses the current fiber and runs the event loop. The
caller should ensure that it has set up appropriate callbacks so that
it will get scheduled again, preferably using :class:`switch_back`. In
this case the return value of this method will be an ``(args,
kwargs)`` tuple containing the arguments passed to the switch back
instance.
If this method is called from the root fiber then there are two
additional cases. If the hub exited due to a call to :meth:`close`,
then this method returns None. And if the hub exited due to an
exception, that exception is re-raised here.
"""
if self._loop is None or not self.is_alive():
raise RuntimeError('hub is closed/dead')
elif fibers.current() is self:
raise RuntimeError('cannot switch to myself')
elif compat.get_thread_ident() != self._thread:
raise RuntimeError('cannot switch from a different thread')
value = super(Hub, self).switch()
# A fiber exit will cause its parent to be switched to. All fibers in
# the system should be children of the Hub, *except* the Hub itself
# which is a child of the root fiber. So do an explicit check here to
# see if the Hub exited unexpectedly, and if so raise an error.
if fibers.current().parent is None and not self.is_alive() \
and self._loop is not None:
raise RuntimeError('hub exited unexpectedly')
return value
|
python
|
{
"resource": ""
}
|
q14111
|
Hub._run_callbacks
|
train
|
def _run_callbacks(self):
"""Run registered callbacks."""
for i in range(len(self._callbacks)):
callback, args = self._callbacks.popleft()
try:
callback(*args)
except Exception:
self._log.exception('Ignoring exception in callback:')
|
python
|
{
"resource": ""
}
|
q14112
|
Hub.run_callback
|
train
|
def run_callback(self, callback, *args):
"""Queue a callback.
The *callback* will be called with positional arguments *args* in the
next iteration of the event loop. If you add multiple callbacks, they
will be called in the order that you added them. The callback will run
in the Hub's fiber.
This method is thread-safe: it is allowed to queue a callback from a
different thread than the one running the Hub.
"""
if self._loop is None:
raise RuntimeError('hub is closed')
elif not callable(callback):
raise TypeError('"callback": expecting a callable')
self._callbacks.append((callback, args)) # thread-safe
self._interrupt_loop()
|
python
|
{
"resource": ""
}
|
q14113
|
message_info
|
train
|
def message_info(message):
"""Return a string describing a message, for debugging purposes."""
method = message.get('method')
msgid = message.get('id')
error = message.get('error')
if method and msgid is not None:
return 'method call "{}", id = "{}"'.format(method, msgid)
elif method:
return 'notification "{}"'.format(method)
elif error is not None and msgid is not None:
return 'error reply to id = "{}"'.format(msgid)
elif error is not None:
code = error.get('code', '(none)')
return 'error reply: {}'.format(errorcode.get(code, code))
else:
return 'method return for id = "{}"'.format(msgid)
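# Illustrative usage (hypothetical messages, not from the source), following the
# dispatch order above:
# message_info({'method': 'ping', 'id': 1})           -> 'method call "ping", id = "1"'
# message_info({'method': 'ping'})                    -> 'notification "ping"'
# message_info({'error': {'code': -32601}, 'id': 2})  -> 'error reply to id = "2"'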
|
python
|
{
"resource": ""
}
|
q14114
|
JsonRpcVersion.next_id
|
train
|
def next_id(self):
"""Return a unique message ID."""
msgid = self._id_template.format(self._next_id)
self._next_id += 1
return msgid
|
python
|
{
"resource": ""
}
|
q14115
|
JsonRpcProtocol.send_message
|
train
|
def send_message(self, message):
"""Send a raw JSON-RPC message.
The *message* argument must be a dictionary containing a valid JSON-RPC
message according to the version passed into the constructor.
"""
if self._error:
raise compat.saved_exc(self._error)
elif self._transport is None:
raise JsonRpcError('not connected')
self._version.check_message(message)
self._writer.write(serialize(message))
|
python
|
{
"resource": ""
}
|
q14116
|
JsonRpcProtocol.call_method
|
train
|
def call_method(self, method, *args):
"""Call a JSON-RPC method and wait for its result.
The *method* is called with positional arguments *args*.
On success, the ``result`` field from the JSON-RPC response is
returned. On error, a :class:`JsonRpcError` is raised, which you can
use to access the ``error`` field of the JSON-RPC response.
"""
message = self._version.create_request(method, args)
msgid = message['id']
try:
with switch_back(self._timeout) as switcher:
self._method_calls[msgid] = switcher
self.send_message(message)
args, _ = self._hub.switch()
finally:
self._method_calls.pop(msgid, None)
response = args[0]
assert response['id'] == msgid
error = response.get('error')
if error is not None:
raise JsonRpcError('error response calling "{}"'.format(method), error)
return response['result']
|
python
|
{
"resource": ""
}
|
q14117
|
JsonRpcProtocol.send_notification
|
train
|
def send_notification(self, method, *args):
"""Send a JSON-RPC notification.
The notification *method* is sent with positional arguments *args*.
"""
message = self._version.create_request(method, args, notification=True)
self.send_message(message)
|
python
|
{
"resource": ""
}
|
q14118
|
JsonRpcProtocol.send_response
|
train
|
def send_response(self, request, result=None, error=None):
"""Respond to a JSON-RPC method call.
This is a response to the message in *request*. If *error* is not
provided, then this is a successful response, and the value in *result*,
which may be ``None``, is passed back to the client. If *error* is
provided and not ``None`` then an error is sent back. In this case
*error* must be a dictionary as specified by the JSON-RPC spec.
"""
message = self._version.create_response(request, result, error)
self.send_message(message)
|
python
|
{
"resource": ""
}
|
q14119
|
unmarshal
|
train
|
def unmarshal( compoundSignature, data, offset = 0, lendian = True ):
"""
Unmarshals DBus encoded data.
@type compoundSignature: C{string}
@param compoundSignature: DBus signature specifying the encoded value types
@type data: C{string}
@param data: Binary data
@type offset: C{int}
@param offset: Offset within data at which data for compoundSignature
starts (used during recursion)
@type lendian: C{bool}
@param lendian: True if data is encoded in little-endian format
@returns: (number_of_bytes_decoded, list_of_values)
"""
values = list()
start_offset = offset
for ct in genCompleteTypes( compoundSignature ):
tcode = ct[0]
offset += len(pad[tcode]( offset ))
nbytes, value = unmarshallers[ tcode ]( ct, data, offset, lendian )
offset += nbytes
values.append( value )
return offset - start_offset, values
|
python
|
{
"resource": ""
}
|
q14120
|
spawn
|
train
|
def spawn(func, *args, **kwargs):
"""Spawn a new fiber.
A new :class:`Fiber` is created with main function *func* and positional
arguments *args*. The keyword arguments are passed to the :class:`Fiber`
constructor, not to the main function. The fiber is then scheduled to start
by calling its :meth:`~Fiber.start` method.
The fiber instance is returned.
"""
fiber = Fiber(func, args, **kwargs)
fiber.start()
return fiber
|
python
|
{
"resource": ""
}
|
q14121
|
Fiber.start
|
train
|
def start(self):
"""Schedule the fiber to be started in the next iteration of the
event loop."""
target = getattr(self._target, '__qualname__', self._target.__name__)
self._log.debug('starting fiber {}, target {}', self.name, target)
self._hub.run_callback(self.switch)
|
python
|
{
"resource": ""
}
|
q14122
|
Fiber.cancel
|
train
|
def cancel(self, message=None):
"""Schedule the fiber to be cancelled in the next iteration of the
event loop.
Cancellation works by throwing a :class:`~gruvi.Cancelled` exception
into the fiber. If *message* is provided, it will be set as the value
of the exception.
"""
if not self.is_alive():
return
if message is None:
message = 'cancelled by Fiber.cancel()'
self._hub.run_callback(self.throw, Cancelled, Cancelled(message))
|
python
|
{
"resource": ""
}
|
q14123
|
MetaMusic.align_matches
|
train
|
def align_matches(self, matches: list)->Optional[dict]:
"""
Finds hash matches that align in time with other matches and finds
consensus about which hashes are "true" signal from the audio.
Returns a dictionary with match information.
"""
# align by diffs
diff_counter: dict = {}
largest = 0
largest_count = 0
song_id = -1
for sid, diff in matches:
if diff not in diff_counter:
diff_counter[diff] = {}
if sid not in diff_counter[diff]:
diff_counter[diff][sid] = 0
diff_counter[diff][sid] += 1
if diff_counter[diff][sid] > largest_count:
largest = diff
largest_count = diff_counter[diff][sid]
song_id = sid
# extract identification
song = database.get_song_by_id(song_id)
if song:
songname = song.song_name
else:
return None
# return match info
nseconds = round(
float(largest) / fingerprint.DEFAULT_FS *
fingerprint.DEFAULT_WINDOW_SIZE * fingerprint.DEFAULT_OVERLAP_RATIO,
5
)
song = {
'song_id': song_id,
'song_name': songname,
MetaMusic.CONFIDENCE: largest_count,
MetaMusic.OFFSET: int(largest),
'offset_seconds': nseconds,
'file_sha1': binascii.hexlify(song.file_sha1).decode('utf-8'),
}
return song
|
python
|
{
"resource": ""
}
|
q14124
|
saddr
|
train
|
def saddr(address):
"""Return a string representation for an address.
The *address* parameter can be a pipe name, an IP address tuple, or a
socket address.
The return value is always a ``str`` instance.
"""
if isinstance(address, six.string_types):
return address
elif isinstance(address, tuple) and len(address) >= 2 and ':' in address[0]:
return '[{}]:{}'.format(address[0], address[1])
elif isinstance(address, tuple) and len(address) >= 2:
return '{}:{}'.format(*address)
else:
raise TypeError('illegal address type: {!s}'.format(type(address)))
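# Illustrative usage (hypothetical addresses): pipe names pass through, IP
# tuples are joined with ':', and IPv6 hosts are bracketed.
# saddr('/tmp/gruvi.sock')     -> '/tmp/gruvi.sock'
# saddr(('127.0.0.1', 8080))   -> '127.0.0.1:8080'
# saddr(('::1', 8080))         -> '[::1]:8080'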
|
python
|
{
"resource": ""
}
|
q14125
|
paddr
|
train
|
def paddr(address):
"""Parse a string representation of an address.
This function is the inverse of :func:`saddr`.
"""
if not isinstance(address, six.string_types):
raise TypeError('expecting a string')
if address.startswith('['):
p1 = address.find(']:')
if p1 == -1:
raise ValueError
return (address[1:p1], int(address[p1+2:]))
elif ':' in address:
p1 = address.find(':')
return (address[:p1], int(address[p1+1:]))
else:
return address
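# Illustrative round trip, assuming saddr() from this module: paddr() undoes
# the formatting above.
# paddr('[::1]:8080')      -> ('::1', 8080)
# paddr('127.0.0.1:8080')  -> ('127.0.0.1', 8080)
# paddr(saddr(('::1', 8080))) == ('::1', 8080)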
|
python
|
{
"resource": ""
}
|
q14126
|
get_codes
|
train
|
def get_codes(s: Union[str, 'ChainedBase']) -> List[str]:
""" Grab all escape codes from a string.
Returns a list of all escape codes.
"""
return codegrabpat.findall(str(s))
|
python
|
{
"resource": ""
}
|
q14127
|
get_indices
|
train
|
def get_indices(s: Union[str, 'ChainedBase']) -> Dict[int, str]:
""" Retrieve a dict of characters and escape codes with their real index
into the string as the key.
"""
codes = get_code_indices(s)
if not codes:
# This function is not for non-escape-code stuff, but okay.
return {i: c for i, c in enumerate(s)}
indices = {}
for codeindex in sorted(codes):
code = codes[codeindex]
if codeindex == 0:
indices[codeindex] = code
continue
# Grab characters before codeindex.
start = max(indices or {0: ''}, key=int)
startcode = indices.get(start, '')
startlen = start + len(startcode)
indices.update({i: s[i] for i in range(startlen, codeindex)})
indices[codeindex] = code
if not indices:
return {i: c for i, c in enumerate(s)}
lastindex = max(indices, key=int)
lastitem = indices[lastindex]
start = lastindex + len(lastitem)
textlen = len(s)
if start < (textlen - 1):
# Grab chars after last code.
indices.update({i: s[i] for i in range(start, textlen)})
return indices
|
python
|
{
"resource": ""
}
|
q14128
|
MultiPoll.remove_callback
|
train
|
def remove_callback(self, handle):
"""Remove a callback."""
if self._poll is None:
raise RuntimeError('poll instance is closed')
remove_callback(self, handle)
if handle.extra & READABLE:
self._readers -= 1
if handle.extra & WRITABLE:
self._writers -= 1
self._sync()
|
python
|
{
"resource": ""
}
|
q14129
|
MultiPoll.update_callback
|
train
|
def update_callback(self, handle, events):
"""Update the event mask for a callback."""
if self._poll is None:
raise RuntimeError('poll instance is closed')
if not has_callback(self, handle):
raise ValueError('no such callback')
if events & ~(READABLE|WRITABLE):
raise ValueError('illegal event mask: {}'.format(events))
if handle.extra == events:
return
if handle.extra & READABLE:
self._readers -= 1
if handle.extra & WRITABLE:
self._writers -= 1
if events & READABLE:
self._readers += 1
if events & WRITABLE:
self._writers += 1
handle.extra = events
self._sync()
|
python
|
{
"resource": ""
}
|
q14130
|
MultiPoll.close
|
train
|
def close(self):
"""Close the poll instance."""
if self._poll is None:
return
self._poll.close()
self._poll = None
self._readers = 0
self._writers = 0
self._events = 0
clear_callbacks(self)
|
python
|
{
"resource": ""
}
|
q14131
|
Poller.update_callback
|
train
|
def update_callback(self, fd, handle, events):
"""Update the event mask associated with an existing callback.
If you want to temporarily disable a callback then you can use this
method with an *events* argument of ``0``. This is more efficient than
removing the callback and adding it again later.
"""
if self._mpoll is None:
raise RuntimeError('Poller instance is closed')
mpoll = self._mpoll.get(fd)
if mpoll is None:
raise ValueError('not watching fd {}'.format(fd))
mpoll.update_callback(handle, events)
|
python
|
{
"resource": ""
}
|
q14132
|
Poller.close
|
train
|
def close(self):
"""Close all active poll instances and remove all callbacks."""
if self._mpoll is None:
return
for mpoll in self._mpoll.values():
mpoll.close()
self._mpoll.clear()
self._mpoll = None
|
python
|
{
"resource": ""
}
|
q14133
|
ClientAuthenticator.authTryNextMethod
|
train
|
def authTryNextMethod(self):
"""
Tries the next authentication method or raises a failure if all mechanisms
have been tried.
"""
if not self.authOrder:
raise DBusAuthenticationFailed()
self.authMech = self.authOrder.pop()
if self.authMech == 'DBUS_COOKIE_SHA1':
self.sendAuthMessage('AUTH ' + self.authMech + ' ' +
hexlify(getpass.getuser()))
elif self.authMech == 'ANONYMOUS':
self.sendAuthMessage('AUTH ' + self.authMech + ' ' +
hexlify("txdbus"))
else:
self.sendAuthMessage('AUTH ' + self.authMech)
|
python
|
{
"resource": ""
}
|
q14134
|
ClientAuthenticator._authGetDBusCookie
|
train
|
def _authGetDBusCookie(self, cookie_context, cookie_id):
"""
Reads the requested cookie_id from the cookie_context file
"""
# XXX Ensure we obtain the correct directory for the
# authenticating user and that that user actually
# owns the keyrings directory
if self.cookie_dir is None:
cookie_dir = os.path.expanduser('~/.dbus-keyrings')
else:
cookie_dir = self.cookie_dir
dstat = os.stat(cookie_dir)
if dstat.st_mode & 0x36: # 066
raise Exception('User keyrings directory is writeable by other users. Aborting authentication')
import pwd
if dstat.st_uid != pwd.getpwuid(os.geteuid()).pw_uid:
raise Exception('Keyrings directory is not owned by the current user. Aborting authentication!')
f = open(os.path.join(cookie_dir, cookie_context), 'r')
try:
for line in f:
try:
k_id, k_time, k_cookie_hex = line.split()
if k_id == cookie_id:
return k_cookie_hex
except:
pass
finally:
f.close()
|
python
|
{
"resource": ""
}
|
q14135
|
add_callback
|
train
|
def add_callback(obj, callback, args=()):
"""Add a callback to an object."""
callbacks = obj._callbacks
node = Node(callback, args)
# Store a single callback directly in _callbacks
if callbacks is None:
obj._callbacks = node
return node
# Otherwise use a dllist.
if not isinstance(callbacks, dllist):
obj._callbacks = dllist()
obj._callbacks.insert(callbacks)
callbacks = obj._callbacks
callbacks.insert(node)
return node
|
python
|
{
"resource": ""
}
|
q14136
|
remove_callback
|
train
|
def remove_callback(obj, handle):
"""Remove a callback from an object."""
callbacks = obj._callbacks
if callbacks is handle:
obj._callbacks = None
elif isinstance(callbacks, dllist):
callbacks.remove(handle)
if not callbacks:
obj._callbacks = None
|
python
|
{
"resource": ""
}
|
q14137
|
has_callback
|
train
|
def has_callback(obj, handle):
"""Return whether a callback is currently registered for an object."""
callbacks = obj._callbacks
if not callbacks:
return False
if isinstance(callbacks, Node):
return handle is callbacks
else:
return handle in callbacks
|
python
|
{
"resource": ""
}
|
q14138
|
pop_callback
|
train
|
def pop_callback(obj):
"""Pop a single callback."""
callbacks = obj._callbacks
if not callbacks:
return
if isinstance(callbacks, Node):
node = callbacks
obj._callbacks = None
else:
node = callbacks.first
callbacks.remove(node)
if not callbacks:
obj._callbacks = None
return node.data, node.extra
|
python
|
{
"resource": ""
}
|
q14139
|
clear_callbacks
|
train
|
def clear_callbacks(obj):
"""Remove all callbacks from an object."""
callbacks = obj._callbacks
if isinstance(callbacks, dllist):
# Help the garbage collector by clearing all links.
callbacks.clear()
obj._callbacks = None
|
python
|
{
"resource": ""
}
|
q14140
|
run_callbacks
|
train
|
def run_callbacks(obj, log=None):
"""Run callbacks."""
def run_callback(callback, args):
return callback(*args)
return walk_callbacks(obj, run_callback, log)
|
python
|
{
"resource": ""
}
|
q14141
|
get_serializer_class
|
train
|
def get_serializer_class(configuration_model):
""" Returns a ConfigurationModel serializer class for the supplied configuration_model. """
class AutoConfigModelSerializer(ModelSerializer):
"""Serializer class for configuration models."""
class Meta(object):
"""Meta information for AutoConfigModelSerializer."""
model = configuration_model
fields = '__all__'
def create(self, validated_data):
if "changed_by_username" in self.context:
model = get_user_model()
validated_data['changed_by'] = model.objects.get(username=self.context["changed_by_username"])
return super(AutoConfigModelSerializer, self).create(validated_data)
return AutoConfigModelSerializer
|
python
|
{
"resource": ""
}
|
q14142
|
deserialize_json
|
train
|
def deserialize_json(stream, username):
"""
Given a stream containing JSON, deserializes the JSON into ConfigurationModel instances.
The stream is expected to be in the following format:
{ "model": "config_models.ExampleConfigurationModel",
"data":
[
{ "enabled": True,
"color": "black"
...
},
{ "enabled": False,
"color": "yellow"
...
},
...
]
}
If the provided stream does not contain valid JSON for the ConfigurationModel specified,
an Exception will be raised.
Arguments:
stream: The stream of JSON, as described above.
username: The username of the user making the change. This must match an existing user.
Returns: the number of created entries
"""
parsed_json = JSONParser().parse(stream)
serializer_class = get_serializer_class(apps.get_model(parsed_json["model"]))
list_serializer = serializer_class(data=parsed_json["data"], context={"changed_by_username": username}, many=True)
if list_serializer.is_valid():
model_class = serializer_class.Meta.model
for data in reversed(list_serializer.validated_data):
if model_class.equal_to_current(data):
list_serializer.validated_data.remove(data)
entries_created = len(list_serializer.validated_data)
list_serializer.save()
return entries_created
else:
raise Exception(list_serializer.error_messages)
|
python
|
{
"resource": ""
}
|
q14143
|
ConfigurationModelAdmin.get_displayable_field_names
|
train
|
def get_displayable_field_names(self):
"""
Return all field names, excluding reverse foreign key relationships.
"""
return [
f.name
for f in self.model._meta.get_fields()
if not f.one_to_many
]
|
python
|
{
"resource": ""
}
|
q14144
|
ConfigurationModelAdmin.revert
|
train
|
def revert(self, request, queryset):
"""
Admin action to revert a configuration back to the selected value
"""
if queryset.count() != 1:
self.message_user(request, _("Please select a single configuration to revert to."))
return
target = queryset[0]
target.id = None
self.save_model(request, target, None, False)
self.message_user(request, _("Reverted configuration."))
return HttpResponseRedirect(
reverse(
'admin:{}_{}_change'.format(
self.model._meta.app_label,
self.model._meta.model_name,
),
args=(target.id,),
)
)
|
python
|
{
"resource": ""
}
|
q14145
|
ShowHistoryFilter.choices
|
train
|
def choices(self, changelist):
""" Returns choices ready to be output in the template. """
show_all = self.used_parameters.get(self.parameter_name) == "1"
return (
{
'display': _('Current Configuration'),
'selected': not show_all,
'query_string': changelist.get_query_string({}, [self.parameter_name]),
},
{
'display': _('All (Show History)'),
'selected': show_all,
'query_string': changelist.get_query_string({self.parameter_name: "1"}, []),
}
)
|
python
|
{
"resource": ""
}
|
q14146
|
KeyedConfigurationModelAdmin.get_queryset
|
train
|
def get_queryset(self, request):
"""
Annotate the queryset with an 'is_active' property that's true iff that row is the most
recently added row for that particular set of KEY_FIELDS values.
Filter the queryset to show only is_active rows by default.
"""
if request.GET.get(ShowHistoryFilter.parameter_name) == '1':
queryset = self.model.objects.with_active_flag()
else:
# Show only the most recent row for each key.
queryset = self.model.objects.current_set()
ordering = self.get_ordering(request)
if ordering:
return queryset.order_by(*ordering)
return queryset
|
python
|
{
"resource": ""
}
|
q14147
|
KeyedConfigurationModelAdmin.edit_link
|
train
|
def edit_link(self, inst):
""" Edit link for the change view """
if not inst.is_active:
return u'--'
update_url = reverse('admin:{}_{}_add'.format(self.model._meta.app_label, self.model._meta.model_name))
update_url += "?source={}".format(inst.pk)
return u'<a href="{}">{}</a>'.format(update_url, _('Update'))
|
python
|
{
"resource": ""
}
|
q14148
|
submit_row
|
train
|
def submit_row(context):
"""
Overrides 'django.contrib.admin.templatetags.admin_modify.submit_row'.
Manipulates the context going into that function by hiding all of the buttons
in the submit row if the key `readonly` is set in the context.
"""
ctx = original_submit_row(context)
if context.get('readonly', False):
ctx.update({
'show_delete_link': False,
'show_save_as_new': False,
'show_save_and_add_another': False,
'show_save_and_continue': False,
'show_save': False,
})
return ctx
|
python
|
{
"resource": ""
}
|
q14149
|
ConfigurationModelManager.current_set
|
train
|
def current_set(self):
"""
A queryset for the active configuration entries only. Only useful if KEY_FIELDS is set.
Active means the most recent entry for each unique combination of keys. It does not
necessarily mean enabled.
"""
assert self.model.KEY_FIELDS != (), "Just use model.current() if there are no KEY_FIELDS"
return self.get_queryset().filter(
pk__in=self._current_ids_subquery()
).annotate(
is_active=models.Value(1, output_field=models.IntegerField())
)
|
python
|
{
"resource": ""
}
|
q14150
|
ConfigurationModelManager.with_active_flag
|
train
|
def with_active_flag(self):
"""
A query set where each result is annotated with an 'is_active' field that indicates
if it's the most recent entry for that combination of keys.
"""
if self.model.KEY_FIELDS:
return self.get_queryset().annotate(
is_active=models.ExpressionWrapper(
models.Q(pk__in=self._current_ids_subquery()),
output_field=models.IntegerField(),
)
)
return self.get_queryset().annotate(
is_active=models.ExpressionWrapper(
models.Q(pk=self.model.current().pk),
output_field=models.IntegerField(),
)
)
|
python
|
{
"resource": ""
}
|
q14151
|
ConfigurationModel.save
|
train
|
def save(self, force_insert=False, force_update=False, using=None,
update_fields=None):
"""
Clear the cached value when saving a new configuration entry
"""
# Always create a new entry, instead of updating an existing model
self.pk = None # pylint: disable=invalid-name
super(ConfigurationModel, self).save(
force_insert,
force_update,
using,
update_fields
)
cache.delete(self.cache_key_name(*[getattr(self, key) for key in self.KEY_FIELDS]))
if self.KEY_FIELDS:
cache.delete(self.key_values_cache_key_name())
|
python
|
{
"resource": ""
}
|
q14152
|
ConfigurationModel.cache_key_name
|
train
|
def cache_key_name(cls, *args):
"""Return the name of the key to use to cache the current configuration"""
if cls.KEY_FIELDS != ():
if len(args) != len(cls.KEY_FIELDS):
raise TypeError(
"cache_key_name() takes exactly {} arguments ({} given)".format(len(cls.KEY_FIELDS), len(args))
)
# pylint: disable=unicode-builtin
return 'configuration/{}/current/{}'.format(cls.__name__, ','.join(six.text_type(arg) for arg in args))
else:
return 'configuration/{}/current'.format(cls.__name__)
|
python
|
{
"resource": ""
}
|
q14153
|
ConfigurationModel.key_values_cache_key_name
|
train
|
def key_values_cache_key_name(cls, *key_fields):
""" Key for fetching unique key values from the cache """
key_fields = key_fields or cls.KEY_FIELDS
return 'configuration/{}/key_values/{}'.format(cls.__name__, ','.join(key_fields))
|
python
|
{
"resource": ""
}
|
q14154
|
ConfigurationModel.fields_equal
|
train
|
def fields_equal(self, instance, fields_to_ignore=("id", "change_date", "changed_by")):
"""
Compares this instance's fields to the supplied instance to test for equality.
This will ignore any fields in `fields_to_ignore`.
Note that this method ignores many-to-many fields.
Args:
instance: the model instance to compare
fields_to_ignore: List of fields that should not be compared for equality. By default
includes `id`, `change_date`, and `changed_by`.
Returns: True if the checked fields are all equivalent, else False
"""
for field in self._meta.get_fields():
if not field.many_to_many and field.name not in fields_to_ignore:
if getattr(instance, field.name) != getattr(self, field.name):
return False
return True
|
python
|
{
"resource": ""
}
|
q14155
|
ConfigurationModel.equal_to_current
|
train
|
def equal_to_current(cls, json, fields_to_ignore=("id", "change_date", "changed_by")):
"""
Compares for equality this instance to a model instance constructed from the supplied JSON.
This will ignore any fields in `fields_to_ignore`.
Note that this method cannot handle fields with many-to-many associations, as those can only
be set on a saved model instance (and saving the model instance will create a new entry).
All many-to-many field entries will be removed before the equality comparison is done.
Args:
json: json representing an entry to compare
fields_to_ignore: List of fields that should not be compared for equality. By default
includes `id`, `change_date`, and `changed_by`.
Returns: True if the checked fields are all equivalent, else False
"""
# Remove many-to-many relationships from json.
# They require an instance to be already saved.
info = model_meta.get_field_info(cls)
for field_name, relation_info in info.relations.items():
if relation_info.to_many and (field_name in json):
json.pop(field_name)
new_instance = cls(**json)
key_field_args = tuple(getattr(new_instance, key) for key in cls.KEY_FIELDS)
current = cls.current(*key_field_args)
# If current.id is None, no entry actually existed and the "current" method created it.
if current.id is not None:
return current.fields_equal(new_instance, fields_to_ignore)
return False
|
python
|
{
"resource": ""
}
|
q14156
|
AtomicMixin.create_atomic_wrapper
|
train
|
def create_atomic_wrapper(cls, wrapped_func):
"""Returns a wrapped function."""
def _create_atomic_wrapper(*args, **kwargs):
"""Actual wrapper."""
# When a view call fails due to a permissions error, it raises an exception.
# An uncaught exception breaks the DB transaction for any following DB operations
# unless it's wrapped in a atomic() decorator or context manager.
with transaction.atomic():
return wrapped_func(*args, **kwargs)
return _create_atomic_wrapper
|
python
|
{
"resource": ""
}
|
q14157
|
AtomicMixin.as_view
|
train
|
def as_view(cls, **initkwargs):
"""Overrides as_view to add atomic transaction."""
view = super(AtomicMixin, cls).as_view(**initkwargs)
return cls.create_atomic_wrapper(view)
|
python
|
{
"resource": ""
}
|
q14158
|
merge
|
train
|
def merge(file, feature_layers):
''' Retrieve a list of OSciMap4 tile responses and merge them into one.
get_tiles() retrieves data and performs basic integrity checks.
'''
tile = VectorTile(extents)
for layer in feature_layers:
tile.addFeatures(layer['features'], layer['name'])
tile.complete()
data = tile.out.SerializeToString()
file.write(struct.pack(">I", len(data)))
file.write(data)
|
python
|
{
"resource": ""
}
|
q14159
|
_make_valid_if_necessary
|
train
|
def _make_valid_if_necessary(shape):
"""
attempt to correct invalid shapes if necessary
After simplification, even when preserving topology, invalid
shapes can be returned. This appears to only occur with polygon
types. As an optimization, we only check if the polygon types are
valid.
"""
if shape.type in ('Polygon', 'MultiPolygon') and not shape.is_valid:
shape = shape.buffer(0)
# return value from buffer is usually valid, but it's
# not clear from the docs whether this is guaranteed,
# so return None if not.
if not shape.is_valid:
return None
return shape
|
python
|
{
"resource": ""
}
|
q14160
|
_accumulate_props
|
train
|
def _accumulate_props(dest_props, src_props):
"""
helper to accumulate a dict of properties
Mutates dest_props by adding the non-None src_props and returns
the size of the added properties
"""
props_size = 0
if src_props:
for k, v in src_props.items():
if v is not None:
props_size += len(k) + _sizeof(v)
dest_props[k] = v
return props_size
|
python
|
{
"resource": ""
}
|
q14161
|
calculate_sizes_by_zoom
|
train
|
def calculate_sizes_by_zoom(coord, metatile_zoom, cfg_tile_sizes, max_zoom):
"""
Returns a map of nominal zoom to the list of tile sizes to generate at that
zoom.
This is because we want to generate different metatile contents at
different zoom levels. At the most detailed zoom level, we want to generate
the smallest tiles possible, as this allows "overzooming" by simply
extracting the smaller tiles. At the minimum zoom, we want to get as close
as we can to zero nominal zoom by using any "unused" space in the metatile
for larger tile sizes that we're not generating.
For example, with 1x1 metatiles, the tile size is always 256px, and the
function will return {coord.zoom: [256]}
Note that max_zoom should be the maximum *coordinate* zoom, not nominal
zoom.
"""
from tilequeue.tile import metatile_zoom_from_size
tile_size_by_zoom = {}
nominal_zoom = coord.zoom + metatile_zoom
# check that the tile sizes are correct and within range.
for tile_size in cfg_tile_sizes:
assert tile_size >= 256
assert tile_size <= 256 * (1 << metatile_zoom)
assert _is_power_of_2(tile_size)
if coord.zoom >= max_zoom:
# all the tile_sizes down to 256 at the nominal zoom.
tile_sizes = []
tile_sizes.extend(cfg_tile_sizes)
lowest_tile_size = min(tile_sizes)
while lowest_tile_size > 256:
lowest_tile_size //= 2
tile_sizes.append(lowest_tile_size)
tile_size_by_zoom[nominal_zoom] = tile_sizes
elif coord.zoom <= 0:
# the tile_sizes, plus max(tile_sizes) size at nominal zooms decreasing
# down to 0 (or as close as we can get)
tile_size_by_zoom[nominal_zoom] = cfg_tile_sizes
max_tile_size = max(cfg_tile_sizes)
max_tile_zoom = metatile_zoom_from_size(max_tile_size // 256)
assert max_tile_zoom <= metatile_zoom
for delta in range(0, metatile_zoom - max_tile_zoom):
z = nominal_zoom - (delta + 1)
tile_size_by_zoom[z] = [max_tile_size]
else:
# the tile_sizes at nominal zoom only.
tile_size_by_zoom[nominal_zoom] = cfg_tile_sizes
return tile_size_by_zoom
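# A worked sketch with hypothetical values (coord only needs a .zoom attribute
# here), assuming tilequeue's metatile_zoom_from_size; with metatile_zoom=2,
# cfg_tile_sizes=[512] and max_zoom=16:
#   0 < coord.zoom < 16: {coord.zoom + 2: [512]}
#   coord.zoom == 16:    {18: [512, 256]}       # smaller sizes added for overzooming
#   coord.zoom == 0:     {2: [512], 1: [512]}   # unused metatile space filled upwards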
|
python
|
{
"resource": ""
}
|
q14162
|
calculate_cut_coords_by_zoom
|
train
|
def calculate_cut_coords_by_zoom(
coord, metatile_zoom, cfg_tile_sizes, max_zoom):
"""
Returns a map of nominal zoom to the list of cut coordinates at that
nominal zoom.
Note that max_zoom should be the maximum coordinate zoom, not nominal
zoom.
"""
tile_sizes_by_zoom = calculate_sizes_by_zoom(
coord, metatile_zoom, cfg_tile_sizes, max_zoom)
cut_coords_by_zoom = {}
for nominal_zoom, tile_sizes in tile_sizes_by_zoom.iteritems():
cut_coords = []
for tile_size in tile_sizes:
cut_coords.extend(metatile_children_with_size(
coord, metatile_zoom, nominal_zoom, tile_size))
cut_coords_by_zoom[nominal_zoom] = cut_coords
return cut_coords_by_zoom
|
python
|
{
"resource": ""
}
|
q14163
|
tiles_are_equal
|
train
|
def tiles_are_equal(tile_data_1, tile_data_2, fmt):
"""
Returns True if the tile data is equal in tile_data_1 and tile_data_2. For
most formats, this is a simple byte-wise equality check. For zipped
metatiles, we need to check the contents, as the zip format includes
metadata such as timestamps and doesn't control file ordering.
"""
if fmt and fmt == zip_format:
return metatiles_are_equal(tile_data_1, tile_data_2)
else:
return tile_data_1 == tile_data_2
|
python
|
{
"resource": ""
}
|
q14164
|
write_tile_if_changed
|
train
|
def write_tile_if_changed(store, tile_data, coord, format):
"""
Only write tile data if different from existing.
Try to read the tile data from the store first. If the existing
data matches, don't write. Returns whether the tile was written.
"""
existing_data = store.read_tile(coord, format)
if not existing_data or \
not tiles_are_equal(existing_data, tile_data, format):
store.write_tile(tile_data, coord, format)
return True
else:
return False
|
python
|
{
"resource": ""
}
|
q14165
|
_override_cfg
|
train
|
def _override_cfg(container, yamlkeys, value):
"""
Override a hierarchical key in the config, setting it to the value.
Note that yamlkeys should be a non-empty list of strings.
"""
key = yamlkeys[0]
rest = yamlkeys[1:]
if len(rest) == 0:
# no rest means we found the key to update.
container[key] = value
elif key in container:
# still need to find the leaf in the tree, so recurse.
_override_cfg(container[key], rest, value)
else:
# need to create a sub-tree down to the leaf to insert into.
subtree = {}
_override_cfg(subtree, rest, value)
container[key] = subtree
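# A minimal worked example (hypothetical config dict):
# cfg = {'rawr': {'group-zoom': 10}}
# _override_cfg(cfg, ['rawr', 'group-zoom'], 7)        # existing leaf updated
# _override_cfg(cfg, ['store', 's3', 'bucket'], 'x')   # missing subtree created
# cfg == {'rawr': {'group-zoom': 7}, 'store': {'s3': {'bucket': 'x'}}}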
|
python
|
{
"resource": ""
}
|
q14166
|
coord_pyramid
|
train
|
def coord_pyramid(coord, zoom_start, zoom_stop):
"""
generate full pyramid for coord
Generate the full pyramid for a single coordinate. Note that zoom_stop is
exclusive.
"""
if zoom_start <= coord.zoom:
yield coord
for child_coord in coord_children_range(coord, zoom_stop):
if zoom_start <= child_coord.zoom:
yield child_coord
|
python
|
{
"resource": ""
}
|
q14167
|
coord_pyramids
|
train
|
def coord_pyramids(coords, zoom_start, zoom_stop):
"""
generate full pyramid for coords
Generate the full pyramid for the list of coords. Note that zoom_stop is
exclusive.
"""
for coord in coords:
for child in coord_pyramid(coord, zoom_start, zoom_stop):
yield child
|
python
|
{
"resource": ""
}
|
q14168
|
tilequeue_enqueue_full_pyramid_from_toi
|
train
|
def tilequeue_enqueue_full_pyramid_from_toi(cfg, peripherals, args):
"""enqueue a full pyramid from the z10 toi"""
logger = make_logger(cfg, 'enqueue_tiles_of_interest')
logger.info('Enqueueing tiles of interest')
logger.info('Fetching tiles of interest ...')
tiles_of_interest = peripherals.toi.fetch_tiles_of_interest()
n_toi = len(tiles_of_interest)
logger.info('Fetching tiles of interest ... done')
rawr_yaml = cfg.yml.get('rawr')
assert rawr_yaml, 'Missing rawr yaml'
group_by_zoom = rawr_yaml.get('group-zoom')
assert group_by_zoom, 'Missing rawr group-zoom'
assert isinstance(group_by_zoom, int), 'Invalid rawr group-zoom'
if args.zoom_start is None:
zoom_start = group_by_zoom
else:
zoom_start = args.zoom_start
if args.zoom_stop is None:
zoom_stop = cfg.max_zoom + 1 # +1 because exclusive
else:
zoom_stop = args.zoom_stop
assert zoom_start >= group_by_zoom
assert zoom_stop > zoom_start
ungrouped = []
coords_at_group_zoom = set()
for coord_int in tiles_of_interest:
coord = coord_unmarshall_int(coord_int)
if coord.zoom < zoom_start:
ungrouped.append(coord)
if coord.zoom >= group_by_zoom:
coord_at_group_zoom = coord.zoomTo(group_by_zoom).container()
coords_at_group_zoom.add(coord_at_group_zoom)
pyramids = coord_pyramids(coords_at_group_zoom, zoom_start, zoom_stop)
coords_to_enqueue = chain(ungrouped, pyramids)
queue_writer = peripherals.queue_writer
n_queued, n_in_flight = queue_writer.enqueue_batch(coords_to_enqueue)
logger.info('%d enqueued - %d in flight' % (n_queued, n_in_flight))
logger.info('%d tiles of interest processed' % n_toi)
|
python
|
{
"resource": ""
}
|
q14169
|
tilequeue_enqueue_random_pyramids
|
train
|
def tilequeue_enqueue_random_pyramids(cfg, peripherals, args):
"""enqueue random pyramids"""
from tilequeue.stats import RawrTileEnqueueStatsHandler
from tilequeue.rawr import make_rawr_enqueuer_from_cfg
logger = make_logger(cfg, 'enqueue_random_pyramids')
rawr_yaml = cfg.yml.get('rawr')
assert rawr_yaml, 'Missing rawr yaml'
group_by_zoom = rawr_yaml.get('group-zoom')
assert group_by_zoom, 'Missing rawr group-zoom'
assert isinstance(group_by_zoom, int), 'Invalid rawr group-zoom'
if args.zoom_start is None:
zoom_start = group_by_zoom
else:
zoom_start = args.zoom_start
if args.zoom_stop is None:
zoom_stop = cfg.max_zoom + 1 # +1 because exclusive
else:
zoom_stop = args.zoom_stop
assert zoom_start >= group_by_zoom
assert zoom_stop > zoom_start
gridsize = args.gridsize
total_samples = getattr(args, 'n-samples')
samples_per_cell = total_samples / (gridsize * gridsize)
tileset_dim = 2 ** group_by_zoom
scale_factor = float(tileset_dim) / float(gridsize)
stats = make_statsd_client_from_cfg(cfg)
stats_handler = RawrTileEnqueueStatsHandler(stats)
rawr_enqueuer = make_rawr_enqueuer_from_cfg(
cfg, logger, stats_handler, peripherals.msg_marshaller)
for grid_y in xrange(gridsize):
tile_y_min = int(grid_y * scale_factor)
tile_y_max = int((grid_y+1) * scale_factor)
for grid_x in xrange(gridsize):
tile_x_min = int(grid_x * scale_factor)
tile_x_max = int((grid_x+1) * scale_factor)
cell_samples = set()
for i in xrange(samples_per_cell):
while True:
rand_x = randrange(tile_x_min, tile_x_max)
rand_y = randrange(tile_y_min, tile_y_max)
sample = rand_x, rand_y
if sample in cell_samples:
continue
cell_samples.add(sample)
break
# enqueue a cell at a time
# the queue mapper expects to be able to read the entirety of the
# input into memory first
for x, y in cell_samples:
coord = Coordinate(zoom=group_by_zoom, column=x, row=y)
pyramid = coord_pyramid(coord, zoom_start, zoom_stop)
rawr_enqueuer(pyramid)
|
python
|
{
"resource": ""
}
|
q14170
|
emit_toi_stats
|
train
|
def emit_toi_stats(toi_set, peripherals):
"""
Calculates new TOI stats and emits them via statsd.
"""
count_by_zoom = defaultdict(int)
total = 0
for coord_int in toi_set:
coord = coord_unmarshall_int(coord_int)
count_by_zoom[coord.zoom] += 1
total += 1
peripherals.stats.gauge('tiles-of-interest.count', total)
for zoom, count in count_by_zoom.items():
peripherals.stats.gauge(
'tiles-of-interest.by-zoom.z{:02d}'.format(zoom),
count
)
|
python
|
{
"resource": ""
}
|
q14171
|
tilequeue_stuck_tiles
|
train
|
def tilequeue_stuck_tiles(cfg, peripherals):
"""
Check which files exist on s3 but are not in toi.
"""
store = _make_store(cfg)
format = lookup_format_by_extension('zip')
layer = 'all'
assert peripherals.toi, 'Missing toi'
toi = peripherals.toi.fetch_tiles_of_interest()
for coord in store.list_tiles(format, layer):
coord_int = coord_marshall_int(coord)
if coord_int not in toi:
print serialize_coord(coord)
|
python
|
{
"resource": ""
}
|
q14172
|
tilequeue_tile_status
|
train
|
def tilequeue_tile_status(cfg, peripherals, args):
"""
Report the status of the given tiles in the store, queue and TOI.
"""
logger = make_logger(cfg, 'tile_status')
# friendly warning to avoid confusion when this command outputs nothing
# at all when called with no positional arguments.
if not args.coords:
logger.warning('No coordinates given on the command line.')
return
# pre-load TOI to avoid having to do it for each coordinate
toi = None
if peripherals.toi:
toi = peripherals.toi.fetch_tiles_of_interest()
# TODO: make these configurable!
tile_format = lookup_format_by_extension('zip')
store = _make_store(cfg)
for coord_str in args.coords:
coord = deserialize_coord(coord_str)
# input checking! make sure that the coordinate is okay to use in
# the rest of the code.
if not coord:
logger.warning('Could not deserialize %r as coordinate', coord_str)
continue
if not coord_is_valid(coord):
logger.warning('Coordinate is not valid: %r (parsed from %r)',
coord, coord_str)
continue
# now we think we probably have a valid coordinate. go look up
# whether it exists in various places.
logger.info("=== %s ===", coord_str)
coord_int = coord_marshall_int(coord)
if peripherals.inflight_mgr:
is_inflight = peripherals.inflight_mgr.is_inflight(coord)
logger.info('inflight: %r', is_inflight)
if toi:
in_toi = coord_int in toi
logger.info('in TOI: %r' % (in_toi,))
data = store.read_tile(coord, tile_format)
logger.info('tile in store: %r', bool(data))
|
python
|
{
"resource": ""
}
|
q14173
|
tilequeue_rawr_enqueue
|
train
|
def tilequeue_rawr_enqueue(cfg, args):
"""command to take tile expiry path and enqueue for rawr tile generation"""
from tilequeue.stats import RawrTileEnqueueStatsHandler
from tilequeue.rawr import make_rawr_enqueuer_from_cfg
msg_marshall_yaml = cfg.yml.get('message-marshall')
assert msg_marshall_yaml, 'Missing message-marshall config'
msg_marshaller = make_message_marshaller(msg_marshall_yaml)
logger = make_logger(cfg, 'rawr_enqueue')
stats = make_statsd_client_from_cfg(cfg)
stats_handler = RawrTileEnqueueStatsHandler(stats)
rawr_enqueuer = make_rawr_enqueuer_from_cfg(
cfg, logger, stats_handler, msg_marshaller)
with open(args.expiry_path) as fh:
coords = create_coords_generator_from_tiles_file(fh)
rawr_enqueuer(coords)
|
python
|
{
"resource": ""
}
|
q14174
|
_tilequeue_rawr_setup
|
train
|
def _tilequeue_rawr_setup(cfg):
"""command to read from rawr queue and generate rawr tiles"""
rawr_yaml = cfg.yml.get('rawr')
assert rawr_yaml is not None, 'Missing rawr configuration in yaml'
rawr_postgresql_yaml = rawr_yaml.get('postgresql')
assert rawr_postgresql_yaml, 'Missing rawr postgresql config'
from raw_tiles.formatter.msgpack import Msgpack
from raw_tiles.gen import RawrGenerator
from raw_tiles.source.conn import ConnectionContextManager
from raw_tiles.source import parse_sources
from raw_tiles.source import DEFAULT_SOURCES as DEFAULT_RAWR_SOURCES
from tilequeue.rawr import RawrS3Sink
from tilequeue.rawr import RawrStoreSink
import boto3
# pass through the postgresql yaml config directly
conn_ctx = ConnectionContextManager(rawr_postgresql_yaml)
rawr_source_list = rawr_yaml.get('sources', DEFAULT_RAWR_SOURCES)
assert isinstance(rawr_source_list, list), \
'RAWR source list should be a list'
assert len(rawr_source_list) > 0, \
'RAWR source list should be non-empty'
rawr_store = rawr_yaml.get('store')
if rawr_store:
store = make_store(
rawr_store, credentials=cfg.subtree('aws credentials'))
rawr_sink = RawrStoreSink(store)
else:
rawr_sink_yaml = rawr_yaml.get('sink')
assert rawr_sink_yaml, 'Missing rawr sink config'
sink_type = rawr_sink_yaml.get('type')
assert sink_type, 'Missing rawr sink type'
if sink_type == 's3':
s3_cfg = rawr_sink_yaml.get('s3')
assert s3_cfg, 'Missing s3 config'
bucket = s3_cfg.get('bucket')
assert bucket, 'Missing rawr sink bucket'
sink_region = s3_cfg.get('region')
assert sink_region, 'Missing rawr sink region'
prefix = s3_cfg.get('prefix')
assert prefix, 'Missing rawr sink prefix'
extension = s3_cfg.get('extension')
assert extension, 'Missing rawr sink extension'
tags = s3_cfg.get('tags')
from tilequeue.store import make_s3_tile_key_generator
tile_key_gen = make_s3_tile_key_generator(s3_cfg)
s3_client = boto3.client('s3', region_name=sink_region)
rawr_sink = RawrS3Sink(
s3_client, bucket, prefix, extension, tile_key_gen, tags)
elif sink_type == 'none':
from tilequeue.rawr import RawrNullSink
rawr_sink = RawrNullSink()
else:
assert 0, 'Unknown rawr sink type %s' % sink_type
rawr_source = parse_sources(rawr_source_list)
rawr_formatter = Msgpack()
rawr_gen = RawrGenerator(rawr_source, rawr_formatter, rawr_sink)
return rawr_gen, conn_ctx
|
python
|
{
"resource": ""
}
|
q14175
|
tilequeue_rawr_seed_toi
|
train
|
def tilequeue_rawr_seed_toi(cfg, peripherals):
"""command to read the toi and enqueue the corresponding rawr tiles"""
tiles_of_interest = peripherals.toi.fetch_tiles_of_interest()
coords = map(coord_unmarshall_int, tiles_of_interest)
_tilequeue_rawr_seed(cfg, peripherals, coords)
|
python
|
{
"resource": ""
}
|
q14176
|
tilequeue_rawr_seed_all
|
train
|
def tilequeue_rawr_seed_all(cfg, peripherals):
"""command to enqueue all the tiles at the group-by zoom"""
rawr_yaml = cfg.yml.get('rawr')
assert rawr_yaml is not None, 'Missing rawr configuration in yaml'
group_by_zoom = rawr_yaml.get('group-zoom')
assert group_by_zoom is not None, 'Missing group-zoom rawr config'
max_coord = 2 ** group_by_zoom
# creating the list of all coordinates here might be a lot of memory, but
# if we handle the TOI okay then we should be okay with z10. if the group
# by zoom is much larger, then it might start running into problems.
coords = []
for x in xrange(0, max_coord):
for y in xrange(0, max_coord):
coords.append(Coordinate(zoom=group_by_zoom, column=x, row=y))
_tilequeue_rawr_seed(cfg, peripherals, coords)
|
python
|
{
"resource": ""
}
|
q14177
|
update_arc_indexes
|
train
|
def update_arc_indexes(geometry, merged_arcs, old_arcs):
""" Updated geometry arc indexes, and add arcs to merged_arcs along the way.
Arguments are modified in-place, and nothing is returned.
"""
if geometry['type'] in ('Point', 'MultiPoint'):
return
elif geometry['type'] == 'LineString':
for arc_index, old_arc in enumerate(geometry['arcs']):
geometry['arcs'][arc_index] = len(merged_arcs)
merged_arcs.append(old_arcs[old_arc])
elif geometry['type'] == 'Polygon':
for ring in geometry['arcs']:
for arc_index, old_arc in enumerate(ring):
ring[arc_index] = len(merged_arcs)
merged_arcs.append(old_arcs[old_arc])
elif geometry['type'] == 'MultiLineString':
for part in geometry['arcs']:
for arc_index, old_arc in enumerate(part):
part[arc_index] = len(merged_arcs)
merged_arcs.append(old_arcs[old_arc])
elif geometry['type'] == 'MultiPolygon':
for part in geometry['arcs']:
for ring in part:
for arc_index, old_arc in enumerate(ring):
ring[arc_index] = len(merged_arcs)
merged_arcs.append(old_arcs[old_arc])
else:
raise NotImplementedError("Can't do %s geometries" % geometry['type'])
|
python
|
{
"resource": ""
}
|
q14178
|
get_transform
|
train
|
def get_transform(bounds, size=4096):
""" Return a TopoJSON transform dictionary and a point-transforming function.
Size is the tile size in pixels and sets the implicit output
resolution.
"""
tx, ty = bounds[0], bounds[1]
sx, sy = (bounds[2] - bounds[0]) / size, (bounds[3] - bounds[1]) / size
def forward(lon, lat):
""" Transform a longitude and latitude to TopoJSON integer space.
"""
return int(round((lon - tx) / sx)), int(round((lat - ty) / sy))
return dict(translate=(tx, ty), scale=(sx, sy)), forward
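# A minimal worked example (illustrative bounds, default 4096px extent):
# transform, forward = get_transform((0.0, 0.0, 100.0, 100.0))
# transform   -> {'translate': (0.0, 0.0), 'scale': (100.0 / 4096, 100.0 / 4096)}
# forward(50.0, 50.0) -> (2048, 2048)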
|
python
|
{
"resource": ""
}
|
q14179
|
diff_encode
|
train
|
def diff_encode(line, transform):
""" Differentially encode a shapely linestring or ring.
"""
coords = [transform(x, y) for (x, y) in line.coords]
pairs = zip(coords[:], coords[1:])
diffs = [(x2 - x1, y2 - y1) for ((x1, y1), (x2, y2)) in pairs]
return coords[:1] + [(x, y) for (x, y) in diffs if (x, y) != (0, 0)]
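# A minimal sketch, assuming a shapely LineString and an identity transform:
# from shapely.geometry import LineString
# line = LineString([(0, 0), (2, 3), (2, 3), (5, 1)])
# diff_encode(line, lambda x, y: (x, y))
# -> [(0.0, 0.0), (2.0, 3.0), (3.0, -2.0)]   # zero-length moves are dropped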
|
python
|
{
"resource": ""
}
|
q14180
|
jinja_filter_bbox_overlaps
|
train
|
def jinja_filter_bbox_overlaps(bounds, geometry_col_name, srid=3857):
"""
Check whether the boundary of the geometry intersects with the bounding
box.
Note that the usual meaning of "overlaps" in GIS terminology is that the
boundaries of the box and polygon intersect, but not the interiors. This
means that if the box or polygon is completely within the other, then
st_overlaps will be false.
However, that's not what we want. This is used for boundary testing, and
while we don't want to pull out a whole country boundary if the bounding
box is fully within it, we _do_ want to if the country boundary is within
the bounding box.
Therefore, this test has an extra "or st_contains" test to also pull in any
boundaries which are completely within the bounding box.
"""
min_point = 'ST_MakePoint(%.12f, %.12f)' % (bounds[0], bounds[1])
max_point = 'ST_MakePoint(%.12f, %.12f)' % (bounds[2], bounds[3])
bbox_no_srid = 'ST_MakeBox2D(%s, %s)' % (min_point, max_point)
bbox = 'ST_SetSrid(%s, %d)' % (bbox_no_srid, srid)
bbox_filter = \
'((%(col)s && %(bbox)s) AND (' \
' st_overlaps(%(col)s, %(bbox)s) OR' \
' st_contains(%(bbox)s, %(col)s)' \
'))' \
% dict(col=geometry_col_name, bbox=bbox)
return bbox_filter
|
python
|
{
"resource": ""
}
|
q14181
|
make_db_data_fetcher
|
train
|
def make_db_data_fetcher(postgresql_conn_info, template_path, reload_templates,
query_cfg, io_pool):
"""
Returns an object which is callable with the zoom and unpadded bounds and
which returns a list of rows.
"""
sources = parse_source_data(query_cfg)
queries_generator = make_queries_generator(
sources, template_path, reload_templates)
return DataFetcher(
postgresql_conn_info, queries_generator, io_pool)
|
python
|
{
"resource": ""
}
|
q14182
|
common_parent
|
train
|
def common_parent(a, b):
"""
Find the common parent tile of both a and b. The common parent is the tile
at the highest zoom which both a and b can be transformed into by lowering
their zoom levels.
"""
if a.zoom < b.zoom:
b = b.zoomTo(a.zoom).container()
elif a.zoom > b.zoom:
a = a.zoomTo(b.zoom).container()
while a.row != b.row or a.column != b.column:
a = a.zoomBy(-1).container()
b = b.zoomBy(-1).container()
# by this point a == b.
return a
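# A minimal sketch, assuming ModestMaps.Core.Coordinate(row, column, zoom):
# a = Coordinate(row=2, column=5, zoom=3)
# b = Coordinate(row=3, column=4, zoom=3)
# common_parent(a, b)   -> Coordinate(row=1, column=2, zoom=2)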
|
python
|
{
"resource": ""
}
|
q14183
|
_parent_tile
|
train
|
def _parent_tile(tiles):
"""
Find the common parent tile for a sequence of tiles.
"""
parent = None
for t in tiles:
if parent is None:
parent = t
else:
parent = common_parent(parent, t)
return parent
|
python
|
{
"resource": ""
}
|
q14184
|
make_metatiles
|
train
|
def make_metatiles(size, tiles, date_time=None):
"""
Group by layers, and make metatiles out of all the tiles which share those
properties relative to the "top level" tile which is parent of them all.
Provide a 6-tuple date_time to set the timestamp on each tile within the
metatile, or leave it as None to use the current time.
"""
groups = defaultdict(list)
for tile in tiles:
key = tile['layer']
groups[key].append(tile)
metatiles = []
for group in groups.itervalues():
parent = _parent_tile(t['coord'] for t in group)
metatiles.extend(make_multi_metatile(parent, group, date_time))
return metatiles
|
python
|
{
"resource": ""
}
|
q14185
|
_metatile_contents_equal
|
train
|
def _metatile_contents_equal(zip_1, zip_2):
"""
Given two open zip files as arguments, this returns True if the zips
both contain the same set of files, having the same names, and each
file within the zip is byte-wise identical to the one with the same
name in the other zip.
"""
names_1 = set(zip_1.namelist())
names_2 = set(zip_2.namelist())
if names_1 != names_2:
return False
for n in names_1:
bytes_1 = zip_1.read(n)
bytes_2 = zip_2.read(n)
if bytes_1 != bytes_2:
return False
return True
|
python
|
{
"resource": ""
}
|
q14186
|
metatiles_are_equal
|
train
|
def metatiles_are_equal(tile_data_1, tile_data_2):
"""
Return True if the two tiles are both zipped metatiles and contain the
same set of files with the same contents. This ignores the timestamp of
the individual files in the zip files, as well as their order or any
other metadata.
"""
try:
buf_1 = StringIO.StringIO(tile_data_1)
buf_2 = StringIO.StringIO(tile_data_2)
with zipfile.ZipFile(buf_1, mode='r') as zip_1:
with zipfile.ZipFile(buf_2, mode='r') as zip_2:
return _metatile_contents_equal(zip_1, zip_2)
except (StandardError, zipfile.BadZipFile, zipfile.LargeZipFile):
# errors, such as files not being proper zip files, or missing
# some attributes or contents that we expect, are treated as not
# equal.
pass
return False
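# A minimal sketch (Python 2 style, matching the StringIO/zipfile usage above);
# two zips that differ only in their entries' timestamps compare equal:
# def _zip(ts):
#     buf = StringIO.StringIO()
#     with zipfile.ZipFile(buf, mode='w') as z:
#         z.writestr(zipfile.ZipInfo('all.json', ts), '{"k": 1}')
#     return buf.getvalue()
# metatiles_are_equal(_zip((2017, 1, 1, 0, 0, 0)), _zip((2018, 1, 1, 0, 0, 0)))  # -> True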
|
python
|
{
"resource": ""
}
|
q14187
|
make_coord_dict
|
train
|
def make_coord_dict(coord):
"""helper function to make a dict from a coordinate for logging"""
return dict(
z=int_if_exact(coord.zoom),
x=int_if_exact(coord.column),
y=int_if_exact(coord.row),
)
|
python
|
{
"resource": ""
}
|
q14188
|
convert_feature_layers_to_dict
|
train
|
def convert_feature_layers_to_dict(feature_layers):
"""takes a list of 'feature_layer' objects and converts to a dict
keyed by the layer name"""
features_by_layer = {}
for feature_layer in feature_layers:
layer_name = feature_layer['name']
features = feature_layer['features']
features_by_layer[layer_name] = features
return features_by_layer
|
python
|
{
"resource": ""
}
|
q14189
|
ZoomRangeAndZoomGroupQueueMapper.group
|
train
|
def group(self, coords):
"""return CoordGroups that can be used to send to queues
Each CoordGroup represents a message that can be sent to a
particular queue, stamped with the queue_id. The list of
coords, which may contain just one coordinate, is what should be used as the
payload for each queue message.
"""
groups = []
for i in range(len(self.zoom_range_items)):
groups.append([])
# first group the coordinates based on their queue
for coord in coords:
for i, zri in enumerate(self.zoom_range_items):
toi_match = zri.in_toi is None or \
(coord in self.toi_set) == zri.in_toi
if zri.start <= coord.zoom < zri.end and toi_match:
groups[i].append(coord)
break
# now, we need to just verify that for each particular group,
# should they be further grouped, eg by a particular zoom 10
# tile
for i, zri in enumerate(self.zoom_range_items):
group = groups[i]
if not group:
continue
if zri.group_by_zoom is None:
for coord in group:
yield CoordGroup([coord], zri.queue_id)
else:
by_parent_coords = defaultdict(list)
for coord in group:
if coord.zoom >= zri.group_by_zoom:
group_coord = coord.zoomTo(zri.group_by_zoom)
group_key = coord_marshall_int(group_coord)
by_parent_coords[group_key].append(coord)
else:
# this means that a coordinate belonged to a
# particular queue but the zoom was lower than
# the group by zoom
# this probably shouldn't happen
# should it be an assert instead?
yield CoordGroup([coord], zri.queue_id)
for group_key, coords in by_parent_coords.iteritems():
yield CoordGroup(coords, zri.queue_id)
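
# Illustrative usage sketch (not part of the library): how the mapper is
# constructed, how queues are looked up, and the CoordGroup attribute names
# are assumptions here; only the group() call itself mirrors the code above.
#
#   mapper = ZoomRangeAndZoomGroupQueueMapper(...)  # configured elsewhere
#   for coord_group in mapper.group(coords):
#       queue = queues[coord_group.queue_id]        # hypothetical lookup
#       queue.send(coord_group.coords)              # hypothetical send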
|
python
|
{
"resource": ""
}
|
q14190
|
common_parent
|
train
|
def common_parent(coords, parent_zoom):
"""
    Return the common parent of coords at parent_zoom.
    Also check that all coords do indeed share that same parent coordinate.
"""
parent = None
for coord in coords:
assert parent_zoom <= coord.zoom
coord_parent = coord.zoomTo(parent_zoom).container()
if parent is None:
parent = coord_parent
else:
assert parent == coord_parent
assert parent is not None, 'No coords?'
return parent
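
# Illustrative usage sketch (not part of the library): assumes ModestMaps is
# installed. Both zoom-12 coordinates below fall inside the same zoom-10 tile,
# so the assertions pass and that shared parent (10/163/395) is returned.
def _example_common_parent():
    from ModestMaps.Core import Coordinate
    coords = [
        Coordinate(zoom=12, column=654, row=1583),
        Coordinate(zoom=12, column=655, row=1582),
    ]
    return common_parent(coords, 10)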
|
python
|
{
"resource": ""
}
|
q14191
|
convert_coord_object
|
train
|
def convert_coord_object(coord):
"""Convert ModestMaps.Core.Coordinate -> raw_tiles.tile.Tile"""
assert isinstance(coord, Coordinate)
coord = coord.container()
return Tile(int(coord.zoom), int(coord.column), int(coord.row))
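
# Illustrative usage sketch (not part of the library): assumes ModestMaps and
# the raw_tiles package are importable, as this module's own imports require.
#
#   from ModestMaps.Core import Coordinate
#   tile = convert_coord_object(Coordinate(zoom=10, column=163, row=395))
#   # -> a Tile with z=10, x=163, y=395; unconvert_coord_object() reverses it.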
|
python
|
{
"resource": ""
}
|
q14192
|
unconvert_coord_object
|
train
|
def unconvert_coord_object(tile):
"""Convert rawr_tiles.tile.Tile -> ModestMaps.Core.Coordinate"""
assert isinstance(tile, Tile)
return Coordinate(zoom=tile.z, column=tile.x, row=tile.y)
|
python
|
{
"resource": ""
}
|
q14193
|
make_rawr_zip_payload
|
train
|
def make_rawr_zip_payload(rawr_tile, date_time=None):
"""make a zip file from the rawr tile formatted data"""
if date_time is None:
date_time = gmtime()[0:6]
buf = StringIO()
with zipfile.ZipFile(buf, mode='w') as z:
for fmt_data in rawr_tile.all_formatted_data:
zip_info = zipfile.ZipInfo(fmt_data.name, date_time)
z.writestr(zip_info, fmt_data.data, zipfile.ZIP_DEFLATED)
return buf.getvalue()
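
# Illustrative usage sketch (not part of the library): any object exposing
# all_formatted_data as an iterable of items with .name and .data attributes
# can be zipped this way; the stand-in namedtuples below only mimic that
# shape, and the table name and bytes are placeholders.
def _example_make_rawr_zip_payload():
    from collections import namedtuple
    FormattedData = namedtuple('FormattedData', 'name data')
    FakeRawrTile = namedtuple('FakeRawrTile', 'all_formatted_data')
    fake_tile = FakeRawrTile([FormattedData('planet_osm_line', b'msgpack-bytes')])
    return make_rawr_zip_payload(fake_tile, date_time=(2018, 1, 1, 0, 0, 0))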
|
python
|
{
"resource": ""
}
|
q14194
|
unpack_rawr_zip_payload
|
train
|
def unpack_rawr_zip_payload(table_sources, payload):
"""unpack a zipfile and turn it into a callable "tables" object."""
# the io we get from S3 is streaming, so we can't seek on it, but zipfile
# seems to require that. so we buffer it all in memory. RAWR tiles are
# generally up to around 100MB in size, which should be safe to store in
# RAM.
from tilequeue.query.common import Table
from io import BytesIO
zfh = zipfile.ZipFile(BytesIO(payload), 'r')
def get_table(table_name):
# need to extract the whole compressed file from zip reader, as it
# doesn't support .tell() on the filelike, which gzip requires.
data = zfh.open(table_name, 'r').read()
unpacker = Unpacker(file_like=BytesIO(data))
source = table_sources[table_name]
return Table(source, unpacker)
return get_table
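
# Illustrative usage sketch (not part of the library): table_sources is
# assumed to map table names to their source descriptors, and payload is a
# zip such as the one produced by make_rawr_zip_payload.
#
#   tables = unpack_rawr_zip_payload(table_sources, payload)
#   table = tables('planet_osm_line')  # Table wrapping the msgpack Unpacker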
|
python
|
{
"resource": ""
}
|
q14195
|
SqsQueue.send
|
train
|
def send(self, payloads, logger, num_tries=5):
"""
Enqueue payloads to the SQS queue, retrying failed messages with
exponential backoff.
"""
from time import sleep
backoff_interval = 1
backoff_factor = 2
for try_counter in xrange(0, num_tries):
failed_messages = self.send_without_retry(payloads)
# success!
if not failed_messages:
payloads = []
break
# output some information about the failures for debugging
# purposes. we expect failures to be quite rare, so we can be
# pretty verbose.
if logger:
for msg in failed_messages:
logger.warning("Failed to send message on try %d: Id=%r, "
"SenderFault=%r, Code=%r, Message=%r" %
(try_counter, msg['Id'],
msg.get('SenderFault'), msg.get('Code'),
msg.get('Message')))
# wait a little while, in case the problem is that we're talking
# too fast.
sleep(backoff_interval)
backoff_interval *= backoff_factor
# filter out the failed payloads for retry
retry_payloads = []
for msg in failed_messages:
i = int(msg['Id'])
retry_payloads.append(payloads[i])
payloads = retry_payloads
if payloads:
raise Exception('Messages failed to send to sqs after %d '
'retries: %s' % (num_tries, len(payloads)))
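
# Illustrative usage sketch (not part of the library): how an SqsQueue is
# constructed is assumed here; only the send() call itself mirrors the code
# above. payloads is a list of message bodies.
#
#   import logging
#   queue = SqsQueue(sqs_client, queue_url, ...)  # constructor args assumed
#   queue.send(payloads, logging.getLogger('tilequeue'))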
|
python
|
{
"resource": ""
}
|
q14196
|
SqsQueue.read
|
train
|
def read(self):
"""read a single message from the queue"""
resp = self.sqs_client.receive_message(
QueueUrl=self.queue_url,
MaxNumberOfMessages=1,
AttributeNames=('SentTimestamp',),
WaitTimeSeconds=self.recv_wait_time_seconds,
)
if resp['ResponseMetadata']['HTTPStatusCode'] != 200:
raise Exception('Invalid status code from sqs: %s' %
resp['ResponseMetadata']['HTTPStatusCode'])
msgs = resp.get('Messages')
if not msgs:
return None
assert len(msgs) == 1
msg = msgs[0]
payload = msg['Body']
handle = msg['ReceiptHandle']
timestamp = msg['Attributes']['SentTimestamp']
metadata = dict(timestamp=timestamp)
msg_handle = MessageHandle(handle, payload, metadata)
return msg_handle
|
python
|
{
"resource": ""
}
|
q14197
|
SqsQueue.done
|
train
|
def done(self, msg_handle):
"""acknowledge completion of message"""
self.sqs_client.delete_message(
QueueUrl=self.queue_url,
ReceiptHandle=msg_handle.handle,
)
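
# Illustrative consumer-loop sketch (not part of the library) tying read()
# and done() together; process() is a hypothetical handler.
#
#   while True:
#       msg_handle = queue.read()
#       if msg_handle is None:
#           continue  # long poll returned no messages
#       process(msg_handle.payload)  # hypothetical
#       queue.done(msg_handle)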
|
python
|
{
"resource": ""
}
|
q14198
|
RawrToiIntersector.tiles_of_interest
|
train
|
def tiles_of_interest(self):
"""conditionally get the toi from s3"""
# also return back whether the response was cached
# useful for metrics
is_cached = False
get_options = dict(
Bucket=self.bucket,
Key=self.key,
)
if self.etag:
get_options['IfNoneMatch'] = self.etag
try:
resp = self.s3_client.get_object(**get_options)
except Exception as e:
# boto3 client treats 304 responses as exceptions
if isinstance(e, ClientError):
resp = getattr(e, 'response', None)
assert resp
else:
raise e
status_code = resp['ResponseMetadata']['HTTPStatusCode']
if status_code == 304:
assert self.prev_toi
toi = self.prev_toi
is_cached = True
elif status_code == 200:
body = resp['Body']
try:
gzip_payload = body.read()
finally:
try:
body.close()
except Exception:
pass
gzip_file_obj = StringIO(gzip_payload)
toi = load_set_from_gzipped_fp(gzip_file_obj)
self.prev_toi = toi
self.etag = resp['ETag']
else:
assert 0, 'Unknown status code from toi get: %s' % status_code
return toi, is_cached
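
# Illustrative usage sketch (not part of the library): repeated calls reuse
# the stored ETag, so S3 can answer 304 Not Modified and the previously
# downloaded set is returned with is_cached=True.
#
#   toi, is_cached = intersector.tiles_of_interest()  # first call downloads
#   toi, is_cached = intersector.tiles_of_interest()  # unchanged -> cached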
|
python
|
{
"resource": ""
}
|
q14199
|
_ack_coord_handle
|
train
|
def _ack_coord_handle(
coord, coord_handle, queue_mapper, msg_tracker, timing_state,
tile_proc_logger, stats_handler):
"""share code for acknowledging a coordinate"""
# returns tuple of (handle, error), either of which can be None
track_result = msg_tracker.done(coord_handle)
queue_handle = track_result.queue_handle
if not queue_handle:
return None, None
tile_queue = queue_mapper.get_queue(queue_handle.queue_id)
assert tile_queue, \
'Missing tile_queue: %s' % queue_handle.queue_id
parent_tile = None
if track_result.all_done:
parent_tile = track_result.parent_tile
try:
tile_queue.job_done(queue_handle.handle)
except Exception as e:
stacktrace = format_stacktrace_one_line()
tile_proc_logger.error_job_done(
'tile_queue.job_done', e, stacktrace,
coord, parent_tile,
)
return queue_handle, e
if parent_tile is not None:
# we completed a tile pyramid and should log appropriately
start_time = timing_state['start']
stop_time = convert_seconds_to_millis(time.time())
tile_proc_logger.log_processed_pyramid(
parent_tile, start_time, stop_time)
stats_handler.processed_pyramid(
parent_tile, start_time, stop_time)
else:
try:
tile_queue.job_progress(queue_handle.handle)
except Exception as e:
stacktrace = format_stacktrace_one_line()
err_details = {"queue_handle": queue_handle.handle}
if isinstance(e, JobProgressException):
err_details = e.err_details
tile_proc_logger.error_job_progress(
'tile_queue.job_progress', e, stacktrace,
coord, parent_tile, err_details,
)
return queue_handle, e
return queue_handle, None
|
python
|
{
"resource": ""
}
|