_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3 values | text stringlengths 75 19.8k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
def validate_nonce(nonce, secret):
    '''
    Is the nonce one that was generated by this library using the provided secret?

    :param nonce: nonce string of the form "timestamp:salt:signature".
    :param secret: the shared secret originally used to sign the nonce.
    :returns: True if the nonce verifies, False otherwise.
    '''
    import hmac
    nonce_components = nonce.split(':', 2)
    if len(nonce_components) != 3:
        return False
    timestamp = nonce_components[0]
    salt = nonce_components[1]
    calculated_nonce = calculate_nonce(timestamp, secret, salt)
    # Constant-time comparison: the nonce is attacker-controlled, so a plain
    # `==` would leak the signature one byte at a time via timing.
    return hmac.compare_digest(nonce, calculated_nonce)
"resource": ""
} |
def calculate_partial_digest(username, realm, password):
    '''
    Calculate a partial digest that may be stored and used to authenticate future
    HTTP Digest sessions.

    :param username: the user's name.
    :param realm: the authentication realm.
    :param password: the user's plaintext password.
    :returns: hex MD5 digest of "username:realm:password".
    '''
    import hashlib
    # Build the text first, then encode once: the old code interpolated
    # pre-encoded bytes into a str format, which embeds "b'...'" reprs on
    # Python 3, and used the removed Python-2-only `md5` module.
    joined = '%s:%s:%s' % (username, realm, password)
    return hashlib.md5(joined.encode('utf-8')).hexdigest()
"resource": ""
} |
def build_digest_challenge(timestamp, secret, realm, opaque, stale):
    '''
    Build a Digest challenge suitable as the value of the 'WWW-Authenticate'
    header in a 401 or 403 response.

    'opaque' may be any value - it will be returned by the client.

    'timestamp' will be incorporated and signed in the nonce - it may be
    retrieved from the client's authentication request using
    get_nonce_timestamp().
    '''
    nonce = calculate_nonce(timestamp, secret)
    stale_flag = 'true' if stale else 'false'
    parts = format_parts(realm=realm, qop='auth', nonce=nonce, opaque=opaque,
                         algorithm='MD5', stale=stale_flag)
    return 'Digest %s' % parts
"resource": ""
} |
def calculate_request_digest(method, partial_digest, digest_response=None,
                             uri=None, nonce=None, nonce_count=None, client_nonce=None):
    '''
    Calculates a value for the 'response' value of the client authentication request.
    Requires the 'partial_digest' calculated from the realm, username, and password.

    Either call it with a digest_response to use the values from an authentication
    request, or pass the individual parameters (i.e. to generate an authentication
    request).

    :raises Exception: if both digest_response and individual parameters are
        supplied, or if neither set is complete.
    '''
    import hashlib
    if digest_response:
        if uri or nonce or nonce_count or client_nonce:
            raise Exception("Both digest_response and one or more "
                            "individual parameters were sent.")
        uri = digest_response.uri
        nonce = digest_response.nonce
        nonce_count = digest_response.nc
        client_nonce = digest_response.cnonce
    elif not (uri and nonce and (nonce_count is not None) and client_nonce):
        raise Exception("Neither digest_response nor all individual parameters were sent.")
    # HA2 for qop="auth": hash of "method:uri".
    ha2 = hashlib.md5(('%s:%s' % (method, uri)).encode('utf-8')).hexdigest()
    # nonce_count is rendered as an 8-digit lowercase hex string per the
    # Digest scheme.
    data = '%s:%s:%s:%s:%s' % (nonce, '%08x' % nonce_count, client_nonce, 'auth', ha2)
    # Final response digest combines HA1 (partial_digest) with the request data.
    # hashlib replaces the removed Python-2-only `md5` module.
    return hashlib.md5(('%s:%s' % (partial_digest, data)).encode('utf-8')).hexdigest()
"resource": ""
} |
def build_authorization_request(username, method, uri, nonce_count, digest_challenge=None,
                                realm=None, nonce=None, opaque=None, password=None,
                                request_digest=None, client_nonce=None):
    '''
    Builds an authorization request that may be sent as the value of the 'Authorization'
    header in an HTTP request.

    Either a digest_challenge object (as returned from parse_digest_challenge) or its required
    component parameters (nonce, realm, opaque) must be provided.

    The nonce_count should be the last used nonce_count plus one.

    Either the password or the request_digest should be provided - if provided, the password
    will be used to generate a request digest. The client_nonce is optional - if not provided,
    a random value will be generated.
    '''
    if not client_nonce:
        client_nonce = ''.join([random.choice('0123456789ABCDEF') for x in range(32)])
    if digest_challenge and (realm or nonce or opaque):
        # Fixed: the two literal fragments previously joined without a space,
        # producing "...opaquewere sent.".
        raise Exception("Both digest_challenge and one or more of realm, nonce, and opaque "
                        "were sent.")
    if digest_challenge:
        # NOTE(review): types.StringType is Python 2 only - confirm the target
        # version; on Python 3 this must become isinstance(digest_challenge, str).
        if isinstance(digest_challenge, types.StringType):
            digest_challenge_header = digest_challenge
            digest_challenge = parse_digest_challenge(digest_challenge_header)
            if not digest_challenge:
                raise Exception("The provided digest challenge header could not be parsed: %s" %
                                digest_challenge_header)
        realm = digest_challenge.realm
        nonce = digest_challenge.nonce
        opaque = digest_challenge.opaque
    elif not (realm and nonce and opaque):
        raise Exception("Either digest_challenge or realm, nonce, and opaque must be sent.")
    if password and request_digest:
        raise Exception("Both password and calculated request_digest were sent.")
    elif not request_digest:
        if not password:
            raise Exception("Either password or calculated request_digest must be provided.")
        partial_digest = calculate_partial_digest(username, realm, password)
        request_digest = calculate_request_digest(method, partial_digest, uri=uri, nonce=nonce,
                                                  nonce_count=nonce_count,
                                                  client_nonce=client_nonce)
    return 'Digest %s' % format_parts(username=username, realm=realm, nonce=nonce, uri=uri,
                                      response=request_digest, algorithm='MD5', opaque=opaque,
                                      qop='auth', nc='%08x' % nonce_count, cnonce=client_nonce)
"resource": ""
} |
def parse_digest_challenge(authentication_header):
    '''
    Parse the value of a 'WWW-Authenticate' header.

    Returns an object with properties corresponding to each of the recognized
    parameters in the header, or None when the header is not a usable digest
    challenge (wrong scheme, missing parts, or unsupported algorithm/qop).
    '''
    if not is_digest_challenge(authentication_header):
        return None
    defaults = {'algorithm': 'MD5', 'stale': 'false'}
    # Strip the leading "Digest " scheme token before parsing parameters.
    parts = parse_parts(authentication_header[7:], defaults=defaults)
    if not _check_required_parts(parts, _REQUIRED_DIGEST_CHALLENGE_PARTS):
        return None
    parts['stale'] = parts['stale'].lower() == 'true'
    challenge = _build_object_from_parts(parts, _REQUIRED_DIGEST_CHALLENGE_PARTS)
    # Only MD5 with qop="auth" is supported by this library.
    if challenge.algorithm != 'MD5' or challenge.qop != 'auth':
        return None
    return challenge
"resource": ""
} |
def __get_vertex_by_name(self, vertex_name):
    """ Obtains a vertex object by supplied label

    Returns a :class:`bg.vertex.BGVertex` or its subclass instance

    :param vertex_name: a vertex label it is identified by.
    :type vertex_name: any hashable python object. ``str`` expected.
    :return: vertex with supplied label if present in current :class:`BreakpointGraph`, ``None`` otherwise
    """
    vertex_class = BGVertex.get_vertex_class_from_vertex_name(vertex_name)
    # The name is a root label plus zero or more tag entries, joined by the
    # block-vertex name separator.
    data = vertex_name.split(BlockVertex.NAME_SEPARATOR)
    root_name, data = data[0], data[1:]
    if issubclass(vertex_class, TaggedVertex):
        tags = [entry.split(TaggedVertex.TAG_SEPARATOR) for entry in data]
        for tag_entry in tags:
            if len(tag_entry) == 1:
                # Tag came without a value.
                tag_entry.append(None)
            elif len(tag_entry) > 2:
                # The value itself contained the separator; re-join the tail
                # so we keep exactly [tag, value].
                tag_entry[1:] = [TaggedVertex.TAG_SEPARATOR.join(tag_entry[1:])]
        result = vertex_class(root_name)
        for tag, value in tags:
            if tag == InfinityVertex.NAME_SUFFIX and issubclass(vertex_class, InfinityVertex):
                # The infinity marker is already encoded by the vertex class,
                # not stored as a tag.
                continue
            result.add_tag(tag, value)
    else:
        result = vertex_class(root_name)
    if result in self.bg:
        # Walk the adjacency structure to return the object actually stored
        # in the graph (equal to, but not identical with, `result`).
        adjacencies = self.bg[result]
        for key, _ in adjacencies.items():
            for ref_key, values in self.bg[key].items():
                if ref_key == result:
                    return ref_key
        # NOTE(review): this fallback returns the first *neighbor* of the
        # vertex rather than the vertex itself - confirm this is intended.
        return list(self.bg[result].keys())[0]
    return None
"resource": ""
} |
def to_json(self, schema_info=True):
    """ JSON serialization method that account for all information-wise important part of breakpoint graph

    Edges, vertices and every genome referenced by any edge multicolor are
    serialized; ``schema_info`` is forwarded to each object's own to_json.
    """
    genomes = set()
    edges_json = []
    for bgedge in self.edges():
        genomes.update(bgedge.multicolor.colors)
        edges_json.append(bgedge.to_json(schema_info=schema_info))
    return {
        "edges": edges_json,
        "vertices": [vertex.to_json(schema_info=schema_info) for vertex in self.nodes()],
        "genomes": [genome.to_json(schema_info=schema_info) for genome in genomes],
    }
"resource": ""
} |
def from_json(cls, data, genomes_data=None, genomes_deserialization_required=True, merge=False):
    """ A JSON deserialization operation, that recovers a breakpoint graph from its JSON representation

    Information about genomes encoded in the breakpoint graph may be supplied
    separately via ``genomes_data``; otherwise it is recovered from the json
    object's "genomes" entry.

    :param data: json object for the breakpoint graph.
    :param genomes_data: optional external source of genome json objects.
    :param genomes_deserialization_required: when False and ``genomes_data`` is
        supplied, ``genomes_data`` is assumed to already map genome ids to
        genome objects.
    :param merge: forwarded to edge addition.
    :raises ValueError: on missing "genomes"/"vertices" data, edges referencing
        unknown vertices or genomes, or edges with an empty multicolor.
    """
    result = cls()
    vertices_dict = {}
    genomes_dict = genomes_data if genomes_data is not None and not genomes_deserialization_required else None
    if genomes_dict is None:
        # Recover genome objects, either from the externally supplied data or
        # from the graph's own "genomes" entry.
        genomes_dict = {}
        try:
            source = genomes_data if genomes_data is not None and genomes_deserialization_required else data[
                "genomes"]
        except KeyError:
            raise ValueError("Error during breakpoint graph deserialization. No \"genomes\" information found")
        for g_dict in source:
            # A genome json object may name its encoding schema; the schema
            # must be registered in `genomes_json_schemas`.
            schema_name = g_dict.get(BGGenome_JSON_SCHEMA_JSON_KEY, None)
            schema_class = None if schema_name is None else cls.genomes_json_schemas.get(schema_name, None)
            genomes_dict[g_dict["g_id"]] = BGGenome.from_json(data=g_dict, json_schema_class=schema_class)
    if "vertices" not in data:
        # Edges reference vertices by id, so explicit vertex data is mandatory.
        raise ValueError(
            "Error during breakpoint graph deserialization. \"vertices\" key is not present in json object")
    for vertex_dict in data["vertices"]:
        # Vertex json may name its encoding schema (registered in
        # `vertices_json_schemas`); the concrete vertex class is recovered
        # from the vertex name when possible.
        schema_name = vertex_dict.get(BGVertex_JSON_SCHEMA_JSON_KEY, None)
        schema_class = None if schema_name is None else cls.vertices_json_schemas.get(schema_name, None)
        try:
            vertex_class = BGVertex.get_vertex_class_from_vertex_name(vertex_dict["name"])
        except KeyError:
            vertex_class = BGVertex
        vertices_dict[vertex_dict["v_id"]] = vertex_class.from_json(data=vertex_dict,
                                                                    json_schema_class=schema_class)
    for edge_dict in data["edges"]:
        # Edge json may name its encoding schema (registered in
        # `edges_json_schemas`).
        schema_name = edge_dict.get(BGEdge_JSON_SCHEMA_JSON_KEY, None)
        schema = None if schema_name is None else cls.edges_json_schemas.get(schema_name, None)
        edge = BGEdge.from_json(data=edge_dict, json_schema_class=schema)
        try:
            edge.vertex1 = vertices_dict[edge.vertex1]
            edge.vertex2 = vertices_dict[edge.vertex2]
        except KeyError:
            raise ValueError(
                "Error during breakpoint graph deserialization. Deserialized edge references non-present vertex")
        if len(edge.multicolor) == 0:
            # Edges with empty multicolor are not permitted in breakpoint graphs.
            raise ValueError(
                "Error during breakpoint graph deserialization. Empty multicolor for deserialized edge")
        try:
            edge.multicolor = Multicolor(*[genomes_dict[g_id] for g_id in edge.multicolor])
        except KeyError:
            raise ValueError(
                "Error during breakpoint graph deserialization. Deserialized edge reference non-present "
                "genome in its multicolor")
        result.__add_bgedge(edge, merge=merge)
    return result
"resource": ""
} |
def get(request, obj_id=None):
    """Lists all tags, or returns a single tag when ``obj_id`` is given.

    :param obj_id: optional tag primary key; '0' maps to the synthetic
        TAGLESS pseudo-tag.
    :returns: json
    """
    res = Result()
    if obj_id:
        if obj_id == '0':
            # Pseudo-tag representing untagged items; id 0 never exists in the DB.
            obj = {
                'id': 0,
                'name': 'TAGLESS',
                'artist': False,
            }
        else:
            obj = get_object_or_404(Tag, pk=obj_id).json()
        res.append(obj)
        return JsonResponse(res.asDict())
    else:
        if request.GET.get('count'):
            # Annotate usage counts for images and videos separately, then sum.
            # NOTE(review): assumes both querysets yield tags in the same order;
            # a mismatch would pair counts with the wrong tag - confirm ordering.
            itags = Tag.objects.all().annotate(icount=Count('image'))
            vtags = Tag.objects.all().annotate(vcount=Count('video'))
            for i, tag in enumerate(itags):
                tag.count = itags[i].icount + vtags[i].vcount
                res.append(tag.json())
        else:
            for tag in Tag.objects.all():
                res.append(tag.json())
        return JsonResponse(res.asDict())
"resource": ""
} |
def post(request):
    """Create a tag (or return the existing one with the same name).

    :param name: Name for tag
    :type name: str
    :returns: json
    """
    res = Result()
    data = request.POST or json.loads(request.body)['body']
    name = data.get('name', None)
    if name:
        # Tag names are stored lowercase; reuse an existing row when present.
        tag, _created = Tag.objects.get_or_create(name=name.lower())
        res.append(tag.json())
    else:
        res.isError = True
        res.message = "No name given"
    return JsonResponse(res.asDict())
"resource": ""
} |
def put(request, obj_id=None):
    """Adds tags to objects resolved from guids, or edits one tag in place.

    :param tags: Tags to add
    :type tags: list
    :param guids: Guids to add tags from
    :type guids: list
    :returns: json
    """
    res = Result()
    data = request.PUT or json.loads(request.body)['body']
    if obj_id:
        # -- Edit the tag
        tag = Tag.objects.get(pk=obj_id)
        tag.name = data.get('name', tag.name)
        tag.artist = data.get('artist', tag.artist)
        tag.save()
    else:
        # Comma-separated inputs; drop empty fragments from trailing commas.
        tags = [_ for _ in data.get('tags', '').split(',') if _]
        guids = [_ for _ in data.get('guids', '').split(',') if _]
        _manageTags(tags, guids)
    return JsonResponse(res.asDict())
"resource": ""
} |
def delete(request, obj_id=None):
    """Removes tags from objects resolved from guids, or deletes a tag entirely.

    :param tags: Tags to remove
    :type tags: list
    :param guids: Guids to remove tags from
    :type guids: list
    :returns: json
    """
    res = Result()
    if obj_id:
        # -- Delete the tag itself
        tag = Tag.objects.get(pk=obj_id)
        guids = []
        # Collect every image and video currently carrying this tag so the
        # association can be removed before the tag row is deleted.
        images = Image.objects.filter(tags__id=obj_id)
        guids += [_.guid for _ in images]
        videos = Video.objects.filter(tags__id=obj_id)
        guids += [_.guid for _ in videos]
        # -- Remove all tags from objects
        _manageTags([tag.id], guids, add=False)
        # -- Delete old tags
        tag.delete()
    else:
        # Comma-separated inputs; drop empty fragments.
        tags = [_ for _ in request.DELETE.get('tags', '').split(',') if _]
        guids = [_ for _ in request.DELETE.get('guids', '').split(',') if _]
        _manageTags(tags, guids, add=False)
    return JsonResponse(res.asDict())
"resource": ""
} |
def search(request):
    """
    Search for Tag objects and return a Result-style json list of serialized
    Tag objects.

    :param search: Append a "Search for" tag
    :type search: bool
    :param zero: Exclude Tags with no items
    :type zero: bool
    :param artist: Exclude artist tags
    :type artist: bool
    :returns: json
    """
    q = request.GET.get('q', '')
    include_search = request.GET.get('search', False)
    non_zero = request.GET.get('zero', False)
    exclude_artist = request.GET.get('artist', False)
    results = []
    if include_search:
        results.append({'id': 0, 'name': 'Search for: %s' % q})
    query = Tag.objects.filter(name__icontains=q)
    if exclude_artist:
        query = query.exclude(artist=True)
    if non_zero:
        results.extend(t.json() for t in query if t.count() > 0)
    else:
        results.extend(t.json() for t in query)
    return JsonResponse(results, safe=False)
"resource": ""
} |
def merge(request, obj_id):
    """Merges multiple tags into a single tag and all related objects are reassigned

    :param obj_id: primary key of the surviving tag.
    :returns: json
    """
    res = Result()
    if request.POST:
        tags = json.loads(request.POST['tags'])
    else:
        tags = json.loads(request.body)['body']['tags']
    guids = []
    # Collect every object carrying any of the tags being merged away.
    images = Image.objects.filter(tags__id__in=tags)
    guids += [_.guid for _ in images]
    videos = Video.objects.filter(tags__id__in=tags)
    guids += [_.guid for _ in videos]
    # -- Remove all tags from objects
    _manageTags(tags, guids, add=False)
    # -- Add merged tag to all objects
    _manageTags([obj_id], guids, add=True)
    # -- Delete old tags
    Tag.objects.filter(pk__in=tags).delete()
    return JsonResponse(res.asDict())
"resource": ""
} |
def _manageTags(tagList, guids, add=True):
    """Resolve tag identifiers and add them to, or remove them from, the
    objects referenced by ``guids``.

    Entries in ``tagList`` may be numeric primary keys or tag names; names
    that do not exist yet are created.
    """
    objects = getObjectsFromGuids(guids)
    resolved = []
    for tag in tagList:
        try:
            resolved.append(Tag.objects.get(pk=int(tag)))
        except ValueError:
            # Not a numeric id - treat it as a (lowercased) tag name.
            resolved.append(Tag.objects.get_or_create(name=tag.lower())[0])
    if add:
        return _addTags(resolved, objects)
    return _removeTags(resolved, objects)
"resource": ""
} |
q40016 | _addTags | train | def _addTags(tags, objects):
""" Adds tags to objects """
for t in tags:
for o in objects:
o.tags.add(t)
return True | python | {
"resource": ""
} |
q40017 | _removeTags | train | def _removeTags(tags, objects):
""" Removes tags from objects """
for t in tags:
for o in objects:
o.tags.remove(t)
return True | python | {
"resource": ""
} |
def _short_ts_regexp():
    '''Generates regexp for parsing of
    shortened relative timestamps, as shown in the table.'''
    # One optional named group per known unit key, anchored at the start.
    pattern = '^' + ''.join(
        r'(?P<{0}>\d+{0}\s*)?'.format(key)
        for key in it.chain(_short_ts_days, _short_ts_s))
    return re.compile(pattern, re.I | re.U)
"resource": ""
} |
def _get(self, timeout):
    """
    Get a resource from the pool. If timeout is ``None`` waits
    indefinitely.

    :param timeout: Time in seconds to wait for a resource.
    :type timeout: int
    :return: A resource.
    :rtype: :class:`_ResourceTracker`
    :raises PoolEmptyError: When timeout has elapsed and unable to
        retrieve resource.
    """
    with self._lock:
        if timeout is None:
            # Block until another thread returns a resource.
            while self.empty():
                self._not_empty.wait()
        else:
            # Recompute the remaining time on every wakeup so spurious
            # wakeups do not extend the total timeout.
            time_end = time.time() + timeout
            while self.empty():
                time_left = time_end - time.time()
                if time_left < 0:
                    raise PoolEmptyError
                self._not_empty.wait(time_left)
        # Pop from the head of the circular queue and shrink the
        # available region.
        rtracker = self._reference_queue[self._resource_start]
        self._resource_start = (self._resource_start + 1) % self.maxsize
        self._available -= 1
        return rtracker
"resource": ""
} |
q40020 | CuttlePool._get_tracker | train | def _get_tracker(self, resource):
"""
Return the resource tracker that is tracking ``resource``.
:param resource: A resource.
:return: A resource tracker.
:rtype: :class:`_ResourceTracker`
"""
with self._lock:
for rt in self._reference_queue:
if rt is not None and resource is rt.resource:
return rt
raise UnknownResourceError('Resource not created by pool') | python | {
"resource": ""
} |
def _harvest_lost_resources(self):
    """Return lost resources to pool."""
    with self._lock:
        for index in self._unavailable_range():
            tracker = self._reference_queue[index]
            if tracker is None:
                continue
            if tracker.available():
                self.put_resource(tracker.resource)
"resource": ""
} |
def _make_resource(self):
    """
    Create, track, and return a new resource instance.

    :rtype: :class:`_ResourceTracker`
    :raises PoolFullError: If no free slot remains in the queue.
    """
    with self._lock:
        for index in self._unavailable_range():
            if self._reference_queue[index] is not None:
                continue
            # First empty slot found: build the resource and claim the slot.
            resource = self._factory(**self._factory_arguments)
            tracker = _ResourceTracker(resource)
            self._reference_queue[index] = tracker
            self._size += 1
            return tracker
        raise PoolFullError
"resource": ""
} |
def _put(self, rtracker):
    """
    Put a resource back in the queue.

    :param rtracker: A resource.
    :type rtracker: :class:`_ResourceTracker`
    :raises PoolFullError: If pool is full.
    :raises UnknownResourceError: If resource can't be found.
    """
    with self._lock:
        if self._available < self.capacity:
            # Locate the tracker within the unavailable region of the
            # circular queue.
            for i in self._unavailable_range():
                if self._reference_queue[i] is rtracker:
                    # i retains its value and will be used to swap with
                    # first "empty" space in queue.
                    break
            else:
                raise UnknownResourceError
            j = self._resource_end
            rq = self._reference_queue
            # Swap the tracker into the tail of the available region and
            # advance the tail pointer.
            rq[i], rq[j] = rq[j], rq[i]
            self._resource_end = (self._resource_end + 1) % self.maxsize
            self._available += 1
            # Wake one thread blocked in _get().
            self._not_empty.notify()
        else:
            raise PoolFullError
"resource": ""
} |
q40024 | CuttlePool._remove | train | def _remove(self, rtracker):
"""
Remove a resource from the pool.
:param rtracker: A resource.
:type rtracker: :class:`_ResourceTracker`
"""
with self._lock:
i = self._reference_queue.index(rtracker)
self._reference_queue[i] = None
self._size -= 1 | python | {
"resource": ""
} |
def _unavailable_range(self):
    """
    Return a generator for the indices of the unavailable region of
    ``_reference_queue``.
    """
    with self._lock:
        i = self._resource_end
        j = self._resource_start
        # The region may wrap around the circular buffer; when the pool is
        # empty the entire queue is unavailable, so force a full sweep by
        # extending the end past maxsize.
        if j < i or self.empty():
            j += self.maxsize
        for k in range(i, j):
            # Fold wrapped indices back into the queue's bounds.
            yield k % self.maxsize
"resource": ""
} |
def get_resource(self, resource_wrapper=None):
    """
    Returns a ``Resource`` instance.

    Acquisition strategy: (1) take an available resource without waiting,
    harvesting lost ones first if the pool looks empty; (2) otherwise try
    to create a new resource; (3) otherwise wait up to the pool timeout.

    :param resource_wrapper: A Resource subclass.
    :return: A ``Resource`` instance.
    :raises PoolEmptyError: If attempt to get resource fails or times
        out.
    """
    rtracker = None
    if resource_wrapper is None:
        resource_wrapper = self._resource_wrapper
    if self.empty():
        self._harvest_lost_resources()
    try:
        # Non-blocking attempt first.
        rtracker = self._get(0)
    except PoolEmptyError:
        pass
    if rtracker is None:
        # Could not find resource, try to make one.
        try:
            rtracker = self._make_resource()
        except PoolFullError:
            pass
    if rtracker is None:
        # Could not find or make resource, so must wait for a resource
        # to be returned to the pool.
        try:
            rtracker = self._get(timeout=self._timeout)
        except PoolEmptyError:
            pass
    if rtracker is None:
        raise PoolEmptyError
    # Ensure resource is active.
    if not self.ping(rtracker.resource):
        # Lock here to prevent another thread creating a resource in the
        # index that will have this resource removed. This ensures there
        # will be space for _make_resource() to place a newly created
        # resource.
        with self._lock:
            self._remove(rtracker)
            rtracker = self._make_resource()
    # Ensure all resources leave pool with same attributes.
    # normalize_connection() is used since it calls
    # normalize_resource(), so if a user implements either one, the
    # resource will still be normalized. This will be changed in 1.0 to
    # call normalize_resource() when normalize_connection() is
    # removed.
    self.normalize_connection(rtracker.resource)
    return rtracker.wrap_resource(self, resource_wrapper)
"resource": ""
} |
def put_resource(self, resource):
    """
    Return a resource to the pool, discarding it when the pool is full.

    :param resource: A resource object.
    :raises UnknownResourceError: If resource was not made by the
        pool.
    """
    tracker = self._get_tracker(resource)
    try:
        self._put(tracker)
    except PoolFullError:
        # No room left in the queue - drop the resource entirely.
        self._remove(tracker)
"resource": ""
} |
def wrap_resource(self, pool, resource_wrapper):
    """
    Wrap the tracked resource in ``resource_wrapper`` and keep a weak
    reference to the wrapper so the pool can later tell whether it is
    still alive.

    :param pool: A pool instance.
    :type pool: :class:`CuttlePool`
    :param resource_wrapper: A wrapper class for the resource.
    :type resource_wrapper: :class:`Resource`
    :return: A wrapped resource.
    :rtype: :class:`Resource`
    """
    wrapped = resource_wrapper(self.resource, pool)
    self._weakref = weakref.ref(wrapped)
    return wrapped
"resource": ""
} |
def close(self):
    """
    Return the underlying resource to the pool and detach this wrapper.

    Subsequent calls after the first are no-ops.
    """
    if self._resource is None:
        return
    self._pool.put_resource(self._resource)
    self._resource = None
    self._pool = None
"resource": ""
} |
def send(self, message, fragment_size=None, mask=False):
    """
    Send a message. When `fragment_size` is given, the message is split
    into multiple frames whose payload size does not exceed
    `fragment_size`.
    """
    frames = self.message_to_frames(message, fragment_size, mask)
    for frame in frames:
        self.send_frame(frame)
"resource": ""
} |
def handle_control_frame(self, frame):
    """
    Handle a control frame as defined by RFC 6455.

    :param frame: incoming control frame (close / ping / pong opcode).
    :raises SocketClosed: once the closing handshake is complete.
    :raises PingError: on an unsolicited PONG or a PONG whose payload does
        not match the PING we sent.
    """
    if frame.opcode == OPCODE_CLOSE:
        self.close_frame_received = True
        code, reason = frame.unpack_close()
        if self.close_frame_sent:
            # We already sent our close frame: the handshake is complete,
            # so tear the socket down.
            self.onclose(code, reason)
            self.sock.close()
            raise SocketClosed(True)
        else:
            # Peer initiated the close; remember the parameters and echo a
            # close frame back.
            self.close_params = (code, reason)
            self.send_close_frame(code, reason)
    elif frame.opcode == OPCODE_PING:
        # Respond with a pong message with identical payload
        self.send_frame(ControlFrame(OPCODE_PONG, frame.payload))
    elif frame.opcode == OPCODE_PONG:
        # Assert that the PONG payload is identical to that of the PING
        if not self.ping_sent:
            raise PingError('received PONG while no PING was sent')
        self.ping_sent = False
        if frame.payload != self.ping_payload:
            raise PingError('received PONG with invalid payload')
        self.ping_payload = None
        self.onpong(frame.payload)
"resource": ""
} |
def send_ping(self, payload=''):
    """
    Send a PING control frame with an optional payload, recording the
    payload so the matching PONG can be validated later.
    """
    frame = ControlFrame(OPCODE_PING, payload)
    self.send_frame(frame, lambda: self.onping(payload))
    self.ping_payload = payload
    self.ping_sent = True
"resource": ""
} |
def handler(self):
    'Parametrized handler function'
    if self.parameter:
        return ft.partial(self.base.handler, parameter=self.parameter)
    return self.base.handler
"resource": ""
} |
def connect(self):
    """Connect to the Redis server if necessary.

    :rtype: :class:`~tornado.concurrent.Future`
    :raises: :class:`~tredis.exceptions.ConnectError`
             :class:`~tredis.exceptions.RedisError`
    """
    future = concurrent.Future()
    if self.connected:
        raise exceptions.ConnectError('already connected')
    LOGGER.debug('%s connecting', self.name)
    # Resolve `future` from inside the connection callback so callers can
    # wait on this method directly.
    self.io_loop.add_future(
        self._client.connect(self.host, self.port),
        lambda f: self._on_connected(f, future))
    return future
"resource": ""
} |
def execute(self, command, future):
    """Execute a command after connecting if necessary.

    :param bytes command: command to execute after the connection
        is established
    :param tornado.concurrent.Future future: future to resolve
        when the command's response is received.
    """
    LOGGER.debug('execute(%r, %r)', command, future)
    if self.connected:
        self._write(command, future)
    else:
        def on_connected(cfuture):
            # Propagate connection failures straight to the caller's future.
            if cfuture.exception():
                return future.set_exception(cfuture.exception())
            self._write(command, future)
        self.io_loop.add_future(self.connect(), on_connected)
"resource": ""
} |
def _on_closed(self):
    """Invoked when the connection is closed: marks the connection as
    disconnected, fires the configured close callback, and drops the
    stream reference."""
    LOGGER.error('Redis connection closed')
    self.connected = False
    self._on_close()
    self._stream = None
"resource": ""
} |
def _on_connected(self, stream_future, connect_future):
    """Invoked when the socket stream has connected, setting up the
    stream callbacks and invoking the on connect callback if set.

    :param stream_future: The connection socket future
    :type stream_future: :class:`~tornado.concurrent.Future`
    :param connect_future: The connection response future
    :type connect_future: :class:`~tornado.concurrent.Future`
    :raises: :exc:`tredis.exceptions.ConnectError`
    """
    if stream_future.exception():
        # Wrap the underlying stream error so callers only see ConnectError.
        connect_future.set_exception(
            exceptions.ConnectError(stream_future.exception()))
    else:
        self._stream = stream_future.result()
        self._stream.set_close_callback(self._on_closed)
        self.connected = True
        connect_future.set_result(self)
"resource": ""
} |
def _write(self, command, future):
    """Write a command to the socket

    :param Command command: the Command data structure
    :param tornado.concurrent.Future future: failed here on write errors,
        otherwise resolved later by ``_on_written``.
    """
    def on_written():
        self._on_written(command, future)
    try:
        self._stream.write(command.command, callback=on_written)
    except iostream.StreamClosedError as error:
        future.set_exception(exceptions.ConnectionError(error))
    except Exception as error:
        # Catch-all so an unexpected write failure still fails the future
        # instead of propagating into the IOLoop.
        LOGGER.exception('unhandled write failure - %r', error)
        future.set_exception(exceptions.ConnectionError(error))
"resource": ""
} |
def connect(self):
    """Connect to the Redis server or Cluster.

    :rtype: tornado.concurrent.Future
    """
    LOGGER.debug('Creating a%s connection to %s:%s (db %s)',
                 ' cluster node'
                 if self._clustering else '', self._hosts[0]['host'],
                 self._hosts[0]['port'], self._hosts[0].get(
                     'db', DEFAULT_DB))
    self._connect_future = concurrent.Future()
    # Only the first configured host is used for the initial connection;
    # in cluster mode further nodes are discovered after connecting.
    conn = _Connection(
        self._hosts[0]['host'],
        self._hosts[0]['port'],
        self._hosts[0].get('db', DEFAULT_DB),
        self._read,
        self._on_closed,
        self.io_loop,
        cluster_node=self._clustering)
    self.io_loop.add_future(conn.connect(), self._on_connected)
    return self._connect_future
"resource": ""
} |
def close(self):
    """Close any open connections to Redis.

    :raises: :exc:`tredis.exceptions.ConnectionError` if not connected.
    """
    if not self._connected.is_set():
        raise exceptions.ConnectionError('not connected')
    # Mark the client as closing first so _on_closed callbacks do not
    # treat the shutdown as an error.
    self._closing = True
    if self._clustering:
        for host in self._cluster.keys():
            self._cluster[host].close()
    elif self._connection:
        self._connection.close()
"resource": ""
} |
def ready(self):
    """Indicates that the client is connected to the Redis server or
    cluster and is ready for use.

    :rtype: bool
    """
    if self._clustering:
        # Ready only when the cluster is non-empty and every node connected.
        nodes = list(self._cluster.values())
        return all(node.connected for node in nodes) and len(self._cluster)
    return self._connection and self._connection.connected
"resource": ""
} |
def _create_cluster_connection(self, node):
    """Create a connection to a Redis server.

    :param node: The node to connect to
    :type node: tredis.cluster.ClusterNode
    """
    LOGGER.debug('Creating a cluster connection to %s:%s', node.ip,
                 node.port)
    # Cluster nodes always use db 0; replicas are opened read-only.
    conn = _Connection(
        node.ip,
        node.port,
        0,
        self._read,
        self._on_closed,
        self.io_loop,
        cluster_node=True,
        read_only='slave' in node.flags,
        slots=node.slots)
    self.io_loop.add_future(conn.connect(), self._on_connected)
"resource": ""
} |
def _encode_resp(self, value):
    """Dynamically build the RESP payload for the provided value.

    :param mixed value: bytes, str, int, float, or a (possibly nested)
        list of those.
    :rtype: bytes
    :raises ValueError: for any other type.
    """
    if isinstance(value, bytes):
        # Bulk string: $<len>\r\n<data>\r\n
        length = ascii(len(value)).encode('ascii')
        return b''.join([b'$', length, CRLF, value, CRLF])
    if isinstance(value, str):  # pragma: nocover
        return self._encode_resp(value.encode('utf-8'))
    if isinstance(value, int):
        return self._encode_resp(ascii(value).encode('ascii'))
    if isinstance(value, float):
        return self._encode_resp(ascii(value).encode('ascii'))
    if isinstance(value, list):
        # Array: *<count>\r\n followed by each encoded element.
        parts = [b'*', ascii(len(value)).encode('ascii'), CRLF]
        parts.extend(self._encode_resp(item) for item in value)
        return b''.join(parts)
    raise ValueError('Unsupported type: {0}'.format(type(value)))
"resource": ""
} |
q40044 | Client._eval_expectation | train | def _eval_expectation(command, response, future):
"""Evaluate the response from Redis to see if it matches the expected
response.
:param command: The command that is being evaluated
:type command: tredis.client.Command
:param bytes response: The response value to check
:param future: The future representing the execution of the command
:type future: tornado.concurrent.Future
:return:
"""
if isinstance(command.expectation, int) and command.expectation > 1:
future.set_result(response == command.expectation or response)
else:
future.set_result(response == command.expectation) | python | {
"resource": ""
} |
def _execute(self, parts, expectation=None, format_callback=None):
    """Really execute a redis command

    Builds the RESP command, waits for the connection/cluster to be ready,
    serializes access through the ``_busy`` lock, and resolves the returned
    future with the (optionally checked/formatted) response.

    :param list parts: The list of command parts
    :param mixed expectation: Optional response expectation
    :rtype: :class:`~tornado.concurrent.Future`
    :raises: :exc:`~tredis.exceptions.SubscribedError`
    """
    future = concurrent.TracebackFuture()
    try:
        command = self._build_command(parts)
    except ValueError as error:
        # Invalid command parts: fail the future immediately rather than raise.
        future.set_exception(error)
        return future
    def on_locked(_):
        if self.ready:
            if self._clustering:
                cmd = Command(command, self._pick_cluster_host(parts),
                              expectation, format_callback)
            else:
                LOGGER.debug('Connection: %r', self._connection)
                cmd = Command(command, self._connection, expectation,
                              format_callback)
            LOGGER.debug('_execute(%r, %r, %r) on %s', cmd.command,
                         expectation, format_callback, cmd.connection.name)
            cmd.connection.execute(cmd, future)
        else:
            LOGGER.critical('Lock released & not ready, aborting command')
    # Wait until the cluster is ready, letting cluster discovery through
    if not self.ready and not self._connected.is_set():
        self.io_loop.add_future(
            self._connected.wait(),
            lambda f: self.io_loop.add_future(self._busy.acquire(), on_locked)
        )
    else:
        self.io_loop.add_future(self._busy.acquire(), on_locked)
    # Release the lock when the future is complete
    self.io_loop.add_future(future, lambda r: self._busy.release())
    return future
"resource": ""
} |
def _on_cluster_discovery(self, future):
    """Process the result of the ``CLUSTER NODES`` command, updating
    known connections and creating new ones for unseen nodes.

    :param future: The future containing the response from Redis
    :type future: tornado.concurrent.Future
    """
    LOGGER.debug('_on_cluster_discovery(%r)', future)
    common.maybe_raise_exception(future)
    for node in future.result():
        name = '{}:{}'.format(node.ip, node.port)
        conn = self._cluster.get(name)
        if conn is not None:
            # Known node: refresh its slot assignment and role
            LOGGER.debug('Updating cluster connection info for %s:%s',
                         node.ip, node.port)
            conn.set_slots(node.slots)
            conn.set_read_only('slave' in node.flags)
        else:
            self._create_cluster_connection(node)
    self._discovery = True
"resource": ""
} |
q40047 | Client._on_closed | train | def _on_closed(self):
"""Invoked by connections when they are closed."""
self._connected.clear()
if not self._closing:
if self._on_close_callback:
self._on_close_callback()
else:
raise exceptions.ConnectionError('closed') | python | {
"resource": ""
} |
def _on_cluster_data_moved(self, response, command, future):
    """Handle a ``MOVED`` redirect from a Redis cluster node by
    re-issuing the command on the connection that now owns the slot.

    :param bytes response: The response from the Redis server
    :param command: The command that was being executed
    :type command: tredis.client.Command
    :param future: The execution future
    :type future: tornado.concurrent.Future
    """
    LOGGER.debug('on_cluster_data_moved(%r, %r, %r)', response, command,
                 future)
    # Response looks like "MOVED <slot> <host:port>"
    target = response.split(' ')[2]
    host, port = common.split_connection_host_port(target)
    name = '{}:{}'.format(host, port)
    LOGGER.debug('Moved to %r', name)
    if name not in self._cluster:
        raise exceptions.ConnectionError(
            '{} is not connected'.format(name))
    conn = self._cluster[name]
    conn.execute(command._replace(connection=conn), future)
"resource": ""
} |
def _on_connected(self, future):
    """Invoked when connections have been established. If the client is
    in clustering mode, it will kick off the discovery step if needed. If
    not, it will select the configured database.
    :param future: The connection future
    :type future: tornado.concurrent.Future
    """
    if future.exception():
        # Propagate the connection failure to whoever awaits connect()
        self._connect_future.set_exception(future.exception())
        return
    conn = future.result()
    LOGGER.debug('Connected to %s (%r, %r, %r)', conn.name,
                 self._clustering, self._discovery, self._connected)
    if self._clustering:
        self._cluster[conn.name] = conn
        if not self._discovery:
            # First connection triggers topology discovery
            self.io_loop.add_future(self.cluster_nodes(),
                                    self._on_cluster_discovery)
        elif self.ready:
            # All discovered nodes are connected; signal readiness
            LOGGER.debug('Cluster nodes all connected')
            if not self._connect_future.done():
                self._connect_future.set_result(True)
            self._connected.set()
    else:
        def on_selected(sfuture):
            # Completes the connect future once SELECT <db> has run
            LOGGER.debug('Initial setup and selection processed')
            if sfuture.exception():
                self._connect_future.set_exception(sfuture.exception())
            else:
                self._connect_future.set_result(True)
                self._connected.set()
        select_future = concurrent.Future()
        self.io_loop.add_future(select_future, on_selected)
        self._connection = conn
        # Issue SELECT directly on the connection, bypassing _execute,
        # because the client is not marked connected yet
        cmd = Command(
            self._build_command(['SELECT', str(conn.database)]),
            self._connection, None, None)
        cmd.connection.execute(cmd, select_future)
"resource": ""
} |
def _on_read_only_error(self, command, future):
    """Invoked when a Redis node returns an error indicating it's in
    read-only mode. It will use the ``INFO REPLICATION`` command to
    attempt to find the master server and failover to that, reissuing
    the command to that server.
    :param command: The command that was being executed
    :type command: tredis.client.Command
    :param future: The execution future
    :type future: tornado.concurrent.Future
    """
    failover_future = concurrent.TracebackFuture()
    def on_replication_info(_):
        # Fires once INFO REPLICATION resolves; tears down the read-only
        # connection and reconnects to the reported master
        common.maybe_raise_exception(failover_future)
        LOGGER.debug('Failover closing current read-only connection')
        self._closing = True
        database = self._connection.database
        self._connection.close()
        self._connected.clear()
        self._connect_future = concurrent.Future()
        info = failover_future.result()
        LOGGER.debug('Failover connecting to %s:%s', info['master_host'],
                     info['master_port'])
        self._connection = _Connection(
            info['master_host'], info['master_port'], database, self._read,
            self._on_closed, self.io_loop, self._clustering)
        # When the connection is re-established, re-run the command
        self.io_loop.add_future(
            self._connect_future,
            lambda f: self._connection.execute(
                command._replace(connection=self._connection), future))
        # Use the normal connection processing flow when connecting
        self.io_loop.add_future(self._connection.connect(),
                                self._on_connected)
    if self._clustering:
        # Remember that this node is a replica so it is skipped for writes
        command.connection.set_readonly(True)
    LOGGER.debug('%s is read-only, need to failover to new master',
                 command.connection.name)
    cmd = Command(
        self._build_command(['INFO', 'REPLICATION']), self._connection,
        None, common.format_info_response)
    self.io_loop.add_future(failover_future, on_replication_info)
    cmd.connection.execute(cmd, failover_future)
"resource": ""
} |
def _read(self, command, future):
    """Invoked when a command is executed to read and parse its results.
    It will loop on the IOLoop until the response is complete and then
    set the value of the response in the execution future.
    :param command: The command that was being executed
    :type command: tredis.client.Command
    :param future: The execution future
    :type future: tornado.concurrent.Future
    """
    # hiredis returns False while the buffered data is incomplete
    response = self._reader.gets()
    if response is not False:
        if isinstance(response, hiredis.ReplyError):
            # Cluster redirects and replica errors get special handling;
            # anything else is surfaced as a RedisError
            if response.args[0].startswith('MOVED '):
                self._on_cluster_data_moved(response.args[0], command,
                                            future)
            elif response.args[0].startswith('READONLY '):
                self._on_read_only_error(command, future)
            else:
                future.set_exception(exceptions.RedisError(response))
        elif command.callback is not None:
            future.set_result(command.callback(response))
        elif command.expectation is not None:
            self._eval_expectation(command, response, future)
        else:
            future.set_result(response)
    else:
        def on_data(data):
            # Feed newly received bytes to hiredis and try parsing again
            # LOGGER.debug('Read %r', data)
            self._reader.feed(data)
            self._read(command, future)
        command.connection.read(on_data)
"resource": ""
} |
def _pick_cluster_host(self, value):
    """Select the cluster connection that owns the hash slot of the
    command's key.

    :param mixed value: The value to use when looking for the host
    :rtype: tredis.client._Connection
    """
    # Hash the key (second command part) into the 16384-slot keyspace
    slot = crc16.crc16(self._encode_resp(value[1])) % HASH_SLOTS
    for conn in self._cluster.values():
        if any(span[0] <= slot <= span[1] for span in conn.slots):
            return conn
    # No owner found -- fall back to the lexicographically first node
    LOGGER.debug('Host not found for %r, returning first connection',
                 value)
    return self._cluster[sorted(self._cluster.keys())[0]]
"resource": ""
} |
def parse_lines(stream, separator=None):
    """
    Takes each line of a stream, creating a generator that yields
    tuples of line, row - where row is the line split by separator
    (or by whitespace if separator is None).
    :param stream:
    :param separator: (optional)
    :return: generator
    """
    if separator is not None:
        separator = unicode(separator)
    for raw_line in stream:
        stripped = raw_line.rstrip(u'\r\n')
        row = [interpret_segment(segment)
               for segment in stripped.split(separator)]
        yield stripped, row
"resource": ""
} |
def safe_evaluate(command, glob, local):
    """
    Continue to attempt to execute the given command, importing objects which
    cause a NameError in the command
    :param command: command for eval
    :param glob: globals dict for eval
    :param local: locals dict for eval
    :return: command result

    NOTE: Python 2 only -- relies on ``e.message`` and the statement form
    ``exec ... in glob``.
    SECURITY: ``eval``/``exec`` run arbitrary code; only pass trusted input.
    """
    while True:
        try:
            return eval(command, glob, local)
        except NameError as e:
            # Parse the missing name out of the NameError message
            match = re.match("name '(.*)' is not defined", e.message)
            if not match:
                raise e
            try:
                # Import the missing module into the eval globals and retry
                exec ('import %s' % (match.group(1), )) in glob
            except ImportError:
                raise e
"resource": ""
} |
def int_to_gematria(num, gershayim=True):
    """Convert an integer between 1 and 999 to Hebrew numerals.

    Set the ``gershayim`` flag to False to omit gershayim.
    """
    # 1. Irregular numerals are looked up directly
    specials = specialnumbers['specials']
    if num in specials:
        return _add_gershayim(specials[num]) if gershayim else specials[num]
    # 2. Otherwise compose the numeral digit by digit, most significant first
    digits = str(num)
    pieces = []
    for position, ch in enumerate(digits):
        digit = int(ch)
        if digit == 0:
            continue
        place_value = 10 ** (len(digits) - position - 1)
        pieces.append(specialnumbers['numerals'][place_value * digit])
    numeral = ''.join(pieces)
    # 3. Optionally decorate with gershayim
    return _add_gershayim(numeral) if gershayim else numeral
"resource": ""
} |
def get_urls_from_onetab(onetab):
    """
    Get video urls from a link to the onetab shared page.
    Args:
        onetab (str): Link to a onetab shared page.
    Returns:
        list: List of links to the videos.
    """
    page = requests.get(onetab).text
    soup = BeautifulSoup(page, 'lxml')
    # OneTab marks each link entry with this exact inline style
    entry_style = ('padding-left: 24px; padding-top: 8px; '
                   'position: relative; font-size: 13px;')
    links = []
    for entry in soup.findAll('div', {'style': entry_style}):
        links.append(entry.find('a').attrs['href'])
    return links
"resource": ""
} |
def from_dict(cls, d):
    """Create instances of this class from a pstats dict.

    Each key is a ``(filename, lineno, name)`` triple; each value holds
    the call counts and timings, optionally followed by sub-call stats.
    """
    parsed = []
    for (filename, lineno, name), values in d.iteritems():
        if len(values) == 5:
            ncalls, ncall_nr, total_time, cum_time, subcall_stats = values
        else:
            # Older entries lack the per-caller breakdown
            ncalls, ncall_nr, total_time, cum_time = values
            subcall_stats = None
        parsed.append(cProfileFuncStat(filename, lineno, name, ncalls,
                                       ncall_nr, total_time, cum_time,
                                       subcall_stats))
    return parsed
"resource": ""
} |
def exclude_functions(self, *funcs):
    """
    Excludes the contributions from the given functions and recomputes
    the shared total run time accordingly.
    """
    for func in funcs:
        func.exclude = True
    # Total run time is the sum over stats that are still included
    remaining = sum(s.own_time_s for s in self.stats if not s.exclude)
    cProfileFuncStat.run_time_s = remaining
"resource": ""
} |
def get_top(self, stat, n):
    """Return the n stats with the largest value of attribute *stat*."""
    by_stat = lambda entry: getattr(entry, stat)
    ranked = sorted(self.stats, key=by_stat, reverse=True)
    return ranked[:n]
"resource": ""
} |
def save_pstat(self, path):
    """
    Serialize the non-excluded stats back to a pstats-compatible file.
    """
    merged = {}
    for stat in self.stats:
        if stat.exclude:
            continue
        merged.update(stat.to_dict())
    # pstats files are marshal-serialized dicts
    with open(path, 'wb') as handle:
        marshal.dump(merged, handle)
"resource": ""
} |
def safe_int(value):
    """
    Tries to convert a value to int; returns 0 if conversion failed.

    Raises NegativeDurationError when the converted value is negative.
    """
    # Flattened the original raise-inside-try / catch / re-raise dance:
    # the negative check now happens after a minimal try block.
    try:
        result = int(value)
    except (TypeError, ValueError):
        # Unconvertible values (None, non-numeric strings, ...) map to 0
        return 0
    if result < 0:
        raise NegativeDurationError(
            'Negative values in duration strings are not allowed!'
        )
    return result
"resource": ""
} |
q40062 | _parse | train | def _parse(value, strict=True):
"""
Preliminary duration value parser
strict=True (by default) raises StrictnessError if either hours,
minutes or seconds in duration value exceed allowed values
"""
pattern = r'(?:(?P<hours>\d+):)?(?P<minutes>\d+):(?P<seconds>\d+)'
match = re.match(pattern, value)
if not match:
raise ValueError('Invalid duration value: %s' % value)
hours = safe_int(match.group('hours'))
minutes = safe_int(match.group('minutes'))
seconds = safe_int(match.group('seconds'))
check_tuple((hours, minutes, seconds,), strict)
return (hours, minutes, seconds,) | python | {
"resource": ""
} |
def to_seconds(value, strict=True, force_int=True):
    """
    Convert a duration value to seconds.

    strict=True (by default) raises StrictnessError if either hours,
    minutes or seconds in duration value exceed allowed values.
    With force_int=True timedelta inputs are rounded to whole seconds.
    """
    if isinstance(value, int):
        return value  # assuming it's seconds
    if isinstance(value, timedelta):
        total = value.total_seconds()
        return int(round(total)) if force_int else total
    if isinstance(value, str):
        hours, minutes, seconds = _parse(value, strict)
    elif isinstance(value, tuple):
        check_tuple(value, strict)
        hours, minutes, seconds = value
    else:
        raise TypeError(
            'Value %s (type %s) not supported' % (
                value, type(value).__name__
            )
        )
    if not (hours or minutes or seconds):
        raise ValueError('No hours, minutes or seconds found')
    return hours * 3600 + minutes * 60 + seconds
"resource": ""
} |
def to_timedelta(value, strict=True):
    """
    Convert a duration value to a timedelta.

    strict=True (by default) raises StrictnessError if either hours,
    minutes or seconds in duration string exceed allowed values.
    """
    if isinstance(value, int):
        return timedelta(seconds=value)  # assuming it's seconds
    if isinstance(value, timedelta):
        return value
    if isinstance(value, str):
        hours, minutes, seconds = _parse(value, strict)
    elif isinstance(value, tuple):
        check_tuple(value, strict)
        hours, minutes, seconds = value
    else:
        raise TypeError(
            'Value %s (type %s) not supported' % (
                value, type(value).__name__
            )
        )
    return timedelta(hours=hours, minutes=minutes, seconds=seconds)
"resource": ""
} |
def to_tuple(value, strict=True, force_int=True):
    """
    converts duration value to tuple of integers
    strict=True (by default) raises StrictnessError if either hours,
    minutes or seconds in duration value exceed allowed values

    Raises TypeError for unsupported input types, consistent with
    to_seconds and to_timedelta.
    """
    if isinstance(value, int):
        seconds = value
        minutes, seconds = divmod(seconds, 60)
        hours, minutes = divmod(minutes, 60)
    elif isinstance(value, str):
        hours, minutes, seconds = _fix_tuple(
            _parse(value, strict)
        )
    elif isinstance(value, tuple):
        check_tuple(value, strict)
        hours, minutes, seconds = _fix_tuple(value)
    elif isinstance(value, timedelta):
        seconds = value.total_seconds()
        if force_int:
            seconds = int(round(seconds))
        minutes, seconds = divmod(seconds, 60)
        hours, minutes = divmod(minutes, 60)
    else:
        # Previously an unsupported type fell through to an
        # UnboundLocalError at the return; raise the same TypeError
        # as the sibling converters instead.
        raise TypeError(
            'Value %s (type %s) not supported' % (
                value, type(value).__name__
            )
        )
    return (hours, minutes, seconds,)
"resource": ""
} |
def name_url(provider, cloud, method_name):
    """
    Get a URL for a method in a driver.

    Returns a ``(http_method, uri)`` tuple, or False when the name has
    no underscore-separated verb to map.
    """
    snake_parts = method_name.split('_')
    if len(snake_parts) <= 1:
        return False
    # Convention for libcloud is ex_ are extended methods
    if snake_parts[0] == 'ex':
        extra = True
        method_name = method_name.replace('ex_', '', 1)
    else:
        extra = False
    snake_parts = method_name.split('_')
    # Try to semantically match the method name to a REST action
    if snake_parts[0] in get_sem_verbs:
        method = 'GET'
        # Strip the first occurrence of every GET-style verb prefix
        for verb in get_sem_verbs:
            method_name = method_name.replace('%s_' % verb, '', 1)
    elif snake_parts[0] in delete_sem_verbs:
        # NOTE(review): unlike the GET branch, the verb is NOT stripped
        # from the URI for DELETE/PUT -- confirm this is intentional
        method = 'DELETE'
    elif snake_parts[0] in put_sem_verbs:
        method = 'PUT'
    else:
        method = 'POST'
    uri = '/%s/%s/%s%s' % (provider,
                           cloud,
                           'extensions/' if extra else '',
                           method_name)
    return (method, uri)
"resource": ""
} |
def contains_frame(data):
    """
    Read the frame length from the start of `data` and check if the data is
    long enough to contain the entire frame.

    NOTE: Python 2 only -- ``data[1]`` must be a one-character str for
    struct.unpack; on Python 3 bytes indexing yields an int.
    """
    if len(data) < 2:
        return False
    # Second header byte: MASK bit (0x80) plus 7-bit payload length
    b2 = struct.unpack('!B', data[1])[0]
    payload_len = b2 & 0x7F
    payload_start = 2
    if payload_len == 126:
        # Extended 16-bit length follows the two header bytes
        if len(data) > 4:
            payload_len = struct.unpack('!H', data[2:4])[0]
            payload_start = 4
    elif payload_len == 127:
        # NOTE(review): RFC 6455 puts the 64-bit length at bytes 2..10
        # (payload_start 10); this reads bytes 4..12 -- confirm it matches
        # this library's own frame writer.
        if len(data) > 12:
            payload_len = struct.unpack('!Q', data[4:12])[0]
            payload_start = 12
    return len(data) >= payload_len + payload_start
"resource": ""
} |
def unpack_close(self):
    """
    Unpack a close message into a status code and a reason. If no payload
    is given, the code is None and the reason is an empty string.
    """
    if not self.payload:
        return None, ''
    # First two payload bytes carry the big-endian status code
    code = struct.unpack('!H', str(self.payload[:2]))[0]
    reason = str(self.payload[2:])
    return code, reason
"resource": ""
} |
def readn(self, n):
    """
    Keep receiving data until exactly `n` bytes have been read.
    """
    chunks = []
    total = 0
    while total < n:
        chunk = self.sock.recv(n - total)
        if not chunk:
            # Peer closed before delivering the requested amount
            raise socket.error('no data read from socket')
        chunks.append(chunk)
        total += len(chunk)
    return ''.join(chunks)
"resource": ""
} |
def quality_comparator(video_data):
    """Custom comparator used to choose the right format based on the resolution."""
    # "1920x1080" -> (1920, 1080); tuples compare lexicographically,
    # so larger resolutions sort higher
    width_and_height = video_data['resolution'].split('x')
    return tuple(int(dimension) for dimension in width_and_height)
"resource": ""
} |
def read_noise_curve(noise_curve, noise_type_in='ASD', noise_type_out='ASD',
                     add_wd_noise=False, wd_noise='HB_wd_noise', wd_noise_type_in='ASD'):
    """Simple auxillary function that can read noise curves in.
    This function can read in noise curves from a provided file or those that are preinstalled
    with this installation. All pre-installed noise curves are in the form of
    an amplitude spectral density. Information on each one is found in each specific file.
    These are located in the `noise_curves` folder.
    Pre-installed really just means in the noise_curves folder. Therefore, curves can be added
    and called with only a string.
    Arguments:
        noise_curve (str): Either a file path to a noise curve
            or a str represented pre-loaded sensitivity curve. If using pre-loaded curve,
            choices are LPA (LISA Phase A), PL (Proposed LISA), CL (Classic LISA),
            CLLF (Classic LISA Low Frequency), PLCS (Proposed LISA Constant Slope),
            or PLHB (Proposed LISA Higher Break).
            See the arXiv paper above for the meaning behind each choice and a plot with each curve.
        noise_type_in/noise_type_out (str, optional): Type of noise input/output.
            Choices are `ASD`, `PSD`, or `char_strain`. Default for both is `ASD`.
        add_wd_noise (bool, optional): If True, include wd noise.
        wd_noise (str, optional): File path to wd background noise or string representing
            those in the noise curves folder. Default is the Hiscock et al 2000 approximation
            of the Hils & Bender 1997 white dwarf background (`HB_wd_noise`).
        wd_noise_type_in (str, optional): Type of wd noise input.
            The output will be the same as ``noise_type_out``.
            Choices are `ASD`, `PSD`, or `char_strain`. Default for both is `ASD`.
    Returns:
        (tuple of arrays): Frequency and amplitude arrays of type ``noise_type_out``.
    """
    possible_noise_types = ['ASD', 'PSD', 'char_strain']
    if noise_type_in not in possible_noise_types:
        raise ValueError('noise_type_in must be either ASD, PSD, or char_strain.')
    if noise_type_out not in possible_noise_types:
        raise ValueError('noise_type_out must be either ASD, PSD, or char_strain.')
    # find the noise curve file
    if noise_curve[-4:] == '.txt':
        noise = ascii.read(noise_curve)
    else:
        cfd = os.path.dirname(os.path.abspath(__file__))
        noise = ascii.read(cfd + '/noise_curves/' + noise_curve + '.txt')
    # read it in
    f_n = np.asarray(noise['f'])
    amp_n = np.asarray(noise[noise_type_in])
    # conversion helpers are module-level functions named e.g. asd_to_psd
    if noise_type_in != noise_type_out:
        amp_n = globals()[noise_type_in.lower() + '_to_' + noise_type_out.lower()](f_n, amp_n)
    # add wd_noise if true
    if add_wd_noise:
        if wd_noise_type_in not in possible_noise_types:
            raise ValueError('wd_noise_type_in must be either ASD, PSD, or char_strain.')
        if wd_noise[-4:] == '.txt':
            wd_data = ascii.read(wd_noise)
        else:
            cfd = os.path.dirname(os.path.abspath(__file__))
            wd_data = ascii.read(cfd + '/noise_curves/' + wd_noise + '.txt')
        f_n_wd = np.asarray(wd_data['f'])
        amp_n_wd = np.asarray(wd_data[wd_noise_type_in])
        if wd_noise_type_in != noise_type_out:
            # BUGFIX: the converter must be keyed on wd_noise_type_in (the
            # type of the wd data being converted), not noise_type_in --
            # otherwise a wd curve of a different input type is converted
            # with the wrong function.
            amp_n_wd = globals()[wd_noise_type_in.lower()
                                 + '_to_' + noise_type_out.lower()](f_n_wd, amp_n_wd)
        f_n, amp_n = combine_with_wd_noise(f_n, amp_n, f_n_wd, amp_n_wd)
    return f_n, amp_n
"resource": ""
} |
def combine_with_wd_noise(f_n, amp_n, f_n_wd, amp_n_wd):
    """Combine noise with white dwarf background noise.

    At every frequency of the instrument noise curve, keep whichever of
    the instrument noise and the (interpolated) wd background is larger.

    Args:
        f_n (float array): Frequencies of noise curve.
        amp_n (float array): Amplitude values of noise curve.
        f_n_wd (float array): Frequencies of wd noise.
        amp_n_wd (float array): Amplitude values of wd noise.
    Returns:
        (tuple of float arrays): Frequencies and combined amplitudes.
    """
    # resample the wd background onto the noise-curve frequency grid;
    # outside its range the background contributes effectively nothing
    wd_interpolant = interpolate.interp1d(f_n_wd, amp_n_wd,
                                          bounds_error=False,
                                          fill_value=1e-30)
    wd_on_grid = wd_interpolant(f_n)
    combined = np.where(amp_n >= wd_on_grid, amp_n, wd_on_grid)
    return f_n, combined
"resource": ""
} |
def show_available_noise_curves(return_curves=True, print_curves=False):
    """List available sensitivity curves.

    This function lists the available sensitivity curve strings in the
    noise_curves folder.

    Args:
        return_curves (bool, optional): If True, return a list of curve options.
        print_curves (bool, optional): If True, print each curve option.
    Returns:
        (optional list of str): List of curve options.
    Raises:
        ValueError: Both args are False.
    """
    # identity checks preserved: only literal False/False raises
    if return_curves is False and print_curves is False:
        raise ValueError("Both return curves and print_curves are False."
                         + " You will not see the options")
    here = os.path.dirname(os.path.abspath(__file__))
    available = [entry.split('.')[0]
                 for entry in os.listdir(here + '/noise_curves/')]
    if print_curves:
        for curve in available:
            print(curve)
    if return_curves:
        return available
    return
"resource": ""
} |
q40074 | _split_out_of_braces | train | def _split_out_of_braces(s):
"""Generator to split comma seperated string, but not split commas inside
curly braces.
>>> list(_split_out_of_braces("py{26, 27}-django{15, 16}, py32"))
>>>['py{26, 27}-django{15, 16}, py32']
"""
prev = 0
for m in re.finditer(r"{[^{}]*}|\s*,\s*", s):
if not m.group().startswith("{"):
part = s[prev:m.start()]
if part:
yield s[prev:m.start()]
prev = m.end()
part = s[prev:]
if part:
yield part | python | {
"resource": ""
} |
def expand_factor_conditions(s, env):
    """If env matches the expanded factor then return the value, else ''.

    Strings without exactly one ``factor: value`` separator are returned
    unchanged.

    Example
    -------
    >>> s = 'py{33,34}: docformatter'
    >>> expand_factor_conditions(s, Env(name="py34", ...))
    "docformatter"
    >>> expand_factor_conditions(s, Env(name="py26", ...))
    ""
    """
    pieces = re.split(r'\s*\:\s*', s)
    if len(pieces) != 2:
        # No (or ambiguous) factor prefix -- not a conditional line
        return s
    factor, value = pieces
    return value if matches_factor_conditions(factor, env) else ''
"resource": ""
} |
def replace_braces(s, env):
    """Apply tox substitutions to ``s`` with respect to environment env.

    Example
    -------
    >>> replace_braces("echo {posargs:{env:USER:} passed no posargs}")
    "echo andy passed no posargs"

    Substitution is applied repeatedly (DEPTH passes) so that nested
    braces resolve from the inside out.
    """
    for _ in range(DEPTH):
        s = re.sub(r"{[^{}]*}", lambda m: _replace_match(m, env), s)
    return s
"resource": ""
} |
q40077 | _replace_match | train | def _replace_match(m, env):
"""Given a match object, having matched something inside curly braces,
replace the contents if matches one of the supported tox-substitutions."""
# ditch the curly braces
s = m.group()[1:-1].strip()
try:
# get the env attributes e.g. envpython or toxinidir.
# Note: if you ask for a env methodname this will raise
# later on... so don't do that.
return getattr(env, s)
except AttributeError:
pass
for r in [_replace_envvar, _replace_config, _replace_posargs]:
try:
return r(s, env)
except ValueError:
pass
raise NotImplementedError("{%s} not understood in tox.ini file." % s) | python | {
"resource": ""
} |
def csnr(freqs, hc, hn, fmrg, fpeak, prefactor=1.0):
    """Calculate the SNR of a frequency domain waveform.
    SNRCalculation is a function that takes waveforms (frequencies and hcs)
    and a noise curve, and returns SNRs for all binary phases and the whole waveform.
    Arguments:
        freqs (1D or 2D array of floats): Frequencies corresponding to the waveforms.
            Shape is (num binaries, num_points) if 2D.
            Shape is (num_points,) if 1D for one binary.
        hc (1D or 2D array of floats): Characteristic strain of the waveforms.
            Shape is (num binaries, num_points) if 2D.
            Shape is (num_points,) if 1D for one binary.
        fmrg: (scalar float or 1D array of floats): Merger frequency of each binary separating
            inspiral from merger phase. (0.014/M) Shape is (num binaries,)
            if more than one binary.
        fpeak: (scalar float or 1D array of floats): Peak frequency of each binary separating
            merger from ringdown phase. (0.014/M) Shape is (num binaries,)
            if more than one binary.
        hn: (1D or 2D array of floats): Characteristic strain of the noise.
            Shape is (num binaries, num_points) if 2D.
            Shape is (num_points,) if 1D for one binary.
        prefactor (float, optional): Factor to multiply snr (not snr^2) integral values by.
            Default is 1.0.
    Returns:
        (dict): Dictionary with SNRs from each phase.
    """
    # Locate the compiled PhenomD shared library shipped next to this module
    cfd = os.path.dirname(os.path.abspath(__file__))
    if 'phenomd.cpython-35m-darwin.so' in os.listdir(cfd):
        exec_call = cfd + '/phenomd.cpython-35m-darwin.so'
    else:
        exec_call = cfd + '/phenomd/phenomd.so'
    c_obj = ctypes.CDLL(exec_call)
    # check dimensionality
    remove_axis = False
    try:
        len(fmrg)
    except TypeError:
        # scalar inputs: promote everything to 2D (one binary)
        remove_axis = True
        freqs, hc = np.array([freqs]), np.array([hc])
        hn, fmrg, fpeak = np.array([hn]), np.array([fmrg]), np.array([fpeak])
    # this implementation in ctypes works with 1D arrays
    freqs_in = freqs.flatten()
    hc_in = hc.flatten()
    hn_in = hn.flatten()
    num_binaries, length_of_signal = hc.shape
    # prepare output arrays (C double buffers filled by the library)
    snr_cast = ctypes.c_double*num_binaries
    snr_all = snr_cast()
    snr_ins = snr_cast()
    snr_mrg = snr_cast()
    snr_rd = snr_cast()
    # find SNR values
    c_obj.SNR_function(ctypes.byref(snr_all), ctypes.byref(snr_ins),
                       ctypes.byref(snr_mrg), ctypes.byref(snr_rd),
                       freqs_in.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
                       hc_in.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
                       hn_in.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
                       fmrg.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
                       fpeak.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
                       ctypes.c_int(length_of_signal), ctypes.c_int(num_binaries))
    # make into numpy arrays
    snr_all, snr_ins, = np.ctypeslib.as_array(snr_all), np.ctypeslib.as_array(snr_ins)
    snr_mrg, snr_rd = np.ctypeslib.as_array(snr_mrg), np.ctypeslib.as_array(snr_rd)
    # remove axis if one binary
    if remove_axis:
        snr_all, snr_ins, snr_mrg, snr_rd = snr_all[0], snr_ins[0], snr_mrg[0], snr_rd[0]
    # prepare output by multiplying by prefactor
    return ({'all': snr_all*prefactor, 'ins': snr_ins*prefactor,
             'mrg': snr_mrg*prefactor, 'rd': snr_rd*prefactor})
"resource": ""
} |
def Areml_eigh(self):
    """Compute the eigenvalue decomposition of Areml, discarding
    numerically-zero eigenvalues (<= 1e-10) and their eigenvectors."""
    evals, evecs = LA.eigh(self.Areml(), lower=True)
    keep = evals > 1e-10
    return evals[keep], evecs[:, keep]
"resource": ""
} |
def getGradient(self, j):
    """Return the rotated gradient for fixed effect ``j``.

    The indicator table maps the flat index j onto a (term, row, col)
    triple; the gradient is the negated Kronecker product of the
    selected Fstar column and Astar row of that term.
    """
    term = int(self.indicator['term'][j])
    row = int(self.indicator['row'][j])
    col = int(self.indicator['col'][j])
    return -np.kron(self.Fstar()[term][:, [row]],
                    self.Astar()[term][[col], :])
"resource": ""
} |
def XstarT_dot(self, M):
    """Return the dot product of Xstar transposed and M.

    :param M: matrix (or vector) to multiply
    """
    # TODO: implement a structured/efficient version; until then use the
    # dense fallback. (Removed the dead `if 0:` scaffold that guarded
    # the unimplemented path.)
    return np.dot(self.Xstar().T, M)
"resource": ""
} |
def getResiduals(self):
    """Regress out the fixed effects and return the residuals."""
    # Assemble the full fixed-effects design matrix term by term:
    # each term contributes kron(A_i^T, F_i) as a column block
    X = np.zeros((self.N * self.P, self.n_fixed_effs))
    col = 0
    for term in range(self.n_terms):
        width = self.A[term].shape[0] * self.F[term].shape[1]
        X[:, col:col + width] = np.kron(self.A[term].T, self.F[term])
        col += width
    # Vectorize Y column-major, regress, and restore the original shape
    y = np.reshape(self.Y, (self.Y.size, 1), order='F')
    residuals = regressOut(y, X)
    return np.reshape(residuals, self.Y.shape, order='F')
"resource": ""
} |
def _set_toChange(x):
    """ set variables in list x toChange """
    # NOTE(review): `self` is a free variable here -- this reads like a
    # nested helper lifted out of an enclosing method; it only works when
    # `self` is in scope at the definition site. Confirm against context.
    for key in list(x.keys()):
        self.toChange[key] = True
"resource": ""
} |
def bread(stream):
    """ Decode a file or stream to an object.

    ``stream`` may be a file-like object (anything with ``read``) or a
    path that is opened in binary mode.
    """
    if hasattr(stream, "read"):
        return bdecode(stream.read())
    # `with` replaces the manual try/finally close
    with open(stream, "rb") as handle:
        return bdecode(handle.read())
"resource": ""
} |
def bwrite(stream, obj):
    """ Encode a given object to a file or stream.

    ``stream`` may be a file-like object (anything with ``write``) or a
    path that is opened in binary mode.
    """
    if hasattr(stream, "write"):
        stream.write(bencode(obj))
    else:
        # `with` replaces the manual try/finally close
        with open(stream, "wb") as handle:
            handle.write(bencode(obj))
"resource": ""
} |
def encode(self, obj):
    """ Add the given object to the result.

    Appends the bencoded form of ``obj`` to ``self.result`` and returns
    the accumulated result list. Branch order matters: ints before
    strings, and the ``__bencode__`` hook before dict/iterable handling.
    """
    if isinstance(obj, int_like_types):
        # Integer: i<value>e
        self.result.append("i%de" % obj)
    elif isinstance(obj, string_types):
        # String: <len>:<bytes>
        self.result.extend([str(len(obj)), ':', str(obj)])
    elif hasattr(obj, "__bencode__"):
        # Custom objects provide their own bencodable representation
        self.encode(obj.__bencode__())
    elif hasattr(obj, "items"):
        # Dictionary
        self.result.append('d')
        # keys are emitted in sorted order per the bencode format
        for key, val in sorted(obj.items()):
            key = str(key)
            self.result.extend([str(len(key)), ':', key])
            self.encode(val)
        self.result.append('e')
    else:
        # Treat as iterable
        try:
            items = iter(obj)
        except TypeError as exc:
            raise BencodeError("Unsupported non-iterable object %r of type %s (%s)" % (
                obj, type(obj), exc
            ))
        else:
            # List: l<elements>e
            self.result.append('l')
            for item in items:
                self.encode(item)
            self.result.append('e')
    return self.result
"resource": ""
} |
def calc_delta_c(c200):
    """Calculate characteristic overdensity from concentration.

    Parameters
    ----------
    c200 : ndarray or float
        Cluster concentration parameter.

    Returns
    ----------
    ndarray or float
        Cluster characteristic overdensity, of same type as c200.
    """
    numerator = (200. / 3.) * c200**3.
    denominator = np.log(1. + c200) - (c200 / (1. + c200))
    return numerator / denominator
"resource": ""
} |
def show(self, notebook=notebook_display):
    """Display cluster properties and scaling relation parameters.

    ``notebook`` defaults to the module-level notebook_display flag.
    """
    print("\nCluster Ensemble:")
    # Identity checks are deliberate: a non-boolean value skips both
    # display branches but still prints the parameters below.
    if notebook is True:
        display(self._df)
    elif notebook is False:
        print(self._df)
    self.massrich_parameters()
"resource": ""
} |
def calc_nfw(self, rbins, offsets=None, numTh=200, numRoff=200,
             numRinner=20, factorRouter=3):
    """Calculates Sigma and DeltaSigma profiles.
    Generates the surface mass density (sigma_nfw attribute of parent
    object) and differential surface mass density (deltasigma_nfw
    attribute of parent object) profiles of each cluster, assuming a
    spherical NFW model. Optionally includes the effect of cluster
    miscentering offsets.
    Parameters
    ----------
    rbins : array_like
        Radial bins (in Mpc) for calculating cluster profiles. Should
        be 1D, optionally with astropy.units of Mpc.
    offsets : array_like, optional
        Parameter describing the width (in Mpc) of the Gaussian
        distribution of miscentering offsets. Should be 1D, optionally
        with astropy.units of Mpc.
    Other Parameters
    -------------------
    numTh : int, optional
        Parameter to pass to SurfaceMassDensity(). Number of bins to
        use for integration over theta, for calculating offset profiles
        (no effect for offsets=None). Default 200.
    numRoff : int, optional
        Parameter to pass to SurfaceMassDensity(). Number of bins to
        use for integration over R_off, for calculating offset profiles
        (no effect for offsets=None). Default 200.
    numRinner : int, optional
        Parameter to pass to SurfaceMassDensity(). Number of bins at
        r < min(rbins) to use for integration over Sigma(<r), for
        calculating DeltaSigma (no effect for Sigma ever, and no effect
        for DeltaSigma if offsets=None). Default 20.
    factorRouter : int, optional
        Parameter to pass to SurfaceMassDensity(). Factor increase over
        number of rbins, at min(r) < r < max(r), of bins that will be
        used at for integration over Sigma(<r), for calculating
        DeltaSigma (no effect for Sigma, and no effect for DeltaSigma
        if offsets=None). Default 3.
    """
    # No offsets -> zero miscentering width for every cluster
    if offsets is None:
        self._sigoffset = np.zeros(self.number) * units.Mpc
    else:
        self._sigoffset = utils.check_units_and_type(offsets, units.Mpc,
                                                     num=self.number)
    self.rbins = utils.check_units_and_type(rbins, units.Mpc)
    # critical density in the units expected by SurfaceMassDensity
    rhoc = self._rho_crit.to(units.Msun / units.pc**2 / units.Mpc)
    smd = SurfaceMassDensity(self.rs, self.delta_c, rhoc,
                             offsets=self._sigoffset,
                             rbins=self.rbins,
                             numTh=numTh,
                             numRoff=numRoff,
                             numRinner=numRinner,
                             factorRouter=factorRouter)
    self.sigma_nfw = smd.sigma_nfw()
    self.deltasigma_nfw = smd.deltasigma_nfw()
"resource": ""
} |
def delaunay_graph(X, weighted=False):
    '''Build the Delaunay triangulation graph of the points in X.'''
    src, dst = _delaunay_edges(X)
    edge_pairs = np.column_stack((src, dst))
    # Edge weights are Euclidean distances between endpoints, or None
    # for an unweighted graph.
    edge_weights = None
    if weighted:
        edge_weights = paired_distances(X[src], X[dst])
    return Graph.from_edge_pairs(edge_pairs, num_vertices=X.shape[0],
                                 symmetric=True, weights=edge_weights)
def hdf5_read_out(self):
    """Read out an hdf5 file.

    Takes the output of :class:`gwsnrcalc.genconutils.genprocess.GenProcess`
    and reads it out to an HDF5 file.
    """
    with h5py.File(self.WORKING_DIRECTORY + '/' + self.output_file_name, 'w') as f:

        header = f.create_group('header')
        header.attrs['Title'] = 'Generated SNR Out'
        header.attrs['Author'] = 'Generator by: Michael Katz'
        header.attrs['Date/Time'] = str(datetime.datetime.now())

        # Record the names and grid sizes of the two swept axes.
        for which in ['x', 'y']:
            header.attrs[which + 'val_name'] = getattr(self, which + 'val_name')
            header.attrs['num_' + which + '_pts'] = getattr(self, 'num_' + which)

        # Eccentric runs carry a different set of per-run metadata.
        ecc = 'eccentricity' in self.__dict__
        if ecc:
            # FIX: a missing comma previously fused 'start_separation' and
            # 'eccentricity' into one bogus name, so neither was written out.
            name_list = ['observation_time', 'start_frequency',
                         'start_separation', 'eccentricity']
        else:
            name_list = ['spin_1', 'spin_2', 'spin', 'end_time']

        name_list += ['total_mass', 'mass_ratio', 'start_time', 'luminosity_distance',
                      'comoving_distance', 'redshift']

        for name in name_list:
            # The x/y axis quantities are stored as datasets, not header attrs.
            if name != self.xval_name and name != self.yval_name:
                try:
                    header.attrs[name] = getattr(self, name)
                except AttributeError:
                    # Quantity not set for this run; skip it silently.
                    pass

        if self.added_note != '':
            header.attrs['Added note'] = self.added_note

        data = f.create_group('data')

        # read out x,y values in compressed data set
        data.create_dataset(self.x_col_name, data=self.xvals,
                            dtype='float64', chunks=True,
                            compression='gzip', compression_opts=9)

        data.create_dataset(self.y_col_name, data=self.yvals,
                            dtype='float64', chunks=True,
                            compression='gzip', compression_opts=9)

        # read out all datasets
        for key in self.output_dict.keys():
            data.create_dataset(key, data=self.output_dict[key],
                                dtype='float64', chunks=True,
                                compression='gzip', compression_opts=9)
def txt_read_out(self):
    """Read out txt file.

    Takes the output of :class:`gwsnrcalc.genconutils.genprocess.GenProcess`
    and reads it out to a txt file.
    """
    header = '#Generated SNR Out\n'
    header += '#Generator by: Michael Katz\n'
    header += '#Date/Time: {}\n'.format(datetime.datetime.now())

    # Record the names and grid sizes of the two swept axes.
    for which in ['x', 'y']:
        header += '#' + which + 'val_name: {}\n'.format(getattr(self, which + 'val_name'))
        header += '#num_' + which + '_pts: {}\n'.format(getattr(self, 'num_' + which))

    # Eccentric runs carry a different set of per-run metadata.
    ecc = 'eccentricity' in self.__dict__
    if ecc:
        # FIX: a missing comma previously fused 'start_separation' and
        # 'eccentricity' into one bogus name, so neither was written out.
        name_list = ['observation_time', 'start_frequency',
                     'start_separation', 'eccentricity']
    else:
        name_list = ['spin_1', 'spin_2', 'spin', 'end_time']

    name_list += ['total_mass', 'mass_ratio', 'start_time', 'luminosity_distance',
                  'comoving_distance', 'redshift']

    for name in name_list:
        # The x/y axis quantities appear as data columns, not header entries.
        if name != self.xval_name and name != self.yval_name:
            try:
                header += '#{}: {}\n'.format(name, getattr(self, name))
            except AttributeError:
                # Quantity not set for this run; skip it silently.
                pass

    if self.added_note != '':
        header += '#Added note: ' + self.added_note + '\n'
    else:
        header += '#Added note: None\n'

    header += '#--------------------\n'

    header += self.x_col_name + '\t'
    header += self.y_col_name + '\t'

    for key in self.output_dict.keys():
        header += key + '\t'

    # read out x,y and the data
    x_and_y = np.asarray([self.xvals, self.yvals])
    snr_out = np.asarray([self.output_dict[key] for key in self.output_dict.keys()]).T

    data_out = np.concatenate([x_and_y.T, snr_out], axis=1)

    np.savetxt(self.WORKING_DIRECTORY + '/' + self.output_file_name,
               data_out, delimiter='\t', header=header, comments='')
    return
def trigger_function_installed(connection: connection):
    """Test whether or not the psycopg2-pgevents trigger function is installed.

    Parameters
    ----------
    connection: psycopg2.extensions.connection
        Active connection to a PostGreSQL database.

    Returns
    -------
    bool
        True if the trigger function is installed, otherwise False.

    """
    installed = False

    log('Checking if trigger function installed...', logger_name=_LOGGER_NAME)

    try:
        execute(connection, "SELECT pg_get_functiondef('public.psycopg2_pgevents_create_event'::regproc);")
        installed = True
    except ProgrammingError as e:
        # Guard clauses: anything other than a "does not exist" error is
        # unexpected and gets re-raised.
        if not e.args:
            raise e
        first_error_line = e.args[0].splitlines()[0]
        if not first_error_line.endswith('does not exist'):
            raise e
        # Trigger function not installed; fall through with installed == False.

    log('...{}installed'.format('' if installed else 'NOT '), logger_name=_LOGGER_NAME)

    return installed
def trigger_installed(connection: connection, table: str, schema: str='public'):
    """Test whether or not a psycopg2-pgevents trigger is installed for a table.

    Parameters
    ----------
    connection: psycopg2.extensions.connection
        Active connection to a PostGreSQL database.
    table: str
        Table whose trigger-existence will be checked.
    schema: str
        Schema to which the table belongs.

    Returns
    -------
    bool
        True if the trigger is installed, otherwise False.

    """
    log('Checking if {}.{} trigger installed...'.format(schema, table), logger_name=_LOGGER_NAME)

    statement = SELECT_TRIGGER_STATEMENT.format(
        table=table,
        schema=schema
    )

    # Any returned row means the trigger exists.
    installed = bool(execute(connection, statement))

    log('...{}installed'.format('' if installed else 'NOT '), logger_name=_LOGGER_NAME)

    return installed
def install_trigger_function(connection: connection, overwrite: bool=False) -> None:
    """Install the psycopg2-pgevents trigger function against the database.

    Parameters
    ----------
    connection: psycopg2.extensions.connection
        Active connection to a PostGreSQL database.
    overwrite: bool
        Whether or not to overwrite existing installation of psycopg2-pgevents
        trigger function, if existing installation is found.

    Returns
    -------
    None

    """
    # When overwriting, skip the existence check entirely.
    already_installed = False if overwrite else trigger_function_installed(connection)

    if already_installed:
        log('Trigger function already installed; skipping...', logger_name=_LOGGER_NAME)
    else:
        log('Installing trigger function...', logger_name=_LOGGER_NAME)
        execute(connection, INSTALL_TRIGGER_FUNCTION_STATEMENT)
def uninstall_trigger_function(connection: connection, force: bool=False) -> None:
    """Uninstall the psycopg2-pgevents trigger function from the database.

    Parameters
    ----------
    connection: psycopg2.extensions.connection
        Active connection to a PostGreSQL database.
    force: bool
        If True, force the un-registration even if dependent triggers are still
        installed. If False, if there are any dependent triggers for the trigger
        function, the un-registration will fail.

    Returns
    -------
    None

    """
    # CASCADE drops dependent triggers along with the function.
    modifier = 'CASCADE' if force else ''

    log('Uninstalling trigger function (cascade={})...'.format(force), logger_name=_LOGGER_NAME)

    execute(connection, UNINSTALL_TRIGGER_FUNCTION_STATEMENT.format(modifier=modifier))
def install_trigger(connection: connection, table: str, schema: str='public', overwrite: bool=False) -> None:
    """Install a psycopg2-pgevents trigger against a table.

    Parameters
    ----------
    connection: psycopg2.extensions.connection
        Active connection to a PostGreSQL database.
    table: str
        Table for which the trigger should be installed.
    schema: str
        Schema to which the table belongs.
    overwrite: bool
        Whether or not to overwrite existing installation of trigger for the
        given table, if existing installation is found.

    Returns
    -------
    None

    """
    # When overwriting, skip the existence check entirely.
    already_installed = False if overwrite else trigger_installed(connection, table, schema)

    if already_installed:
        log('{}.{} trigger already installed; skipping...'.format(schema, table), logger_name=_LOGGER_NAME)
    else:
        log('Installing {}.{} trigger...'.format(schema, table), logger_name=_LOGGER_NAME)
        execute(connection, INSTALL_TRIGGER_STATEMENT.format(schema=schema, table=table))
def uninstall_trigger(connection: connection, table: str, schema: str='public') -> None:
    """Uninstall a psycopg2-pgevents trigger from a table.

    Parameters
    ----------
    connection: psycopg2.extensions.connection
        Active connection to a PostGreSQL database.
    table: str
        Table for which the trigger should be uninstalled.
    schema: str
        Schema to which the table belongs.

    Returns
    -------
    None

    """
    log('Uninstalling {}.{} trigger...'.format(schema, table), logger_name=_LOGGER_NAME)

    execute(connection, UNINSTALL_TRIGGER_STATEMENT.format(schema=schema, table=table))
def absolutify(url):
    """Takes a URL and prepends the SITE_URL"""
    site_url = getattr(settings, 'SITE_URL', False)
    # If we don't define it explicitly
    if not site_url:
        protocol = settings.PROTOCOL
        hostname = settings.DOMAIN
        port = settings.PORT
        if (protocol, port) in (('https://', 443), ('http://', 80)):
            # Default port for the scheme: leave it out of the URL.
            site_url = '{}{}'.format(protocol, hostname)
        else:
            site_url = '{}{}:{}'.format(protocol, hostname, port)
    return site_url + url
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.