Search is not available for this dataset
text stringlengths 75 104k |
|---|
def _string_width(self, s):
    """Return the rendered width of *s* in the current font.

    Per-character widths come from ``self.character_widths`` (expressed in
    1/1000ths of the font size), scaled by ``self.font_size``.
    """
    text = str(s)
    total = sum(self.character_widths[ch] for ch in text)
    return total * self.font_size / 1000.0
def getCellVertexes(self, i, j):
    """Return the six (rotated) vertex coordinates of hexagonal cell (i, j).

    Edge coordinates of an hexagon centered in (x,y) having a side of d:
        [x-d/2, y+sqrt(3)*d/2]  [x+d/2, y+sqrt(3)*d/2]
      [x-d, y]                             [x+d, y]
        [x-d/2, y-sqrt(3)*d/2]  [x+d/2, y-sqrt(3)*d/2]
    """
    # Using unrotated centroid coordinates to avoid an extra computation.
    cx, cy = self._getUnrotatedCellCentroidCoords(i, j)
    half = self._side / 2.0
    # Offsets from the centroid, counter-clockwise from the left vertex.
    offsets = (
        (-self._side, 0.0),
        (-half, -self._hexPerp),
        (half, -self._hexPerp),
        (self._side, 0.0),
        (half, self._hexPerp),
        (-half, self._hexPerp),
    )
    return [self.rotatePoint(cx + dx, cy + dy) for dx, dy in offsets]
def rotatePoint(self, pointX, pointY):
    """Rotate (pointX, pointY) around the mesh origin (self.xll, self.yll).

    Uses the angle formed between the segment linking the point of interest
    to the origin and the parallel intersecting the origin (called ``beta``
    below), then offsets the point by the difference between its unrotated
    and rotated projections.  The rotation amount comes from ``self.angle``
    with its precomputed radians value in ``self._angle_rd``.

    Returns the rotated (x, y) tuple; the point is returned unchanged when
    no rotation is configured or when it coincides with the origin.
    """
    if self.angle == 0 or self.angle is None:
        return (pointX, pointY)
    # 1. Compute the segment length from the origin to the point.
    length = math.sqrt((pointX - self.xll) ** 2 + (pointY - self.yll) ** 2)
    if length == 0:
        # Point *is* the origin: rotating it is a no-op, and the beta
        # computation below would divide by zero.
        return (pointX, pointY)
    # 2. Compute beta (acos only covers [0, pi]; mirror for points below
    #    the origin).
    beta = math.acos((pointX - self.xll) / length)
    if pointY < self.yll:
        beta = math.pi * 2 - beta
    # 3. Compute offsets between the unrotated and rotated projections.
    offsetX = math.cos(beta) * length - math.cos(self._angle_rd + beta) * length
    offsetY = math.sin(self._angle_rd + beta) * length - math.sin(beta) * length
    return (pointX - offsetX, pointY + offsetY)
def set_information(self, title=None, subject=None, author=None, keywords=None, creator=None):
    """Convenience function to add property info; can set any attribute and
    leave the others blank -- it won't over-write previously set items.

    Only attributes that already exist on the object are touched, and only
    when a non-empty value is supplied, so omitted arguments leave any
    previously set metadata intact (as the docstring promises; the old code
    reset omitted fields to None).
    """
    info_dict = {"title": title, "subject": subject,
                 "author": author, "keywords": keywords,
                 "creator": creator}
    # dict.items() (not the Python-2-only iteritems()) keeps this portable.
    for att, value in info_dict.items():
        if hasattr(self, att) and value:
            setattr(self, att, value)
def set_display_mode(self, zoom='fullpage', layout='continuous'):
    """ Set the default viewing options.

    :param zoom: one of 'fullpage', 'fullwidth', 'real', 'default', or an
        integer percentage in (0, 100].
    :param layout: one of 'single', 'continuous', 'two', 'default'.
    :raises Exception: for any other value.
    """
    self.zoom_options = ["fullpage", "fullwidth", "real", "default"]
    self.layout_options = ["single", "continuous", "two", "default"]
    if zoom in self.zoom_options or (isinstance(zoom, int) and 0 < zoom <= 100):
        self.zoom_mode = zoom
    else:
        # %s formatting also handles non-string zoom values (e.g. an
        # out-of-range int), which crashed the old string concatenation.
        raise Exception('Incorrect zoom display mode: %s' % zoom)
    if layout in self.layout_options:
        self.layout_mode = layout
    else:
        raise Exception('Incorrect layout display mode: %s' % layout)
def close(self):
    """ Prompt the objects to output pdf code, and save to file.

    Emits each PDF section in file order (header, pages, resources, info,
    catalog, trailer), then dispatches the finished buffer to the
    configured destination.

    Returns the output for io/'string' destinations, or None after writing
    to a file path.
    """
    self.document._set_page_numbers()
    # Places header, pages, page content first.
    self._put_header()
    self._put_pages()
    self._put_resources()
    # Information object
    self._put_information()
    # Catalog object
    self._put_catalog()
    # Cross-reference object (emitted by _put_trailer instead)
    #self._put_cross_reference()
    # Trailer object
    self._put_trailer()
    # Dispatch on destination: a writable file-like object, the literal
    # string 'string', or (default) a filesystem path.
    if hasattr(self.destination, "write"):
        output = self._output_to_io()
    elif self.destination == 'string':
        output = self._output_to_string()
    else:
        self._output_to_file()
        output = None
    return output
def _put_header(self):
    """ Standard first line in a PDF.

    Writes the '%PDF-<version>' marker ('%%' escapes to a literal '%').
    When compression is enabled, also appends a comment line of four
    high-bit characters so transfer tools treat the file as binary.
    """
    self.session._out('%%PDF-%s' % self.pdf_version)
    if self.session.compression:
        # NOTE(review): appended straight to the buffer (not via _out);
        # assumes the buffer is a plain (byte) string -- confirm on Python 3.
        self.session.buffer += '%' + chr(235) + chr(236) + chr(237) + chr(238) + "\n"
def _put_pages(self):
    """ First, the Document object does the heavy-lifting for the
    individual page objects and content.
    Then, the overall "Pages" object is generated.
    """
    self.document._get_orientation_changes()
    self.document._output_pages()
    # Pages Object, provides reference to page objects (Kids list).
    self.session._add_object(1)
    self.session._out('<</Type /Pages')
    # Page objects are numbered 3, 5, 7, ... (a content object sits
    # between each pair).
    kids = '/Kids ['
    # range() instead of the Python-2-only xrange(); works on both.
    for i in range(len(self.document.pages)):
        kids += str(3 + 2 * i) + ' 0 R '
    self.session._out(kids + ']')
    self.session._out('/Count %s' % len(self.document.pages))
    # Overall size of the default PDF page
    self.session._out('/MediaBox [0 0 %.2f %.2f]' %
                      (self.document.page.width,
                       self.document.page.height))
    self.session._out('>>')
    self.session._out('endobj')
def _put_resource_dict(self):
    """ Creates the PDF resource dictionary object (object 2), referencing
    the document's fonts and, when present, its images.
    """
    self.session._add_object(2)
    out = self.session._out
    out('<<')
    out('/ProcSet [/PDF /Text /ImageB /ImageC /ImageI]')
    out('/Font <<')
    for font in self.document.fonts:
        out('/F%s %s 0 R' % (font.index, font.number))
    out('>>')
    if self.document.images:
        out('/XObject <<')
        for image in self.document.images:
            out('/I%s %s 0 R' % (image.index, image.number))
        out('>>')
    out('>>')
    out('endobj')
def _put_information(self):
    """PDF Information (metadata) object: producer line, any document
    attributes that were set, and the creation timestamp."""
    self.session._add_object()
    self.session._out('<<')
    self.session._out('/Producer ' + self._text_to_string(
        'PDFLite, https://github.com/katerina7479'))
    # Emit only the metadata fields that were actually set, in spec order.
    optional = (('/Title ', self.title),
                ('/Subject ', self.subject),
                ('/Author ', self.author),
                ('/Keywords ', self.keywords),
                ('/Creator ', self.creator))
    for prefix, value in optional:
        if value:
            self.session._out(prefix + self._text_to_string(value))
    self.session._out('/CreationDate ' + self._text_to_string(
        'D:' + datetime.now().strftime('%Y%m%d%H%M%S')))
    self.session._out('>>')
    self.session._out('endobj')
def _put_catalog(self):
    """Catalog object: the document root, carrying the open-action (zoom)
    and page layout viewer preferences."""
    self.session._add_object()
    self.session._out('<<')
    self.session._out('/Type /Catalog')
    self.session._out('/Pages 1 0 R')
    if self.zoom_mode == 'fullpage':
        self.session._out('/OpenAction [3 0 R /Fit]')
    elif self.zoom_mode == 'fullwidth':
        self.session._out('/OpenAction [3 0 R /FitH null]')
    elif self.zoom_mode == 'real':
        self.session._out('/OpenAction [3 0 R /XYZ null null 1]')
    elif isinstance(self.zoom_mode, int):
        # Numeric zoom percentage -> fractional zoom factor.  The old code
        # relied on the Python-2-only name `basestring`, used Py2 integer
        # division (always 0 or 1), and concatenated a number onto a str
        # (TypeError).  True division + %s formatting fixes all three.
        self.session._out(
            '/OpenAction [3 0 R /XYZ null null %s]' % (self.zoom_mode / 100.0))
    if self.layout_mode == 'single':
        self.session._out('/PageLayout /SinglePage')
    elif self.layout_mode == 'continuous':
        self.session._out('/PageLayout /OneColumn')
    elif self.layout_mode == 'two':
        self.session._out('/PageLayout /TwoColumnLeft')
    self.session._out('>>')
    self.session._out('endobj')
def _put_cross_reference(self):
    """ Cross Reference Object, calculates
    the position in bytes to the start
    (first number) of each object in
    order by number (zero is special)
    from the beginning of the file.
    """
    self.session._out('xref')
    self.session._out('0 %s' % len(self.session.objects))
    # Object 0 is always the special free-list head entry.
    self.session._out('0000000000 65535 f ')
    for obj in self.session.objects:
        # Plain strings in the object list are placeholders with no offset.
        # isinstance(obj, str) replaces the Python-2-only `basestring`.
        if not isinstance(obj, str):
            self.session._out('%010d 00000 n ' % obj.offset)
def _put_trailer(self):
    """ Final Trailer calculations, and end-of-file
    reference.

    Emits the cross-reference table, a (non-cryptographic) /ID digest
    derived from the timestamp, filepath and document metadata, and the
    trailer dictionary.
    """
    # Byte offset of the xref table = everything written so far.
    startxref = len(self.session.buffer)
    self._put_cross_reference()
    md5 = hashlib.md5()
    # hashlib requires bytes on Python 3; encode each piece (ASCII text
    # round-trips unchanged on Python 2).
    md5.update(datetime.now().strftime('%Y%m%d%H%M%S').encode('utf-8'))
    try:
        md5.update(self.filepath.encode('utf-8'))
    except (TypeError, AttributeError):
        # No usable filepath (e.g. io/string destination) -- skip it.
        pass
    for meta in (self.title, self.subject, self.author, self.keywords,
                 self.creator):
        if meta:
            md5.update(meta.encode('utf-8'))
    objnum = len(self.session.objects)
    self.session._out('trailer')
    self.session._out('<<')
    self.session._out('/Size %s' % objnum)
    # The catalog is the last object written, the info dict the one before.
    self.session._out('/Root %s 0 R' % (objnum - 1))
    self.session._out('/Info %s 0 R' % (objnum - 2))
    self.session._out('/ID [ <%s> <%s>]' % (md5.hexdigest(), md5.hexdigest()))
    self.session._out('>>')
    self.session._out('startxref')
    self.session._out(startxref)
    self.session._out('%%EOF')
def _output_to_file(self):
    """ Save to filepath specified on
    init. (Will throw an error if
    the document is already open).
    """
    # open() raises on failure, so the old `if not f` check was dead code;
    # surface the failure with the intended message instead.
    try:
        f = open(self.filepath, 'wb')
    except IOError:
        raise Exception('Unable to create output file: ', self.filepath)
    # `with` guarantees the handle is closed even if the write fails.
    with f:
        f.write(self.session.buffer)
def _text_to_string(self, text):
    """ Provides for escape characters and converting to
    pdf text object (pdf strings are in parantheses).
    Mainly for use in the information block here, this
    functionality is also present in the text object.
    """
    if not text:
        return 'None'
    # Backslash must be escaped first so it doesn't double-escape the
    # escapes added for the parentheses.
    for char, escaped in (("\\", "\\\\"), (")", "\\)"), ("(", "\\(")):
        text = text.replace(char, escaped)
    return "(%s)" % text
def floyd(seqs, f=None, start=None, key=lambda x: x):
    """Floyd's Cycle Detector.
    See help(cycle_detector) for more context.
    Args:
        seqs: Two iterators issueing the exact same sequence
            (the tortoise and the hare);
        f, start: Function and starting state for finite state machine
    Yields:
        Values yielded by the sequence if it terminates, undefined if a
        cycle is found.
    Raises:
        CycleDetected if a cycle is found; if called with `f` and `start`,
        the parametres `first` and `period` will be defined indicating
        the offset of start of the cycle and the cycle's period.
    """
    tortoise, hare = seqs
    try:
        # next() builtin instead of the Python-2-only .next() method.
        yield next(hare)
        tortoise_value = next(tortoise)
        hare_value = next(hare)
        # NOTE(review): phase 1 compares raw values (no `key`), unlike
        # brent() -- confirm whether that asymmetry is intended.
        while hare_value != tortoise_value:
            yield hare_value
            yield next(hare)
            hare_value = next(hare)
            tortoise_value = next(tortoise)
    except StopIteration:
        # Finite sequence: no cycle.  Return explicitly -- under PEP 479
        # (Python 3.7+) a StopIteration leaking from a generator body
        # becomes a RuntimeError.
        return
    if f is None:
        raise CycleDetected()
    hare_value = f(hare_value)
    # Phase 2: walk both pointers to find the first index of the cycle.
    first = 0
    tortoise_value = start
    while key(tortoise_value) != key(hare_value):
        tortoise_value = f(tortoise_value)
        hare_value = f(hare_value)
        first += 1
    # Phase 3: measure the cycle's period.
    period = 1
    hare_value = f(tortoise_value)
    while key(tortoise_value) != key(hare_value):
        hare_value = f(hare_value)
        period += 1
    raise CycleDetected(period=period, first=first)
def naive(seqs, f=None, start=None, key=lambda x: x):
    """Naive cycle detector
    See help(cycle_detector) for more context.
    Args:
        seqs: A sequence to detect cyles in (only seqs[0] is used).
        f, start: Function and starting state for finite state machine
    Yields:
        Values from the sequence if it terminates, undefined if a
        cycle is found.
    Raises:
        CycleDetected if a cycle is found. Will always carry `first`
        and `period` values no matter which of the `seqs` or `f`
        interface is used.
    """
    # Maps each previously seen (keyed) value to the step it occurred at.
    seen = {}
    for step, value in enumerate(seqs[0]):
        keyed = key(value)
        yield value
        if keyed in seen:
            start_step = seen[keyed]
            raise CycleDetected(first=start_step, period=step - start_step)
        seen[keyed] = step
def gosper(seqs, f=None, start=None, key=lambda x: x):
    """Gosper's cycle detector
    See help(cycle_detector) for more context.
    Args:
        seqs: A sequence to detect cyles in (only seqs[0] is used).
        f, start: Function and starting state for finite state machine
    Yields:
        Values from the sequence if it terminates, undefined if a
        cycle is found.
    Raises:
        CycleDetected if a cycle is found. Unlike Floyd and Brent's,
        Gosper's can only detect the period of a cycle. It cannot
        compute the first position.
    """
    tab = []
    for c, value in enumerate(seqs[0], start=1):
        yield value
        keyed = key(value)
        try:
            e = tab.index(keyed)
            raise CycleDetected(
                period=c - ((((c >> e) - 1) | 1) << e))
        except ValueError:
            try:
                # Slot index = number of trailing zero bits of c.
                tab[(c ^ (c - 1)).bit_length() - 1] = keyed
            except IndexError:
                # Table not yet grown to this power of two; extend it.
                # Store the *keyed* value -- the original appended the raw
                # value, which broke lookups for any non-identity `key`.
                tab.append(keyed)
def brent(seqs, f=None, start=None, key=lambda x: x):
    """Brent's Cycle Detector.
    See help(cycle_detector) for more context.
    Args:
        seqs: Two iterators issueing the exact same sequence
            (the tortoise and the hare);
        f, start: Function and starting state for finite state machine
    Yields:
        Values yielded by the sequence if it terminates, undefined if a
        cycle is found.
    Raises:
        CycleDetected if a cycle is found; if called with `f` and `start`,
        the parametres `first` and `period` will be defined indicating
        the offset of start of the cycle and the cycle's period.
    """
    power = period = 1
    tortoise, hare = seqs
    try:
        # next() builtin instead of the Python-2-only .next() method.
        yield next(hare)
        tortoise_value = next(tortoise)
        hare_value = next(hare)
        while key(tortoise_value) != key(hare_value):
            yield hare_value
            if power == period:
                # Time to teleport the tortoise up to the hare's position.
                power *= 2
                period = 0
                if f:
                    tortoise = f_generator(f, hare_value)
                    tortoise_value = next(tortoise)
                else:
                    while tortoise_value != hare_value:
                        tortoise_value = next(tortoise)
            hare_value = next(hare)
            period += 1
    except StopIteration:
        # Finite sequence, no cycle; the explicit return keeps PEP 479
        # (Python 3.7+) from turning the StopIteration into a RuntimeError.
        return
    if f is None:
        raise CycleDetected()
    # Phase 2: find the cycle start with two pointers `period` steps apart.
    first = 0
    tortoise_value = hare_value = start
    # range() instead of the Python-2-only xrange(); works on both.
    for _ in range(period):
        hare_value = f(hare_value)
    while key(tortoise_value) != key(hare_value):
        tortoise_value = f(tortoise_value)
        hare_value = f(hare_value)
        first += 1
    raise CycleDetected(period=period, first=first)
def x_fit(self, test_length):
    """ Test to see if the line has enough space for the given length. """
    return (self.x + test_length) < self.xmax
def y_fit(self, test_length):
    """ Test to see if the page has enough space for the given text height. """
    return (self.y + test_length) < self.ymax
def x_is_greater_than(self, test_ordinate):
    """ Comparison for x coordinate (validates the argument first). """
    self._is_coordinate(test_ordinate)
    return self.x > test_ordinate.x
def y_is_greater_than(self, test_ordinate):
    """Comparison for y coordinate (validates the argument first)."""
    self._is_coordinate(test_ordinate)
    return self.y > test_ordinate.y
def copy(self):
    """ Create a copy, and return it.

    Builds a fresh cursor of the same class at the same position, then
    carries over the bounds and delta settings.
    """
    duplicate = self.__class__(self.x, self.y)
    duplicate.set_bounds(self.xmin, self.ymin,
                         self.xmax, self.ymax, self.ymaxmax)
    duplicate.set_deltas(self.dx, self.dy)
    return duplicate
def x_plus(self, dx=None):
    """ Mutable x addition. Defaults to the configured delta value. """
    self.x += self.dx if dx is None else dx
def y_plus(self, dy=None):
    """ Mutable y addition. Defaults to the configured delta value. """
    self.y += self.dy if dy is None else dy
def set_page_size(self, layout):
    """ Valid choices: 'a3, 'a4', 'a5', 'letter', 'legal', '11x17',
    or any custom '<width>x<height>' size given in inches.
    """
    self.layout = layout.lower()
    if self.layout in self.layout_dict:
        self.page_size = self.layout_dict[self.layout]
        return
    # Custom "WxH" size in inches (72 points per inch).
    dimensions = self.layout.split('x')
    if len(dimensions) != 2:
        raise IndexError("Page is two dimensions, given: %s" % len(dimensions))
    width, height = dimensions
    self.page_size = (float(width) * 72, float(height) * 72)
def _draw(self):
    """ Don't use this, use document.draw_table.

    Runs the table render pipeline in a fixed order: layout compilation,
    first-row adjustment and border setup, then fill, borders and text,
    finishing by positioning the cursor after the table.
    """
    self._compile()
    self.rows[0]._advance_first_row()
    self._set_borders()
    self._draw_fill()
    self._draw_borders()
    self._draw_text()
    self._set_final_cursor()
def create(self, name, description=None, color=None):
    """
    Creates a new label and returns the response
    :param name: The label name
    :type name: str
    :param description: An optional description for the label. The name is
        used if no description is provided.
    :type description: str
    :param color: The hex color for the label (ex: 'ff0000' for red). If no
        color is provided, a random one will be assigned.
    :type color: str
    :returns: The response of your post
    :rtype: dict
    :raises: This will raise a
        :class:`ServerException<logentries_api.exceptions.ServerException>`
        if there is an error from Logentries
    """
    payload = {
        'name': name,
        'title': name,
        'description': description or name,
        'appearance': {'color': color or random_color()},
    }
    # Yes, it's confusing: the `/tags/` endpoint is used for labels.
    return self._post(
        request=ApiActions.CREATE.value,
        uri=ApiUri.TAGS.value,
        params=payload,
    )
def list(self):
    """
    Get all current labels
    :return: The Logentries API response
    :rtype: list of dict
    :raises: This will raise a
        :class:`ServerException<logentries_api.exceptions.ServerException>`
        if there is an error from Logentries
    """
    response = self._post(request='list', uri=ApiUri.TAGS.value)
    return response.get('tags')
def get(self, name):
    """
    Get labels by name
    :param name: The label name, it must be an exact match.
    :type name: str
    :return: A list of matching labels. An empty list is returned if there
        are not any matches
    :rtype: list of dict
    :raises: This will raise a
        :class:`ServerException<logentries_api.exceptions.ServerException>`
        if there is an error from Logentries
    """
    return [label for label in self.list() if label.get('name') == name]
def update(self, label):
    """
    Update a Label
    :param label: The data to update. Must include keys:
        * id (str)
        * appearance (dict)
        * description (str)
        * name (str)
        * title (str)
    :type label: dict
    Example:
    .. code-block:: python
        Labels().update(
            label={
                'id': 'd9d4596e-49e4-4135-b3b3-847f9e7c1f43',
                'appearance': {'color': '278abe'},
                'name': 'My Sandbox',
                'description': 'My Sandbox',
                'title': 'My Sandbox',
            }
        )
    :return:
    :rtype: dict
    """
    # Forward only the documented keys (a missing one raises KeyError).
    fields = ('id', 'name', 'appearance', 'description', 'title')
    payload = {field: label[field] for field in fields}
    return self._post(
        request=ApiActions.UPDATE.value,
        uri=ApiUri.TAGS.value,
        params=payload,
    )
def delete(self, id):
    """
    Delete the specified label
    :param id: the label's ID
    :type id: str
    :raises: This will raise a
        :class:`ServerException<logentries_api.exceptions.ServerException>`
        if there is an error from Logentries
    """
    payload = {'id': id}
    return self._post(
        request=ApiActions.DELETE.value,
        uri=ApiUri.TAGS.value,
        params=payload,
    )
def create(self, label_id):
    """
    Create a new tag
    :param label_id: The Label ID (the 'sn' key of the create label response)
    :type label_id: str
    :returns: The response of your post
    :rtype: dict
    :raises: This will raise a
        :class:`ServerException<logentries_api.exceptions.ServerException>`
        if there is an error from Logentries
    """
    # A tag is a 'tagit' action with no rate/limit throttling.
    payload = {
        'type': 'tagit',
        'rate_count': 0,
        'rate_range': 'day',
        'limit_count': 0,
        'limit_range': 'day',
        'schedule': [],
        'enabled': True,
        'args': {
            'sn': label_id,
            'tag_sn': label_id,
        },
    }
    # Yes, it's confusing: the `/actions/` endpoint is used for tags,
    # while the `/tags/` endpoint is used for labels.
    return self._post(
        request=ApiActions.CREATE.value,
        uri=ApiUri.ACTIONS.value,
        params=payload,
    )
def list(self):
    """
    Get all current tags
    :return: All tags
    :rtype: list of dict
    :raises: This will raise a
        :class:`ServerException<logentries_api.exceptions.ServerException>`
        if there is an error from Logentries
    """
    actions = self._post(
        request=ApiActions.LIST.value,
        uri=ApiUri.ACTIONS.value,
    ).get('actions')
    # Only 'tagit' actions are tags; other action types are alerts.
    return [action for action in actions if action.get('type') == 'tagit']
def get(self, label_sn):
    """
    Get tags by a label's sn key
    :param label_sn: A corresponding label's ``sn`` key.
    :type label_sn: str or int
    :return: A list of matching tags. An empty list is returned if there
        are not any matches
    :rtype: list of dict
    :raises: This will raise a
        :class:`ServerException<logentries_api.exceptions.ServerException>`
        if there is an error from Logentries
    """
    wanted = str(label_sn)
    matches = []
    for tag in self.list():
        if wanted in tag.get('args', {}).values():
            matches.append(tag)
    return matches
def create(self, name, regexes, tag_ids, logs=None):
    """
    Create a hook
    :param name: The hook's name (should be the same as the tag)
    :type name: str
    :param regexes: The list of regular expressions that Logentries
        expects. Ex: ``['user_agent = /curl\\/[\\d.]*/']`` would match
        where the user-agent is curl.
    :type regexes: list of str
    :param tag_ids: The ids of the tags to associate the hook with.
        (The 'id' key of the create tag response)
    :type tag_ids: list of str
    :param logs: The logs to add the hook to. Comes from the 'key'
        key in the log dict.
    :type logs: list of str
    :returns: The response of your post
    :rtype: dict
    :raises: This will raise a
        :class:`ServerException<logentries_api.exceptions.ServerException>`
        if there is an error from Logentries
    """
    payload = {
        'name': name,
        'triggers': regexes,
        'sources': logs or [],
        'groups': [],
        'actions': tag_ids,
    }
    return self._post(
        request=ApiActions.CREATE.value,
        uri=ApiUri.HOOKS.value,
        params=payload,
    )
def list(self):
    """
    Get all current hooks
    :return: All hooks
    :rtype: list of dict
    :raises: This will raise a
        :class:`ServerException<logentries_api.exceptions.ServerException>`
        if there is an error from Logentries
    """
    response = self._post(
        request=ApiActions.LIST.value,
        uri=ApiUri.HOOKS.value,
    )
    return response.get('hooks')
def get(self, name_or_tag_id):
    """
    Get hooks by name or tag_id.
    :param name_or_tag_id: The hook's name or associated tag['id']
    :type name_or_tag_id: str
    :return: A list of matching hooks. An empty list is returned if there
        are not any matches
    :rtype: list of dict
    :raises: This will raise a
        :class:`ServerException<logentries_api.exceptions.ServerException>`
        if there is an error from Logentries
    """
    matches = []
    for hook in self.list():
        # Match either an associated tag id or the hook's own name.
        if (name_or_tag_id in hook.get('actions')
                or name_or_tag_id == hook.get('name')):
            matches.append(hook)
    return matches
def update(self, hook):
    """
    Update a hook
    :param hook: The data to update. Must include keys:
        * id (str)
        * name (str)
        * triggers (list of str)
        * sources (list of str)
        * groups (list of str)
        * actions (list of str)
    :type hook: dict
    Example:
    .. code-block:: python
        Hooks().update(
            hook={
                'id': 'd9d4596e-49e4-4135-b3b3-847f9e7c1f43',
                'name': 'My Sandbox',
                'triggers': ['host = you.example.com'],
                'sources': ['4d42c719-4005-4929-aa4a-994da4b95040'],
                'groups': [],
                'actions': [
                    '9f6adf69-37b9-4a4b-88fb-c3fc4c781a11',
                    'ddc36d71-33cb-4f4f-be1b-8591814b1946'
                ],
            }
        )
    :return:
    :rtype: dict
    """
    # Forward only the documented keys (a missing one raises KeyError).
    fields = ('id', 'name', 'triggers', 'sources', 'groups', 'actions')
    payload = {field: hook[field] for field in fields}
    return self._post(
        request=ApiActions.UPDATE.value,
        uri=ApiUri.HOOKS.value,
        params=payload,
    )
def create(self,
           alert_config,
           occurrence_frequency_count=None,
           occurrence_frequency_unit=None,
           alert_frequency_count=None,
           alert_frequency_unit=None):
    """
    Create a new alert
    :param alert_config: An AlertConfig instance (Ex:
        ``EmailAlertConfig('me@mydomain.com')``) such as
        :class:`PagerDutyAlertConfig<logentries_api.alerts.PagerDutyAlertConfig>`,
        :class:`WebHookAlertConfig<logentries_api.alerts.WebHookAlertConfig>`,
        :class:`EmailAlertConfig<logentries_api.alerts.EmailAlertConfig>`,
        :class:`SlackAlertConfig<logentries_api.alerts.SlackAlertConfig>`, or
        :class:`HipChatAlertConfig<logentries_api.alerts.HipChatAlertConfig>`
    :param occurrence_frequency_count: How many times per
        ``alert_frequency_unit`` for a match before issuing an alert.
        Defaults to 1
    :type occurrence_frequency_count: int
    :param occurrence_frequency_unit: The time period to monitor for sending
        an alert. Must be 'day', or 'hour'. Defaults to 'hour'
    :type occurrence_frequency_unit: str
    :param alert_frequency_count: How many times per
        ``alert_frequency_unit`` to issue an alert. Defaults to 1
    :type alert_frequency_count: int
    :param alert_frequency_unit: How often to regulate sending alerts.
        Must be 'day', or 'hour'. Defaults to 'hour'
    :type alert_frequency_unit: str
    :returns: The response of your post
    :rtype: dict
    :raises: This will raise a
        :class:`ServerException<logentries_api.exceptions.ServerException>`
        if there is an error from Logentries
    """
    # NOTE(review): the original docstring described `alert_config` as a
    # *list* of config objects, but the code calls `.args()` on it directly
    # as a single object -- confirm which interface callers rely on.
    payload = {
        'rate_count': occurrence_frequency_count or 1,
        'rate_range': occurrence_frequency_unit or 'hour',
        'limit_count': alert_frequency_count or 1,
        'limit_range': alert_frequency_unit or 'hour',
        'schedule': [],
        'enabled': True,
    }
    payload.update(alert_config.args())
    # Yes, it's confusing: the `/actions/` endpoint is used for alerts,
    # while the `/tags/` endpoint is used for labels.
    return self._post(
        request=ApiActions.CREATE.value,
        uri=ApiUri.ACTIONS.value,
        params=payload,
    )
def get(self, alert_type, alert_args=None):
    """
    Get alerts that match the alert type and args.
    :param alert_type: The type of the alert. Must be one of 'pagerduty',
        'mailto', 'webhook', 'slack', or 'hipchat'
    :type alert_type: str
    :param alert_args: The args for the alert. The provided args must be a
        subset of the actual alert args. If no args are provided, all
        alerts matching the ``alert_type`` are returned. For example:
        ``.get('mailto', alert_args={'direct': 'me@mydomain.com'})`` or
        ``.get('slack', {'url': 'https://hooks.slack.com/services...'})``
    :return: A list of matching alerts. An empty list is returned if there
        are not any matches
    :rtype: list of dict
    :raises: This will raise a
        :class:`ServerException<logentries_api.exceptions.ServerException>`
        if there is an error from Logentries
    """
    wanted_args = alert_args or {}
    return [
        alert for alert in self.list()
        if alert.get('type') == alert_type
        and dict_is_subset(wanted_args, alert.get('args'))
    ]
def update(self, alert):
    """
    Update an alert
    :param alert: The data to update. Must include keys:
        * id (str)
        * rate_count (int)
        * rate_range (str): 'day' or 'hour'
        * limit_count (int)
        * limit_range (str): 'day' or 'hour'
        * type (str)
        * schedule (list)
        * args (dict)
    :type alert: dict
    Example:
    .. code-block:: python
        Alert().update(
            alert={
                'id': 'd9d4596e-49e4-4135-b3b3-847f9e7c1f43',
                'args': {'direct': 'you@example.com'},
                'rate_count': 1,
                'rate_range': 'hour',
                'limit_count': 1,
                'limit_range': 'hour',
                'schedule': [],
                'enabled': True,
                'type': 'mailto',
            }
        )
    :return:
    :rtype: dict
    """
    # Forward only the documented keys (a missing one raises KeyError).
    fields = ('id', 'args', 'rate_count', 'rate_range', 'limit_count',
              'limit_range', 'schedule', 'enabled', 'type')
    payload = {field: alert[field] for field in fields}
    return self._post(
        request=ApiActions.UPDATE.value,
        uri=ApiUri.ACTIONS.value,
        params=payload,
    )
def setup(app):
    """
    Initialize this Sphinx extension
    """
    # Core Sphinx extensions this theme builds on.
    for extension in ('sphinx.ext.todo',
                      'sphinx.ext.mathjax',
                      'sphinx.ext.intersphinx'):
        app.setup_extension(extension)
    # Cross-reference inventories: the Python docs plus the Sage manuals.
    app.config.intersphinx_mapping['https://docs.python.org/'] = None
    for doc in sage_documents:
        app.config.intersphinx_mapping[sage_doc_url + doc + "/"] = None
    for module in sage_modules:
        app.config.intersphinx_mapping[sage_doc_url + "reference/" + module] = None
    app.setup_extension("sphinx.ext.extlinks")
    app.config.extlinks.update({
        'python': ('https://docs.python.org/release/' + pythonversion + '/%s', ''),
        # Sage trac ticket shortcuts. For example, :trac:`7549` .
        'trac': ('https://trac.sagemath.org/%s', 'trac ticket #'),
        'wikipedia': ('https://en.wikipedia.org/wiki/%s', 'Wikipedia article '),
        'arxiv': ('http://arxiv.org/abs/%s', 'Arxiv '),
        'oeis': ('https://oeis.org/%s', 'OEIS sequence '),
        'doi': ('https://dx.doi.org/%s', 'doi:'),
        'pari': ('http://pari.math.u-bordeaux.fr/dochtml/help/%s', 'pari:'),
        'mathscinet': ('http://www.ams.org/mathscinet-getitem?mr=%s', 'MathSciNet ')
    })
    app.config.html_theme = 'sage'
def themes_path():
    """
    Retrieve the location of the themes directory from the location of
    this package.  This is taken from Sphinx's theme documentation.
    """
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(here, 'themes')
def _post(self, request, uri, params=None):
    """
    A wrapper for posting things.
    :param request: The request type. Must be one of the
        :class:`ApiActions<logentries_api.base.ApiActions>`
    :type request: str
    :param uri: The API endpoint to hit. Must be one of
        :class:`ApiUri<logentries_api.base.ApiUri>`
    :type uri: str
    :param params: A dictionary of supplemental kw args
    :type params: dict
    :returns: The response of your post
    :rtype: dict
    :raises: This will raise a
        :class:`ServerException<logentries_api.exceptions.ServerException>`
        if there is an error from Logentries
    """
    payload = {
        'acl': self.account_key,
        'account': self.account_key,
        'request': request,
    }
    payload.update(params or {})
    response = requests.post(
        url='https://api.logentries.com/v2/{}'.format(uri),
        headers=self.headers,
        data=json.dumps(payload),
    )
    if not response.ok:
        raise ServerException(
            '{}: {}'.format(response.status_code, response.text))
    return response.json()
def list(self):
    """
    Get all log sets
    :return: Returns a dictionary where the key is the hostname or log set,
        and the value is a list of the log keys
    :rtype: dict
    :raises: This will raise a
        :class:`ServerException<logentries_api.exceptions.ServerException>`
        if there is an error from Logentries
    """
    response = requests.get(self.base_url)
    if not response.ok:
        raise ServerException(
            '{}: {}'.format(response.status_code, response.text))
    result = {}
    for host in response.json().get('list'):
        result[host.get('name')] = [log.get('key') for log in host.get('logs')]
    return result
def get(self, log_set):
    """
    Get a specific log or log set
    :param log_set: The log set or log to get. Ex: `.get(log_set='app')` or
        `.get(log_set='app/log')`
    :type log_set: str
    :returns: The response of your log set or log
    :rtype: dict
    :raises: This will raise a
        :class:`ServerException<logentries_api.exceptions.ServerException>`
        if there is an error from Logentries
    """
    # Strip any trailing slash so the URL join stays canonical.
    url = self.base_url + log_set.rstrip('/')
    response = requests.get(url)
    if not response.ok:
        raise ServerException(
            '{}: {}'.format(response.status_code, response.text))
    return response.json()
def find_attacker_slider(dest_list, occ_bb, piece_bb, target_bb, pos,
                         domain):
    """ Find a slider attacker
    Parameters
    ----------
    dest_list : list
        To store the results.
    occ_bb : int, bitboard
        Occupancy bitboard.
    piece_bb : int, bitboard
        Bitboard with the position of the attacker piece.
    target_bb : int, bitboard
        Occupancy bitboard without any of the sliders in question.
    pos : int
        Target position.
    domain : tuple of (pos_map, domain_trans, pos_inv_map)
        pos_map : function
            Mapping between a board position and its position in a single
            rotated/translated rank produced with domain_trans.
        domain_trans : function
            Transformation from a rank/file/diagonal/anti-diagonal
            containing pos to a single rank
        pos_inv_map : function
            Inverse of pos_map
    """
    pos_map, domain_trans, pos_inv_map = domain
    # Squares reachable from `pos` along this domain, computed against the
    # occupancy with the sliders in question removed (module-level `reach`
    # lookup table).
    r = reach[pos_map(pos)][domain_trans(target_bb, pos)]
    # Candidate attacker squares: reachable squares that hold the piece.
    m = r & domain_trans(piece_bb, pos)
    while m:
        r = m&-m  # isolate the lowest set bit (reuses `r` as scratch)
        rpos = r.bit_length()-1
        # Accept the attacker only when nothing in the full occupancy
        # blocks the ray between it and `pos` (module-level `ray` table).
        if not (ray[rpos][pos_map(pos)] & domain_trans(occ_bb, pos)):
            dest_list.append(pos_inv_map(rpos, pos))
        m ^= r  # clear the processed candidate bit
def duration(self):
    '''
    The approximate transit duration for the general case of an eccentric orbit
    '''
    # Fall back to deriving eccentricity from the (ecw, esw) components
    # when `ecc` itself was not supplied (NaN).
    ecc = self.ecc if not np.isnan(self.ecc) else np.sqrt(self.ecw**2 + self.esw**2)
    esw = self.esw if not np.isnan(self.esw) else ecc * np.sin(self.w)
    # Scaled semi-major axis a/R* from the stellar density `rhos`;
    # NOTE(review): `per` is presumably in days (DAYSEC conversion) -- confirm.
    aRs = ((G * self.rhos * (1. + self.MpMs) *
           (self.per * DAYSEC)**2.) / (3. * np.pi))**(1./3.)
    # Inclination implied by the circular-orbit impact parameter.
    inc = np.arccos(self.bcirc/aRs)
    # Impact parameter corrected for the eccentric orbit.
    becc = self.bcirc * (1 - ecc**2)/(1 - esw)
    tdur = self.per / 2. / np.pi * np.arcsin(((1. + self.RpRs)**2 -
           becc**2)**0.5 / (np.sin(inc) * aRs))
    # Eccentricity correction factor to the circular-orbit duration.
    tdur *= np.sqrt(1. - ecc**2.)/(1. - esw)
    return tdur
def update(self, **kwargs):
    '''
    Update the transit keyword arguments
    '''
    if kwargs.get('verify_kwargs', True):
        # Collect every field name declared in the structures' _fields_,
        # plus the two special-cased kwargs.
        valid = [field[0]
                 for struct in (TRANSIT, LIMBDARK, SETTINGS)
                 for field in struct._fields_]
        valid += ['b', 'times']  # These are special!
        for kwarg in kwargs:
            if kwarg not in valid:
                raise Exception("Invalid kwarg '%s'." % kwarg)
    # Infer the limb-darkening model from which coefficients were passed.
    supplied = set(kwargs)
    if {'q1', 'q2'} <= supplied:
        kwargs.update({'ldmodel': KIPPING})
    elif {'c1', 'c2', 'c3', 'c4'} <= supplied:
        kwargs.update({'ldmodel': NONLINEAR})
    self.limbdark.update(**kwargs)
    self.transit.update(**kwargs)
    self.settings.update(**kwargs)
def Compute(self):
    '''
    Computes the light curve model
    '''
    # Delegate to the underlying implementation; translate a nonzero
    # error code into an exception.
    error_code = _Compute(self.transit, self.limbdark, self.settings, self.arrays)
    if error_code != _ERR_NONE:
        RaiseError(error_code)
def Bin(self):
    '''
    Bins the light curve model to the provided time array
    '''
    # Delegate to the underlying implementation; translate a nonzero
    # error code into an exception.
    error_code = _Bin(self.transit, self.limbdark, self.settings, self.arrays)
    if error_code != _ERR_NONE:
        RaiseError(error_code)
def Free(self):
    '''
    Frees the memory used by all of the dynamically allocated C arrays.
    '''
    if self.arrays._calloc:
        # The main computation arrays are allocated (and freed) together.
        for name in ('_time', '_flux', '_bflx', '_M', '_E',
                     '_f', '_r', '_x', '_y', '_z'):
            _dbl_free(getattr(self.arrays, name))
        self.arrays._calloc = 0
    if self.arrays._balloc:
        _dbl_free(self.arrays._b)
        self.arrays._balloc = 0
    if self.arrays._ialloc:
        _dbl_free(self.arrays._iarr)
        self.arrays._ialloc = 0
def __recv(self, size=4096):
    """Reads data from the socket into the internal buffer.
    Raises:
        NNTPError: When connection times out or read from socket fails.
    """
    chunk = self.socket.recv(size)
    if not chunk:
        # An empty read means the peer closed the connection.
        raise NNTPError("Failed to read from socket")
    self.__buffer.write(chunk)
def __line_gen(self):
    """Generator that reads a line of data from the server.

    Lines come from the internal buffer; whenever the buffer does not hold
    a complete line, more data is fetched from the socket and buffered
    until one can be produced.

    Yields:
        A line of data when it becomes available.
    """
    while True:
        candidate = self.__buffer.readline()
        if candidate:
            yield candidate
        else:
            # Not enough buffered data for a full line; fetch more
            self.__recv()
def __buf_gen(self, length=0):
    """Generator that reads a block of data from the server.

    Data comes from the internal buffer; whenever the buffer is empty,
    more data is fetched from the socket and buffered.

    Args:
        length: An optional amount of data to retrieve. A length of 0 (the
            default) retrieves at least one buffer of data.

    Yields:
        A block of data when enough data becomes available.

    Note:
        With length 0 the size of the yielded buffer varies: whatever the
        internal buffer holds, or a single socket read's worth.
    """
    while True:
        chunk = self.__buffer.read(length)
        if chunk:
            yield chunk
        else:
            # Buffer exhausted; pull more bytes off the socket
            self.__recv()
def status(self):
    """Reads a command response status.

    If there is no response message then the returned status message will
    be an empty string.

    Raises:
        NNTPError: If data is required to be read from the socket and fails.
        NNTPProtocolError: If the status line can't be parsed.
        NNTPTemporaryError: For status code 400-499.
        NNTPPermanentError: For status code 500-599.

    Returns:
        A tuple of status code (as an integer) and status message.
    """
    line = next(self.__line_gen()).rstrip()
    parts = line.split(None, 1)
    try:
        code, message = int(parts[0]), ""
    except (IndexError, ValueError):
        # Bug fix: an empty status line produced parts == [] and an
        # uncaught IndexError; report it as a protocol error instead
        raise NNTPProtocolError(line)
    if code < 100 or code >= 600:
        raise NNTPProtocolError(line)
    if len(parts) > 1:
        message = parts[1]
    if 400 <= code <= 499:
        raise NNTPTemporaryError(code, message)
    if 500 <= code <= 599:
        raise NNTPPermanentError(code, message)
    return code, message
def __info_plain_gen(self):
    """Generator for the lines of an info (textual) response.

    When a terminating line (line containing a single period) is received
    the generator exits.

    If there is a line beginning with an 'escaped' period then the extra
    period is trimmed.

    Yields:
        A line of the info response.

    Raises:
        NNTPError: If data is required to be read from the socket and fails.
    """
    self.__generating = True
    for line in self.__line_gen():
        if line == ".\r\n":
            break
        if line.startswith("."):
            # Bug fix: trim the dot-stuffing escape. Previously the line
            # was yielded twice — once trimmed AND once raw.
            line = line[1:]
        yield line
    self.__generating = False
def __info_gzip_gen(self):
    """Generator for the lines of a compressed info (textual) response.

    Compressed responses are an extension to the NNTP protocol supported by
    some usenet servers to reduce the bandwidth of heavily used range style
    commands that can return large amounts of textual data.

    Handles gzip compressed responses that have the terminating line inside
    or outside the compressed data: 'XFEATURE COMPRESS GZIP' puts the
    terminating '.\\r\\n' after the compressed data, while 'XFEATURE
    COMPRESS GZIP TERMINATOR' compresses the terminator along with the
    reply.

    Produces the same output as __info_plain_gen(); decompression is
    handled here.

    Yields:
        A line of the info response.

    Raises:
        NNTPError: If data is required to be read from the socket and fails.
        NNTPDataError: If decompression fails.
    """
    self.__generating = True
    # 15+32 window bits: accept zlib or gzip headers automatically
    inflate = zlib.decompressobj(15 + 32)
    done, buf = False, fifo.Fifo()
    while not done:
        try:
            data = inflate.decompress(next(self.__buf_gen()))
        except zlib.error:
            raise NNTPDataError("Decompression failed")
        if data:
            buf.write(data)
        if inflate.unused_data:
            # Bytes after the compressed stream (e.g. an uncompressed
            # terminating line) are fed back into the line buffer
            buf.write(inflate.unused_data)
        for line in buf:
            if line == ".\r\n":
                done = True
                break
            if line.startswith("."):
                # Bug fix: trim the dot-stuffing escape. Previously the
                # line was yielded twice — once trimmed AND once raw.
                line = line[1:]
            yield line
    self.__generating = False
def __info_yenczlib_gen(self):
    """Generator for the lines of a compressed info (textual) response.

    Compressed responses are an extension to the NNTP protocol supported by
    some usenet servers to reduce the bandwidth of heavily used range style
    commands that can return large amounts of textual data. The server
    returns the same data as the uncompressed command would, except that it
    is zlib deflated and then yEnc encoded.

    This function will produce the same output as the info_gen() function;
    yEnc decoding and decompression are handled here.

    Yields:
        A line of the info response.

    Raises:
        NNTPError: If data is required to be read from the socket and fails.
        NNTPDataError: When there is an error parsing the yEnc header or
            trailer, if the CRC check fails or decompressing data fails.
    """
    # escape/dcrc32 carry yEnc decoder state between lines
    escape = 0
    dcrc32 = 0
    # -15 window bits: raw deflate stream, no zlib/gzip header
    inflate = zlib.decompressobj(-15)
    # header
    header = next(self.__info_plain_gen())
    if not header.startswith("=ybegin"):
        raise NNTPDataError("Bad yEnc header")
    # data
    buf, trailer = fifo.Fifo(), ""
    for line in self.__info_plain_gen():
        if line.startswith("=yend"):
            # Remember the trailer but keep draining the generator so the
            # underlying response is fully consumed
            trailer = line
            continue
        data, escape, dcrc32 = yenc.decode(line, escape, dcrc32)
        try:
            data = inflate.decompress(data)
        except zlib.error:
            raise NNTPDataError("Decompression failed")
        if not data:
            continue
        buf.write(data)
        for l in buf:
            yield l
    # trailer
    if not trailer:
        raise NNTPDataError("Missing yEnc trailer")
    # expected crc32 (parsed from the =yend line)
    ecrc32 = yenc.crc32(trailer)
    if ecrc32 is None:
        raise NNTPDataError("Bad yEnc trailer")
    # check crc32 of the decoded data against the trailer's value
    if ecrc32 != dcrc32 & 0xffffffff:
        raise NNTPDataError("Bad yEnc CRC")
def info_gen(self, code, message, compressed=False):
    """Dispatcher for the info generators.

    Selects the appropriate __info_*_gen() implementation based on the
    response message and the compressed flag.

    Args:
        code: The status code for the command response.
        message: The status message for the command response.
        compressed: Force yEnc+zlib decompression. Useful for xz* commands.

    Returns:
        An info generator.
    """
    if "COMPRESS=GZIP" in message:
        return self.__info_gzip_gen()
    return self.__info_yenczlib_gen() if compressed else self.__info_plain_gen()
def info(self, code, message, compressed=False):
    """The complete content of an info response.

    This should only be used for commands that return small or known
    amounts of data.

    Returns:
        The complete content of a textual response as a single string.
    """
    # join() consumes the generator directly; building an intermediate
    # list first was pure overhead
    return "".join(self.info_gen(code, message, compressed))
def command(self, verb, args=None):
    """Call a command on the server.

    If the user has not authenticated then authentication will be done
    as part of calling the command on the server.

    For commands that don't return a status message the status message
    will default to an empty string.

    Args:
        verb: The verb of the command to call.
        args: The arguments of the command as a string (default None).

    Returns:
        A tuple of status code (as an integer) and status message.

    Note:
        You can run raw commands by supplying the full command (including
        args) in the verb. Although possible, you shouldn't issue more than
        one command at a time by adding newlines to the verb as it will
        most likely lead to undesirable results.
    """
    if self.__generating:
        # A previous info generator is still consuming the socket; a new
        # command would interleave with its response
        raise NNTPSyncError("Command issued while a generator is active")
    cmd = verb
    if args:
        cmd += " " + args
    cmd += "\r\n"
    self.socket.sendall(cmd)
    try:
        code, message = self.status()
    except NNTPTemporaryError as e:
        # 480 means authentication required; anything else propagates
        if e.code() != 480:
            raise e
        code, message = self.command("AUTHINFO USER", self.username)
        if code == 381:
            code, message = self.command("AUTHINFO PASS", self.password)
        if code != 281:
            raise NNTPReplyError(code, message)
        # Authentication succeeded; retry the original command once
        code, message = self.command(verb, args)
    return code, message
def capabilities(self, keyword=None):
    """CAPABILITIES command.

    Determines the capabilities of the server. RFC3977 makes this command
    mandatory, but not all servers implement it, so NNTPPermanentError may
    be raised.

    See <http://tools.ietf.org/html/rfc3977#section-5.2>

    Args:
        keyword: Passed directly to the server, however, this is unused by
            the server according to RFC3977.

    Returns:
        A list of capabilities supported by the server. The VERSION
        capability is the first capability in the list.
    """
    code, message = self.command("CAPABILITIES", keyword)
    if code != 101:
        raise NNTPReplyError(code, message)
    return [entry.strip() for entry in self.info_gen(code, message)]
def mode_reader(self):
    """MODE READER command.

    Instructs a mode-switching server to switch modes.

    See <http://tools.ietf.org/html/rfc3977#section-5.3>

    Returns:
        Boolean value indicating whether posting is allowed or not.
    """
    code, message = self.command("MODE READER")
    if code not in (200, 201):
        raise NNTPReplyError(code, message)
    # 200 -> posting allowed, 201 -> posting prohibited
    return code == 200
def quit(self):
    """QUIT command.

    Asks the server to close the connection gracefully; once the server
    acknowledges, the local socket is closed as well. Only useful for
    graceful shutdown — if you are in a generator use close() instead.

    Once this method has been called, no other methods of the NNTPClient
    object should be called.

    See <http://tools.ietf.org/html/rfc3977#section-5.4>
    """
    code, message = self.command("QUIT")
    if code != 205:
        raise NNTPReplyError(code, message)
    self.socket.close()
def date(self):
    """DATE command.

    Coordinated Universal Time from the perspective of the usenet server.
    Useful in conjunction with the NEWNEWS command.

    See <http://tools.ietf.org/html/rfc3977#section-7.1>

    Returns:
        The UTC time according to the server as a datetime object.

    Raises:
        NNTPDataError: If the timestamp can't be parsed.
    """
    code, message = self.command("DATE")
    if code != 111:
        raise NNTPReplyError(code, message)
    return date.datetimeobj(message, fmt="%Y%m%d%H%M%S")
def help(self):
    """HELP command.

    Fetches the server's short summary of the commands it understands.

    See <http://tools.ietf.org/html/rfc3977#section-7.2>

    Returns:
        The help text from the server.
    """
    code, message = self.command("HELP")
    if code != 100:
        raise NNTPReplyError(code, message)
    return self.info(code, message)
def newgroups_gen(self, timestamp):
    """Generator for the NEWGROUPS command.

    Generates a list of newsgroups created on the server since the
    specified timestamp.

    See <http://tools.ietf.org/html/rfc3977#section-7.3>

    Args:
        timestamp: Datetime object giving 'created since' datetime.

    Yields:
        A tuple containing the name, low water mark, high water mark,
        and status for the newsgroup.

    Note: If the datetime object supplied as the timestamp is naive (tzinfo
        is None) then it is assumed to be given as GMT.
    """
    if timestamp.tzinfo:
        # Bug fix: the datetime method is astimezone(), not asttimezone()
        # — the old spelling raised AttributeError for aware datetimes
        ts = timestamp.astimezone(date.TZ_GMT)
    else:
        ts = timestamp.replace(tzinfo=date.TZ_GMT)
    args = ts.strftime("%Y%m%d %H%M%S %Z")
    code, message = self.command("NEWGROUPS", args)
    if code != 231:
        raise NNTPReplyError(code, message)
    for line in self.info_gen(code, message):
        yield utils.parse_newsgroup(line)
def newnews_gen(self, pattern, timestamp):
    """Generator for the NEWNEWS command.

    Generates a list of message-ids for articles created since the
    specified timestamp for newsgroups with names matching the given
    pattern.

    See <http://tools.ietf.org/html/rfc3977#section-7.4>

    Args:
        pattern: Glob matching newsgroups of interest.
        timestamp: Datetime object giving 'created since' datetime.

    Yields:
        A message-id as string.

    Note: If the datetime object supplied as the timestamp is naive (tzinfo
        is None) then it is assumed to be given as GMT. If tzinfo is set
        then it will be converted to GMT by this function.
    """
    if timestamp.tzinfo:
        # Bug fix: the datetime method is astimezone(), not asttimezone()
        # — the old spelling raised AttributeError for aware datetimes
        ts = timestamp.astimezone(date.TZ_GMT)
    else:
        ts = timestamp.replace(tzinfo=date.TZ_GMT)
    args = pattern
    args += " " + ts.strftime("%Y%m%d %H%M%S %Z")
    code, message = self.command("NEWNEWS", args)
    if code != 230:
        raise NNTPReplyError(code, message)
    for line in self.info_gen(code, message):
        yield line.strip()
def newnews(self, pattern, timestamp):
    """NEWNEWS command.

    Retrieves a list of message-ids for articles created since the
    specified timestamp for newsgroups with names that match the given
    pattern. See newnews_gen() for more details.

    See <http://tools.ietf.org/html/rfc3977#section-7.4>

    Args:
        pattern: Glob matching newsgroups of interest.
        timestamp: Datetime object giving 'created since' datetime.

    Returns:
        A list of message-ids as given by newnews_gen().
    """
    return list(self.newnews_gen(pattern, timestamp))
def list_active_gen(self, pattern=None):
    """Generator for the LIST ACTIVE command.

    Generates the active newsgroups matching the specified pattern, or all
    active groups when no pattern is given.

    See <http://tools.ietf.org/html/rfc3977#section-7.6.3>

    Args:
        pattern: Glob matching newsgroups of interest.

    Yields:
        A tuple containing the name, low water mark, high water mark,
        and status for the newsgroup.
    """
    # A bare LIST is used when no pattern is supplied
    if pattern is None:
        cmd, args = "LIST", None
    else:
        cmd, args = "LIST ACTIVE", pattern
    code, message = self.command(cmd, args)
    if code != 215:
        raise NNTPReplyError(code, message)
    for line in self.info_gen(code, message):
        yield utils.parse_newsgroup(line)
def list_active_times_gen(self):
    """Generator for the LIST ACTIVE.TIMES command.

    Generates a list of newsgroups including the creation time and who
    created them.

    See <http://tools.ietf.org/html/rfc3977#section-7.6.4>

    Yields:
        A tuple containing the name, creation date as a datetime object and
        creator as a string for the newsgroup.
    """
    code, message = self.command("LIST ACTIVE.TIMES")
    if code != 215:
        raise NNTPReplyError(code, message)
    for line in self.info_gen(code, message):
        fields = line.split()
        try:
            group = fields[0]
            created = date.datetimeobj_epoch(fields[1])
            creator = fields[2]
        except (IndexError, ValueError):
            raise NNTPDataError("Invalid LIST ACTIVE.TIMES")
        yield group, created, creator
def list_newsgroups_gen(self, pattern=None):
    """Generator for the LIST NEWSGROUPS command.

    Generates a list of newsgroups including the name and a short
    description.

    See <http://tools.ietf.org/html/rfc3977#section-7.6.6>

    Args:
        pattern: Glob matching newsgroups of interest.

    Yields:
        A tuple containing the name, and description for the newsgroup.
    """
    args = pattern
    code, message = self.command("LIST NEWSGROUPS", args)
    if code != 215:
        raise NNTPReplyError(code, message)
    for line in self.info_gen(code, message):
        # Bug fix: split at most once — the description is free text that
        # may contain whitespace; splitting on every space kept only the
        # first word of the description
        parts = line.strip().split(None, 1)
        name, description = parts[0], ""
        if len(parts) > 1:
            description = parts[1]
        yield name, description
def list_overview_fmt_gen(self):
    """Generator for the LIST OVERVIEW.FMT command.

    See list_overview_fmt() for more information.

    Yields:
        An element in the list returned by list_overview_fmt(): a tuple of
        (field name, is_full) where is_full indicates the 'full' form.
    """
    code, message = self.command("LIST OVERVIEW.FMT")
    if code != 215:
        raise NNTPReplyError(code, message)
    for line in self.info_gen(code, message):
        try:
            name, suffix = line.rstrip().split(":")
        except ValueError:
            # More or fewer than two colon-separated fields
            raise NNTPDataError("Invalid LIST OVERVIEW.FMT")
        if suffix and not name:
            # Metadata items appear as ':name' (e.g. ':bytes'); swap so
            # the name always ends up in the first position
            name, suffix = suffix, name
        if suffix and suffix != "full":
            raise NNTPDataError("Invalid LIST OVERVIEW.FMT")
        yield (name, suffix == "full")
def list_extensions_gen(self):
    """Generator for the LIST EXTENSIONS command.

    Yields:
        Each supported extension as a stripped string.
    """
    code, message = self.command("LIST EXTENSIONS")
    if code != 202:
        raise NNTPReplyError(code, message)
    for entry in self.info_gen(code, message):
        yield entry.strip()
def list_gen(self, keyword=None, arg=None):
    """Generator for the LIST command.

    See list() for more information.

    Yields:
        An element in the list returned by list().

    Raises:
        NotImplementedError: For unsupported keywords.
    """
    if keyword:
        keyword = keyword.upper()
    # Dispatch to the keyword-specific generator
    if keyword in (None, "ACTIVE"):
        return self.list_active_gen(arg)
    elif keyword == "ACTIVE.TIMES":
        return self.list_active_times_gen()
    elif keyword == "DISTRIB.PATS":
        return self.list_distrib_pats_gen()
    elif keyword == "HEADERS":
        return self.list_headers_gen(arg)
    elif keyword == "NEWSGROUPS":
        return self.list_newsgroups_gen(arg)
    elif keyword == "OVERVIEW.FMT":
        return self.list_overview_fmt_gen()
    elif keyword == "EXTENSIONS":
        return self.list_extensions_gen()
    raise NotImplementedError()
def list(self, keyword=None, arg=None):
    """LIST command.

    A wrapper for all of the other list commands. The output format for
    each keyword can be found in the list function that corresponds to the
    keyword.

    Args:
        keyword: Information requested. Supported keywords are ACTIVE,
            ACTIVE.TIMES, DISTRIB.PATS, HEADERS, NEWSGROUPS, OVERVIEW.FMT
            and EXTENSIONS.
        arg: Pattern or keyword specific argument.

    Raises:
        NotImplementedError: For unsupported keywords.
    """
    return list(self.list_gen(keyword, arg))
def group(self, name):
    """GROUP command.

    Selects a newsgroup as the current group.

    Returns:
        A tuple of estimated article count, first article number, last
        article number and group name, as reported by the server.
    """
    code, message = self.command("GROUP", name)
    if code != 211:
        raise NNTPReplyError(code, message)
    fields = message.split(None, 4)
    try:
        total = int(fields[0])
        first = int(fields[1])
        last = int(fields[2])
        group = fields[3]
    except (IndexError, ValueError):
        raise NNTPDataError("Invalid GROUP status '%s'" % message)
    return total, first, last, group
def next(self):
    """NEXT command.

    Advances the current article pointer to the next article.

    Returns:
        A tuple of article number and message-id.
    """
    code, message = self.command("NEXT")
    if code != 223:
        raise NNTPReplyError(code, message)
    fields = message.split(None, 3)
    try:
        article, ident = int(fields[0]), fields[1]
    except (IndexError, ValueError):
        raise NNTPDataError("Invalid NEXT status")
    return article, ident
def article(self, msgid_article=None, decode=None):
    """ARTICLE command.

    Args:
        msgid_article: A message-id as a string, or an article number as an
            integer. None (the default) uses the current article.
        decode: Whether to yEnc-decode the body. None (the default)
            auto-detects based on the subject header.

    Returns:
        A tuple of article number, headers (as parsed by
        utils.parse_headers) and the body as a string.
    """
    args = None
    if msgid_article is not None:
        args = utils.unparse_msgid_article(msgid_article)
    code, message = self.command("ARTICLE", args)
    if code != 220:
        raise NNTPReplyError(code, message)
    parts = message.split(None, 1)
    try:
        articleno = int(parts[0])
    except (IndexError, ValueError):
        raise NNTPProtocolError(message)
    # headers
    headers = utils.parse_headers(self.info_gen(code, message))
    # Bug fix: an explicitly supplied decode flag used to be clobbered by
    # auto-detection; only auto-detect when the caller passed None
    if decode is None:
        decode = "yEnc" in headers.get("subject", "")
    escape = 0
    crc32 = 0
    # body
    body = []
    for line in self.info_gen(code, message):
        if decode:
            # skip yEnc header/trailer lines (=ybegin, =ypart, =yend)
            if line.startswith("=y"):
                continue
            line, escape, crc32 = yenc.decode(line, escape, crc32)
        body.append(line)
    return articleno, headers, "".join(body)
def head(self, msgid_article=None):
    """HEAD command.

    Args:
        msgid_article: A message-id as a string, or an article number as an
            integer. None (the default) uses the current article.

    Returns:
        The article headers as parsed by utils.parse_headers.
    """
    if msgid_article is None:
        args = None
    else:
        args = utils.unparse_msgid_article(msgid_article)
    code, message = self.command("HEAD", args)
    if code != 221:
        raise NNTPReplyError(code, message)
    return utils.parse_headers(self.info_gen(code, message))
def body(self, msgid_article=None, decode=False):
    """BODY command.

    Args:
        msgid_article: A message-id as a string, or an article number as an
            integer. None (the default) uses the current article.
        decode: If True, yEnc-decode the body.

    Returns:
        The article body as a string.
    """
    if msgid_article is None:
        args = None
    else:
        args = utils.unparse_msgid_article(msgid_article)
    code, message = self.command("BODY", args)
    if code != 222:
        raise NNTPReplyError(code, message)
    escape, crc32 = 0, 0
    chunks = []
    for line in self.info_gen(code, message):
        if decode:
            # skip yEnc header/trailer lines
            if line.startswith("=y"):
                continue
            line, escape, crc32 = yenc.decode(line, escape, crc32)
        chunks.append(line)
    return "".join(chunks)
def xgtitle(self, pattern=None):
    """XGTITLE command.

    Args:
        pattern: Optional glob matching newsgroups of interest.

    Returns:
        The raw textual response.
    """
    code, message = self.command("XGTITLE", pattern)
    if code != 282:
        raise NNTPReplyError(code, message)
    return self.info(code, message)
def xhdr(self, header, msgid_range=None):
    """XHDR command.

    Args:
        header: The header field to retrieve.
        msgid_range: A message-id as a string, or an article number as an
            integer, or a tuple specifying a range of article numbers in
            the form (first, [last]). None (the default) uses the current
            article.

    Returns:
        The raw textual response.
    """
    args = header
    # Bug fix: this previously tested the *builtin* `range` (always
    # truthy) instead of the msgid_range parameter, so
    # unparse_msgid_range(None) was called when no range was given
    if msgid_range is not None:
        args += " " + utils.unparse_msgid_range(msgid_range)
    code, message = self.command("XHDR", args)
    if code != 221:
        raise NNTPReplyError(code, message)
    return self.info(code, message)
def xzhdr(self, header, msgid_range=None):
    """XZHDR command — compressed version of XHDR.

    Args:
        header: The header field to retrieve.
        msgid_range: A message-id as a string, or an article number as an
            integer, or a tuple specifying a range of article numbers in
            the form (first, [last]) — if last is omitted then all articles
            after first are included. None (the default) uses the current
            article.

    Returns:
        The decompressed textual response.
    """
    args = header
    if msgid_range is not None:
        args = args + " " + utils.unparse_msgid_range(msgid_range)
    code, message = self.command("XZHDR", args)
    if code != 221:
        raise NNTPReplyError(code, message)
    return self.info(code, message, compressed=True)
def xover_gen(self, range=None):
    """Generator for the XOVER command.

    The XOVER command returns information from the overview database for
    the article(s) specified.

    <http://tools.ietf.org/html/rfc2980#section-2.8>

    Args:
        range: An article number as an integer, or a tuple specifying a
            range of article numbers in the form (first, [last]). If last
            is omitted then all articles after first are included. None
            (the default) uses the current article.

    Yields:
        A list of fields as given by the overview database for each
        available article in the specified range. The field layout can be
        determined with the LIST OVERVIEW.FMT command if the server
        supports it.

    Raises:
        NNTPReplyError: If no such article exists or the currently selected
            newsgroup is invalid.
    """
    # NOTE: the parameter shadows the builtin `range`; kept for
    # backward compatibility with existing callers
    args = utils.unparse_range(range) if range is not None else None
    code, message = self.command("XOVER", args)
    if code != 224:
        raise NNTPReplyError(code, message)
    for line in self.info_gen(code, message):
        yield line.rstrip().split("\t")
def xpat_gen(self, header, msgid_range, *pattern):
    """Generator for the XPAT command.

    Yields:
        Each matching header line, stripped.
    """
    fields = [header, utils.unparse_msgid_range(msgid_range)]
    fields.extend(pattern)
    code, message = self.command("XPAT", " ".join(fields))
    if code != 221:
        raise NNTPReplyError(code, message)
    for entry in self.info_gen(code, message):
        yield entry.strip()
def xpat(self, header, id_range, *pattern):
    """XPAT command.

    Returns:
        The matches produced by xpat_gen() as a list.
    """
    return list(self.xpat_gen(header, id_range, *pattern))
def xfeature_compress_gzip(self, terminator=False):
    """XFEATURE COMPRESS GZIP command.

    Enables gzip compression of info responses on servers that support it.

    Args:
        terminator: Request that the terminating line be part of the
            compressed payload.

    Returns:
        True if the feature was enabled.
    """
    args = "TERMINATOR" if terminator else None
    code, message = self.command("XFEATURE COMPRESS GZIP", args)
    if code != 290:
        raise NNTPReplyError(code, message)
    return True
def post(self, headers={}, body=""):
    """POST command.

    Args:
        headers: A dictionary of headers. (NOTE(review): mutable default
            argument; it is only read here, never mutated, so it is safe,
            but a None default would be more conventional.)
        body: A string or file like object containing the post content.

    Raises:
        NNTPDataError: If binary characters are detected in the message
            body.

    Returns:
        A value that evaluates to true if posting the message succeeded.
        If a message-id is identified in the server's response then that
        message-id is returned, otherwise True. (Returning the message-id
        is a common server behaviour, not part of any specification.)

    Note:
        '\\n' line terminators are converted to '\\r\\n'.

    Note:
        Due to protocol issues, if illegal characters are found in the body
        the message will still be posted but will be truncated as soon as
        an illegal character is detected. No illegal characters are sent to
        the server. Illegal characters include embedded carriage returns
        '\\r' and null characters '\\0' (embedded line feeds are not an
        issue because they are converted to CRLF).
    """
    code, message = self.command("POST")
    if code != 340:
        raise NNTPReplyError(code, message)
    # send headers
    hdrs = utils.unparse_headers(headers)
    self.socket.sendall(hdrs)
    if isinstance(body, basestring):
        # Wrap a plain string so the loop below can iterate it line-wise
        body = cStringIO.StringIO(body)
    # send body
    illegal = False
    for line in body:
        if line.startswith("."):
            # Dot-stuffing: escape a leading period so it is not taken as
            # the response terminator
            line = "." + line
        if line.endswith("\r\n"):
            line = line[:-2]
        elif line.endswith("\n"):
            line = line[:-1]
        if any(c in line for c in "\0\r"):
            # Stop sending; the termination line below still closes the
            # post cleanly, then we raise after reading the status
            illegal = True
            break
        self.socket.sendall(line + "\r\n")
    self.socket.sendall(".\r\n")
    # get status
    code, message = self.status()
    # check if illegal characters were detected
    if illegal:
        raise NNTPDataError("Illegal characters found")
    # check status
    if code != 240:
        raise NNTPReplyError(code, message)
    # return message-id possible
    message_id = message.split(None, 1)[0]
    if message_id.startswith("<") and message_id.endswith(">"):
        return message_id
    return True
def _lower(v):
"""assumes that classes that inherit list, tuple or dict have a constructor
that is compatible with those base classes. If you are using classes that
don't satisfy this requirement you can subclass them and add a lower()
method for the class"""
if hasattr(v, "lower"):
return v.lower()
if isinstance(v, (list, tuple)):
return v.__class__(_lower(x) for x in v)
if isinstance(v, dict):
return v.__class__(_lower(v.items()))
return v |
def I(r, limbdark):
    '''
    The standard quadratic limb darkening law.

    :param ndarray r: The radius vector
    :param limbdark: A :py:class:`pysyzygy.transit.LIMBDARK` instance containing
                     the limb darkening law information
    :returns: The stellar intensity as a function of `r`
    '''
    model = limbdark.ldmodel
    if model == QUADRATIC:
        u1, u2 = limbdark.u1, limbdark.u2
    elif model == KIPPING:
        # Map the Kipping (q1, q2) parameterization onto (u1, u2)
        a = np.sqrt(limbdark.q1)
        b = 2 * limbdark.q2
        u1, u2 = a * b, a * (1 - b)
    elif model == NONLINEAR:
        raise Exception('Nonlinear model not yet implemented!')  # TODO!
    else:
        raise Exception('Invalid limb darkening model.')
    # Quadratic law, normalized so the disk-integrated flux is unity
    mu = np.sqrt(1 - r ** 2)
    return (1 - u1 * (1 - mu) - u2 * (1 - mu) ** 2) / (1 - u1 / 3 - u2 / 6) / np.pi
def PlotTransit(compact = False, ldplot = True, plottitle = "",
                xlim = None, binned = True, **kwargs):
    '''
    Plots a light curve described by `kwargs`.

    :param bool compact: Display the compact version of the plot? Default `False`
    :param bool ldplot: Display the limb darkening inset? Default `True`
    :param str plottitle: The title of the plot. Default `""`
    :param float xlim: The half-width of the orbit plot in stellar radii. Default is to \
    auto adjust this
    :param bool binned: Bin the light curve model to the exposure time? Default `True`
    :param kwargs: Any keyword arguments to be passed to :py:func:`pysyzygy.transit.Transit`
    :returns fig: The :py:mod:`matplotlib` figure object
    '''
    # Plotting: ax1 = light curve, ax2 = sky-projected orbit
    fig = pl.figure(figsize = (12,8))
    fig.subplots_adjust(hspace=0.3)
    ax1, ax2 = pl.subplot(211), pl.subplot(212)
    if not compact:
        # Leave room on the right for the insets and parameter table
        fig.subplots_adjust(right = 0.7)
    t0 = kwargs.pop('t0', 0.)
    trn = Transit(**kwargs)
    try:
        trn.Compute()
        notransit = False
    except Exception as e:
        # Non-transiting configurations are tolerated; anything else re-raises
        if str(e) == "Object does not transit the star.":
            notransit = True
        else: raise Exception(e)
    time = trn.arrays.time + t0
    if not notransit:
        if binned:
            trn.Bin()
            flux = trn.arrays.bflx
        else:
            flux = trn.arrays.flux
        time = np.concatenate(([-1.e5], time, [1.e5])) # Add baseline on each side
        flux = np.concatenate(([1.], flux, [1.]))
        ax1.plot(time, flux, '-', color='DarkBlue')
        rng = np.max(flux) - np.min(flux)
        if rng > 0:
            ax1.set_ylim(np.min(flux) - 0.1*rng, np.max(flux) + 0.1*rng)
            # Zoom the x axis onto the transit dip (first/last departure from 1)
            left = np.argmax(flux < (1. - 1.e-8))
            right = np.argmax(flux[left:] > (1. - 1.e-8)) + left
            rng = time[right] - time[left]
            ax1.set_xlim(time[left] - rng, time[right] + rng)
    ax1.set_xlabel('Time (Days)', fontweight='bold')
    ax1.set_ylabel('Normalized Flux', fontweight='bold')
    # Adjust these for full-orbit plotting
    maxpts = kwargs.get('maxpts', 10000); kwargs.update({'maxpts': maxpts})
    per = kwargs.get('per', 10.); kwargs.update({'per': per})
    kwargs.update({'fullorbit': True})
    kwargs.update({'exppts': 30})
    kwargs.update({'exptime': 50 * per / maxpts})
    trn = Transit(**kwargs)
    try:
        trn.Compute()
    except Exception as e:
        if str(e) == "Object does not transit the star.":
            pass
        else: raise Exception(e)
    # Sky-projected motion
    x = trn.arrays.x
    y = trn.arrays.y
    z = trn.arrays.z
    inc = (np.arccos(trn.transit.bcirc/trn.transit.aRs)*180./np.pi) # Orbital inclination
    # Mask the star: hide orbit samples that pass behind the stellar disk
    for j in range(len(x)):
        if (x[j]**2 + y[j]**2) < 1. and (z[j] > 0):
            x[j] = np.nan
            y[j] = np.nan
    # The star: draw as concentric limb-darkened circles, outside in
    r = np.linspace(0,1,100)
    Ir = I(r,trn.limbdark)/I(0,trn.limbdark)
    for ri,Iri in zip(r[::-1],Ir[::-1]):
        star = pl.Circle((0, 0), ri, color=str(0.95*Iri), alpha=1.)
        ax2.add_artist(star)
    # Inset: Limb darkening
    if ldplot:
        if compact:
            inset1 = pl.axes([0.145, 0.32, .09, .1])
        else:
            inset1 = fig.add_axes([0.725,0.3,0.2,0.15])
        inset1.plot(r,Ir,'k-')
        pl.setp(inset1, xlim=(-0.1,1.1), ylim=(-0.1,1.1), xticks=[0,1], yticks=[0,1])
        for tick in inset1.xaxis.get_major_ticks() + inset1.yaxis.get_major_ticks():
            tick.label.set_fontsize(8)
        inset1.set_ylabel(r'I/I$_0$', fontsize=8, labelpad=-8)
        inset1.set_xlabel(r'r/R$_\star$', fontsize=8, labelpad=-8)
        inset1.set_title('Limb Darkening', fontweight='bold', fontsize=9)
    # Inset: Top view of orbit
    if compact:
        inset2 = pl.axes([0.135, 0.115, .1, .1])
    else:
        inset2 = fig.add_axes([0.725,0.1,0.2,0.15])
    pl.setp(inset2, xticks=[], yticks=[])
    trn.transit.bcirc = trn.transit.aRs # This ensures we are face-on
    try:
        trn.Compute()
    except Exception as e:
        if str(e) == "Object does not transit the star.":
            pass
        else: raise Exception(e)
    xp = trn.arrays.x
    yp = trn.arrays.y
    inset2.plot(xp, yp, '-', color='DarkBlue', alpha=0.5)
    # Draw some invisible dots at the corners to set the window size
    xmin, xmax, ymin, ymax = np.nanmin(xp), np.nanmax(xp), np.nanmin(yp), np.nanmax(yp)
    xrng = xmax - xmin
    yrng = ymax - ymin
    xmin -= 0.1*xrng; xmax += 0.1*xrng;
    ymin -= 0.1*yrng; ymax += 0.1*yrng;
    inset2.scatter([xmin,xmin,xmax,xmax], [ymin,ymax,ymin,ymax], alpha = 0.)
    # Plot the star
    for ri,Iri in zip(r[::-10],Ir[::-10]):
        star = pl.Circle((0, 0), ri, color=str(0.95*Iri), alpha=1.)
        inset2.add_artist(star)
    # Plot the planet: find the sample nearest x=0 on the near side
    # (loop skips candidates with ycenter > 0, i.e. behind the star)
    ycenter = yp[np.where(np.abs(xp) == np.nanmin(np.abs(xp)))][0]
    while ycenter > 0:
        xp[np.where(np.abs(xp) == np.nanmin(np.abs(xp)))] = np.nan
        ycenter = yp[np.where(np.abs(xp) == np.nanmin(np.abs(xp)))][0]
    planet = pl.Circle((0, ycenter), trn.transit.RpRs, color='DarkBlue', alpha=1.)
    inset2.add_artist(planet)
    inset2.set_title('Top View', fontweight='bold', fontsize=9)
    inset2.set_aspect('equal','datalim')
    # The orbit itself (thinner line for long periods)
    with np.errstate(invalid='ignore'):
        ax2.plot(x, y, '-', color='DarkBlue', lw = 1. if per < 30. else
                 max(1. - (per - 30.) / 100., 0.3) )
    # The planet
    with np.errstate(invalid = 'ignore'):
        ycenter = y[np.where(np.abs(x) == np.nanmin(np.abs(x)))][0]
        while ycenter > 0:
            x[np.where(np.abs(x) == np.nanmin(np.abs(x)))] = np.nan
            ycenter = y[np.where(np.abs(x) == np.nanmin(np.abs(x)))][0]
    planet = pl.Circle((0, ycenter), trn.transit.RpRs, color='DarkBlue', alpha=1.)
    ax2.add_artist(planet)
    # Force aspect
    if xlim is None:
        xlim = 1.1 * max(np.nanmax(x), np.nanmax(-x))
    ax2.set_ylim(-xlim/3.2,xlim/3.2)
    ax2.set_xlim(-xlim,xlim)
    ax2.set_xlabel(r'X (R$_\star$)', fontweight='bold')
    ax2.set_ylabel(r'Y (R$_\star$)', fontweight='bold')
    ax1.set_title(plottitle, fontsize=12)
    if not compact:
        rect = 0.725,0.55,0.2,0.35
        ax3 = fig.add_axes(rect)
        ax3.xaxis.set_visible(False)
        ax3.yaxis.set_visible(False)
        # Table of parameters
        ltable = [ r'$P:$',
                   r'$e:$',
                   r'$i:$',
                   r'$\omega:$',
                   r'$\rho_\star:$',
                   r'$M_p:$',
                   r'$R_p:$',
                   r'$q_1:$',
                   r'$q_2:$']
        rtable = [ r'$%.4f\ \mathrm{days}$' % trn.transit.per,
                   r'$%.5f$' % trn.transit.ecc,
                   r'$%.4f^\circ$' % inc,
                   r'$%.3f^\circ$' % (trn.transit.w*180./np.pi),
                   r'$%.5f\ \mathrm{g/cm^3}$' % trn.transit.rhos,
                   r'$%.5f\ M_\star$' % trn.transit.MpMs,
                   r'$%.5f\ R_\star$' % trn.transit.RpRs,
                   r'$%.5f$' % trn.limbdark.q1,
                   r'$%.5f$' % trn.limbdark.q2]
        yt = 0.875
        for l,r in zip(ltable, rtable):
            ax3.annotate(l, xy=(0.25, yt), xycoords="axes fraction", ha='right', fontsize=16)
            ax3.annotate(r, xy=(0.35, yt), xycoords="axes fraction", fontsize=16)
            yt -= 0.1
    return fig
def _offset(value):
"""Parse timezone to offset in seconds.
Args:
value: A timezone in the '+0000' format. An integer would also work.
Returns:
The timezone offset from GMT in seconds as an integer.
"""
o = int(value)
if o == 0:
return 0
a = abs(o)
s = a*36+(a%100)*24
return (o//a)*s |
def timestamp_d_b_Y_H_M_S(value):
    """Convert timestamp string to time in seconds since epoch.

    Handles timestamps like '18 Jun 2013 12:00:00 GMT' (format
    '%d %b %Y %H:%M:%S GMT').

    Args:
        value: A timestamp string in the format '%d %b %Y %H:%M:%S GMT'.

    Returns:
        The time in seconds since epoch as an integer.

    Raises:
        ValueError: If timestamp is invalid.
        KeyError: If the abbreviated month is invalid.

    Note: The timezone is ignored; it is simply assumed to be UTC/GMT.
    """
    day, month, year, clock, zone = value.split()
    hour, minute, second = clock.split(":")
    fields = (int(year), _months[month.lower()], int(day),
              int(hour), int(minute), int(second), 0, 0, 0)
    return int(calendar.timegm(fields))
def datetimeobj_d_b_Y_H_M_S(value):
    """Convert timestamp string to a datetime object.

    Handles timestamps like '18 Jun 2013 12:00:00 GMT' (format
    '%d %b %Y %H:%M:%S GMT').

    Args:
        value: A timestamp string in the format '%d %b %Y %H:%M:%S GMT'.

    Returns:
        A timezone-aware (GMT) datetime object.

    Raises:
        ValueError: If timestamp is invalid.
        KeyError: If the abbreviated month is invalid.

    Note: The timezone is ignored; it is simply assumed to be UTC/GMT.
    """
    day, month, year, clock, zone = value.split()
    hour, minute, second = clock.split(":")
    return datetime.datetime(int(year), _months[month.lower()], int(day),
                             int(hour), int(minute), int(second),
                             tzinfo=TZ_GMT)
def timestamp_a__d_b_Y_H_M_S_z(value):
    """Convert timestamp string to time in seconds since epoch.

    Handles timestamps like 'Tue, 18 Jun 2013 22:00:00 +1000' (format
    '%a, %d %b %Y %H:%M:%S %z').

    Args:
        value: A timestamp string in the format '%a, %d %b %Y %H:%M:%S %z'.

    Returns:
        The time in seconds since epoch as an integer.

    Raises:
        ValueError: If timestamp is invalid.
        KeyError: If the abbreviated month is invalid.
    """
    weekday, day, month, year, clock, zone = value.split()
    hour, minute, second = clock.split(":")
    base = calendar.timegm((int(year), _months[month.lower()], int(day),
                            int(hour), int(minute), int(second), 0, 0, 0))
    # Subtract the numeric zone offset to normalize to UTC
    return int(base) - _offset(zone)
def datetimeobj_a__d_b_Y_H_M_S_z(value):
    """Convert timestamp string to a datetime object.

    Handles timestamps like 'Tue, 18 Jun 2013 22:00:00 +1000' (format
    '%a, %d %b %Y %H:%M:%S %z').

    Args:
        value: A timestamp string in the format '%a, %d %b %Y %H:%M:%S %z'.

    Returns:
        A timezone-aware datetime object.

    Raises:
        ValueError: If timestamp is invalid.
        KeyError: If the abbreviated month is invalid.
    """
    weekday, day, month, year, clock, zone = value.split()
    hour, minute, second = clock.split(":")
    tz = dateutil.tz.tzoffset(None, _offset(zone))
    return datetime.datetime(int(year), _months[month.lower()], int(day),
                             int(hour), int(minute), int(second), tzinfo=tz)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.