| code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64) |
|---|---|---|---|---|---|
parser = argparse.ArgumentParser(
description='Transform Pylint JSON report to HTML')
parser.add_argument(
'filename',
metavar='FILENAME',
type=argparse.FileType('r'),
nargs='?',
default=sys.stdin,
help='Pylint JSON report input file (or stdin)')
parser.add_argument(
'-o', '--output',
metavar='FILENAME',
type=argparse.FileType('w'),
default=sys.stdout,
help='Pylint HTML report output file (or stdout)')
parser.add_argument(
'-f', '--input-format',
metavar='FORMAT',
choices=[SIMPLE_JSON, EXTENDED_JSON],
action='store',
dest='input_format',
default='json',
help='Pylint JSON Report input type (json or jsonextended)')
return parser
|
def build_command_parser()
|
Build command parser using ``argparse`` module.
| 2.726638
| 2.72485
| 1.000656
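For illustration, a minimal standalone sketch (not the project's actual module) of the same stdin/stdout fallback pattern that build_command_parser relies on:

import argparse
import sys

# Positional file argument defaults to stdin; -o defaults to stdout.
parser = argparse.ArgumentParser()
parser.add_argument('filename', nargs='?', type=argparse.FileType('r'), default=sys.stdin)
parser.add_argument('-o', '--output', type=argparse.FileType('w'), default=sys.stdout)
opts = parser.parse_args([])  # no arguments given
assert opts.filename is sys.stdin and opts.output is sys.stdout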
|
parser = build_command_parser()
options = parser.parse_args()
file_pointer = options.filename
input_format = options.input_format
with file_pointer:
json_data = json.load(file_pointer)
if input_format == SIMPLE_JSON:
report = Report(json_data)
elif input_format == EXTENDED_JSON:
report = Report(
json_data.get('messages'),
json_data.get('stats'),
json_data.get('previous'))
print(report.render(), file=options.output)
|
def main()
|
Pylint JSON to HTML Main Entry Point
| 3.570026
| 3.37255
| 1.058554
|
template = self.get_template()
return template.render(
messages=self._messages,
metrics=self.metrics,
report=self)
|
def render(self)
|
Render report to HTML
| 8.213794
| 5.985428
| 1.372299
|
self._messages.append({
'type': msg.category,
'module': msg.module,
'obj': msg.obj,
'line': msg.line,
'column': msg.column,
'path': msg.path,
'symbol': msg.symbol,
'message': str(msg.msg) or '',
'message-id': msg.msg_id,
})
|
def handle_message(self, msg)
|
Store new message for later use.
.. seealso:: :meth:`~JsonExtendedReporter.on_close`
| 3.340867
| 3.323148
| 1.005332
|
reports = {
'messages': self._messages,
'stats': stats,
'previous': previous_stats,
}
print(json.dumps(reports, cls=JSONSetEncoder, indent=4), file=self.out)
|
def on_close(self, stats, previous_stats)
|
Print the extended JSON report to reporter's output.
:param dict stats: Metrics for the current pylint run
:param dict previous_stats: Metrics for the previous pylint run
| 4.877794
| 4.606254
| 1.05895
|
def triangulate(points):
# Remove duplicate xy points, since duplicates would make the Delaunay
# triangulation fail; remember z (if any) so originals can be retrieved
# from the index results
seen = set()
uniqpoints = [p for p in points if str(p[:2]) not in seen and not seen.add(str(p[:2]))]
classpoints = [_Point(*point[:2]) for point in uniqpoints]
# Compute the Delaunay triangulation
triangle_ids = tesselator.computeDelaunayTriangulation(classpoints)
# Get vertices from result indexes
triangles = [[uniqpoints[i] for i in triangle] for triangle in triangle_ids]
return triangles
|
Connects an input list of xy tuples with lines forming a set of
smallest possible Delaunay triangles between them.
Arguments:
- **points**: A list of xy or xyz point tuples to triangulate.
Returns:
- A list of triangle polygons. If the input coordinate points contained
a third z value then the output triangles will also have these z values.
| null | null | null |
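The deduplication one-liner above leans on set.add() returning None (which is falsy); an equivalent, more explicit sketch of the same xy-dedup step:

def unique_by_xy(points):
    seen = set()
    out = []
    for p in points:
        key = str(p[:2])  # compare on (x, y) only; keep z for later lookup
        if key not in seen:
            seen.add(key)
            out.append(p)
    return out

assert unique_by_xy([(0, 0, 5), (0, 0, 9), (1, 2)]) == [(0, 0, 5), (1, 2)]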
|
# Remove duplicate xy points, since duplicates would make the Delaunay
# triangulation fail; remember z (if any) so originals can be retrieved
# from the index results
seen = set()
uniqpoints = [p for p in points if str(p[:2]) not in seen and not seen.add(str(p[:2]))]
classpoints = [_Point(*point[:2]) for point in uniqpoints]
# Create fake sitepoints around the point extent to correct for infinite polygons
# For a similar approach and problem see: http://gis.stackexchange.com/questions/11866/voronoi-polygons-that-run-out-to-infinity
xs, ys = list(zip(*uniqpoints))[:2]
pointswidth = max(xs) - min(xs)
pointsheight = max(ys) - min(ys)
xbuff, ybuff = (pointswidth / 100.0 * buffer_percent, pointsheight / 100.0 * buffer_percent)
midx, midy = (sum(xs) / float(len(xs)), sum(ys) / float(len(ys)))
#bufferbox = [(midx-xbuff,midy-ybuff),(midx+xbuff,midy-ybuff),(midx+xbuff,midy+ybuff),(midx-xbuff,midy+ybuff)] # corner buffer
bufferbox = [(midx - xbuff, midy), (midx + xbuff, midy), (midx, midy + ybuff), (midx, midy - ybuff)]  # mid sides buffer
classpoints.extend([_Point(*corner) for corner in bufferbox])
# Compute Voronoi
vertices,edges,poly_dict = tesselator.computeVoronoiDiagram(classpoints)
# Turn unordered result edges into ordered polygons
polygons = list()
for sitepoint,polyedges in list(poly_dict.items()):
polyedges = [edge[1:] for edge in polyedges]
poly = list()
firststart,firstend = polyedges.pop(0)
poly.append(firstend)
while polyedges:
curend = poly[-1]
for i,other in enumerate(polyedges):
otherstart,otherend = other
if otherstart == curend:
poly.append(otherend)
polyedges.pop(i)
break
elif otherend == curend:
poly.append(otherstart)
polyedges.pop(i)
break
# Get vertices from indexes
try: sitepoint = uniqpoints[sitepoint]
except IndexError:
sitepoint = None # fake bbox sitepoints shouldn't be in the results
poly = [vertices[vi] for vi in poly if vi != -1]
polygons.append((sitepoint, poly))
# Maybe clip parts of polygons that stick outside screen?
# ...
return polygons
|
def voronoi(points, buffer_percent=100)
|
Surrounds each point in an input list of xy tuples with a
unique Voronoi polygon.
Arguments:
- **points**: A list of xy or xyz point tuples to triangulate.
- **buffer_percent** (optional): Controls how much larger than the
original bounding box of the input points to make the bounding box of the
fake points, to compensate for missing values around the edges (default is 100 percent).
Returns:
- Returns a list of 2-tuples, with the first item in each tuple being the
original input point (or None for each corner of the bounding box buffer),
and the second item being the point's corresponding Voronoi polygon.
| 4.490158
| 4.423324
| 1.015109
|
if len(val1) != len(val2):
return False
result = 0
for x, y in zip(val1, val2):
result |= ord(x) ^ ord(y)
return result == 0
|
def equals(val1, val2)
|
Returns True if the two strings are equal, False otherwise.
The time taken is independent of the number of characters that match.
For the sake of simplicity, this function executes in constant time only
when the two strings have the same length. It short-circuits when they
have different lengths.
| 1.847628
| 1.92827
| 0.958179
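Python's standard library offers the same guarantee via hmac.compare_digest; a quick sanity check of the function above against it (assuming equals is in scope):

import hmac

assert equals('abc', 'abc') and not equals('abc', 'abd')
assert hmac.compare_digest('abc', 'abd') == equals('abc', 'abd')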
|
''' Encodes an integer in variable-length format, appending the bytes to data.'''
if n < 0:
raise ValueError('only support positive integer')
while True:
this_byte = n & 0x7f
n >>= 7
if n == 0:
data.append(this_byte)
break
data.append(this_byte | 0x80)
|
def _encode_uvarint(data, n)
|
Encodes an integer in variable-length format, appending the bytes to data.
| 2.988739
| 2.418808
| 1.235624
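A hedged sketch of the matching decoder (the library's own _decode_uvarint, used further below, is assumed to return the value plus the number of bytes consumed), with a round-trip check against _encode_uvarint above:

def decode_uvarint_sketch(data):
    '''Return (value, bytes_consumed) for a uvarint at the start of data.'''
    n, shift = 0, 0
    for i, byte in enumerate(bytearray(data)):
        n |= (byte & 0x7f) << shift
        if not byte & 0x80:  # high bit clear: final byte
            return n, i + 1
        shift += 7
    raise ValueError('truncated uvarint')

buf = bytearray()
_encode_uvarint(buf, 300)  # encodes as 0xac 0x02
assert decode_uvarint_sketch(buf) == (300, 2)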
|
''' Parses a sequence of packets in data.
The sequence is terminated by a packet with a field type of EOS
:param data: bytes to be deserialized.
:return: the rest of data and an array of packet V2
'''
from pymacaroons.exceptions import MacaroonDeserializationException
prev_field_type = -1
packets = []
while True:
if len(data) == 0:
raise MacaroonDeserializationException(
'section extends past end of buffer')
rest, packet = self._parse_packet_v2(data)
if packet.field_type == self._EOS:
return rest, packets
if packet.field_type <= prev_field_type:
raise MacaroonDeserializationException('fields out of order')
packets.append(packet)
prev_field_type = packet.field_type
data = rest
|
def _parse_section_v2(self, data)
|
Parses a sequence of packets in data.
The sequence is terminated by a packet with a field type of EOS
:param data: bytes to be deserialized.
:return: the rest of data and an array of packet V2
| 4.791882
| 2.420735
| 1.979515
|
''' Parses a V2 data packet at the start of the given data.
The format of a packet is as follows:
field_type(varint) payload_len(varint) data[payload_len bytes]
apart from EOS which has no payload_len or data (it's a single zero
byte).
:param data:
:return: rest of data, PacketV2
'''
from pymacaroons.exceptions import MacaroonDeserializationException
ft, n = _decode_uvarint(data)
data = data[n:]
if ft == self._EOS:
return data, PacketV2(ft, None)
payload_len, n = _decode_uvarint(data)
data = data[n:]
if payload_len > len(data):
raise MacaroonDeserializationException(
'field data extends past end of buffer')
return data[payload_len:], PacketV2(ft, data[0:payload_len])
|
def _parse_packet_v2(self, data)
|
Parses a V2 data packet at the start of the given data.
The format of a packet is as follows:
field_type(varint) payload_len(varint) data[payload_len bytes]
apart from EOS which has no payload_len or data (it's a single zero
byte).
:param data:
:return: rest of data, PacketV2
| 6.069051
| 2.349208
| 2.583446
|
''' Return a new discharge macaroon bound to the receiving macaroon's
current signature so that it can be used in a request.
This must be done before a discharge macaroon is sent to a server.
:param discharge_macaroon:
:return: bound discharge macaroon
'''
protected = discharge_macaroon.copy()
return HashSignaturesBinder(self).bind(protected)
|
def prepare_for_request(self, discharge_macaroon)
|
Return a new discharge macaroon bound to the receiving macaroon's
current signature so that it can be used in a request.
This must be done before a discharge macaroon is sent to a server.
:param discharge_macaroon:
:return: bound discharge macaroon
| 7.151454
| 2.697423
| 2.651217
|
''' Return a caveat as a dictionary for export as the JSON
macaroon v1 format.
'''
serialized = {}
if len(c.caveat_id) > 0:
serialized['cid'] = c.caveat_id
if c.verification_key_id:
serialized['vid'] = utils.raw_urlsafe_b64encode(
c.verification_key_id).decode('utf-8')
if c.location:
serialized['cl'] = c.location
return serialized
|
def _caveat_v1_to_dict(c)
|
Return a caveat as a dictionary for export as the JSON
macaroon v1 format.
| 4.375601
| 2.990641
| 1.463098
|
''' Return a caveat as a dictionary for export as the JSON
macaroon v2 format.
'''
serialized = {}
if len(c.caveat_id_bytes) > 0:
_add_json_binary_field(c.caveat_id_bytes, serialized, 'i')
if c.verification_key_id:
_add_json_binary_field(c.verification_key_id, serialized, 'v')
if c.location:
serialized['l'] = c.location
return serialized
|
def _caveat_v2_to_dict(c)
|
Return a caveat as a dictionary for export as the JSON
macaroon v2 format.
| 4.404077
| 3.081987
| 1.428973
|
''' Set the given field to the given val (a bytearray) in the serialized
dictionary.
If the value isn't valid utf-8, we base64 encode it and use field+"64"
as the field name.
'''
try:
val = b.decode("utf-8")
serialized[field] = val
except UnicodeDecodeError:
val = utils.raw_urlsafe_b64encode(b).decode('utf-8')
serialized[field + '64'] = val
|
def _add_json_binary_field(b, serialized, field)
|
Set the given field to the given val (a bytearray) in the serialized
dictionary.
If the value isn't valid utf-8, we base64 encode it and use field+"64"
as the field name.
| 5.023871
| 2.145607
| 2.341469
|
''' Read the value of a JSON field that may be string or base64-encoded.
'''
val = deserialized.get(field)
if val is not None:
return utils.convert_to_bytes(val)
val = deserialized.get(field + '64')
if val is None:
return None
return utils.raw_urlsafe_b64decode(val)
|
def _read_json_binary_field(deserialized, field)
|
Read the value of a JSON field that may be string or base64-encoded.
| 3.910354
| 2.955463
| 1.323094
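Self-contained stand-ins for the two helpers above, assuming raw_urlsafe_b64encode/b64decode behave like URL-safe base64 (a hedged sketch, not the library's exact code):

import base64

def add_json_binary_field(b, serialized, field):
    try:
        serialized[field] = b.decode('utf-8')
    except UnicodeDecodeError:
        serialized[field + '64'] = base64.urlsafe_b64encode(b).decode('utf-8')

def read_json_binary_field(deserialized, field):
    val = deserialized.get(field)
    if val is not None:
        return val.encode('utf-8')
    val = deserialized.get(field + '64')
    return None if val is None else base64.urlsafe_b64decode(val)

d = {}
add_json_binary_field(b'\xff\x00', d, 'v')  # not valid UTF-8: stored as 'v64'
assert 'v64' in d and read_json_binary_field(d, 'v') == b'\xff\x00'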
|
'''Serialize the macaroon in JSON format indicated by the version field.
@param m the macaroon to serialize.
@return JSON macaroon.
'''
from pymacaroons import macaroon
if m.version == macaroon.MACAROON_V1:
return self._serialize_v1(m)
return self._serialize_v2(m)
|
def serialize(self, m)
|
Serialize the macaroon in JSON format indicated by the version field.
@param m the macaroon to serialize.
@return JSON macaroon.
| 4.400939
| 2.415588
| 1.821891
|
'''Serialize the macaroon in JSON format v1.
@param macaroon the macaroon to serialize.
@return JSON macaroon.
'''
serialized = {
'identifier': utils.convert_to_string(macaroon.identifier),
'signature': macaroon.signature,
}
if macaroon.location:
serialized['location'] = macaroon.location
if macaroon.caveats:
serialized['caveats'] = [
_caveat_v1_to_dict(caveat) for caveat in macaroon.caveats
]
return json.dumps(serialized)
|
def _serialize_v1(self, macaroon)
|
Serialize the macaroon in JSON format v1.
@param macaroon the macaroon to serialize.
@return JSON macaroon.
| 2.700497
| 2.12503
| 1.270804
|
'''Serialize the macaroon in JSON format v2.
@param macaroon the macaroon to serialize.
@return JSON macaroon in v2 format.
'''
serialized = {}
_add_json_binary_field(macaroon.identifier_bytes, serialized, 'i')
_add_json_binary_field(binascii.unhexlify(macaroon.signature_bytes),
serialized, 's')
if macaroon.location:
serialized['l'] = macaroon.location
if macaroon.caveats:
serialized['c'] = [
_caveat_v2_to_dict(caveat) for caveat in macaroon.caveats
]
return json.dumps(serialized)
|
def _serialize_v2(self, macaroon)
|
Serialize the macaroon in JSON format v2.
@param macaroon the macaroon to serialize.
@return JSON macaroon in v2 format.
| 2.985846
| 2.403291
| 1.242399
|
'''Deserialize a JSON macaroon depending on the format.
@param serialized the macaroon in JSON format.
@return the macaroon object.
'''
deserialized = json.loads(serialized)
if deserialized.get('identifier') is None:
return self._deserialize_v2(deserialized)
else:
return self._deserialize_v1(deserialized)
|
def deserialize(self, serialized)
|
Deserialize a JSON macaroon depending on the format.
@param serialized the macaroon in JSON format.
@return the macaroon object.
| 4.243697
| 2.356687
| 1.800704
|
'''Deserialize a JSON macaroon in v1 format.
@param deserialized the macaroon in v1 JSON format.
@return the macaroon object.
'''
from pymacaroons.macaroon import Macaroon, MACAROON_V1
from pymacaroons.caveat import Caveat
caveats = []
for c in deserialized.get('caveats', []):
caveat = Caveat(
caveat_id=c['cid'],
verification_key_id=(
utils.raw_b64decode(c['vid']) if c.get('vid')
else None
),
location=(
c['cl'] if c.get('cl') else None
),
version=MACAROON_V1
)
caveats.append(caveat)
return Macaroon(
location=deserialized.get('location'),
identifier=deserialized['identifier'],
caveats=caveats,
signature=deserialized['signature'],
version=MACAROON_V1
)
|
def _deserialize_v1(self, deserialized)
|
Deserialize a JSON macaroon in v1 format.
@param deserialized the macaroon in v1 JSON format.
@return the macaroon object.
| 2.867835
| 2.337772
| 1.226739
|
'''Deserialize a JSON macaroon v2.
@param deserialized the macaroon in JSON format v2.
@return the macaroon object.
'''
from pymacaroons.macaroon import Macaroon, MACAROON_V2
from pymacaroons.caveat import Caveat
caveats = []
for c in deserialized.get('c', []):
caveat = Caveat(
caveat_id=_read_json_binary_field(c, 'i'),
verification_key_id=_read_json_binary_field(c, 'v'),
location=_read_json_binary_field(c, 'l'),
version=MACAROON_V2
)
caveats.append(caveat)
return Macaroon(
location=_read_json_binary_field(deserialized, 'l'),
identifier=_read_json_binary_field(deserialized, 'i'),
caveats=caveats,
signature=binascii.hexlify(
_read_json_binary_field(deserialized, 's')),
version=MACAROON_V2
)
|
def _deserialize_v2(self, deserialized)
|
Deserialize a JSON macaroon v2.
@param deserialized the macaroon in JSON format v2.
@return the macaroon object.
| 2.553999
| 2.089014
| 1.222586
|
if not id:
if not (user and project):
raise ValueError('Both user and project required')
if (
isinstance(user, User)
and isinstance(project, Project)
):
_user_id = user.id
_project_id = project.id
elif (
isinstance(user, (int, str,))
and isinstance(project, (int, str,))
):
_user_id = user
_project_id = project
else:
raise TypeError
id = cls.where(user_id=_user_id, project_id=_project_id).next().id
return super(ProjectPreferences, cls).find(id)
|
def find(cls, id='', user=None, project=None)
|
Like :py:meth:`.PanoptesObject.find` but can also query by user and
project.
- **user** and **project** can be either a :py:class:`.User` and
:py:class:`.Project` instance respectively, or they can be given as
IDs. If either argument is given, the other is also required.
| 2.479926
| 2.491107
| 0.995512
|
if isinstance(settings, dict):
_to_update = settings
if (
isinstance(user, User)
and isinstance(project, Project)
):
_user_id = user.id
_project_id = project.id
elif (
isinstance(user, (int, str,))
and isinstance(project, (int, str,))
):
_user_id = user
_project_id = project
else:
raise TypeError
cls.http_post(
'update_settings',
json={
'project_preferences': {
'user_id': _user_id,
'project_id': _project_id,
'settings': _to_update,
}
}
)
else:
raise TypeError
|
def save_settings(cls, project=None, user=None, settings=None)
|
Save settings for a user without first fetching their preferences.
- **user** and **project** can be either a :py:class:`.User` and
:py:class:`.Project` instance respectively, or they can be given as
IDs. If either argument is given, the other is also required.
- **settings** is a :py:class:`dict` containing the settings to be
saved.
| 2.468825
| 2.418054
| 1.020997
|
cls._local.save_exec = ThreadPoolExecutor(
max_workers=ASYNC_SAVE_THREADS
)
return cls._local.save_exec
|
def async_saves(cls)
|
Returns a context manager to allow asynchronously creating subjects.
Using this context manager will create a pool of threads which will
create multiple subjects at once and upload any local files
simultaneously.
The recommended way to use this is with the `with` statement::
with Subject.async_saves():
local_files = [...]
for filename in local_files:
s = Subject()
s.links.project = 1234
s.add_location(filename)
s.save()
Alternatively, you can manually shut down the thread pool::
pool = Subject.async_saves()
local_files = [...]
try:
for filename in local_files:
s = Subject()
s.links.project = 1234
s.add_location(filename)
s.save()
finally:
pool.shutdown()
| 8.347537
| 10.575901
| 0.789298
|
if not client:
client = Panoptes.client()
async_save = hasattr(self._local, 'save_exec')
with client:
if async_save:
try:
# The recursive call will exec in a new thread, so
# self._local.save_exec will be undefined above
self._async_future = self._local.save_exec.submit(
self.save,
client=client,
)
return
except RuntimeError:
del self._local.save_exec
async_save = False
if not self.metadata == self._original_metadata:
self.modified_attributes.add('metadata')
response = retry(
super(Subject, self).save,
attempts=UPLOAD_RETRY_LIMIT,
sleeptime=RETRY_BACKOFF_INTERVAL,
retry_exceptions=(PanoptesAPIException,),
log_args=False,
)
if not response:
return
try:
if async_save:
upload_exec = self._local.save_exec
else:
upload_exec = ThreadPoolExecutor(
max_workers=ASYNC_SAVE_THREADS,
)
for location, media_data in zip(
response['subjects'][0]['locations'],
self._media_files
):
if not media_data:
continue
for media_type, url in location.items():
upload_exec.submit(
retry,
self._upload_media,
args=(url, media_data, media_type),
attempts=UPLOAD_RETRY_LIMIT,
sleeptime=RETRY_BACKOFF_INTERVAL,
retry_exceptions=(
requests.exceptions.RequestException,
),
log_args=False,
)
finally:
if not async_save:
upload_exec.shutdown()
|
def save(self, client=None)
|
Like :py:meth:`.PanoptesObject.save`, but also uploads any local files
which have previously been added to the subject with
:py:meth:`add_location`. Automatically retries uploads on error.
If multiple local files are to be uploaded, several files will be
uploaded simultaneously to save time.
| 3.925149
| 3.800537
| 1.032788
|
if hasattr(self, "_async_future") and self._async_future.done():
self._async_future.result()
return True
else:
return False
|
def async_save_result(self)
|
Retrieves the result of this subject's asynchronous save.
- Returns `True` if the subject was saved successfully.
- Raises `concurrent.futures.CancelledError` if the save was cancelled.
- If the save failed, raises the relevant exception.
- Returns `False` if the subject hasn't finished saving or if the
subject has not been queued for asynchronous save.
| 3.741786
| 3.463635
| 1.080306
|
if type(location) is dict:
self.locations.append(location)
self._media_files.append(None)
return
elif type(location) in (str,) + _OLD_STR_TYPES:
f = open(location, 'rb')
else:
f = location
try:
media_data = f.read()
if MEDIA_TYPE_DETECTION == 'magic':
media_type = magic.from_buffer(media_data, mime=True)
else:
media_type = imghdr.what(None, media_data)
if not media_type:
raise UnknownMediaException(
'Could not detect file type. Please try installing '
'libmagic: https://panoptes-python-client.readthedocs.'
'io/en/latest/user_guide.html#uploading-non-image-'
'media-types'
)
media_type = 'image/{}'.format(media_type)
self.locations.append(media_type)
self._media_files.append(media_data)
finally:
f.close()
|
def add_location(self, location)
|
Add a media location to this subject.
- **location** can be an open :py:class:`file` object, a path to a
local file, or a :py:class:`dict` containing MIME types and URLs for
remote media.
Examples::
subject.add_location(my_file)
subject.add_location('/data/image.jpg')
subject.add_location({'image/png': 'https://example.com/image.png'})
| 3.350072
| 3.409731
| 0.982503
|
if generate:
self.generate_export(export_type)
if generate or wait:
export = self.wait_export(export_type, wait_timeout)
else:
export = self.describe_export(export_type)
if export_type in TALK_EXPORT_TYPES:
media_url = export['data_requests'][0]['url']
else:
media_url = export['media'][0]['src']
response = requests.get(media_url, stream=True)
response.csv_reader = functools.partial(
csv.reader,
response.iter_lines(decode_unicode=True),
)
response.csv_dictreader = functools.partial(
csv.DictReader,
response.iter_lines(decode_unicode=True),
)
return response
|
def get_export(
self,
export_type,
generate=False,
wait=False,
wait_timeout=None,
)
|
Downloads a data export over HTTP. Returns a `Requests Response
<http://docs.python-requests.org/en/master/api/#requests.Response>`_
object containing the content of the export.
- **export_type** is a string specifying which type of export should be
downloaded.
- **generate** is a boolean specifying whether to generate a new export
and wait for it to be ready, or to just download the latest export.
- **wait** is a boolean specifying whether to wait for an in-progress
export to finish, if there is one. Has no effect if ``generate`` is
``True``.
- **wait_timeout** is the number of seconds to wait if ``wait`` is
``True``. Has no effect if ``wait`` is ``False`` or if ``generate``
is ``True``.
The returned :py:class:`.Response` object has two additional attributes
as a convenience for working with the CSV content; **csv_reader** and
**csv_dictreader**, which are wrappers for :py:meth:`.csv.reader`
and :py:class:`csv.DictReader` respectively. These wrappers take care
of correctly decoding the export content for the CSV parser.
Example::
classification_export = Project(1234).get_export('classifications')
for row in classification_export.csv_reader():
print(row)
classification_export = Project(1234).get_export('classifications')
for row in classification_export.csv_dictreader():
print(row)
| 2.671276
| 2.646692
| 1.009289
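The csv_reader/csv_dictreader convenience is just functools.partial binding the response's line iterator; the same pattern in miniature, with a plain list standing in for the HTTP response:

import csv
import functools

lines = ["a,b", "1,2"]
reader = functools.partial(csv.reader, lines)
assert list(reader()) == [["a", "b"], ["1", "2"]]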
|
success = False
if timeout:
end_time = datetime.datetime.now() + datetime.timedelta(
seconds=timeout
)
while (not timeout) or (datetime.datetime.now() < end_time):
export_description = self.describe_export(
export_type,
)
if export_type in TALK_EXPORT_TYPES:
export_metadata = export_description['data_requests'][0]
else:
export_metadata = export_description['media'][0]['metadata']
if export_metadata.get('state', '') in ('ready', 'finished'):
success = True
break
time.sleep(2)
if not success:
raise PanoptesAPIException(
'{}_export not ready within {} seconds'.format(
export_type,
timeout
)
)
return export_description
|
def wait_export(
self,
export_type,
timeout=None,
)
|
Blocks until an in-progress export is ready.
- **export_type** is a string specifying which type of export to wait
for.
- **timeout** is the maximum number of seconds to wait.
If ``timeout`` is given and the export is not ready by the time limit,
:py:class:`.PanoptesAPIException` is raised.
| 3.097345
| 2.93215
| 1.056339
|
if export_type in TALK_EXPORT_TYPES:
return talk.post_data_request(
'project-{}'.format(self.id),
export_type.replace('talk_', '')
)
return self.http_post(
self._export_path(export_type),
json={"media": {"content_type": "text/csv"}},
)[0]
|
def generate_export(self, export_type)
|
Start a new export.
- **export_type** is a string specifying which type of export to start.
Returns a :py:class:`dict` containing metadata for the new export.
| 7.989119
| 8.208589
| 0.973263
|
if export_type in TALK_EXPORT_TYPES:
return talk.get_data_request(
'project-{}'.format(self.id),
export_type.replace('talk_', '')
)[0]
return self.http_get(
self._export_path(export_type),
)[0]
|
def describe_export(self, export_type)
|
Fetch metadata for an export.
- **export_type** is a string specifying which type of export to look
up.
Returns a :py:class:`dict` containing metadata for the export.
| 7.162036
| 8.174932
| 0.876097
|
scope = kwargs.pop('scope', None)
if not scope:
return super(Classification, cls).where(**kwargs)
return cls.paginated_results(*cls.http_get(scope, params=kwargs))
|
def where(cls, **kwargs)
|
where(scope=None, **kwargs)
Like :py:meth:`.PanoptesObject.where`, but also allows setting the
query scope.
- **scope** can be any of the values given in the `Classification
Collection API documentation <http://docs.panoptes.apiary.io/#reference/classification/classification/list-all-classifications>`_
without the leading slash.
Examples::
my_classifications = Classification.where()
my_proj_123_classifications = Classification.where(project_id=123)
all_proj_123_classifications = Classification.where(
scope='project',
project_id=123,
)
| 8.27215
| 6.585032
| 1.256205
|
if not id and not slug:
return None
try:
return cls.where(id=id, slug=slug).next()
except StopIteration:
raise PanoptesAPIException(
"Could not find collection with slug='{}'".format(slug)
)
|
def find(cls, id='', slug=None)
|
Similar to :py:meth:`.PanoptesObject.find`, but allows lookup by slug
as well as ID.
Examples::
collection_1234 = Collection.find(1234)
my_collection = Collection.find(slug="example/my-collection")
| 4.472981
| 3.948447
| 1.132846
|
if not (
isinstance(subject, Subject)
or isinstance(subject, (int, str,))
):
raise TypeError
if isinstance(subject, Subject):
_subject_id = subject.id
else:
_subject_id = str(subject)
self.http_post(
'{}/links/default_subject'.format(self.id),
json={'default_subject': _subject_id},
)
|
def set_default_subject(self, subject)
|
Sets the given subject as this collection's default subject link.
It displays as the default subject on PFE.
- **subject** can be a single :py:class:`.Subject` instance or a single
subject ID.
Examples::
collection.set_default_subject(1234)
collection.set_default_subject(Subject(1234))
| 3.711778
| 3.77249
| 0.983906
|
for sms in SetMemberSubject.where(subject_set_id=self.id):
yield sms.links.subject
|
def subjects(self)
|
A generator which yields :py:class:`.Subject` objects which are in this
subject set.
Examples::
for subject in subject_set.subjects:
print(subject.id)
| 27.240767
| 35.243725
| 0.772925
|
subjects = [s.id if isinstance(s, Subject) else s for s in subjects]
return Workflow.http_post(
'{}/retired_subjects'.format(self.id),
json={
'subject_ids': subjects,
'retirement_reason': reason
}
)
|
def retire_subjects(self, subjects, reason='other')
|
Retires subjects in this workflow.
- **subjects** can be a list of :py:class:`Subject` instances, a list
of subject IDs, a single :py:class:`Subject` instance, or a single
subject ID.
- **reason** gives the reason the :py:class:`Subject` has been retired.
Defaults to **other**.
Examples::
workflow.retire_subjects(1234)
workflow.retire_subjects([1,2,3,4])
workflow.retire_subjects(Subject(1234))
workflow.retire_subjects([Subject(12), Subject(34)])
| 4.100728
| 4.435869
| 0.924447
|
return [
r.links.owner for r in ProjectRole.where(project_id=self.id)
if len(roles) == 0 or len(set(roles) & set(r.roles)) > 0
]
|
def collaborators(self, *roles)
|
Returns a list of :py:class:`.User` who are collaborators on this
project.
Zero or more role arguments can be passed as strings to narrow down the
results. If any roles are given, users who possess at least one of the
given roles are returned.
Examples::
all_collabs = project.collaborators()
moderators = project.collaborators("moderators")
moderators_and_translators = project.collaborators(
"moderators",
"translators",
)
| 5.064729
| 5.586278
| 0.906637
|
cls._local.panoptes_client = cls(*args, **kwargs)
cls._local.panoptes_client.login()
return cls._local.panoptes_client
|
def connect(cls, *args, **kwargs)
|
connect(username=None, password=None, endpoint=None, admin=False)
Configures the Panoptes client for use.
Note that there is no need to call this unless you need to pass one or
more of the below arguments. By default, the client will connect to
the public Zooniverse.org API as an anonymous user.
All arguments are optional:
- **username** is your Zooniverse.org username.
- **password** is your Zooniverse.org password.
- **endpoint** is the HTTP API endpoint you'd like to connect to.
Defaults to **https://www.zooniverse.org**. Should not include a
trailing slash.
- **admin** is a boolean, switching on admin mode if ``True``. Has no
effect if the given username is not a Zooniverse.org administrator.
Examples::
Panoptes.connect(username='example', password='example')
Panoptes.connect(endpoint='https://panoptes.example.com')
| 4.314887
| 4.928549
| 0.875488
|
_id = kwargs.pop('id', '')
return cls.paginated_results(*cls.http_get(_id, params=kwargs))
|
def where(cls, **kwargs)
|
Returns a generator which yields instances matching the given query
arguments.
For example, this would yield all :py:class:`.Project`::
Project.where()
And this would yield all launch approved :py:class:`.Project`::
Project.where(launch_approved=True)
| 13.7144
| 16.564466
| 0.827941
|
if not _id:
return None
try:
return next(cls.where(id=_id))
except StopIteration:
raise PanoptesAPIException(
"Could not find {} with id='{}'".format(cls.__name__, _id)
)
|
def find(cls, _id)
|
Returns the individual instance with the given ID, if it exists. Raises
:py:class:`PanoptesAPIException` if the object with that ID is not
found.
| 3.855725
| 2.765221
| 1.394364
|
if not self.id:
save_method = Panoptes.client().post
force_reload = False
else:
if not self.modified_attributes:
return
if not self._loaded:
self.reload()
save_method = Panoptes.client().put
force_reload = True
response, response_etag = save_method(
self.url(self.id),
json={self._api_slug: self._savable_dict(
modified_attributes=self.modified_attributes
)},
etag=self.etag
)
raw_resource_response = response[self._api_slug][0]
self.set_raw(raw_resource_response, response_etag)
if force_reload:
self._loaded = False
return response
|
def save(self)
|
Saves the object. If the object has not been saved before (i.e. it's
new), then a new object is created. Otherwise, any changes are
submitted to the API.
| 4.53286
| 4.10872
| 1.103229
|
if not self.id:
return
reloaded_object = self.__class__.find(self.id)
self.set_raw(
reloaded_object.raw,
reloaded_object.etag
)
|
def reload(self)
|
Re-fetches the object from the API, discarding any local changes.
Returns without doing anything if the object is new.
| 5.972281
| 4.360356
| 1.369677
|
if not self.id:
return
if not self._loaded:
self.reload()
return self.http_delete(self.id, etag=self.etag)
|
def delete(self)
|
Deletes the object. Returns without doing anything if the object is
new.
| 7.595754
| 5.961069
| 1.274227
|
if self.readonly:
raise NotImplementedError(
'{} links can\'t be modified'.format(self._slug)
)
if not self._parent.id:
raise ObjectNotSavedException(
"Links can not be modified before the object has been saved."
)
_objs = [obj for obj in self._build_obj_list(objs) if obj not in self]
if not _objs:
return
self._parent.http_post(
'{}/links/{}'.format(self._parent.id, self._slug),
json={self._slug: _objs},
retry=True,
)
self._linked_object_ids.extend(_objs)
|
def add(self, objs)
|
Adds the given `objs` to this `LinkCollection`.
- **objs** can be a list of :py:class:`.PanoptesObject` instances, a
list of object IDs, a single :py:class:`.PanoptesObject` instance, or
a single object ID.
Examples::
organization.links.projects.add(1234)
organization.links.projects.add(Project(1234))
workflow.links.subject_sets.add([1,2,3,4])
workflow.links.subject_sets.add([Project(12), Project(34)])
| 4.93842
| 4.527738
| 1.090704
|
if self.readonly:
raise NotImplementedError(
'{} links can\'t be modified'.format(self._slug)
)
if not self._parent.id:
raise ObjectNotSavedException(
"Links can not be modified before the object has been saved."
)
_objs = [obj for obj in self._build_obj_list(objs) if obj in self]
if not _objs:
return
_obj_ids = ",".join(_objs)
self._parent.http_delete(
'{}/links/{}/{}'.format(self._parent.id, self._slug, _obj_ids),
retry=True,
)
self._linked_object_ids = [
obj for obj in self._linked_object_ids if obj not in _objs
]
|
def remove(self, objs)
|
Removes the given `objs` from this `LinkCollection`.
- **objs** can be a list of :py:class:`.PanoptesObject` instances, a
list of object IDs, a single :py:class:`.PanoptesObject` instance, or
a single object ID.
Examples::
organization.links.projects.remove(1234)
organization.links.projects.remove(Project(1234))
workflow.links.subject_sets.remove([1,2,3,4])
workflow.links.subject_sets.remove([Project(12), Project(34)])
| 4.367526
| 4.102867
| 1.064506
|
# Promote dicts to Namelists
if not isinstance(nml, Namelist) and isinstance(nml, dict):
nml_in = Namelist(nml)
else:
nml_in = nml
nml_in.write(nml_path, force=force, sort=sort)
|
def write(nml, nml_path, force=False, sort=False)
|
Save a namelist to disk using either a file object or its file path.
File object usage:
>>> with open(nml_path, 'w') as nml_file:
...     f90nml.write(nml, nml_file)
File path usage:
>>> f90nml.write(nml, 'data.nml')
This function is equivalent to the ``write`` function of the ``Namelist``
object ``nml``.
>>> nml.write('data.nml')
By default, ``write`` will not overwrite an existing file. To override
this, use the ``force`` flag.
>>> nml.write('data.nml', force=True)
To alphabetically sort the ``Namelist`` keys, use the ``sort`` flag.
>>> nml.write('data.nml', sort=True)
| 3.225065
| 3.463808
| 0.931075
|
parser = Parser()
return parser.read(nml_path, nml_patch, out_path)
|
def patch(nml_path, nml_patch, out_path=None)
|
Create a new namelist based on an input namelist and reference dict.
>>> f90nml.patch('data.nml', nml_patch, 'patched_data.nml')
This function is equivalent to the ``read`` function of the ``Parser``
object with the patch output arguments.
>>> parser = f90nml.Parser()
>>> nml = parser.read('data.nml', nml_patch, 'patched_data.nml')
A patched namelist file will retain any formatting or comments from the
original namelist file. Any modified values will be formatted based on the
settings of the ``Namelist`` object.
| 4.571044
| 8.824044
| 0.518021
|
self.trans = Transform(self.size[0], self.size[1], x1, y1, x2, y2)
|
def setCoords(self, x1, y1, x2, y2)
|
Set coordinates of window to run from (x1,y1) in the
lower-left corner to (x2,y2) in the upper-right corner.
| 4.038473
| 4.062569
| 0.994069
|
if format.upper() in cairosvg.SURFACES:
surface = cairosvg.SURFACES[format.upper()]
else:
raise Exception("'%s' image format unavailable: use one of %s" %
(format.upper(), list(cairosvg.SURFACES.keys())))
return surface.convert(bytestring=str(self), **kwargs)
|
def convert(self, format="png", **kwargs)
|
Convert the canvas to the given format (png, ps, pdf, gif, jpg, or svg)
and return the image as bytes.
| 4.442941
| 4.311343
| 1.030524
|
import PIL.Image
png_bytes = self.convert("png")
sfile = io.BytesIO(png_bytes)
pil = PIL.Image.open(sfile)
return pil
|
def toPIL(self, **attribs)
|
Convert canvas to a PIL image
| 4.899243
| 4.735047
| 1.034677
|
im = self.toPIL(**attribs)
sfile = io.BytesIO()
im.save(sfile, format="gif")
return sfile.getvalue()
|
def toGIF(self, **attribs)
|
Convert canvas to GIF bytes
| 3.403711
| 3.394361
| 1.002755
|
array = self.toArray()
(width, height, depth) = array.size
for x in range(width):
for y in range(height):
yield Pixel(array, x, y)
|
def getPixels(self)
|
Return a stream of pixels from the current Canvas.
| 4.243708
| 4.074062
| 1.04164
|
return Point(self.center[0] - self.radius,
self.center[1] - self.radius)
|
def getP1(self)
|
Left, upper point
| 4.545643
| 3.936727
| 1.154676
|
return Point(self.center[0] + self.radius,
self.center[1] + self.radius)
|
def getP2(self)
|
Right, lower point
| 4.600185
| 3.996555
| 1.151038
|
self.error = error
if not self.is_running.is_set():
def loop():
self.need_to_stop.clear()
self.is_running.set()
for robot in self.robots:
if robot.brain:
self.runBrain(robot.brain)
count = 0
while not self.need_to_stop.is_set():
if not self.paused.is_set():
self.clock += self.sim_time
for robot in self.robots:
try:
robot.update()
except Exception as exc:
self.need_to_stop.set()
if error:
error.value = "Error: %s. Now stopping simulation." % str(exc)
else:
raise
if gui:
self.draw()
if count % self.gui_update == 0:
if "canvas" in set_values:
set_values["canvas"].value = str(self.render())
if "energy" in set_values:
if len(self.robots) > 0:
set_values["energy"].value = str(self.robots[0].energy)
count += 1
self.realsleep(self.sim_time)
if self.robots[0].energy <= 0:
self.need_to_stop.set()
self.is_running.clear()
for robot in self.robots:
robot.stop()
threading.Thread(target=loop).start()
|
def start_sim(self, gui=True, set_values={}, error=None)
|
Run the simulation in the background, showing the GUI by default.
| 2.931866
| 2.899255
| 1.011248
|
from calysto.display import display, clear_output
canvas = self.render()
clear_output(wait=True)
display(canvas)
|
def draw(self)
|
Render and draw the world and robots.
| 8.807503
| 8.021418
| 1.097998
|
start = self.time()
while (self.time() - start < seconds and
not self.need_to_stop.is_set()):
self.need_to_stop.wait(self.sim_time)
|
def sleep(self, seconds)
|
Sleep in simulated time.
| 4.214789
| 3.767939
| 1.118593
|
if self.error:
self.error.value = ""
def wrapper():
self.brain_running.set()
try:
f()
except KeyboardInterrupt:
# Just stop
pass
except Exception as e:
if self.error:
self.error.value = "<pre style='background: #fdd'>" + traceback.format_exc() + "</pre>"
else:
raise
finally:
self.brain_running.clear()
# Otherwise, will show error
threading.Thread(target=wrapper).start()
|
def runBrain(self, f)
|
Run a brain program in the background.
| 4.291976
| 4.107275
| 1.044969
|
dx, dy = map(lambda v: v * lam_percent, self.psize)
total = 0
while total < count:
points = np.random.poisson(lam=(dx, dy), size=(count, 2))
for x, y in points:
px, py = (int(x - dx + cx), int(y - dy + cy))
if self.getPatch(px, py) is None:
self.setPatch(px, py, item)
total += 1
if total == count:
break
|
def addCluster(self, cx, cy, item, count, lam_percent=.25)
|
Add a Poisson cluster of count items around (cx, cy).
| 3.202787
| 2.962228
| 1.081209
|
self.vx = vx
self.sleep(seconds)
self.vx = 0
|
def forward(self, seconds, vx=5)
|
Move continuously in the simulator for the given number of seconds at velocity vx.
| 4.482998
| 3.916874
| 1.144535
|
length = len(codon)
retval = int(codon)
return retval/(10 ** (length - 1)) - 5.0
|
def codon2weight(self, codon)
|
Convert a codon ("000" to "999") into a number between
-5.0 and 5.0.
| 9.171851
| 5.289417
| 1.734
|
if length is None:
length = self.clen
retval = 0
weight = min(max(weight + 5.0, 0), 10.0) * (10 ** (length - 1))
for i in range(length):
if i == length - 1: # last one
d = int(round(weight / (10 ** (length - i - 1))))
else:
d = int(weight / (10 ** (length - i - 1)))
weight = weight % (10 ** (length - i - 1))
retval += d * (10 ** (length - i - 1))
return ("%0" + str(length) + "d") % retval
|
def weight2codon(self, weight, length=None)
|
Given a weight between -5 and 5, turn it into
a codon, e.g. "000" to "999".
| 2.656588
| 2.413423
| 1.100755
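A compact standalone sketch of the same mapping (length fixed at 3), handy for checking that the two functions above invert each other:

def codon2weight(codon):
    return int(codon) / (10 ** (len(codon) - 1)) - 5.0

def weight2codon(weight, length=3):
    scaled = int(round(min(max(weight + 5.0, 0), 10.0) * 10 ** (length - 1)))
    return '%0*d' % (length, scaled)

assert weight2codon(1.5) == '650'
assert codon2weight('650') == 1.5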
|
return (isinstance(val, list) and
any(isinstance(v, vtype) for v in val) and
all((isinstance(v, vtype) or v is None) for v in val))
|
def is_nullable_list(val, vtype)
|
Return True if list contains either values of type `vtype` or None.
| 2.539311
| 2.24828
| 1.129446
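Quick usage check (note the list must contain at least one value of the requested type):

assert is_nullable_list([1, None, 3], int)
assert not is_nullable_list([None, None], int)  # no actual int present
assert not is_nullable_list([1, 'a'], int)      # 'a' is neither int nor None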
|
if isinstance(width, int):
if width >= 0:
self._column_width = width
else:
raise ValueError('Column width must be nonnegative.')
else:
raise TypeError('Column width must be a nonnegative integer.')
|
def column_width(self, width)
|
Validate and set the column width.
| 2.33411
| 2.112263
| 1.105028
|
# Explicit indent setting
if isinstance(value, str):
if value.isspace() or len(value) == 0:
self._indent = value
else:
raise ValueError('String indentation can only contain '
'whitespace.')
# Set indent width
elif isinstance(value, int):
if value >= 0:
self._indent = value * ' '
else:
raise ValueError('Indentation spacing must be nonnegative.')
else:
raise TypeError('Indentation must be specified by string or space '
'width.')
|
def indent(self, value)
|
Validate and set the indent width.
| 3.684388
| 3.488928
| 1.056023
|
if not isinstance(value, bool):
raise TypeError('end_comma attribute must be a logical type.')
self._end_comma = value
|
def end_comma(self, value)
|
Validate and set the comma termination flag.
| 4.9332
| 4.029046
| 1.224409
|
if not isinstance(value, bool):
raise TypeError('index_spacing attribute must be a logical type.')
self._index_spacing = value
|
def index_spacing(self, value)
|
Validate and set the index_spacing flag.
| 4.998543
| 3.801184
| 1.314996
|
if not isinstance(value, bool):
raise TypeError('uppercase attribute must be a logical type.')
self._uppercase = value
|
def uppercase(self, value)
|
Validate and set the uppercase flag.
| 6.931507
| 5.240732
| 1.322622
|
if isinstance(value, str):
# Duck-test the format string; raise ValueError on fail
'{0:{1}}'.format(1.23, value)
self._float_format = value
else:
raise TypeError('Floating point format code must be a string.')
|
def float_format(self, value)
|
Validate and set the floating point format code.
| 8.548389
| 7.604915
| 1.124061
|
if not any(isinstance(value, t) for t in (list, tuple)):
raise TypeError("Logical representation must be a tuple with "
"a valid true and false value.")
if not len(value) == 2:
raise ValueError("List must contain two values.")
self.false_repr = value[0]
self.true_repr = value[1]
|
def logical_repr(self, value)
|
Set the string representation of logical values.
| 3.73827
| 3.631502
| 1.0294
|
if isinstance(value, str):
if not (value.lower().startswith('f') or
value.lower().startswith('.f')):
raise ValueError("Logical false representation must start "
"with 'F' or '.F'.")
else:
self._logical_repr[0] = value
else:
raise TypeError('Logical false representation must be a string.')
|
def false_repr(self, value)
|
Validate and set the logical false representation.
| 3.832059
| 3.094126
| 1.238495
|
# TODO: Validate contents? (May want to set before adding the data.)
if not isinstance(value, dict):
raise TypeError('start_index attribute must be a dict.')
self._start_index = value
|
def start_index(self, value)
|
Validate and set the vector start index.
| 8.917084
| 8.302082
| 1.074078
|
nml_is_file = hasattr(nml_path, 'read')
if not force and not nml_is_file and os.path.isfile(nml_path):
raise IOError('File {0} already exists.'.format(nml_path))
nml_file = nml_path if nml_is_file else open(nml_path, 'w')
try:
self._writestream(nml_file, sort)
finally:
if not nml_is_file:
nml_file.close()
|
def write(self, nml_path, force=False, sort=False)
|
Write Namelist to a Fortran 90 namelist file.
>>> nml = f90nml.read('input.nml')
>>> nml.write('out.nml')
| 2.141816
| 2.667052
| 0.803065
|
for sec in nml_patch:
if sec not in self:
self[sec] = Namelist()
self[sec].update(nml_patch[sec])
|
def patch(self, nml_patch)
|
Update the namelist from another partial or full namelist.
This is different from the intrinsic `update()` method, which replaces
a namelist section. Rather, it updates the values within a section.
| 3.696156
| 3.11101
| 1.188089
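The distinction sketched with plain dicts (a hedged illustration, not f90nml's code): update() swaps out a whole section, while patch() merges values inside it:

base = {'sec': {'a': 1, 'b': 2}}
delta = {'sec': {'b': 99}}

replaced = dict(base)
replaced.update(delta)  # whole section replaced: {'sec': {'b': 99}}

merged = {k: dict(v) for k, v in base.items()}
for sec in delta:
    merged.setdefault(sec, {}).update(delta[sec])
assert merged == {'sec': {'a': 1, 'b': 99}}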
|
for key, value in self.items():
for inner_key, inner_value in value.items():
yield (key, inner_key), inner_value
|
def groups(self)
|
Return an iterator that spans values with group and variable names.
Elements of the iterator consist of a tuple containing two values. The
first is an inner tuple containing the current namelist group and its
variable name. The second element of the returned tuple is the value
associated with the current group and variable.
| 3.30924
| 3.052592
| 1.084076
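The same traversal over a plain nested dict, for illustration:

nml = {'config': {'steps': 10, 'debug': True}, 'grid': {'nx': 64}}
pairs = [((grp, var), val)
         for grp, sub in nml.items()
         for var, val in sub.items()]
assert pairs[0] == (('config', 'steps'), 10)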
|
if self._newline:
print(file=nml_file)
self._newline = True
if self.uppercase:
grp_name = grp_name.upper()
if sort:
grp_vars = Namelist(sorted(grp_vars.items(), key=lambda t: t[0]))
print('&{0}'.format(grp_name), file=nml_file)
for v_name, v_val in grp_vars.items():
v_start = grp_vars.start_index.get(v_name, None)
for v_str in self._var_strings(v_name, v_val, v_start=v_start):
nml_line = self.indent + '{0}'.format(v_str)
print(nml_line, file=nml_file)
print('/', file=nml_file)
|
def _write_nmlgrp(self, grp_name, grp_vars, nml_file, sort=False)
|
Write namelist group to target file.
| 3.056417
| 2.927144
| 1.044164
|
# TODO: Preserve ordering
nmldict = OrderedDict(self)
# Search for namelists within the namelist
# TODO: Move repeated stuff to new functions
for key, value in self.items():
if isinstance(value, Namelist):
nmldict[key] = value.todict(complex_tuple)
elif isinstance(value, complex) and complex_tuple:
nmldict[key] = [value.real, value.imag]
try:
nmldict['_complex'].append(key)
except KeyError:
nmldict['_complex'] = [key]
elif isinstance(value, list):
complex_list = False
for idx, entry in enumerate(value):
if isinstance(entry, Namelist):
nmldict[key][idx] = entry.todict(complex_tuple)
elif isinstance(entry, complex) and complex_tuple:
nmldict[key][idx] = [entry.real, entry.imag]
complex_list = True
if complex_list:
try:
nmldict['_complex'].append(key)
except KeyError:
nmldict['_complex'] = [key]
# Append the start index if present
if self.start_index:
nmldict['_start_index'] = self.start_index
return nmldict
|
def todict(self, complex_tuple=False)
|
Return a dict equivalent to the namelist.
Since Fortran variables and names cannot start with the ``_``
character, any keys starting with this token denote metadata, such as
starting index.
The ``complex_tuple`` flag is used to convert complex data into an
equivalent 2-tuple, with metadata stored to flag the variable as
complex. This is primarily used to facilitate the storage of the
namelist into an equivalent format which does not support complex
numbers, such as JSON or YAML.
| 2.499367
| 2.326273
| 1.074408
|
if isinstance(value, bool):
return self._f90bool(value)
elif isinstance(value, numbers.Integral):
return self._f90int(value)
elif isinstance(value, numbers.Real):
return self._f90float(value)
elif isinstance(value, numbers.Complex):
return self._f90complex(value)
elif isinstance(value, basestring):
return self._f90str(value)
elif value is None:
return ''
else:
raise ValueError('Type {0} of {1} cannot be converted to a Fortran'
' type.'.format(type(value), value))
|
def _f90repr(self, value)
|
Convert primitive Python types to equivalent Fortran strings.
| 1.857173
| 1.811174
| 1.025398
|
return '({0:{fmt}}, {1:{fmt}})'.format(value.real, value.imag,
fmt=self.float_format)
|
def _f90complex(self, value)
|
Return a Fortran 90 representation of a complex number.
| 5.087142
| 4.2477
| 1.197623
|
# Replace Python quote escape sequence with Fortran
result = repr(str(value)).replace("\\'", "''").replace('\\"', '""')
# Un-escape the Python backslash escape sequence
result = result.replace('\\\\', '\\')
return result
|
def _f90str(self, value)
|
Return a Fortran 90 representation of a string.
| 8.170658
| 7.022153
| 1.163555
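A standalone sketch of the quote handling above (the method uses self only as a receiver), showing Fortran's doubled-quote convention:

def f90str(value):
    result = repr(str(value)).replace("\\'", "''").replace('\\"', '""')
    return result.replace('\\\\', '\\')

print(f90str('don\'t "x"'))  # prints: 'don''t "x"'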
|
response_keys = set(page.keys())
uncommon_keys = response_keys - self.common_keys
for possible_data_key in uncommon_keys:
element = page[possible_data_key]
if isinstance(element, dict):
return [self.representation(self.client, self.service_name,
element)]
if isinstance(element, list):
return [self.representation(self.client, self.service_name, x)
for x in element]
|
def extract_data(self, page)
|
Extract the AppNexus object or list of objects from the response
| 4.005986
| 3.745061
| 1.069672
|
page = self.get_page(num_elements=1)
data = self.extract_data(page)
if data:
return data[0]
|
def first(self)
|
Extract the first AppNexus object present in the response
| 6.060071
| 4.930116
| 1.229194
|
if num_elements is None:
num_elements = self.batch_size
specs = self.specs.copy()
specs.update(start_element=start_element, num_elements=num_elements)
return self.client.get(self.service_name, **specs)
|
def get_page(self, start_element=0, num_elements=None)
|
Get a page (100 elements) starting from `start_element`
| 2.94344
| 2.889517
| 1.018662
|
initial_count = self.count()
count_with_skip = max(0, initial_count - self._skip)
size = min(count_with_skip, self._limit)
return size
|
def size(self)
|
Return the number of elements in the cursor, taking skip and limit into account.
| 5.077178
| 3.940612
| 1.288424
|
if align == "left":
return (s + (p * n))[:n] + sep
elif align == "center":
pos = n + len(s)//2 - n//2
return ((p * n) + s + (p * n))[pos:pos + n] + sep
elif align == "right":
return ((p * n) + s)[-n:] + sep
|
def pad(s, n, p = " ", sep = "|", align = "left")
|
Returns a padded string.
s = string to pad
n = width of string to return
p = pad character
sep = separator (appended to the end of the string)
align = text alignment, "left", "center", or "right"
| 2.61171
| 2.622707
| 0.995807
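Usage sketch (after the integer-division fix above, so the center slice gets an int index):

print(pad("abc", 8))                  # 'abc     |'
print(pad("abc", 8, align="right"))   # '     abc|'
print(pad("abc", 8, align="center"))  # '   abc  |'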
|
for key in dict2:
dict1[key] = list(map(lambda a,b: a + b, dict1.get(key, [0,0,0,0]), dict2[key]))
return dict1
|
def sumMerge(dict1, dict2)
|
Adds two dictionaries together element-wise, merging the result into the
first, dict1. Returns the first dict.
| 2.49698
| 2.839141
| 0.879484
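Usage sketch: per-key element-wise sums, with absent keys defaulting to [0, 0, 0, 0]:

d1 = {'a': [1, 1, 1, 1]}
d2 = {'a': [1, 2, 3, 4], 'b': [5, 0, 0, 0]}
assert sumMerge(d1, d2) == {'a': [2, 3, 4, 5], 'b': [5, 0, 0, 0]}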
|
if mode == 'pickle':
import pickle
fp = open(filename, "rb")  # pickle requires binary mode
network = pickle.load(fp)
fp.close()
return network
elif mode in ['plain', 'conx']:
fp = open(filename, "r")
line = fp.readline()
network = None
while line:
if line.startswith("layer,"):
# layer, name, size
temp, name, sizeStr = line.split(",")
name = name.strip()
size = int(sizeStr)
network.addLayer(name, size)
line = fp.readline()
weights = [float(f) for f in line.split()]
for i in range(network[name].size):
network[name].weight[i] = weights[i]
elif line.startswith("connection,"):
# connection, fromLayer, toLayer
temp, nameFrom, nameTo = line.split(",")
nameFrom, nameTo = nameFrom.strip(), nameTo.strip()
network.connect(nameFrom, nameTo)
for i in range(network[nameFrom].size):
line = fp.readline()
weights = [float(f) for f in line.split()]
for j in range(network[nameTo].size):
network[nameFrom, nameTo].weight[i][j] = weights[j]
elif line.startswith("parameter,"):
temp, exp = line.split(",")
exec(exp) # network is the neural network object
elif line.startswith("network,"):
temp, netType = line.split(",")
netType = netType.strip().lower()
if netType == "cascornetwork":
from pyrobot.brain.cascor import CascorNetwork
network = CascorNetwork()
elif netType == "network":
network = Network()
elif netType == "srn":
network = SRN()
else:
raise AttributeError("unknown network type: '%s'" % netType)
line = fp.readline()
return network
|
def loadNetworkFromFile(filename, mode = 'pickle')
|
Deprecated. Use loadNetwork instead.
| 2.657619
| 2.622089
| 1.01355
|
thunk = kwargs.get("thunk", lambda: random.random())
if not args:
return [thunk() for i in range(n)]
A = []
for i in range(n):
A.append( ndim(*args, thunk=thunk) )
return A
|
def ndim(n, *args, **kwargs)
|
Makes a multi-dimensional array of random floats. (Replaces RandomArray).
| 4.495357
| 3.590033
| 1.252177
|
if type(size) == type(1):
size = (size,)
temp = Numeric.array(ndim(*size, thunk=lambda: random.gauss(0, 1))) * (2.0 * bound)
return temp - bound
|
def randomArray2(size, bound)
|
Returns an array initialized to random values between -bound and
bound distributed in a gaussian probability distribution more
appropriate for a Tanh activation function.
| 9.144854
| 8.785654
| 1.040885
|
if type(size) == type(1):
size = (size,)
temp = Numeric.array( ndim(*size) ) * (2.0 * bound)
return temp - bound
|
def randomArray(size, bound)
|
Returns an array initialized to random values between -bound and bound.
| 9.149446
| 8.54887
| 1.070252
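These helpers predate NumPy and build on the old Numeric package; a modern one-line equivalent of randomArray, offered as an assumed stand-in:

import numpy as np

def random_array(size, bound):
    # uniform values in [-bound, bound), matching randomArray's intent
    return np.random.uniform(-bound, bound, size)

w = random_array((3, 2), 0.5)
assert w.shape == (3, 2) and abs(w).max() <= 0.5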
|
print(name + ": ", end=" ")
cnt = 0
for i in a:
print("%4.2f" % i, end=" ")
if width > 0 and (cnt + 1) % width == 0:
print('')
cnt += 1
|
def displayArray(name, a, width = 0)
|
Prints an array (any sequence of floats, really) to the screen.
| 2.753612
| 2.678041
| 1.028219
|
string = name + ": "
cnt = 0
for i in a:
string += "%4.2f " % i
if width > 0 and (cnt + 1) % width == 0:
string += '\n'
cnt += 1
return string
|
def toStringArray(name, a, width = 0)
|
Returns an array (any sequence of floats, really) as a string.
| 2.848087
| 2.68099
| 1.062326
|
for i in a:
fp.write("%f%s" % (i, delim))
if nl:
fp.write("\n")
|
def writeArray(fp, a, delim = " ", nl = 1)
|
Writes a sequence a of floats to the file pointed to by file pointer fp.
| 2.491161
| 2.294163
| 1.085869
|
self.randomize()
self.dweight = Numeric.zeros(self.size, 'f')
self.delta = Numeric.zeros(self.size, 'f')
self.wed = Numeric.zeros(self.size, 'f')
self.wedLast = Numeric.zeros(self.size, 'f')
self.target = Numeric.zeros(self.size, 'f')
self.error = Numeric.zeros(self.size, 'f')
self.activation = Numeric.zeros(self.size, 'f')
self.netinput = Numeric.zeros(self.size, 'f')
self.targetSet = 0
self.activationSet = 0
self.verify = 1
# layer report of stats:
self.pcorrect = 0
self.ptotal = 0
self.correct = 0
# misc:
self.minTarget = 0.0
self.maxTarget = 1.0
self.minActivation = 0.0
self.maxActivation = 1.0
|
def initialize(self)
|
Initializes important node values to zero for each node in the
layer (target, error, activation, dweight, delta, netinput, wed).
| 3.042897
| 2.580528
| 1.179176
|
if force or not self.frozen:
self.weight = randomArray(self.size, self._maxRandom)
|
def randomize(self, force = 0)
|
Initialize node biases to random values in the range [-max, max].
| 14.830421
| 13.247418
| 1.119495
|
# overwrites current data
if newsize <= 0:
raise LayerError('Layer size changed to zero.', newsize)
minSize = min(self.size, newsize)
bias = randomArray(newsize, self._maxRandom)
Numeric.put(bias, Numeric.arange(minSize), self.weight)
self.weight = bias
self.size = newsize
self.displayWidth = newsize
self.targetSet = 0
self.activationSet = 0
self.target = Numeric.zeros(self.size, 'f')
self.error = Numeric.zeros(self.size, 'f')
self.activation = Numeric.zeros(self.size, 'f')
self.dweight = Numeric.zeros(self.size, 'f')
self.delta = Numeric.zeros(self.size, 'f')
self.netinput = Numeric.zeros(self.size, 'f')
self.wed = Numeric.zeros(self.size, 'f')
self.wedLast = Numeric.zeros(self.size, 'f')
|
def changeSize(self, newsize)
|
Changes the size of the layer. Should only be called through
Network.changeLayerSize().
| 3.648044
| 3.470975
| 1.051014
|
tss = self.TSSError()
return math.sqrt(tss / self.size)
|
def RMSError(self)
|
Returns Root Mean Squared Error for this layer's pattern.
| 15.668197
| 8.858873
| 1.768644
|
return Numeric.add.reduce(Numeric.fabs(self.target - self.activation) < tolerance)
|
def getCorrect(self, tolerance)
|
Returns the number of nodes within tolerance of the target.
| 12.711768
| 10.448794
| 1.216578
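The Numeric expression above counts nodes whose activation falls within tolerance of the target; a NumPy sketch of the same reduction:

import numpy as np

target = np.array([0.0, 1.0, 1.0])
activation = np.array([0.1, 0.95, 0.4])
correct = np.sum(np.abs(target - activation) < 0.2)
assert correct == 2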
|
maxvalue = -10000
maxpos = -1
ttlvalue = 0
if type == 'activation':
ttlvalue = Numeric.add.reduce(self.activation)
maxpos = Numeric.argmax(self.activation)
maxvalue = self.activation[maxpos]
elif type == 'target':
# note that backprop() resets self.targetSet flag
if self.verify and self.targetSet == 0:
raise LayerError('getWinner() called with \'target\' but target has not been set.', \
self.targetSet)
ttlvalue = Numeric.add.reduce(self.target)
maxpos = Numeric.argmax(self.target)
maxvalue = self.target[maxpos]
else:
raise LayerError('getWinner() called with unknown layer attribute.', \
type)
if self.size > 0:
avgvalue = ttlvalue / float(self.size)
else:
raise LayerError('getWinner() called for layer of size zero.', \
self.size)
return maxpos, maxvalue, avgvalue
|
def getWinner(self, type = 'activation')
|
Returns the winner of the type specified {'activation' or
'target'}.
| 3.446731
| 3.369811
| 1.022826
|