text stringlengths 4 1.02M | meta dict |
|---|---|
import os
def make_output_dir(state):
    """Ensure the output directory for *state* exists, creating it if needed.

    :param state: object with an ``output_directory`` path attribute and a
        ``name`` attribute (used in the error message).
    :raises Exception: if ``state.output_directory`` is empty/unset, or if
        the directory cannot be created (e.g. permissions, invalid path).
    """
    if not state.output_directory:
        msg = "No Output Directory Specified for State:" + state.name
        raise Exception(msg)
    try:
        # Create unconditionally instead of exists()-then-create: the
        # original check-then-act was racy if another process created or
        # removed the directory in between.
        os.makedirs(state.output_directory)
    except OSError:
        # "Already exists" (including concurrent creation) is fine; any
        # other failure should propagate to the caller.
        if not os.path.isdir(state.output_directory):
            raise
| {
"content_hash": "1d0f9e8f32eb142e4b19b410f526b2ec",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 69,
"avg_line_length": 21.46153846153846,
"alnum_prop": 0.6702508960573477,
"repo_name": "savorywatt/tailorSCAD",
"id": "f24859ed2cc58270e2144b0c1e1d6133da32b454",
"size": "279",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tailorscad/process/save.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "290"
},
{
"name": "OpenSCAD",
"bytes": "291"
},
{
"name": "Python",
"bytes": "16795"
}
],
"symlink_target": ""
} |
"""Tests for autotagging functionality.
"""
from __future__ import division, absolute_import, print_function
import re
import copy
from test import _common
from test._common import unittest
from beets import autotag
from beets.autotag import match
from beets.autotag.hooks import Distance, string_dist
from beets.library import Item
from beets.util import plurality
from beets.autotag import AlbumInfo, TrackInfo
from beets import config
class PluralityTest(_common.TestCase):
    """Tests for ``beets.util.plurality`` and ``match.current_metadata``.

    ``plurality`` returns the most common element of a sequence together
    with its frequency; ``current_metadata`` uses it to pick the most
    likely field values across a set of items.
    """
    def test_plurality_consensus(self):
        # Unanimous input: the value and the full count come back.
        objs = [1, 1, 1, 1]
        obj, freq = plurality(objs)
        self.assertEqual(obj, 1)
        self.assertEqual(freq, 4)
    def test_plurality_near_consensus(self):
        # A single outlier does not change the winner, only the count.
        objs = [1, 1, 2, 1]
        obj, freq = plurality(objs)
        self.assertEqual(obj, 1)
        self.assertEqual(freq, 3)
    def test_plurality_conflict(self):
        # On a tie, either of the tied values is an acceptable winner.
        objs = [1, 1, 2, 2, 3]
        obj, freq = plurality(objs)
        self.assertTrue(obj in (1, 2))
        self.assertEqual(freq, 2)
    def test_plurality_empty_sequence_raises_error(self):
        with self.assertRaises(ValueError):
            plurality([])
    def test_current_metadata_finds_pluralities(self):
        # Two of three artists/albums agree, so those values win, but the
        # artist field is not a consensus (one item disagrees).
        items = [Item(artist='The Beetles', album='The White Album'),
                 Item(artist='The Beatles', album='The White Album'),
                 Item(artist='The Beatles', album='Teh White Album')]
        likelies, consensus = match.current_metadata(items)
        self.assertEqual(likelies['artist'], 'The Beatles')
        self.assertEqual(likelies['album'], 'The White Album')
        self.assertFalse(consensus['artist'])
    def test_current_metadata_artist_consensus(self):
        # All artists agree, so artist consensus is reported as True.
        items = [Item(artist='The Beatles', album='The White Album'),
                 Item(artist='The Beatles', album='The White Album'),
                 Item(artist='The Beatles', album='Teh White Album')]
        likelies, consensus = match.current_metadata(items)
        self.assertEqual(likelies['artist'], 'The Beatles')
        self.assertEqual(likelies['album'], 'The White Album')
        self.assertTrue(consensus['artist'])
    def test_albumartist_consensus(self):
        # A shared albumartist overrides differing per-track artists for
        # the 'artist' likelihood, but it is not an artist consensus.
        items = [Item(artist='tartist1', album='album',
                      albumartist='aartist'),
                 Item(artist='tartist2', album='album',
                      albumartist='aartist'),
                 Item(artist='tartist3', album='album',
                      albumartist='aartist')]
        likelies, consensus = match.current_metadata(items)
        self.assertEqual(likelies['artist'], 'aartist')
        self.assertFalse(consensus['artist'])
    def test_current_metadata_likelies(self):
        # Every metadata field picks its own plurality winner; here the
        # value '<field>_1' appears twice (i=0 maps to 1) so it wins.
        fields = ['artist', 'album', 'albumartist', 'year', 'disctotal',
                  'mb_albumid', 'label', 'catalognum', 'country', 'media',
                  'albumdisambig']
        items = [Item(**dict((f, '%s_%s' % (f, i or 1)) for f in fields))
                 for i in range(5)]
        likelies, _ = match.current_metadata(items)
        for f in fields:
            self.assertEqual(likelies[f], '%s_1' % f)
def _make_item(title, track, artist=u'some artist'):
    """Build a library Item with fixed test metadata for 'some album'."""
    fields = {
        'title': title,
        'track': track,
        'artist': artist,
        'album': u'some album',
        'length': 1,
        'mb_trackid': '',
        'mb_albumid': '',
        'mb_artistid': '',
    }
    return Item(**fields)
def _make_trackinfo():
    """Return TrackInfo objects for a canonical three-track test album."""
    titles = [u'one', u'two', u'three']
    return [
        TrackInfo(name, None, u'some artist', length=1, index=pos)
        for pos, name in enumerate(titles, start=1)
    ]
def _clear_weights():
    """Hack around the lazy descriptor used to cache weights for
    Distance calculations.
    """
    # Reaches into the class dict directly because attribute access on
    # Distance would trigger (and re-cache) the lazy computation.
    Distance.__dict__['_weights'].computed = False
class DistanceTest(_common.TestCase):
    """Tests for the Distance penalty accumulator and its add_* helpers.

    Each ``add_*`` method appends one or more normalized penalties (0.0-1.0)
    under a key; the overall distance is a weighted average driven by the
    ``match.distance_weights`` config values.
    """
    def tearDown(self):
        super(DistanceTest, self).tearDown()
        # Weights are cached lazily on the class; reset them so per-test
        # config changes do not leak into other tests.
        _clear_weights()
    def test_add(self):
        dist = Distance()
        dist.add('add', 1.0)
        self.assertEqual(dist._penalties, {'add': [1.0]})
    def test_add_equality(self):
        # Penalty is 0.0 when the value matches an option, 1.0 otherwise;
        # options may be plain values or compiled regexes.
        dist = Distance()
        dist.add_equality('equality', 'ghi', ['abc', 'def', 'ghi'])
        self.assertEqual(dist._penalties['equality'], [0.0])
        dist.add_equality('equality', 'xyz', ['abc', 'def', 'ghi'])
        self.assertEqual(dist._penalties['equality'], [0.0, 1.0])
        dist.add_equality('equality', 'abc', re.compile(r'ABC', re.I))
        self.assertEqual(dist._penalties['equality'], [0.0, 1.0, 0.0])
    def test_add_expr(self):
        # Boolean expression maps directly to a full (1.0) or no (0.0)
        # penalty.
        dist = Distance()
        dist.add_expr('expr', True)
        self.assertEqual(dist._penalties['expr'], [1.0])
        dist.add_expr('expr', False)
        self.assertEqual(dist._penalties['expr'], [1.0, 0.0])
    def test_add_number(self):
        dist = Distance()
        # Add a full penalty for each number of difference between two numbers.
        dist.add_number('number', 1, 1)
        self.assertEqual(dist._penalties['number'], [0.0])
        dist.add_number('number', 1, 2)
        self.assertEqual(dist._penalties['number'], [0.0, 1.0])
        dist.add_number('number', 2, 1)
        self.assertEqual(dist._penalties['number'], [0.0, 1.0, 1.0])
        # |-1 - 2| == 3, so three full penalties are appended at once.
        dist.add_number('number', -1, 2)
        self.assertEqual(dist._penalties['number'], [0.0, 1.0, 1.0, 1.0,
                                                     1.0, 1.0])
    def test_add_priority(self):
        # Penalty grows with the value's position in the candidate list:
        # first choice costs 0.0, later choices cost proportionally more,
        # and an unmatched value costs the full 1.0.
        dist = Distance()
        dist.add_priority('priority', 'abc', 'abc')
        self.assertEqual(dist._penalties['priority'], [0.0])
        dist.add_priority('priority', 'def', ['abc', 'def'])
        self.assertEqual(dist._penalties['priority'], [0.0, 0.5])
        dist.add_priority('priority', 'gh', ['ab', 'cd', 'ef',
                                             re.compile('GH', re.I)])
        self.assertEqual(dist._penalties['priority'], [0.0, 0.5, 0.75])
        dist.add_priority('priority', 'xyz', ['abc', 'def'])
        self.assertEqual(dist._penalties['priority'], [0.0, 0.5, 0.75,
                                                       1.0])
    def test_add_ratio(self):
        # Penalty is number/total clamped into [0.0, 1.0]; a non-positive
        # total yields 0.0.
        dist = Distance()
        dist.add_ratio('ratio', 25, 100)
        self.assertEqual(dist._penalties['ratio'], [0.25])
        dist.add_ratio('ratio', 10, 5)
        self.assertEqual(dist._penalties['ratio'], [0.25, 1.0])
        dist.add_ratio('ratio', -5, 5)
        self.assertEqual(dist._penalties['ratio'], [0.25, 1.0, 0.0])
        dist.add_ratio('ratio', 5, 0)
        self.assertEqual(dist._penalties['ratio'], [0.25, 1.0, 0.0, 0.0])
    def test_add_string(self):
        # add_string must agree with the standalone string_dist function.
        dist = Distance()
        sdist = string_dist(u'abc', u'bcd')
        dist.add_string('string', u'abc', u'bcd')
        self.assertEqual(dist._penalties['string'], [sdist])
        self.assertNotEqual(dist._penalties['string'], [0])
    def test_add_string_none(self):
        # One side missing counts as a maximal penalty.
        dist = Distance()
        dist.add_string('string', None, 'string')
        self.assertEqual(dist._penalties['string'], [1])
    def test_add_string_both_none(self):
        # Both sides missing counts as a perfect match.
        dist = Distance()
        dist.add_string('string', None, None)
        self.assertEqual(dist._penalties['string'], [0])
    def test_distance(self):
        config['match']['distance_weights']['album'] = 2.0
        config['match']['distance_weights']['medium'] = 1.0
        _clear_weights()
        dist = Distance()
        dist.add('album', 0.5)
        dist.add('media', 0.25)
        dist.add('media', 0.75)
        # Weighted average: (0.5*2 + (0.25+0.75)*1) / (2+1+1) = 0.5.
        self.assertEqual(dist.distance, 0.5)
        # __getitem__()
        self.assertEqual(dist['album'], 0.25)
        self.assertEqual(dist['media'], 0.25)
    def test_max_distance(self):
        config['match']['distance_weights']['album'] = 3.0
        config['match']['distance_weights']['medium'] = 1.0
        _clear_weights()
        dist = Distance()
        dist.add('album', 0.5)
        dist.add('medium', 0.0)
        dist.add('medium', 0.0)
        # Maximum possible: one album penalty (3.0) + two medium (1.0 each).
        self.assertEqual(dist.max_distance, 5.0)
    def test_operators(self):
        config['match']['distance_weights']['source'] = 1.0
        config['match']['distance_weights']['album'] = 2.0
        config['match']['distance_weights']['medium'] = 1.0
        _clear_weights()
        dist = Distance()
        dist.add('source', 0.0)
        dist.add('album', 0.5)
        dist.add('medium', 0.25)
        dist.add('medium', 0.75)
        # len/iter skip zero-penalty keys ('source'); comparison and
        # arithmetic operators treat the Distance as its float value.
        self.assertEqual(len(dist), 2)
        self.assertEqual(list(dist), [('album', 0.2), ('medium', 0.2)])
        self.assertTrue(dist == 0.4)
        self.assertTrue(dist < 1.0)
        self.assertTrue(dist > 0.0)
        self.assertEqual(dist - 0.4, 0.0)
        self.assertEqual(0.4 - dist, 0.0)
        self.assertEqual(float(dist), 0.4)
    def test_raw_distance(self):
        config['match']['distance_weights']['album'] = 3.0
        config['match']['distance_weights']['medium'] = 1.0
        _clear_weights()
        dist = Distance()
        dist.add('album', 0.5)
        dist.add('medium', 0.25)
        dist.add('medium', 0.5)
        # Raw distance is the weighted sum before normalization:
        # 0.5*3 + (0.25+0.5)*1 = 2.25.
        self.assertEqual(dist.raw_distance, 2.25)
    def test_items(self):
        config['match']['distance_weights']['album'] = 4.0
        config['match']['distance_weights']['medium'] = 2.0
        _clear_weights()
        dist = Distance()
        dist.add('album', 0.1875)
        dist.add('medium', 0.75)
        # items() sorts by descending contribution to the total distance.
        self.assertEqual(dist.items(), [('medium', 0.25), ('album', 0.125)])
        # Sort by key if distance is equal.
        dist = Distance()
        dist.add('album', 0.375)
        dist.add('medium', 0.75)
        self.assertEqual(dist.items(), [('album', 0.25), ('medium', 0.25)])
    def test_update(self):
        # update() merges another Distance's penalty lists key by key.
        dist1 = Distance()
        dist1.add('album', 0.5)
        dist1.add('media', 1.0)
        dist2 = Distance()
        dist2.add('album', 0.75)
        dist2.add('album', 0.25)
        dist2.add('media', 0.05)
        dist1.update(dist2)
        self.assertEqual(dist1._penalties, {'album': [0.5, 0.75, 0.25],
                                            'media': [1.0, 0.05]})
class TrackDistanceTest(_common.TestCase):
    """Tests for match.track_distance between one Item and one TrackInfo."""
    def test_identical_tracks(self):
        item = _make_item(u'one', 1)
        info = _make_trackinfo()[0]
        dist = match.track_distance(item, info, incl_artist=True)
        self.assertEqual(dist, 0.0)
    def test_different_title(self):
        item = _make_item(u'foo', 1)
        info = _make_trackinfo()[0]
        dist = match.track_distance(item, info, incl_artist=True)
        self.assertNotEqual(dist, 0.0)
    def test_different_artist(self):
        item = _make_item(u'one', 1)
        item.artist = u'foo'
        info = _make_trackinfo()[0]
        dist = match.track_distance(item, info, incl_artist=True)
        self.assertNotEqual(dist, 0.0)
    def test_various_artists_tolerated(self):
        # 'Various Artists' on the item side should not be penalized even
        # when artist comparison is enabled.
        item = _make_item(u'one', 1)
        item.artist = u'Various Artists'
        info = _make_trackinfo()[0]
        dist = match.track_distance(item, info, incl_artist=True)
        self.assertEqual(dist, 0.0)
class AlbumDistanceTest(_common.TestCase):
    """Tests for match.distance over whole albums (items vs. AlbumInfo)."""
    def _mapping(self, items, info):
        # Pair items with tracks positionally; extra tracks are unmapped.
        out = {}
        for i, t in zip(items, info.tracks):
            out[i] = t
        return out
    def _dist(self, items, info):
        # Convenience wrapper: compute the album distance for a naive
        # positional item->track mapping.
        return match.distance(items, info, self._mapping(items, info))
    def test_identical_albums(self):
        items = []
        items.append(_make_item(u'one', 1))
        items.append(_make_item(u'two', 2))
        items.append(_make_item(u'three', 3))
        info = AlbumInfo(
            artist=u'some artist',
            album=u'some album',
            tracks=_make_trackinfo(),
            va=False,
            album_id=None,
            artist_id=None,
        )
        self.assertEqual(self._dist(items, info), 0)
    def test_incomplete_album(self):
        # A missing track adds a penalty, but a small one.
        items = []
        items.append(_make_item(u'one', 1))
        items.append(_make_item(u'three', 3))
        info = AlbumInfo(
            artist=u'some artist',
            album=u'some album',
            tracks=_make_trackinfo(),
            va=False,
            album_id=None,
            artist_id=None,
        )
        dist = self._dist(items, info)
        self.assertNotEqual(dist, 0)
        # Make sure the distance is not too great
        self.assertTrue(dist < 0.2)
    def test_global_artists_differ(self):
        items = []
        items.append(_make_item(u'one', 1))
        items.append(_make_item(u'two', 2))
        items.append(_make_item(u'three', 3))
        info = AlbumInfo(
            artist=u'someone else',
            album=u'some album',
            tracks=_make_trackinfo(),
            va=False,
            album_id=None,
            artist_id=None,
        )
        self.assertNotEqual(self._dist(items, info), 0)
    def test_comp_track_artists_match(self):
        # On a VA (compilation) album the album artist is ignored; only
        # per-track artists matter.
        items = []
        items.append(_make_item(u'one', 1))
        items.append(_make_item(u'two', 2))
        items.append(_make_item(u'three', 3))
        info = AlbumInfo(
            artist=u'should be ignored',
            album=u'some album',
            tracks=_make_trackinfo(),
            va=True,
            album_id=None,
            artist_id=None,
        )
        self.assertEqual(self._dist(items, info), 0)
    def test_comp_no_track_artists(self):
        # Some VA releases don't have track artists (incomplete metadata).
        items = []
        items.append(_make_item(u'one', 1))
        items.append(_make_item(u'two', 2))
        items.append(_make_item(u'three', 3))
        info = AlbumInfo(
            artist=u'should be ignored',
            album=u'some album',
            tracks=_make_trackinfo(),
            va=True,
            album_id=None,
            artist_id=None,
        )
        info.tracks[0].artist = None
        info.tracks[1].artist = None
        info.tracks[2].artist = None
        self.assertEqual(self._dist(items, info), 0)
    def test_comp_track_artists_do_not_match(self):
        items = []
        items.append(_make_item(u'one', 1))
        items.append(_make_item(u'two', 2, u'someone else'))
        items.append(_make_item(u'three', 3))
        info = AlbumInfo(
            artist=u'some artist',
            album=u'some album',
            tracks=_make_trackinfo(),
            va=True,
            album_id=None,
            artist_id=None,
        )
        self.assertNotEqual(self._dist(items, info), 0)
    def test_tracks_out_of_order(self):
        # Mismatched ordering costs something, but less than a hard cutoff.
        items = []
        items.append(_make_item(u'one', 1))
        items.append(_make_item(u'three', 2))
        items.append(_make_item(u'two', 3))
        info = AlbumInfo(
            artist=u'some artist',
            album=u'some album',
            tracks=_make_trackinfo(),
            va=False,
            album_id=None,
            artist_id=None,
        )
        dist = self._dist(items, info)
        self.assertTrue(0 < dist < 0.2)
    def test_two_medium_release(self):
        # Track indexes continue across media; medium_index differences
        # alone should not be penalized.
        items = []
        items.append(_make_item(u'one', 1))
        items.append(_make_item(u'two', 2))
        items.append(_make_item(u'three', 3))
        info = AlbumInfo(
            artist=u'some artist',
            album=u'some album',
            tracks=_make_trackinfo(),
            va=False,
            album_id=None,
            artist_id=None,
        )
        info.tracks[0].medium_index = 1
        info.tracks[1].medium_index = 2
        info.tracks[2].medium_index = 1
        dist = self._dist(items, info)
        self.assertEqual(dist, 0)
    def test_per_medium_track_numbers(self):
        # Items numbered per-medium (1, 2, 1) should match a multi-medium
        # release whose medium_index values line up.
        items = []
        items.append(_make_item(u'one', 1))
        items.append(_make_item(u'two', 2))
        items.append(_make_item(u'three', 1))
        info = AlbumInfo(
            artist=u'some artist',
            album=u'some album',
            tracks=_make_trackinfo(),
            va=False,
            album_id=None,
            artist_id=None,
        )
        info.tracks[0].medium_index = 1
        info.tracks[1].medium_index = 2
        info.tracks[2].medium_index = 1
        dist = self._dist(items, info)
        self.assertEqual(dist, 0)
class AssignmentTest(unittest.TestCase):
    """Tests for match.assign_items: pairing Items with TrackInfo objects.

    assign_items returns (mapping, extra_items, extra_tracks) where the
    mapping pairs each matched item with its best track candidate.
    """
    def item(self, title, track):
        # Minimal Item: only title and track number matter here.
        return Item(
            title=title, track=track,
            mb_trackid='', mb_albumid='', mb_artistid='',
        )
    def test_reorder_when_track_numbers_incorrect(self):
        # Titles should win over wrong track numbers.
        items = []
        items.append(self.item(u'one', 1))
        items.append(self.item(u'three', 2))
        items.append(self.item(u'two', 3))
        trackinfo = []
        trackinfo.append(TrackInfo(u'one', None))
        trackinfo.append(TrackInfo(u'two', None))
        trackinfo.append(TrackInfo(u'three', None))
        mapping, extra_items, extra_tracks = \
            match.assign_items(items, trackinfo)
        self.assertEqual(extra_items, [])
        self.assertEqual(extra_tracks, [])
        self.assertEqual(mapping, {
            items[0]: trackinfo[0],
            items[1]: trackinfo[2],
            items[2]: trackinfo[1],
        })
    def test_order_works_with_invalid_track_numbers(self):
        # Duplicate track numbers are useless; fall back to titles.
        items = []
        items.append(self.item(u'one', 1))
        items.append(self.item(u'three', 1))
        items.append(self.item(u'two', 1))
        trackinfo = []
        trackinfo.append(TrackInfo(u'one', None))
        trackinfo.append(TrackInfo(u'two', None))
        trackinfo.append(TrackInfo(u'three', None))
        mapping, extra_items, extra_tracks = \
            match.assign_items(items, trackinfo)
        self.assertEqual(extra_items, [])
        self.assertEqual(extra_tracks, [])
        self.assertEqual(mapping, {
            items[0]: trackinfo[0],
            items[1]: trackinfo[2],
            items[2]: trackinfo[1],
        })
    def test_order_works_with_missing_tracks(self):
        # The unmatched candidate track is reported in extra_tracks.
        items = []
        items.append(self.item(u'one', 1))
        items.append(self.item(u'three', 3))
        trackinfo = []
        trackinfo.append(TrackInfo(u'one', None))
        trackinfo.append(TrackInfo(u'two', None))
        trackinfo.append(TrackInfo(u'three', None))
        mapping, extra_items, extra_tracks = \
            match.assign_items(items, trackinfo)
        self.assertEqual(extra_items, [])
        self.assertEqual(extra_tracks, [trackinfo[1]])
        self.assertEqual(mapping, {
            items[0]: trackinfo[0],
            items[1]: trackinfo[2],
        })
    def test_order_works_with_extra_tracks(self):
        # The unmatched item is reported in extra_items.
        items = []
        items.append(self.item(u'one', 1))
        items.append(self.item(u'two', 2))
        items.append(self.item(u'three', 3))
        trackinfo = []
        trackinfo.append(TrackInfo(u'one', None))
        trackinfo.append(TrackInfo(u'three', None))
        mapping, extra_items, extra_tracks = \
            match.assign_items(items, trackinfo)
        self.assertEqual(extra_items, [items[1]])
        self.assertEqual(extra_tracks, [])
        self.assertEqual(mapping, {
            items[0]: trackinfo[0],
            items[2]: trackinfo[1],
        })
    def test_order_works_when_track_names_are_entirely_wrong(self):
        # A real-world test case contributed by a user.
        def item(i, length):
            return Item(
                artist=u'ben harper',
                album=u'burn to shine',
                title=u'ben harper - Burn to Shine {0}'.format(i),
                track=i,
                length=length,
                mb_trackid='', mb_albumid='', mb_artistid='',
            )
        items = []
        items.append(item(1, 241.37243007106997))
        items.append(item(2, 342.27781704375036))
        items.append(item(3, 245.95070222338137))
        items.append(item(4, 472.87662515485437))
        items.append(item(5, 279.1759535763187))
        items.append(item(6, 270.33333768012))
        items.append(item(7, 247.83435613222923))
        items.append(item(8, 216.54504531525072))
        items.append(item(9, 225.72775379800484))
        items.append(item(10, 317.7643606963552))
        items.append(item(11, 243.57001238834192))
        items.append(item(12, 186.45916150485752))
        def info(index, title, length):
            return TrackInfo(title, None, length=length, index=index)
        trackinfo = []
        trackinfo.append(info(1, u'Alone', 238.893))
        trackinfo.append(info(2, u'The Woman in You', 341.44))
        trackinfo.append(info(3, u'Less', 245.59999999999999))
        trackinfo.append(info(4, u'Two Hands of a Prayer', 470.49299999999999))
        trackinfo.append(info(5, u'Please Bleed', 277.86599999999999))
        trackinfo.append(info(6, u'Suzie Blue', 269.30599999999998))
        trackinfo.append(info(7, u'Steal My Kisses', 245.36000000000001))
        trackinfo.append(info(8, u'Burn to Shine', 214.90600000000001))
        trackinfo.append(info(9, u'Show Me a Little Shame', 224.0929999999999))
        trackinfo.append(info(10, u'Forgiven', 317.19999999999999))
        trackinfo.append(info(11, u'Beloved One', 243.733))
        trackinfo.append(info(12, u'In the Lord\'s Arms', 186.13300000000001))
        mapping, extra_items, extra_tracks = \
            match.assign_items(items, trackinfo)
        self.assertEqual(extra_items, [])
        self.assertEqual(extra_tracks, [])
        # With useless titles, track numbers and lengths should still
        # produce a 1:1 positional assignment.
        for item, info in mapping.items():
            self.assertEqual(items.index(item), trackinfo.index(info))
class ApplyTestUtil(object):
    """Mixin providing a helper to run autotag.apply_metadata on self.items."""
    def _apply(self, info=None, per_disc_numbering=False):
        # Default to the AlbumInfo prepared in the subclass's setUp().
        info = info or self.info
        # Map items to tracks positionally.
        mapping = {}
        for i, t in zip(self.items, info.tracks):
            mapping[i] = t
        config['per_disc_numbering'] = per_disc_numbering
        autotag.apply_metadata(info, mapping)
class ApplyTest(_common.TestCase, ApplyTestUtil):
    """Tests that autotag.apply_metadata copies AlbumInfo/TrackInfo fields
    onto Items for a regular (non-compilation) two-medium album.
    """
    def setUp(self):
        super(ApplyTest, self).setUp()
        # Two blank items to receive metadata.
        self.items = []
        self.items.append(Item({}))
        self.items.append(Item({}))
        # Two tracks on separate media, each medium holding one track.
        trackinfo = []
        trackinfo.append(TrackInfo(
            u'oneNew',
            u'dfa939ec-118c-4d0f-84a0-60f3d1e6522c',
            medium=1,
            medium_index=1,
            medium_total=1,
            index=1,
            artist_credit='trackArtistCredit',
            artist_sort='trackArtistSort',
        ))
        trackinfo.append(TrackInfo(
            u'twoNew',
            u'40130ed1-a27c-42fd-a328-1ebefb6caef4',
            medium=2,
            medium_index=1,
            index=2,
            medium_total=1,
        ))
        self.info = AlbumInfo(
            tracks=trackinfo,
            artist=u'artistNew',
            album=u'albumNew',
            album_id='7edb51cb-77d6-4416-a23c-3a8c2994a2c7',
            artist_id='a6623d39-2d8e-4f70-8242-0a9553b91e50',
            artist_credit=u'albumArtistCredit',
            artist_sort=u'albumArtistSort',
            albumtype=u'album',
            va=False,
            mediums=2,
        )
    def test_titles_applied(self):
        self._apply()
        self.assertEqual(self.items[0].title, 'oneNew')
        self.assertEqual(self.items[1].title, 'twoNew')
    def test_album_and_artist_applied_to_all(self):
        self._apply()
        self.assertEqual(self.items[0].album, 'albumNew')
        self.assertEqual(self.items[1].album, 'albumNew')
        self.assertEqual(self.items[0].artist, 'artistNew')
        self.assertEqual(self.items[1].artist, 'artistNew')
    def test_track_index_applied(self):
        self._apply()
        self.assertEqual(self.items[0].track, 1)
        self.assertEqual(self.items[1].track, 2)
    def test_track_total_applied(self):
        self._apply()
        self.assertEqual(self.items[0].tracktotal, 2)
        self.assertEqual(self.items[1].tracktotal, 2)
    def test_disc_index_applied(self):
        self._apply()
        self.assertEqual(self.items[0].disc, 1)
        self.assertEqual(self.items[1].disc, 2)
    def test_disc_total_applied(self):
        self._apply()
        self.assertEqual(self.items[0].disctotal, 2)
        self.assertEqual(self.items[1].disctotal, 2)
    def test_per_disc_numbering(self):
        # With per-disc numbering, each item restarts at its medium_index.
        self._apply(per_disc_numbering=True)
        self.assertEqual(self.items[0].track, 1)
        self.assertEqual(self.items[1].track, 1)
    def test_per_disc_numbering_track_total(self):
        # tracktotal becomes the per-medium total instead of the album total.
        self._apply(per_disc_numbering=True)
        self.assertEqual(self.items[0].tracktotal, 1)
        self.assertEqual(self.items[1].tracktotal, 1)
    def test_mb_trackid_applied(self):
        self._apply()
        self.assertEqual(self.items[0].mb_trackid,
                         'dfa939ec-118c-4d0f-84a0-60f3d1e6522c')
        self.assertEqual(self.items[1].mb_trackid,
                         '40130ed1-a27c-42fd-a328-1ebefb6caef4')
    def test_mb_albumid_and_artistid_applied(self):
        self._apply()
        for item in self.items:
            self.assertEqual(item.mb_albumid,
                             '7edb51cb-77d6-4416-a23c-3a8c2994a2c7')
            self.assertEqual(item.mb_artistid,
                             'a6623d39-2d8e-4f70-8242-0a9553b91e50')
    def test_albumtype_applied(self):
        self._apply()
        self.assertEqual(self.items[0].albumtype, 'album')
        self.assertEqual(self.items[1].albumtype, 'album')
    def test_album_artist_overrides_empty_track_artist(self):
        my_info = copy.deepcopy(self.info)
        self._apply(info=my_info)
        self.assertEqual(self.items[0].artist, 'artistNew')
        self.assertEqual(self.items[1].artist, 'artistNew')
    def test_album_artist_overriden_by_nonempty_track_artist(self):
        my_info = copy.deepcopy(self.info)
        my_info.tracks[0].artist = 'artist1!'
        my_info.tracks[1].artist = 'artist2!'
        self._apply(info=my_info)
        self.assertEqual(self.items[0].artist, 'artist1!')
        self.assertEqual(self.items[1].artist, 'artist2!')
    def test_artist_credit_applied(self):
        self._apply()
        # Track 0 has its own credit; track 1 falls back to the album's.
        self.assertEqual(self.items[0].albumartist_credit, 'albumArtistCredit')
        self.assertEqual(self.items[0].artist_credit, 'trackArtistCredit')
        self.assertEqual(self.items[1].albumartist_credit, 'albumArtistCredit')
        self.assertEqual(self.items[1].artist_credit, 'albumArtistCredit')
    def test_artist_sort_applied(self):
        self._apply()
        # Same fallback behavior as artist_credit, for sort names.
        self.assertEqual(self.items[0].albumartist_sort, 'albumArtistSort')
        self.assertEqual(self.items[0].artist_sort, 'trackArtistSort')
        self.assertEqual(self.items[1].albumartist_sort, 'albumArtistSort')
        self.assertEqual(self.items[1].artist_sort, 'albumArtistSort')
    def test_full_date_applied(self):
        my_info = copy.deepcopy(self.info)
        my_info.year = 2013
        my_info.month = 12
        my_info.day = 18
        self._apply(info=my_info)
        self.assertEqual(self.items[0].year, 2013)
        self.assertEqual(self.items[0].month, 12)
        self.assertEqual(self.items[0].day, 18)
    def test_date_only_zeros_month_and_day(self):
        # A year-only date must clear any stale month/day on the items.
        self.items = []
        self.items.append(Item(year=1, month=2, day=3))
        self.items.append(Item(year=4, month=5, day=6))
        my_info = copy.deepcopy(self.info)
        my_info.year = 2013
        self._apply(info=my_info)
        self.assertEqual(self.items[0].year, 2013)
        self.assertEqual(self.items[0].month, 0)
        self.assertEqual(self.items[0].day, 0)
    def test_missing_date_applies_nothing(self):
        # No date in the info: the items keep their existing date fields.
        self.items = []
        self.items.append(Item(year=1, month=2, day=3))
        self.items.append(Item(year=4, month=5, day=6))
        self._apply()
        self.assertEqual(self.items[0].year, 1)
        self.assertEqual(self.items[0].month, 2)
        self.assertEqual(self.items[0].day, 3)
    def test_data_source_applied(self):
        my_info = copy.deepcopy(self.info)
        my_info.data_source = 'MusicBrainz'
        self._apply(info=my_info)
        self.assertEqual(self.items[0].data_source, 'MusicBrainz')
class ApplyCompilationTest(_common.TestCase, ApplyTestUtil):
    """Tests apply_metadata behavior specific to compilation (VA) albums."""
    def setUp(self):
        super(ApplyCompilationTest, self).setUp()
        self.items = []
        self.items.append(Item({}))
        self.items.append(Item({}))
        # Each track has its own artist and artist ID, distinct from the
        # album-level 'variousNew' artist.
        trackinfo = []
        trackinfo.append(TrackInfo(
            u'oneNew',
            u'dfa939ec-118c-4d0f-84a0-60f3d1e6522c',
            u'artistOneNew',
            u'a05686fc-9db2-4c23-b99e-77f5db3e5282',
            index=1,
        ))
        trackinfo.append(TrackInfo(
            u'twoNew',
            u'40130ed1-a27c-42fd-a328-1ebefb6caef4',
            u'artistTwoNew',
            u'80b3cf5e-18fe-4c59-98c7-e5bb87210710',
            index=2,
        ))
        self.info = AlbumInfo(
            tracks=trackinfo,
            artist=u'variousNew',
            album=u'albumNew',
            album_id='3b69ea40-39b8-487f-8818-04b6eff8c21a',
            artist_id='89ad4ac3-39f7-470e-963a-56509c546377',
            albumtype=u'compilation',
        )
    def test_album_and_track_artists_separate(self):
        self._apply()
        self.assertEqual(self.items[0].artist, 'artistOneNew')
        self.assertEqual(self.items[1].artist, 'artistTwoNew')
        self.assertEqual(self.items[0].albumartist, 'variousNew')
        self.assertEqual(self.items[1].albumartist, 'variousNew')
    def test_mb_albumartistid_applied(self):
        self._apply()
        # Album artist ID is shared; per-track artist IDs differ.
        self.assertEqual(self.items[0].mb_albumartistid,
                         '89ad4ac3-39f7-470e-963a-56509c546377')
        self.assertEqual(self.items[1].mb_albumartistid,
                         '89ad4ac3-39f7-470e-963a-56509c546377')
        self.assertEqual(self.items[0].mb_artistid,
                         'a05686fc-9db2-4c23-b99e-77f5db3e5282')
        self.assertEqual(self.items[1].mb_artistid,
                         '80b3cf5e-18fe-4c59-98c7-e5bb87210710')
    def test_va_flag_cleared_does_not_set_comp(self):
        # albumtype 'compilation' alone does not set comp; the va flag does.
        self._apply()
        self.assertFalse(self.items[0].comp)
        self.assertFalse(self.items[1].comp)
    def test_va_flag_sets_comp(self):
        va_info = copy.deepcopy(self.info)
        va_info.va = True
        self._apply(info=va_info)
        self.assertTrue(self.items[0].comp)
        self.assertTrue(self.items[1].comp)
class StringDistanceTest(unittest.TestCase):
    """Tests for hooks.string_dist: normalized edit distance with
    music-specific heuristics (articles, parentheticals, 'feat.', etc.).
    """
    def test_equal_strings(self):
        dist = string_dist(u'Some String', u'Some String')
        self.assertEqual(dist, 0.0)
    def test_different_strings(self):
        dist = string_dist(u'Some String', u'Totally Different')
        self.assertNotEqual(dist, 0.0)
    def test_punctuation_ignored(self):
        dist = string_dist(u'Some String', u'Some.String!')
        self.assertEqual(dist, 0.0)
    def test_case_ignored(self):
        dist = string_dist(u'Some String', u'sOME sTring')
        self.assertEqual(dist, 0.0)
    def test_leading_the_has_lower_weight(self):
        # A leading article mismatch should cost less than an arbitrary
        # leading-word mismatch of the same length.
        dist1 = string_dist(u'XXX Band Name', u'Band Name')
        dist2 = string_dist(u'The Band Name', u'Band Name')
        self.assertTrue(dist2 < dist1)
    def test_parens_have_lower_weight(self):
        dist1 = string_dist(u'One .Two.', u'One')
        dist2 = string_dist(u'One (Two)', u'One')
        self.assertTrue(dist2 < dist1)
    def test_brackets_have_lower_weight(self):
        dist1 = string_dist(u'One .Two.', u'One')
        dist2 = string_dist(u'One [Two]', u'One')
        self.assertTrue(dist2 < dist1)
    def test_ep_label_has_zero_weight(self):
        dist = string_dist(u'My Song (EP)', u'My Song')
        self.assertEqual(dist, 0.0)
    def test_featured_has_lower_weight(self):
        dist1 = string_dist(u'My Song blah Someone', u'My Song')
        dist2 = string_dist(u'My Song feat Someone', u'My Song')
        self.assertTrue(dist2 < dist1)
    def test_postfix_the(self):
        # 'Title, The' style suffixed articles are normalized away.
        dist = string_dist(u'The Song Title', u'Song Title, The')
        self.assertEqual(dist, 0.0)
    def test_postfix_a(self):
        dist = string_dist(u'A Song Title', u'Song Title, A')
        self.assertEqual(dist, 0.0)
    def test_postfix_an(self):
        dist = string_dist(u'An Album Title', u'Album Title, An')
        self.assertEqual(dist, 0.0)
    def test_empty_strings(self):
        dist = string_dist(u'', u'')
        self.assertEqual(dist, 0.0)
    def test_solo_pattern(self):
        # Just make sure these don't crash.
        string_dist(u'The ', u'')
        string_dist(u'(EP)', u'(EP)')
        string_dist(u', An', u'')
    def test_heuristic_does_not_harm_distance(self):
        dist = string_dist(u'Untitled', u'[Untitled]')
        self.assertEqual(dist, 0.0)
    def test_ampersand_expansion(self):
        dist = string_dist(u'And', u'&')
        self.assertEqual(dist, 0.0)
    def test_accented_characters(self):
        # Accented characters compare equal to their ASCII counterparts.
        dist = string_dist(u'\xe9\xe1\xf1', u'ean')
        self.assertEqual(dist, 0.0)
class EnumTest(_common.TestCase):
    """
    Test Enum Subclasses defined in beets.util.enumeration
    """
    def test_ordered_enum(self):
        # OrderedEnum members compare by definition order: a < b < c.
        OrderedEnumClass = match.OrderedEnum('OrderedEnumTest', ['a', 'b', 'c'])  # noqa
        self.assertLess(OrderedEnumClass.a, OrderedEnumClass.b)
        self.assertLess(OrderedEnumClass.a, OrderedEnumClass.c)
        self.assertLess(OrderedEnumClass.b, OrderedEnumClass.c)
        self.assertGreater(OrderedEnumClass.b, OrderedEnumClass.a)
        self.assertGreater(OrderedEnumClass.c, OrderedEnumClass.a)
        self.assertGreater(OrderedEnumClass.c, OrderedEnumClass.b)
def suite():
    """Collect every test in this module for the unittest runner."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromName(__name__)
# Run the whole module's suite when executed as a script.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| {
"content_hash": "aef0556b9cd24f0a958ab1ec53c1f2b0",
"timestamp": "",
"source": "github",
"line_count": 941,
"max_line_length": 88,
"avg_line_length": 35.722635494155156,
"alnum_prop": 0.5782835043879221,
"repo_name": "jcoady9/beets",
"id": "b2f3fd61d06c8fecc1d33452dcdee2fbbd9346e3",
"size": "34286",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_autotag.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2951"
},
{
"name": "HTML",
"bytes": "3307"
},
{
"name": "JavaScript",
"bytes": "85950"
},
{
"name": "Python",
"bytes": "1767900"
},
{
"name": "Shell",
"bytes": "7413"
}
],
"symlink_target": ""
} |
import pytest
from anchore_engine.common import helpers
from anchore_engine.common.helpers import make_anchore_exception
# Parametrized (input, expected) cases for safe_extract_json_value:
# a JSON string is decoded to its value, while non-string inputs appear
# to pass through unchanged.
values = [
    pytest.param("{}", {}, id="'{}'"),
    pytest.param({}, {}, id="{}"),
    pytest.param("a string", "a string", id="'a string'"),
]
class TestSafeExtractJsonValue:
    """Exercise helpers.safe_extract_json_value over the shared cases."""

    @pytest.mark.parametrize("value, expected", values)
    def test_inputs(self, value, expected):
        # Each case pairs a raw input with the value it should extract to.
        assert helpers.safe_extract_json_value(value) == expected
class TestExtractPythonContent:
    """Tests for helpers.extract_python_content against a canned analyzer
    report shaped like anchore's image-analysis output.
    """
    # Minimal imagedata structure: one Python package (PyYAML) discovered
    # by the pkgs.python analyzer, with its metadata as a JSON string.
    IMAGE_DATA_STRUCTURE = {
        "imagedata": {
            "analysis_report": {
                "package_list": {
                    "pkgs.python": {
                        "base": {
                            "/usr/lib/python3/dist-packages/PyYAML": '{"cpes": ["cpe:2.3:a:PyYAML:PyYAML:3.10:*:*:*:*:python:*:*","cpe:2.3:a:python-PyYAML:PyYAML:3.10:*:*:*:*:python:*:*","cpe:2.3:a:*:PyYAML:3.10:*:*:*:*:python:*:*","cpe:2.3:a:PyYAML:PyYAML:3.10:*:*:*:*:*:*:*","cpe:2.3:a:python-PyYAML:PyYAML:3.10:*:*:*:*:*:*:*","cpe:2.3:a:*:PyYAML:3.10:*:*:*:*:*:*:*"],"license": "MIT","licenses": ["MIT"],"location": "/usr/lib/python3/dist-packages/PyYAML","origin": "Kirill Simonov <xi@resolvent.net>","package": "PyYAML","type": "PYTHON","version": "3.10"}'
                        }
                    }
                }
            }
        }
    }
    def test_valid_data(self):
        extracted_content = helpers.extract_python_content(self.IMAGE_DATA_STRUCTURE)
        # The result is keyed by install location, with the JSON metadata
        # decoded into a dict.
        key = "/usr/lib/python3/dist-packages/PyYAML"
        assert extracted_content is not None
        assert key in extracted_content
        assert extracted_content[key]["package"] == "PyYAML"
        assert extracted_content[key]["type"] == "PYTHON"
        assert extracted_content[key]["version"] == "3.10"
        assert extracted_content[key]["location"] == key
        assert extracted_content[key]["license"] == "MIT"
        assert len(extracted_content[key]["licenses"]) == 1
        assert extracted_content[key]["licenses"][0] == "MIT"
        assert len(extracted_content[key]["cpes"]) > 0
class TestMakeResponseError:
    """Tests for helpers.make_response_error: building the standard error
    response dict (message/httpcode/detail) from strings and exceptions.
    """
    # NOTE(review): the name 'TestException' starts with 'Test', so pytest
    # may emit a collection warning for it — consider renaming; verify.
    class TestException(Exception):
        def __init__(self, msg, anchore_error_json=None):
            super().__init__(msg)
            # Only attach the attribute when provided, so tests can cover
            # both the present and absent cases.
            if anchore_error_json is not None:
                self.anchore_error_json = anchore_error_json
    # Each case supplies errmsg/in_httpcode/details and the expected
    # response dict.
    params = [
        pytest.param(
            {
                "errmsg": "basic-test-case",
                "in_httpcode": None,
                "details": None,
                "expected": {
                    "message": "basic-test-case",
                    "httpcode": 500,
                    "detail": {"error_codes": []},
                },
            },
            id="basic",
        ),
        pytest.param(
            {
                "errmsg": "basic-test-case",
                "in_httpcode": 400,
                "details": None,
                "expected": {
                    "message": "basic-test-case",
                    "httpcode": 400,
                    "detail": {"error_codes": []},
                },
            },
            id="basic-with-httpcode",
        ),
        pytest.param(
            {
                "errmsg": "basic-test-case",
                "in_httpcode": None,
                "details": {"test": "value"},
                "expected": {
                    "message": "basic-test-case",
                    "httpcode": 500,
                    "detail": {
                        "test": "value",
                        "error_codes": [],
                    },
                },
            },
            id="basic-with-details",
        ),
        pytest.param(
            {
                "errmsg": "basic-test-case",
                "in_httpcode": None,
                "details": {"error_codes": [500, 404]},
                "expected": {
                    "message": "basic-test-case",
                    "httpcode": 500,
                    "detail": {"error_codes": [500, 404]},
                },
            },
            id="basic-with-error-codes",
        ),
        pytest.param(
            {
                "errmsg": Exception("thisisatest"),
                "in_httpcode": None,
                "details": None,
                "expected": {
                    "message": "thisisatest",
                    "httpcode": 500,
                    "detail": {"error_codes": []},
                },
            },
            id="basic-exception",
        ),
        pytest.param(
            {
                # An exception carrying anchore_error_json overrides the
                # passed-in httpcode and message.
                "errmsg": TestException(
                    "testexception",
                    anchore_error_json={
                        "message": "test",
                        "httpcode": 500,
                        "detail": {"error_codes": [404]},
                    },
                ),
                "in_httpcode": 400,
                "details": None,
                "expected": {
                    "message": "test",
                    "httpcode": 500,
                    "detail": {"error_codes": [404]},
                },
            },
            id="basic-exception-with-anchore-error-json",
        ),
        pytest.param(
            {
                # A top-level 'error_code' key is folded into
                # detail.error_codes.
                "errmsg": TestException(
                    "testexception",
                    anchore_error_json={
                        "message": "test",
                        "httpcode": 500,
                        "detail": {"error_codes": [404]},
                        "error_code": 401,
                    },
                ),
                "in_httpcode": 400,
                "details": None,
                "expected": {
                    "message": "test",
                    "httpcode": 500,
                    "detail": {"error_codes": [404, 401]},
                },
            },
            id="basic-exception-with-anchore-error-json-and-error-code",
        ),
        pytest.param(
            {
                # anchore_error_json may be a JSON string; it is decoded.
                "errmsg": TestException(
                    "testexception",
                    anchore_error_json='{"message": "test", "httpcode": 500, "detail": {"error_codes": [404]}}',
                ),
                "in_httpcode": 400,
                "details": None,
                "expected": {
                    "message": "test",
                    "httpcode": 500,
                    "detail": {"error_codes": [404]},
                },
            },
            id="basic-exception-with-json-string",
        ),
        pytest.param(
            {
                # Malformed JSON string is ignored; fall back to the
                # exception message and the passed-in httpcode.
                "errmsg": TestException(
                    "testexception",
                    anchore_error_json='{"message" "test", "httpcode": 500, "detail": {"error_codes": [404]}}',
                ),
                "in_httpcode": 400,
                "details": None,
                "expected": {
                    "message": "testexception",
                    "httpcode": 400,
                    "detail": {"error_codes": []},
                },
            },
            id="basic-exception-with-bad-json-string",
        ),
    ]
    @pytest.mark.parametrize("param", params)
    def test_make_response_error(self, param):
        actual = helpers.make_response_error(
            param["errmsg"], param["in_httpcode"], param["details"]
        )
        assert actual["message"] == param["expected"]["message"]
        assert actual["httpcode"] == param["expected"]["httpcode"]
        assert actual["detail"] == param["expected"]["detail"]
class TestMakeAnchoreException:
    """Tests for make_anchore_exception: plain-string and Exception inputs,
    plus handling/overriding of pre-existing ``anchore_error_json`` and
    ``error_code`` attributes on the passed-in exception."""

    # This handles the case where attributes are already set on the exception passed in
    # NOTE(review): this single Exception instance is shared across the
    # parametrized cases below; if make_anchore_exception mutates it, the
    # outcome of later cases may depend on execution order -- confirm.
    err_with_attrs = Exception("test")
    err_with_attrs.anchore_error_json = {
        "message": "attr-test",
        "detail": {
            "raw_exception_message": "attribute test",
            "error_codes": [],
        },
        "httpcode": 500,
    }
    err_with_attrs.error_code = 404
    @pytest.mark.parametrize(
        "param",
        [
            pytest.param(
                {
                    "err": "test",
                    "input_message": None,
                    "input_httpcode": None,
                    "input_detail": None,
                    "override_existing": None,
                    "input_error_codes": None,
                    "expected_msg": "test",
                    "expected_anchore_json": {
                        "message": "test",
                        "detail": {
                            "raw_exception_message": "test",
                            "error_codes": [],
                        },
                        "httpcode": 500,
                    },
                },
                id="string-err-only",
            ),
            pytest.param(
                {
                    "err": Exception("test"),
                    "input_message": None,
                    "input_httpcode": None,
                    "input_detail": None,
                    "override_existing": None,
                    "input_error_codes": None,
                    "expected_msg": "test",
                    "expected_anchore_json": {
                        "message": "test",
                        "detail": {
                            "raw_exception_message": "test",
                            "error_codes": [],
                        },
                        "httpcode": 500,
                    },
                },
                id="err-only",
            ),
            pytest.param(
                {
                    "err": err_with_attrs,
                    "input_message": None,
                    "input_httpcode": None,
                    "input_detail": None,
                    "override_existing": None,
                    "input_error_codes": None,
                    "expected_msg": "test",
                    "expected_anchore_json": {
                        "message": "attr-test",
                        "detail": {
                            "raw_exception_message": "attribute test",
                            "error_codes": [404],
                        },
                        "httpcode": 500,
                    },
                },
                id="err-only-with-attrs",
            ),
            pytest.param(
                {
                    "err": err_with_attrs,
                    "input_message": "override-msg",
                    "input_httpcode": 401,
                    "input_detail": {"unit": "test"},
                    "override_existing": True,
                    "input_error_codes": [402, 403],
                    "expected_msg": "test",
                    "expected_anchore_json": {
                        "message": "override-msg",
                        "detail": {"unit": "test", "error_codes": [402, 403, 404]},
                        "httpcode": 401,
                    },
                },
                id="override-successful",
            ),
        ],
    )
    def test_make_anchore_exception(self, param):
        """The returned exception should stringify to the expected message and
        carry a normalized anchore_error_json payload."""
        actual = make_anchore_exception(
            param["err"],
            param["input_message"],
            param["input_httpcode"],
            param["input_detail"],
            param["override_existing"],
            param["input_error_codes"],
        )
        assert str(actual) == param["expected_msg"]
        assert actual.anchore_error_json == param["expected_anchore_json"]
| {
"content_hash": "9fd241d6f82e55da57058834259ce3dc",
"timestamp": "",
"source": "github",
"line_count": 319,
"max_line_length": 561,
"avg_line_length": 35.71473354231975,
"alnum_prop": 0.40112349688405163,
"repo_name": "anchore/anchore-engine",
"id": "4e9fb0ab2c5ac77e110a2fb5f6af73f623383181",
"size": "11393",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/anchore_engine/common/test_helpers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "3889"
},
{
"name": "Dockerfile",
"bytes": "10954"
},
{
"name": "Makefile",
"bytes": "12274"
},
{
"name": "Python",
"bytes": "4529553"
},
{
"name": "Shell",
"bytes": "16598"
}
],
"symlink_target": ""
} |
# Scrapy settings for the bgmapi (Bangumi spider) project.
# See https://doc.scrapy.org/en/latest/topics/settings.html for the full
# reference on every setting used below.
BOT_NAME = 'bgmapi'
SPIDER_MODULES = ['bgmapi.spiders']
NEWSPIDER_MODULE = 'bgmapi.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'bgmapi (https://github.com/wattlebird/Bangumi_Spider)'
# Obey robots.txt rules (deliberately disabled for this project)
ROBOTSTXT_OBEY = False
# Keep log output terse; raise to DEBUG when diagnosing spider behavior.
LOG_LEVEL='INFO'
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
# NOTE(review): the browser-style 'User-Agent' here likely takes precedence
# over the USER_AGENT setting above for outgoing requests -- confirm the
# middleware ordering if identification matters.
DEFAULT_REQUEST_HEADERS = {
  'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
  'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64…) Gecko/20100101 Firefox/63.0'
}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
SPIDER_MIDDLEWARES = {
   # 'scrapy.spidermiddlewares.httperror.HttpErrorMiddleware': 404,
}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'bgmapi.middlewares.BgmapiDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'bgmapi.pipelines.AzureBlobPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
AUTOTHROTTLE_TARGET_CONCURRENCY = 1
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Retry failed requests aggressively (default is 2 retries).
RETRY_TIMES = 10
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
# Let spider callbacks receive 404 responses instead of filtering them out.
HTTPERROR_ALLOWED_CODES = [404]
# BaseDupeFilter performs no duplicate-request filtering, so repeated URLs
# are fetched again.
DUPEFILTER_CLASS = 'scrapy.dupefilters.BaseDupeFilter'
# Export scraped items as JSON Lines, one file per spider name.
FEEDS = {
    '%(name)s.jsonlines': {
        'format': 'jsonlines',
        'overwrite': True
    }
}
FEED_EXPORT_ENCODING = 'utf-8'
########################################
# The following settings are only applicable to Ronnie Wang's spider setting
# Because Ronnie only uses Azure for storage.
UPLOAD_TO_AZURE_STORAGE = False
AZURE_ACCOUNT_NAME = "ikely" # this is the subdomain to https://*.blob.core.windows.net/
AZURE_ACCOUNT_KEY = ""
AZURE_CONTAINER = 'bangumi' # the name of the container (you should have already created it)
########################################
"content_hash": "92886901d3e1cd4f62c2bb3b4278f055",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 102,
"avg_line_length": 34.28,
"alnum_prop": 0.7389148191365228,
"repo_name": "wattlebird/Bangumi_Spider",
"id": "66081a883eceb51b3baa65c9d05346b1e2c27021",
"size": "3848",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bgmapi/settings.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "759"
},
{
"name": "Python",
"bytes": "64473"
},
{
"name": "Shell",
"bytes": "2836"
}
],
"symlink_target": ""
} |
"""Support for SimpliSafe freeze sensor."""
from simplipy.entity import EntityTypes
from homeassistant.components.sensor import SensorEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import DEVICE_CLASS_TEMPERATURE, TEMP_FAHRENHEIT
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import SimpliSafeBaseSensor
from .const import DATA_CLIENT, DOMAIN, LOGGER
async def async_setup_entry(
    hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
    """Set up SimpliSafe freeze sensors based on a config entry."""
    simplisafe = hass.data[DOMAIN][DATA_CLIENT][entry.entry_id]
    entities = []
    for system in simplisafe.systems.values():
        # V2 systems get no freeze sensors; log and move on.
        if system.version == 2:
            LOGGER.info("Skipping sensor setup for V2 system: %s", system.system_id)
            continue
        entities.extend(
            SimplisafeFreezeSensor(simplisafe, system, sensor)
            for sensor in system.sensors.values()
            if sensor.type == EntityTypes.temperature
        )
    async_add_entities(entities)
class SimplisafeFreezeSensor(SimpliSafeBaseSensor, SensorEntity):
    """Define a SimpliSafe freeze sensor entity."""

    # Expose as a temperature sensor reporting degrees Fahrenheit
    # (presumably the unit the SimpliSafe API returns -- confirm).
    _attr_device_class = DEVICE_CLASS_TEMPERATURE
    _attr_native_unit_of_measurement = TEMP_FAHRENHEIT
    @callback
    def async_update_from_rest_api(self) -> None:
        """Update the entity with the provided REST API data."""
        # Mirror the latest temperature reading from the wrapped sensor object.
        self._attr_native_value = self._sensor.temperature
| {
"content_hash": "01c34360f3b7c2b12f71c9eeae33b7ff",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 84,
"avg_line_length": 37.45238095238095,
"alnum_prop": 0.7329942784488239,
"repo_name": "sander76/home-assistant",
"id": "c3f8d7c3ab0bb73f77a5ae32654dded23993f98b",
"size": "1573",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/simplisafe/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "36548768"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
} |
import os
import tempfile
import shutil
from unittest import skip
from lxml import etree
from hs_core.hydroshare import resource
from .base import ModelInstanceSciMetaTestCase
class TestScienceMetadataSWAT(ModelInstanceSciMetaTestCase):
    """REST API tests for PUTting science metadata on a SWAT model instance
    resource.

    The original test repeated the same serialize/PUT/GET/verify cycle three
    times; that cycle is factored into the private helpers below so each
    expectation is stated once.
    """

    def _put_scimeta(self, pid, tmp_dir, scimeta):
        """Serialize ``scimeta`` to a file under ``tmp_dir`` and send it to
        the REST API for resource ``pid``.

        Returns the path of the written file so callers can re-send the
        identical payload (used for the idempotency check).
        """
        out = etree.tostring(scimeta, pretty_print=True)
        sci_meta_new = os.path.join(tmp_dir, self.RESOURCE_METADATA)
        with open(sci_meta_new, 'w') as f:
            f.writelines(out)
        self.updateScimeta(pid, sci_meta_new)
        return sci_meta_new

    def _get_scimeta(self, pid, tmp_dir):
        """Fetch the science metadata for ``pid`` via the REST API, stream it
        to a file under ``tmp_dir``, and return it parsed as an lxml tree."""
        response = self.getScienceMetadata(pid, exhaust_stream=False)
        sci_meta_updated = os.path.join(tmp_dir, self.RESOURCE_METADATA_UPDATED)
        with open(sci_meta_updated, 'w') as f:
            for l in response.streaming_content:
                f.write(l)
        return etree.parse(sci_meta_updated)

    def _assert_scimeta(self, scimeta, title, abstract, kwords, model_output,
                        prog_name, prog_id, prog_id_is_url=False):
        """Assert that parsed metadata carries the expected title, abstract,
        keywords, ModelOutput flag and ExecutedBy name/id.

        When ``prog_id_is_url`` is True the id element holds a URL, so only
        its last path component is compared against ``prog_id``.
        """
        self.assertEqual(self.getAbstract(scimeta), abstract)
        self.assertEqual(self.getTitle(scimeta), title)
        for expected_kw, actual_kw in zip(kwords, self.getKeywords(scimeta)):
            self.assertEqual(expected_kw, actual_kw)
        model_output_elems = scimeta.xpath(self.MOD_OUT_PATH,
                                           namespaces=self.NS)
        self.assertEqual(len(model_output_elems), 1)
        self.assertEqual(model_output, model_output_elems[0].text)
        prog_name_elems = scimeta.xpath(self.EXECUTED_BY_NAME_PATH,
                                        namespaces=self.NS)
        self.assertEqual(len(prog_name_elems), 1)
        self.assertEqual(prog_name, prog_name_elems[0].text)
        prog_id_elems = scimeta.xpath(self.EXECUTED_BY_ID_PATH,
                                      namespaces=self.NS)
        self.assertEqual(len(prog_id_elems), 1)
        actual_id = prog_id_elems[0].text
        if prog_id_is_url:
            # Compare only the trailing path segment of the stored URL.
            actual_id = actual_id.strip('/').rpartition('/')[-1]
        self.assertEqual(prog_id, actual_id)

    @skip("skip this test, as we do not support SWATModelInstanceResource types in myHPOM")
    def test_put_scimeta_swat_model_instance(self):
        """Update science metadata twice (same payload, then a different one)
        and verify the stored metadata after each update."""
        # Expected metadata values for the two saved test documents.
        title_1 = 'Flat River SWAT Instance'
        title_2 = 'Cannon river'
        abstract_text_1 = 'This model is created for Flat River.'
        abstract_text_2 = ('This is a test to the SWAT Model Instance resource. '
                           'All the data had been obtained from real share SWAT '
                           'model from SWATShare https://mygeohub.org/groups/water-hub/swatshare. '
                           'Some of the metadata entries are assumed just used '
                           'to test the resource implementation')
        kwords_1 = ('SWAT2009', 'FlatRIver')
        kwords_2 = ('Cannon River', 'SWAT', 'SWATShare')
        model_output_1 = 'No'
        model_output_2 = 'Yes'
        model_prog_name_1 = 'Unspecified'
        model_prog_name_2 = self.title_prog
        model_prog_id_1 = 'None'
        model_prog_id_2 = self.pid_prog
        tmp_dir = tempfile.mkdtemp()
        res = resource.create_resource('SWATModelInstanceResource',
                                       self.user,
                                       'Test SWAT Model Instance Resource')
        pid = res.short_id
        self.resources_to_delete.append(pid)
        try:
            # Apply metadata from a saved file, first updating the resource ID
            # so that it matches the ID of the newly created resource.
            scimeta = etree.parse('hs_core/tests/data/swat-resourcemetadata-1.xml')
            self.updateScimetaResourceID(scimeta, pid)
            sci_meta_new = self._put_scimeta(pid, tmp_dir, scimeta)
            self._assert_scimeta(self._get_scimeta(pid, tmp_dir),
                                 title_1, abstract_text_1, kwords_1,
                                 model_output_1, model_prog_name_1,
                                 model_prog_id_1)

            # Make sure the metadata update is idempotent: re-send the very
            # same payload and expect identical stored metadata.
            self.updateScimeta(pid, sci_meta_new)
            self._assert_scimeta(self._get_scimeta(pid, tmp_dir),
                                 title_1, abstract_text_1, kwords_1,
                                 model_output_1, model_prog_name_1,
                                 model_prog_id_1)

            # Overwrite the metadata with a second document (and a different
            # ExecutedBy program).
            scimeta = etree.parse('hs_core/tests/data/swat-resourcemetadata-2.xml')
            self.updateScimetaResourceID(scimeta, pid)
            self.updateExecutedBy(scimeta, model_prog_name_2, model_prog_id_2)
            self._put_scimeta(pid, tmp_dir, scimeta)
            self._assert_scimeta(self._get_scimeta(pid, tmp_dir),
                                 title_2, abstract_text_2, kwords_2,
                                 model_output_2, model_prog_name_2,
                                 model_prog_id_2, prog_id_is_url=True)
        finally:
            shutil.rmtree(tmp_dir)
| {
"content_hash": "8cae3bcf425623a8b2b7df3ea7d08751",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 99,
"avg_line_length": 42.22222222222222,
"alnum_prop": 0.5663157894736842,
"repo_name": "ResearchSoftwareInstitute/MyHPOM",
"id": "eff7cb805bd785772352b8d8d7caed2822430156",
"size": "7600",
"binary": false,
"copies": "1",
"ref": "refs/heads/myhpom-develop",
"path": "hs_core/tests/api/rest/test_scimeta_swat.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "399181"
},
{
"name": "HTML",
"bytes": "950570"
},
{
"name": "JavaScript",
"bytes": "2069460"
},
{
"name": "Python",
"bytes": "5006675"
},
{
"name": "R",
"bytes": "4463"
},
{
"name": "Shell",
"bytes": "53077"
},
{
"name": "XSLT",
"bytes": "790987"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, url
from django.contrib.auth.decorators import login_required
from contrib.admin.admin import views
# URL routes for the admin dashboard app.
# NOTE(review): django.conf.urls.patterns() is deprecated as of Django 1.8
# and removed in 1.10; a plain list of url() entries is the modern form --
# confirm the project's Django version before changing.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'web.views.home', name='home'),
    # always get the first record
    # Dashboard landing page; viewing requires an authenticated session.
    url(r'^$', login_required(views.DashboardView.as_view())),
)
| {
"content_hash": "881e9fffb5e215a243eee8f6b116184e",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 62,
"avg_line_length": 32.25,
"alnum_prop": 0.599483204134367,
"repo_name": "rfancn/wxgigo",
"id": "0c37ce68379155be4539451c961c7253e298cd95",
"size": "387",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/admin/admin/dashboard/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2696"
},
{
"name": "HTML",
"bytes": "50544"
},
{
"name": "JavaScript",
"bytes": "4201"
},
{
"name": "Python",
"bytes": "356710"
},
{
"name": "Shell",
"bytes": "1220"
}
],
"symlink_target": ""
} |
'''
Several measures for evaluating chunking predictions with respect to a gold standard.
Pass -h to see usage.
Terminology used below:
* 'token': an instance of an atomic unit, such as a word;
* 'tag': indicates grouping and (possibly) classification of a token, e.g. B-PERSON
* 'mention': a sequence of tokens according to some tagging;
* 'label': indicates classification of a mention (all tokens in the mention
must agree for a given tagging)
* 'position marker': the tag minus the label (if present), e.g. B
* 'continuation marker': a position marker indicating the second or subsequent
position within a mention, e.g. I with BIO tagging
* 'mention boundaries': the sequence of position markers associated with a mention
* 'entity': a mention type according to a given tagging; label may or
may not be included as part of the type
The first mandatory option is the position marker scheme, which must be one of:
* IO - inside/outside
* BIO - beginning/inside/outside (the sequence O I is illegal)
* BILOU (Ratinov & Roth, 2009) - beginning/inside/last/outside/unique
(illegal sequences: O {I,L}; B O; L I; {B,I} U; U {I,L})
Every tag must start with the position marker. It may optionally be followed
by a hyphen and a label. Tokens unlabeled in the input are implicitly given
a special "null" label, except for O, which must not be labeled.
(Unlabeled scores will therefore work if the gold standard has labels
but the predictions do not, or vice versa.) The label for a continuation tag
must match the label of the previous tag.
The input format follows the CoNLL conventions: each line consists of a
token, a gold tag, and a predicted tag, separated by tabs. Blank lines
indicate mandatory chunk boundaries (e.g. sentence boundaries if tagging
words).
The character ` has a special meaning when used in the first and/or second
tag field: it causes the entire token to be ignored when reading in the file.
This can be used to abstain from judging predictions for certain tokens, though
it should be used with caution as it may result in a different chunk interpretation
of neighboring tokens. E.g., the sequences
a O B-1
b ` B-2
c O I-2
and
a O O
b ` B
c O I
will both be rendered invalid if the middle token is ignored. If ` is used for all
tokens in a sequence, the sequence will safely be ignored.
@since: 2012-01-30
@author: Nathan Schneider (nschneid)
'''
from __future__ import print_function, division, absolute_import
import codecs, sys, os, re, fileinput, argparse
from collections import Counter, defaultdict
if __name__ == "__main__" and __package__ is None:
import scoring
else:
from . import scoring
IGNORE_SYMBOL = '`'
def isContinuation(tag, scheme='BIO'):
    '''
    Return True if the tag's position marker indicates the second or
    subsequent token of a mention under the given scheme, and False if it
    starts a mention or lies outside one.  'I' continues a mention only in
    schemes that also contain 'B'; with plain IO tagging each 'I' counts as
    starting its own unit.  (Markers other than B/I/L/O/U fall through,
    implicitly yielding None.)
    '''
    marker = tag[0]
    if marker == 'L':
        return True
    if marker == 'I':
        return 'B' in scheme
    if marker in 'BOU':
        return False
def isPrimary(tag, scheme='BIO'):
    '''
    The *primary* tag is the one in a privileged position within the
    mention, a position occurring only once per mention:
      * B and U tags are always primary
      * L tags are primary only in schemes without B
    '''
    marker = tag[0]
    if marker == 'L':
        return 'B' not in scheme
    return marker in 'BU'
def primarize(positionMarker, scheme='BIO'):
    '''
    Return the scheme's primary position marker corresponding to
    ``positionMarker`` -- the marker itself if it is already primary,
    otherwise the scheme's designated primary marker (B if present, else L).
    '''
    if isPrimary(positionMarker, scheme):
        return positionMarker
    for candidate in 'BL':
        if candidate in scheme:
            return candidate
    assert False
def tokenConfusions(goldseq, predseq, ignoreLabels=False, collapseNonO=False, scheme='BIO', bag=False, ignoreContinuation=False):
    '''
    Token-level confusion counts between a gold and a predicted tagging.
    Each sequence element is a (position marker, label) pair; the two
    sequences must be aligned token-for-token.

    With bag=True, tags are compared as multisets rather than
    position-by-position; ignoreContinuation (bag mode only) restricts the
    bags to primary tags and O tokens.  collapseNonO collapses all non-O
    position markers so that only the O/non-O distinction (plus labels)
    matters.

    Returns a scoring.ConfusionMatrix over tokens: Both = true positives,
    Aonly = false negatives (gold only), Bonly = false positives
    (prediction only), Neither = true negatives.
    '''
    n = nFound = nMissed = nExtra = 0
    if bag:
        gC = Counter()
        pC = Counter()
        for g,p in zip(goldseq,predseq):
            gpm, gl = g
            ppm, pl = p
            if ignoreLabels:
                gl = pl = None
            if ignoreContinuation:
                # Count each mention once (via its primary tag) plus O tokens.
                if collapseNonO:
                    if gpm!='O': gpm = 'B' if isPrimary(gpm,scheme) else gpm
                    if ppm!='O': ppm = 'B' if isPrimary(ppm,scheme) else ppm
                if gpm=='O' or isPrimary(gpm,scheme):
                    gC[(gpm,gl)] += 1
                if ppm=='O' or isPrimary(ppm,scheme):
                    pC[(ppm,pl)] += 1
            else:
                if collapseNonO:
                    if gpm!='O': gpm = primarize(gpm,scheme)
                    if ppm!='O': ppm = primarize(ppm,scheme)
                gC[(gpm,gl)] += 1
                pC[(ppm,pl)] += 1
        n = sum(gC.values())
        assert ignoreContinuation or n==sum(pC.values())
        # Union of the two Counters' key sets.  (The previous
        # gC.keys()+pC.keys() concatenation only worked on Python 2, where
        # keys() returned lists; dict views cannot be added with +.)
        for tag in set(gC) | set(pC):
            if tag[0]=='O': continue
            gn, pn = gC[tag], pC[tag]
            nFound += min(gn,pn)
            if gn>pn:
                nMissed += gn-pn
            elif gn<pn:
                nExtra += pn-gn
    else:
        for g,p in zip(goldseq,predseq):
            gpm, gl = g
            ppm, pl = p
            n += 1
            if (gpm==ppm or (collapseNonO and (gpm=='O')==(ppm=='O'))) and (ignoreLabels or gl==pl): # correct tag
                if gpm!='O':
                    nFound += 1 # true positive
            elif ppm=='O':
                nMissed += 1 # false negative
            else:
                nExtra += 1 # false positive
    return scoring.ConfusionMatrix(Both=nFound, Aonly=nMissed, Bonly=nExtra, Neither=n-nFound-nMissed-nExtra)
def mentionSpans(seq, includeOTokens=False, value='full' or 'label', scheme='BIO'):
    '''
    Map (start, end) token-index spans to either the span's tags
    (value='full') or the label of its first tag (value='label') for each
    mention in ``seq``.  With includeOTokens=True, every O token also gets
    its own length-1 span.

    >>> d = mentionSpans([('B', 'PER'), ('I', 'PER'), ('I', 'PER'), ('B', 'ORG'), ('O', None), ('O', None), ('B', 'LOC')], includeOTokens=False)
    >>> assert d=={(0, 3): [('B', 'PER'), ('I', 'PER'), ('I', 'PER')], (3, 4): [('B', 'ORG')], (6, 7): [('B', 'LOC')]}, d
    >>> d = mentionSpans([('B', 'PER'), ('I', 'PER'), ('I', 'PER'), ('B', 'ORG'), ('O', None), ('O', None), ('B', 'LOC')], includeOTokens=True)
    >>> assert d=={(0, 3): [('B', 'PER'), ('I', 'PER'), ('I', 'PER')], (3, 4): [('B', 'ORG')], (4, 5): [('O', None)], (5, 6): [('O', None)], (6, 7): [('B', 'LOC')]}, d
    >>> d = mentionSpans([('B', 'PER'), ('I', 'PER'), ('I', 'PER'), ('B', 'ORG'), ('O', None), ('O', None), ('B', 'LOC')], includeOTokens=False, value='label')
    >>> assert d=={(0, 3): 'PER', (3, 4): 'ORG', (6, 7): 'LOC'}, d
    >>> d = mentionSpans([('B', 'PER'), ('I', 'PER'), ('I', 'PER'), ('B', 'ORG'), ('O', None), ('O', None), ('B', 'LOC')], includeOTokens=True, value='label')
    >>> assert d=={(0, 3): 'PER', (3, 4): 'ORG', (4, 5): None, (5, 6): None, (6, 7): 'LOC'}, d
    '''
    assert value in ('full', 'label')
    spans = {}
    start = 0
    for end in range(1, len(seq) + 1):
        # Keep extending the current span while the next tag continues it.
        if end < len(seq) and isContinuation(seq[end][0], scheme):
            continue
        if end - start > 1 or includeOTokens or seq[start][0] != 'O':
            spans[(start, end)] = seq[start:end]
        start = end
    if value == 'label':
        return {span: tags[0][1] for span, tags in spans.items()}
    return spans
def overlap(span1, span2):
    '''Return True if the two half-open (start, end) spans share any index.'''
    (a, b), (c, d) = span1, span2
    assert a < b and c < d
    # Half-open intervals intersect iff the larger start precedes the
    # smaller end.
    return max(a, c) < min(b, d)
def softMentionConfusions(goldseq, predseq, ignoreLabels=False, matchCriterion=overlap, scheme='BIO'):
    '''
    Any partial overlap between a gold and predicted mention counts as a match between the two mentions.
    Unless ignoreLabels is True, matched mentions must also agree on their label.
    True positives and true negatives don't really make sense here, so we return 0 counts for these
    and use precision vs. recall calculations.

    Returns a triple: a scoring.ConfusionMatrix with only the unmatched-gold
    (Aonly) and unmatched-predicted (Bonly) counts filled in, plus
    numerator/denominator Counters for precision and recall.

    >>> gold = [('B', 'PER'), ('I', 'PER'), ('I', 'PER'), ('B', 'ORG'), ('O', None), ('O', None), ('B', 'LOC'), ('I', 'LOC')]
    >>> pred = [('B', 'PER'), ('O', None), ('B', 'ORG'), ('I', 'ORG'), ('O', None), ('B', 'YYY'), ('B', 'ORG'), ('B', 'XXX')]
    >>> conf, precRatio, recRatio = softMentionConfusions(gold, pred, ignoreLabels=True)
    >>> assert conf==scoring.ConfusionMatrix(0, 1, 0, 0), conf
    >>> assert precRatio==Counter(numer=4, denom=5)
    >>> assert recRatio==Counter(numer=3, denom=3)
    >>> gold = [('B', 'PER'), ('I', 'PER'), ('I', 'PER'), ('B', 'ORG'), ('O', None), ('O', None), ('B', 'LOC'), ('I', 'LOC')]
    >>> pred = [('B', 'PER'), ('O', None), ('B', 'ORG'), ('I', 'ORG'), ('O', None), ('B', 'YYY'), ('B', 'ORG'), ('B', 'XXX')]
    >>> conf, precRatio, recRatio = softMentionConfusions(gold, pred, ignoreLabels=False)
    >>> assert conf==scoring.ConfusionMatrix(1, 3, 0, 0), conf
    >>> assert precRatio==Counter(numer=2, denom=5)
    >>> assert recRatio==Counter(numer=2, denom=3)
    '''
    # Compare bare spans when labels are ignored, (span, label) pairs otherwise.
    # (The previous version also built per-O-token span sets here; they were
    # never used, so those two extra mentionSpans passes have been removed.)
    x = dict.keys if ignoreLabels else dict.items
    goldMentionSpans = set(x(mentionSpans(goldseq, includeOTokens=False, value='label', scheme=scheme)))
    predMentionSpans = set(x(mentionSpans(predseq, includeOTokens=False, value='label', scheme=scheme)))
    if ignoreLabels:
        match = lambda g,p: matchCriterion(g,p)
    else:
        match = lambda g,p: matchCriterion(g[0],p[0]) and g[1]==p[1]
    nMatchedPred = nExtra = 0
    uncoveredGold = set(goldMentionSpans)
    for p in predMentionSpans:
        # An exact match short-circuits the pairwise overlap scan.
        matchedGold = {p} if p in goldMentionSpans else {g for g in goldMentionSpans if match(g,p)}
        if matchedGold:
            nMatchedPred += 1
            uncoveredGold.difference_update(matchedGold)
        else: # prediction doesn't overlap with any gold mention
            nExtra += 1
    nMatchedGold = len(goldMentionSpans) - len(uncoveredGold)
    return (scoring.ConfusionMatrix(Aonly=len(uncoveredGold), Bonly=nExtra, Both=0, Neither=0),
            Counter(numer=nMatchedPred, denom=len(predMentionSpans)),
            Counter(numer=nMatchedGold, denom=len(goldMentionSpans)))
def mentionConfusions(goldseq, predseq, ignoreLabels=False, scheme='BIO'):
    '''
    Exact-boundary mention confusion counts: a predicted mention counts as
    correct only if a gold mention has identical (start, end) boundaries
    (and, unless ignoreLabels, the same label).  O spans matching in both
    taggings are counted as true negatives.

    >>> gold = [('B', 'PER'), ('I', 'PER'), ('I', 'PER'), ('B', 'ORG'), ('O', None), ('O', None), ('B', 'LOC'), ('B', 'XXX')]
    >>> pred = [('B', 'PER'), ('O', None), ('B', 'ORG'), ('I', 'ORG'), ('O', None), ('B', 'YYY'), ('B', 'ORG'), ('B', 'XXX')]
    >>> conf = mentionConfusions(gold, pred, ignoreLabels=True)
    >>> assert conf==scoring.ConfusionMatrix(2, 3, 2, 1), conf
    >>> gold = [('B', 'PER'), ('I', 'PER'), ('I', 'PER'), ('B', 'ORG'), ('O', None), ('O', None), ('B', 'LOC'), ('B', 'XXX')]
    >>> pred = [('B', 'PER'), ('O', None), ('B', 'ORG'), ('I', 'ORG'), ('O', None), ('B', 'YYY'), ('B', 'ORG'), ('B', 'XXX')]
    >>> conf = mentionConfusions(gold, pred, ignoreLabels=False)
    >>> assert conf==scoring.ConfusionMatrix(3, 4, 1, 1), conf
    '''
    # Compare bare spans when labels are ignored, (span, label) pairs otherwise.
    project = dict.keys if ignoreLabels else dict.items
    goldMentions = set(project(mentionSpans(goldseq, includeOTokens=False, value='label', scheme=scheme)))
    goldO = set(project(mentionSpans(goldseq, includeOTokens=True, value='label', scheme=scheme))) - goldMentions
    predMentions = set(project(mentionSpans(predseq, includeOTokens=False, value='label', scheme=scheme)))
    predO = set(project(mentionSpans(predseq, includeOTokens=True, value='label', scheme=scheme))) - predMentions
    return scoring.ConfusionMatrix(len(goldMentions - predMentions), len(predMentions - goldMentions),
                                   len(goldMentions & predMentions), len(goldO & predO))
def manningChunks(goldseq, predseq, scheme='BIO'): # TODO: right now this assumes BIO. IO and BILOU may require changes.
'''
Provided tag sequences must consist of pairs of the form (position marker, label).
Categorize errors in the provided tagging according to the scheme proposed by Chris Manning in
http://nlpers.blogspot.com/2006/08/doing-named-entity-recognition-dont.html
which consists of breaking sequences based on the combination of gold and predicted taggings
and assigning each chunk to one of seven categories:
tn, tp, fn, fp, le (label error), be (boundary error), lbe (label and boundary error)
His bracketing criteria: "Moving along the sequence, the subsequence boundaries are:
(i) at start and end of document,
(ii) anywhere there is a change to or from a word/O/O token from or to a token
where either guess or gold is not O, and
(iii) anywhere that both systems change their class assignment [I interpret this as any transition
out of and/or into a mention. -NSS] simultaneously, regardless of whether they agree."
The returned sequence is a list of tuples, where each tuple combines the tags
of a gold subsequence, the tags of a predicted sequence, and one of the seven
error categories.
>>> manningChunks([('O', None), ('B', 'PERS'), ('I', 'PERS'), ('O', None)],
... [('B', 'PERS'), ('I', 'PERS'), ('B', 'PERS'), ('O', None)])
[([('O', None), ('B', 'PERS'), ('I', 'PERS')], [('B', 'PERS'), ('I', 'PERS'), ('B', 'PERS')], 'be'), ([('O', None)], [('O', None)], 'tn')]
>>> tests = []
# Manning's examples don't explicitly include B or I tags, but we assume
# they contain no two consecutive B tags with the same label.
>>> tests.append(('drove/O/O along/O/O a/O/O narrow/O/O road/O/O', 'tn'))
>>> tests.append(('in/O/O Palo/LOC/LOC Alto/LOC/LOC ./O/O', 'tn,tp,tn'))
>>> tests.append(('in/O/O Palo/LOC/O Alto/LOC/O ./O/O', 'tn,fn,tn'))
>>> tests.append(('an/O/O Awful/O/ORG Headache/O/ORG ./O/O', 'tn,fp,tn'))
>>> tests.append(('I/O/O live/O/O in/O/O Palo/LOC/ORG Alto/LOC/ORG ./O/O', 'tn,le,tn'))
>>> tests.append(('Unless/O/PERS Karl/PERS/PERS Smith/PERS/PERS resigns/O/O', 'be,tn'))
>>> tests.append(('Unless/O/ORG Karl/PERS/ORG Smith/PERS/ORG resigns/O/O', 'lbe,tn'))
>>> for seq,cats in tests:
... tkns,goldseq,predseq = zip(*(itm.split('/') for itm in seq.split()))
... goldseqBIO = [('O',None) if l2=='O' else (('I',l2) if l2==l1 else ('B',l2)) for l1,l2 in zip((None,)+goldseq, goldseq)]
... predseqBIO = [('O',None) if l2=='O' else (('I',l2) if l2==l1 else ('B',l2)) for l1,l2 in zip((None,)+predseq, predseq)]
... chks = manningChunks(goldseqBIO, predseqBIO)
... assert [chk[2] for chk in chks]==cats.split(',')
>>> tests = []
# Additional examples
>>> tests.append(([('O', None), ('O', None), ('O', None), ('B', 'LOC'), ('I', 'LOC'), ('O', None)],
... [('O', None), ('O', None), ('O', None), ('B', 'ORG'), ('B', 'ORG'), ('O', None)], 'tn,lbe,tn'))
>>> tests.append(([('O', None), ('O', None), ('O', None), ('B', 'LOC'), ('B', 'LOC'), ('O', None)],
... [('O', None), ('O', None), ('O', None), ('B', 'ORG'), ('I', 'ORG'), ('O', None)], 'tn,lbe,tn'))
>>> tests.append(([('O', None), ('B', 'PERS'), ('I', 'PERS'), ('O', None)],
... [('B', 'PERS'), ('I', 'PERS'), ('B', 'PERS'), ('O', None)], 'be,tn'))
>>> tests.append(([('O', None), ('B', 'PERS'), ('I', 'PERS'), ('O', None)],
... [('B', 'PERS'), ('B', 'PERS'), ('I', 'PERS'), ('O', None)], 'fp,tp,tn'))
>>> tests.append(([('O', None), ('B', 'PERS'), ('I', 'PERS'), ('O', None)],
... [('B', 'PERS'), ('B', 'PERS'), ('B', 'PERS'), ('O', None)], 'fp,be,tn'))
>>> tests.append(([('O', None), ('B', 'PERS'), ('B', 'PERS'), ('O', None)],
... [('B', 'PERS'), ('B', 'PERS'), ('B', 'ORG'), ('O', None)], 'fp,tp,le,tn'))
>>> tests.append(([('O', None), ('B', 'PERS'), ('I', 'PERS'), ('O', None)],
... [('B', 'ORG'), ('B', 'ORG'), ('I', 'ORG'), ('O', None)], 'fp,le,tn'))
>>> tests.append(([('O', None), ('B', 'PERS'), ('I', 'PERS'), ('O', None), ('O', None), ('B', 'XXX')],
... [('B', 'ORG'), ('B', 'ORG'), ('I', 'ORG'), ('O', None), ('O', None), ('B', 'XXX')], 'fp,le,tn,tp'))
>>> tests.append(([('O', None), ('B', 'PERS'), ('I', 'PERS'), ('O', None), ('B', 'XXX'), ('B', 'XXX')],
... [('B', 'ORG'), ('B', 'ORG'), ('I', 'ORG'), ('O', None), ('O', None), ('B', 'XXX')], 'fp,le,tn,fn,tp'))
>>> tests.append(([('O', None), ('B', 'PERS'), ('I', 'PERS'), ('B', 'XXX'), ('I', 'XXX'), ('B', 'XXX')],
... [('B', 'ORG'), ('B', 'ORG'), ('I', 'ORG'), ('O', None), ('O', None), ('B', 'XXX')], 'fp,le,fn,tp'))
>>> tests.append(([('O', None), ('B', 'PERS'), ('I', 'PERS'), ('B', 'PERS'), ('I', 'PERS'), ('B', 'PERS')],
... [('O', None), ('B', 'PERS'), ('I', 'PERS'), ('O', None), ('O', None), ('B', 'PERS')], 'tn,tp,fn,tp'))
>>> tests.append(([('O', None), ('B', 'PERS'), ('I', 'PERS'), ('B', 'PERS'), ('I', 'PERS'), ('B', 'XXX')],
... [('O', None), ('B', 'PERS'), ('I', 'PERS'), ('O', None), ('O', None), ('B', 'PERS')], 'tn,tp,fn,le'))
>>> tests.append(([('O', None), ('B', 'PERS'), ('I', 'PERS'), ('B', 'PERS'), ('I', 'PERS'), ('B', 'XXX')],
... [('O', None), ('B', 'PERS'), ('I', 'PERS'), ('O', None), ('B', 'PERS'), ('I', 'PERS')], 'tn,tp,lbe'))
>>> for goldseqBIO,predseqBIO,cats in tests:
... chks = manningChunks(goldseqBIO, predseqBIO)
... assert [chk[2] for chk in chks]==cats.split(','),(goldseqBIO,predseqBIO,cats,chks)
'''
    def nextChunk(goldseq, predseq, i):
        '''
        Starting at token index i, consume the longest span that is either
        (a) a run of tokens tagged O on BOTH sides, or (b) a region of
        overlapping gold/predicted mention material, stopping as soon as both
        sides simultaneously begin something new.
        Note: `scheme` and `isContinuation` come from the enclosing scope.
        @return: (gold_tags, pred_tags, category) for the consumed span, where
        category is one of 'tp', 'tn', 'fp', 'fn', 'le' (label error),
        'be' (boundary error), 'lbe' (label and boundary error).
        '''
        gg = []  # gold tags consumed so far
        pp = []  # predicted tags consumed so far
        j = i
        while j<len(goldseq):
            g = goldseq[j]
            p = predseq[j]
            if len(gg)>0:
                if (gg[-1][0]=='O' and pp[-1][0]=='O')!=(g[0]=='O' and p[0]=='O'):
                    break # transition to or from word/O/O
                elif (not isContinuation(g[0], scheme) and (g[0]!='O' or gg[-1][0]!='O')) and (not isContinuation(p[0], scheme) and (p[0]!='O' or pp[-1][0]!='O')):
                    break # transition out of and/or into a mention: i.e. any tag not continuing a mention or series of O's
            gg.append(g)
            pp.append(p)
            j += 1
        assert len(gg)==len(pp)
        # boundary strings: position markers only (labels dropped), e.g. 'BIO'
        gpmS = ''.join(g[0] for g in gg)
        ppmS = ''.join(p[0] for p in pp)
        # error type: compare boundaries first, then label sets of the non-O tags
        if gpmS==ppmS: # boundaries agree
            if {g[1] for g in gg if g[0]!='O'}=={p[1] for p in pp if p[0]!='O'}: # labels agree
                cat = 'tn' if set(gpmS)=={'O'} else 'tp'
            else:
                cat = 'le'
        else: # boundary error
            if set(gpmS)=={'O'}:
                cat = 'fp'
            elif set(ppmS)=={'O'}:
                cat = 'fn'
            elif {g[1] for g in gg if g[0]!='O'}=={p[1] for p in pp if p[0]!='O'}: # labels agree
                cat = 'be'
            else:
                cat = 'lbe'
        return (gg, pp, cat)
assert len(goldseq)==len(predseq)
i = 0
chunks = []
while i<len(goldseq):
chk = nextChunk(goldseq, predseq, i)
chunks.append(chk)
i += len(chk[0])
assert i==len(goldseq)
return chunks
def manningCounts(goldseq, predseq, scheme='BIO'):
    '''Tally Manning chunk error categories.
    @return: (token-weighted counts, per-chunk counts) as a pair of Counters.'''
    tokenTally = Counter()
    chunkTally = Counter()
    for goldTags, predTags, cat in manningChunks(goldseq, predseq, scheme=scheme):
        chunkTally[cat] += 1
        # weight the category by the chunk's length in tokens
        tokenTally[cat] += len(goldTags)
    return tokenTally, chunkTally
def manningScore(goldseq, predseq, scheme='BIO'):
    '''
    One scoring scheme based on Manning chunks (see above)
    that is intended to combat the traditional mention F score's
    bias against proposing entities.
    @return: "Precision" and "recall" values per this approach
    >>> gold = [('B', 'PER'), ('I', 'PER'), ('I', 'PER'), ('B', 'ORG'), ('O', None), ('O', None), ('B', 'LOC'), ('B', 'XXX')]
    >>> pred = [('B', 'PER'), ('O', None), ('B', 'ORG'), ('I', 'ORG'), ('O', None), ('B', 'YYY'), ('B', 'ORG'), ('B', 'XXX')]
    >>> manningScore(gold, pred) # (5-1-1-.5)/5, (4-1-0-.5)/4
    (0.5, 0.625)
    >>> gold = [('B', 'PER'), ('I', 'PER'), ('I', 'PER'), ('O', None), ('O', None), ('O', None), ('B', 'LOC'), ('B', 'XXX')]
    >>> pred = [('B', 'PER'), ('O', None), ('B', 'ORG'), ('I', 'ORG'), ('O', None), ('B', 'YYY'), ('B', 'ORG'), ('B', 'XXX')]
    >>> manningScore(gold, pred)==(.5, 2/3) # (5-1-1-.5)/5, (3-.5-0-.5)/3
    True
    '''
    # A false-positive chunk costs precision one point; a false-negative chunk
    # costs recall one point; label and/or boundary errors cost each side half
    # a point per mention involved on that side.
    precisionPenalty = 0
    recallPenalty = 0
    for goldTags, predTags, errcat in manningChunks(goldseq, predseq, scheme=scheme):
        if errcat=='fp':
            precisionPenalty += 1
        elif errcat=='fn':
            recallPenalty += 1
        elif errcat in ('be','le','lbe'):
            recallPenalty += 0.5*len(mentionSpans(goldTags, includeOTokens=False, scheme=scheme))
            precisionPenalty += 0.5*len(mentionSpans(predTags, includeOTokens=False, scheme=scheme))
    nGold = len(mentionSpans(goldseq, includeOTokens=False, scheme=scheme))
    nGuesses = len(mentionSpans(predseq, includeOTokens=False, scheme=scheme))
    return (nGuesses-precisionPenalty)/nGuesses, (nGold-recallPenalty)/nGold
def ensureSequence(seq, scheme='BIO' or 'IO' or 'BILOU', fixProblems=False):
    '''
    Check that the tag sequence is legal under the given tagging scheme
    ('BIO', 'IO', or 'BILOU').

    @param seq: iterable of (position_marker, label) pairs
    @param fixProblems: if True, repair recoverable violations (printing a
        notice to stderr) instead of raising
    @return: the (possibly repaired) sequence as a list
    Raise an informative error if it is invalid and cannot/may not be fixed.
    '''
    seq = list(seq)
    if not seq:
        # an empty sequence is trivially legal (and pmS[0] below would crash)
        return seq
    pmS = ''.join(t[0] for t in seq) # position markers, e.g. 'BIOO'
    pmsNotInScheme = set(pmS).difference(set(scheme))
    if pmsNotInScheme:
        raise Exception('One or more position markers not allowed by the {} tagging scheme: {}'.format(scheme, pmsNotInScheme))
    if 'B' in scheme and (pmS[0]=='I' or pmS[0]=='L'):
        s = 'Illegal position marker at the beginning of a sequence (tagging scheme {}): {}'.format(scheme, pmS[0])
        if fixProblems:
            print(s, file=sys.stderr)
            seq[0] = ('B', seq[0][1])
            pmS = 'B' + pmS[1:]  # keep the marker string in sync with the repair
        else:
            raise Exception(s)
    if 'L' in scheme and pmS[-1]=='I':
        s = 'Illegal position marker at the end of a sequence (tagging scheme {}): {}'.format(scheme, pmS[-1])
        if fixProblems:
            print(s, file=sys.stderr)
            seq[-1] = ('L', seq[-1][1])
            pmS = pmS[:-1] + 'L'  # keep the marker string in sync with the repair
        else:
            raise Exception(s)
    if 'B' in scheme and ('OI' in pmS or 'UI' in pmS or 'LI' in pmS):
        s = 'Illegal position marker sequence (tagging scheme {}): O I or U I or L I (I must always continue a mention)'.format(scheme)
        if fixProblems:
            print(s, file=sys.stderr)
            m = re.search(r'[OUL]I', pmS)
            while m:
                i = m.start()
                # Promote the orphaned I (at i+1) to B so it starts a new
                # mention; the preceding O/U/L token itself is left alone.
                # (Bug fix: previously the tag at i was overwritten, pulling
                # the O/U/L token into the mention.)
                seq[i+1] = ('B', seq[i+1][1])
                pmS = ''.join(t[0] for t in seq)
                m = re.search(r'[OUL]I', pmS)
        else:
            raise Exception(s)
    if 'OL' in pmS or 'UL' in pmS:
        raise Exception('Illegal position marker sequence (tagging scheme {}): O L or U L (L must always continue a mention)'.format(scheme))
    if 'U' in scheme and ('BB' in pmS or 'BO' in pmS or 'BU' in pmS or 'IU' in pmS or 'LL' in pmS or 'OL' in pmS):
        raise Exception('Illegal position marker sequence (tagging scheme {}): B B or B O or B U or I U or L L or O L (must use U for all and only length-1 mentions)'.format(scheme))
    # ensure each mention has a consistent label
    for (i,t1),t2 in zip(enumerate(seq), seq[1:]):
        # t1 is read from the live list, so an earlier repair propagates
        # through a chain of continuation tags
        pm2, l2 = t2
        if pm2=='O' and l2 is not None:
            raise Exception('Illegal label for an O tag: {}'.format(l2))
        if isContinuation(pm2, scheme=scheme) and l2!=t1[1]:
            if fixProblems:
                s = "Continuation tag's label ({}) is inconsistent with previous label ({}); using the previous one".format(l2, t1[1])
                print(s, file=sys.stderr)
                seq[i+1] = (pm2, t1[1])
            else:
                raise Exception("Continuation tag's label ({}) is inconsistent with previous label ({})".format(l2, t1[1]))
    return seq
def slashFormat(tkns, golds, preds):
    '''Render parallel token/gold/pred triples as "tok/GOLDTAG/PREDTAG ..." for display.'''
    def tagStr(tag):
        # 'B-PER' for labeled tags; just the position marker (e.g. 'O') when the label is None
        return u'-'.join(tag) if tag[1] is not None else tag[0]
    return u' '.join(u'{}/{}/{}'.format(t, tagStr(g), tagStr(p)) for t, g, p in zip(tkns, golds, preds))
def loadSequences(conllF, scheme='BIO'):
    '''
    Generator over sequences in the input file, where each sequence is a list of token triples of the form
    (token_word, (gold_position_marker, gold_label), (pred_position_marker, pred_label))
    An error is raised if any of the sequences are ill-formed.
    '''
    def parseTag(tagS):
        # 'B-PER' -> ('B', 'PER'); 'O' -> ('O', None). Split on the first
        # hyphen only so labels that themselves contain hyphens survive
        # intact (previously they were silently truncated).
        if '-' in tagS:
            pm, lbl = tagS.split('-', 1)
            return (str(pm), lbl)  # str() converts the marker from unicode
        return (str(tagS), None)
    def nextSequence(conllF, scheme='BIO'):
        '''@return: The next (non-ignored) sequence as a list, or [] if there is no remaining sequence.'''
        # Iterative rather than recursive so a long run of entirely-ignored
        # sequences cannot exhaust the interpreter's recursion limit.
        while True:
            seq = []
            nIgnored = 0
            for ln in conllF:
                ln = ln.rstrip('\n')  # robust to a missing final newline
                if ln.strip()=='':
                    if seq: break  # blank line ends the current sequence
                    continue
                tkn, goldt, predt = ln.split('\t')
                assert tkn,'Missing token on line: {}'.format(ln)
                assert goldt,'Missing first (gold) tag on line: {}'.format(ln)
                assert predt,'Missing second (predicted) tag on line: {}'.format(ln)
                if goldt==IGNORE_SYMBOL or predt==IGNORE_SYMBOL:
                    nIgnored += 1
                    continue
                seq.append((tkn, parseTag(goldt), parseTag(predt)))
            if nIgnored>0:
                global nIgnoredTokens, nIgnoredSeqs
                nIgnoredTokens += nIgnored
                nIgnoredSeqs += 1 # this sequence was at least partially ignored
            if seq or nIgnored==0:
                # either a real sequence, or EOF (empty with nothing ignored);
                # an entirely-ignored sequence loops around to read the next one
                return seq
    while True:
        seq = nextSequence(conllF, scheme=scheme)
        if not seq:
            break
        tkns,golds,preds = zip(*seq)
        try:
            golds = ensureSequence(golds, scheme=scheme, fixProblems=True)
            preds = ensureSequence(preds, scheme=scheme, fixProblems=True)
        except:
            # report where the bad sequence ended before re-raising
            print('Ending at line {}:'.format(fileinput.lineno()), slashFormat(tkns,golds,preds).encode('utf-8'), file=sys.stderr)
            raise
        yield zip(tkns,golds,preds)
#import doctest
#doctest.testmod()
'''
Error report:
00000 TOKENS 00000 00000 MENTIONS 00000
found xtra miss O/O found xtra miss O/O
tp fp fn tn A% P% R% F1% tp fp fn tn P% R% F1%
Exact L 0000 0000 0000 000000 00 00 00 00.0 0000 0000 0000 000000 00 00 00.0
Exact UL 0000 0000 0000 000000 00 00 00 00.0 0000 0000 0000 000000 00 00 00.0 # ignoring labels
Soft L 0000 0000 0000 000000 00 00 00 00.0 0000 0000 0000 000000 00 00 00.0 # ignoring position markers / mention match if at least one of its tokens is found with the correct label
Soft UL 0000 0000 0000 000000 00 00 00 00.0 0000 0000 0000 000000 00 00 00.0 # unlabeled: collapsing B & I / mention match if at least one of its tokens is found
Bag L 0000 0000 0000 000000 00 00 00 00.0 0000 0000 0000 000000 00 00 00.0 # treating each sentence as a bag of labeled items
Bag UL 0000 0000 0000 000000 00 00 00 00.0 0000 0000 0000 000000 00 00 00.0 # treating each sentence as a bag of unlabeled items
Manning 0000 0000 0000 000000 0000 0000 0000 000000
le: 0000 be: 0000 lbe: 0000 le: 0000 be: 0000 lbe: 0000
'''
if __name__=='__main__':
    #print(scoring.ConfusionMatrix(Aonly=4, Bonly=0, Both=2, Neither=8).asPRF())
    # Command-line interface: evaluate predicted vs. gold tag sequences read
    # from CoNLL-style tab-separated files (or stdin).
    parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('-v', action='store_true', help='Display a legend explaining the output')
    parser.add_argument('-p', action='store_true', help='Show percentages instead of counts for tp, fp, etc.') # TODO:
    parser.add_argument('-n', action='store_true', help='Show ratios instead of percentages for precision and recall') # TODO:
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-l', metavar='LABEL', action='append', help='Specify one of the labels to select (tokens corresponding to unselected labels will be counted as O)') # TODO
    group.add_argument('-L', action='store_true', help='Suppress per-label reports')
    # -m accepts either an int (count) or a float (percentage) threshold
    parser.add_argument('-m', metavar='THRESHOLD', action='store', type=lambda v: float(v) if '.' in v or 'e' in v else int(v),
                        help='Specify a minimum threshold for values to display for tp, fp, etc. '
                        'This can be a count (an integer) or a percentage (between 0 and 1). 0 entries will not display; '
                        'nonzero entries below the threshold will display as *.') # TODO:
    parser.add_argument('scheme', choices=['BIO','IO','BILOU'], help='Tagging scheme')
    parser.add_argument('conllFiles', nargs='*')
    args = parser.parse_args()
    scheme = args.scheme
def newConfsMap():
confs = {}
for x in ['Exact L', 'Exact UL', 'Soft L', 'Soft UL', 'Bag L', 'Bag UL', 'Manning']:
confs[x] = {'token': scoring.ConfusionMatrix(0,0,0,0), 'mention': scoring.ConfusionMatrix(0,0,0,0)}
confs['Manning2'] = {'token': Counter(), 'mention': Counter()}
softPR = {'UL': {'P': Counter(numer=0, denom=0), 'R': Counter(numer=0, denom=0)},
'L': {'P': Counter(numer=0, denom=0), 'R': Counter(numer=0, denom=0)}}
return confs, softPR
data = defaultdict(newConfsMap) # labelset => confs map
nSeqs = Counter() # label set => number of sequences having some (predicted or gold) label in the set
nTokens = Counter() # label set => number of tokens in the sequences corresponding to this label set
allLabels = set()
global nIgnoredTokens, nIgnoredSeqs
nIgnoredTokens = 0
nIgnoredSeqs = 0
sys.stdin = codecs.getreader("utf-8")(sys.stdin)
    # Main accumulation pass: stream sequences from the input file(s)/stdin and
    # update every measure's confusion matrices for every relevant label set.
    for seq in loadSequences(fileinput.input(args.conllFiles, openhook=fileinput.hook_encoded("utf-8")), scheme):
        tkns,golds,preds = zip(*seq)
        tkns,golds,preds = list(tkns),list(golds),list(preds)
        # labels occurring in this sequence on either side (gold or predicted)
        labelsThisSeq = set(itm[1] for itm in golds+preds if itm[0]!='O')
        allLabels.update(labelsThisSeq)
        selectedLbls = args.l
        if selectedLbls:
            lblsets = {tuple(selectedLbls)} # a specific subset of labels
        elif args.L:
            lblsets = {()} # all labels
        else:
            lblsets = {(lbl,) for lbl in allLabels} | {()} # all labels, plus each label individually
        for lblset in lblsets:
            if lblset==('LOC',):
                pass # NOTE(review): leftover debugging hook -- has no effect
            if lblset!=() and not set(lblset)&labelsThisSeq:
                continue
            nSeqs[lblset] += 1
            nTokens[lblset] += len(tkns)
            if lblset!=():
                # project the sequence onto the selected labels: anything else becomes O
                goldseq = [((pm,lbl) if lbl in lblset+(None,) else ('O',None)) for pm,lbl in golds]
                predseq = [((pm,lbl) if lbl in lblset+(None,) else ('O',None)) for pm,lbl in preds]
                # lblset+(None,) because if e.g. the predictions are unlabeled (their label is stored as None),
                # we want to be able to compute unlabeled scores even if the gold tokens are labeled
            else:
                goldseq = golds
                predseq = preds
            confs, softPR = data[lblset]
            # token-level measures
            confs['Exact UL']['token'] += tokenConfusions(goldseq, predseq, ignoreLabels=True, collapseNonO=False, scheme=scheme)
            confs['Exact L']['token'] += tokenConfusions(goldseq, predseq, ignoreLabels=False, collapseNonO=False, scheme=scheme)
            confs['Soft UL']['token'] += tokenConfusions(goldseq, predseq, ignoreLabels=True, collapseNonO=True, scheme=scheme)
            confs['Soft L']['token'] += tokenConfusions(goldseq, predseq, ignoreLabels=False, collapseNonO=True, scheme=scheme)
            confs['Bag UL']['token'] += tokenConfusions(goldseq, predseq, ignoreLabels=True, collapseNonO=True, scheme=scheme, bag=True)
            confs['Bag L']['token'] += tokenConfusions(goldseq, predseq, ignoreLabels=False, collapseNonO=True, scheme=scheme, bag=True)
            # mention-level measures
            confs['Exact UL']['mention'] += mentionConfusions(goldseq, predseq, ignoreLabels=True, scheme=scheme)
            confs['Exact L']['mention'] += mentionConfusions(goldseq, predseq, ignoreLabels=False, scheme=scheme)
            # soft mention measures carry separate P and R ratio counters
            softConf, softP, softR = softMentionConfusions(goldseq, predseq, ignoreLabels=True, scheme=scheme)
            confs['Soft UL']['mention'] += softConf
            softPR['UL']['P'] += softP
            softPR['UL']['R'] += softR
            softConf, softP, softR = softMentionConfusions(goldseq, predseq, ignoreLabels=False, scheme=scheme)
            confs['Soft L']['mention'] += softConf
            softPR['L']['P'] += softP
            softPR['L']['R'] += softR
            confs['Bag UL']['mention'] += tokenConfusions(goldseq, predseq, ignoreLabels=True, collapseNonO=True, scheme=scheme, bag=True, ignoreContinuation=True)
            confs['Bag L']['mention'] += tokenConfusions(goldseq, predseq, ignoreLabels=False, collapseNonO=True, scheme=scheme, bag=True, ignoreContinuation=True)
            # Manning chunk error categories (token-weighted and per-chunk)
            manningTkn, manningChk = manningCounts(goldseq, predseq, scheme)
            confs['Manning2']['token'] += manningTkn
            confs['Manning2']['mention'] += manningChk
    if len(data)==0:
        print('No relevant sequences for the given options', file=sys.stderr)
    if nIgnoredTokens>0:
        print('Ignoring {} tokens in {} sequences'.format(nIgnoredTokens, nIgnoredSeqs), file=sys.stderr)
    # Reporting pass: one table per label set.
    for lblset,(confs,softPR) in sorted(data.items(), key=lambda itm: itm[0]):
        if lblset==():
            lblsS = 'All {} labels'.format(len(allLabels))
        else:
            if len(allLabels)==1:
                # a single-label dataset's per-label table would duplicate the overall one
                continue
            lblsS = 'Labels: '+' '.join('(null)' if lbl is None else (repr(lbl) if re.search(r'\s|[\'"]', lbl) else lbl) for lbl in lblset)
            unseen = set(lblset).difference(allLabels)
            if unseen:
                print('Warning: some labels never seen in data:', unseen, file=sys.stderr)
        # derive the 'Manning' confusion matrices from the raw category counts
        c = confs['Manning2']['token']
        confs['Manning']['token'] = scoring.ConfusionMatrix(Both=c['tp'], Neither=c['tn'], Aonly=c['fn'], Bonly=c['fp'])
        c = confs['Manning2']['mention']
        confs['Manning']['mention'] = scoring.ConfusionMatrix(Both=c['tp'], Neither=c['tn'], Aonly=c['fn'], Bonly=c['fp'])
        nGoldMentions = confs['Exact UL']['mention'].Atotal
        nPredMentions = confs['Exact UL']['mention'].Btotal
        # table header
        print('''
{}
{:5}    {:5} TOKENS {:<5}          {:5} MENTIONS {:<5}
       found  xtra  miss   O/O                      found  xtra  miss   O/O
       tp    fp    fn    tn     A%  P%  R%  F1%     tp    fp    fn    tn     P%  R%  F1%'''.format(lblsS, scheme, nTokens[lblset], nSeqs[lblset], nGoldMentions, nPredMentions))
        # TODO: Manning score?
        for x in ['Exact L', 'Exact UL', 'Soft L', 'Soft UL', 'Bag L', 'Bag UL', 'Manning', 'Manning2']:
            print('{:8} '.format(x if x!='Manning2' else ''), end='')
            # token-level info
            conf = confs[x]['token']
            if x=='Manning2':
                print('le: {:4}  be: {:4}  lbe: {:4}  '.format(conf['le'], conf['be'], conf['lbe']), end='')
            else:
                print('{:4} {:4} {:4} {:6}  '.format(conf.Both, conf.Bonly, conf.Aonly, conf.Neither), end='')
            #print(conf)
            prf = conf.asPRF(suppressZeroDenominatorCheck=True)
            if x!='Manning':
                print('{: >2.0f} {: >2.0f} {: >2.0f} {: >4.1f}  '.format(100*conf.pAgreement, 100*prf.P, 100*prf.R, 100*prf.F), end='')
            else:
                # the Manning row shows counts only; pad the P/R/F columns
                print('{:2} {:2} {:4}  '.format('','',''), end='')
            # mention-level info
            conf = confs[x]['mention']
            if x=='Manning2':
                print('le: {:4}  be: {:4}  lbe: {:4}  '.format(conf['le'], conf['be'], conf['lbe']))
            else:
                # Soft measures omit tp/tn (alignments may be many-to-many)
                print('{:>4} {:4} {:4} {:>6}  '.format(conf.Both if 'Soft' not in x else '-', conf.Bonly, conf.Aonly, conf.Neither if 'Soft' not in x else '-'), end='')
                if x.startswith('Soft'):
                    ratios = softPR[x.split()[1]]['P'], softPR[x.split()[1]]['R']
                    prec = 100*ratios[0]['numer']/ratios[0]['denom'] if ratios[0]['denom']>0 else float('NaN')
                    rec = 100*ratios[1]['numer']/ratios[1]['denom'] if ratios[1]['denom']>0 else float('NaN')
                    prf = [prec, rec, scoring.harmonicMean(prec, rec)]
                else:
                    hardPRF = conf.asPRF(suppressZeroDenominatorCheck=True)
                    prf = [100*hardPRF.P, 100*hardPRF.R, 100*hardPRF.F]
                if x!='Manning':
                    print('{: >2.0f} {: >2.0f} {: >4.1f}'.format(*prf), end='')
                    # for more decimal places:
                    #print('{: >2.2f} {: >2.2f} {: >4.2f}'.format(*prf), end='')
                print()
    # -v: print a legend explaining every row/column of the report tables
    if args.v:
        print('''
---------------------------------------------------------------------------------
 L E G E N D
---------------------------------------------------------------------------------
(Selected labels)
(0)        (1) TOKENS (2)          (3) MENTIONS (4)
       found  xtra  miss   O/O                      found  xtra  miss   O/O
       tp    fp    fn    tn     A%  P%  R%  F1%     tp    fp    fn    tn     P%  R%  F1%
Exact L  0000 0000 0000 000000  00 00 00 00.0   0000 0000 0000 000000  00 00 00.0   (7)
Exact UL 0000 0000 0000 000000  00 00 00 00.0   0000 0000 0000 000000  00 00 00.0   (8)
Soft L   0000 0000 0000 000000  00 00 00 00.0      - 0000 0000      -  00 00 00.0   (9)
Soft UL  0000 0000 0000 000000  00 00 00 00.0      - 0000 0000      -  00 00 00.0   (10)
Bag L    0000 0000 0000 000000  00 00 00 00.0   0000 0000 0000 000000  00 00 00.0   (11)
Bag UL   0000 0000 0000 000000  00 00 00 00.0   0000 0000 0000 000000  00 00 00.0   (12)
Manning  0000 0000 0000 000000                  0000 0000 0000 000000               (13)
         le: 0000  be: 0000  lbe: 0000          le: 0000  be: 0000  lbe: 0000        |
         ------------- (5) --------------       ------------- (6) --------------
(Selected labels) Subset of labels used to calculate scores in this table.
   Any (gold or predicted) tag with a label not in this set will be replaced with O.
   (Tags without a label will be retained.)
(0) tagging scheme, e.g. BIO
(1) # tokens in sequences containing the selected labels at least once (gold or predicted)
(2) # sequences (sentences) containing the selected labels at least once (gold or predicted)
(3) # gold mentions
(4) # predicted mentions
(5) token-level, (6) mention-level statistics
COLUMNS true positives, false positives, false negatives, true negatives,
   accuracy (token-level only), precision, recall, F1
MEASURES
(7) Exact, labeled: tokens/mentions only count if the prediction exactly matches the gold
(8) Exact, unlabeled: category labels are ignored--only boundary errors are penalized
(9) Soft, labeled: token-level measures consider only the label in matching;
    mention-level measures allow matches where any token is shared in common between mentions
    on the two sides, provided those mentions have the same label
(10) Soft, unlabeled: token-level measures ignore the label and disregard the B vs. I distinction;
    mention-level measures are computed as in (9) but ignoring the labels
    For (9) and (10), mention-level tp and tn measures are omitted because 1-many, many-1, and
    many-to-many alignments are possible.
(11) Bag, labeled: each sequence (sentence) is treated as a bag of tokens. Mention-level measures
    consider exactly one token per mention. Positional differences are disregarded: all non-O
    tags count the same for token-level measures and U is never distinguished from B in the
    mention-level measures.
(12) Bag, unlabeled: each sequence is treated as a bag of tokens as in (11), but labels are ignored.
(13) Manning: counts of error types based on Chris Manning's chunking scheme; token and chunk
    (error event) counts are given for the results (no mention spans multiple chunks).
    le = label error, be = boundary error, lbe = label and boundary error; tp/tn reflect
    exact matches and fp/fn indicate predictions that do not overlap with a gold mention.
''')
# TODO:
# - type-level measures?
# - unordered (per-sequence bag) measures, including a binary version (e.g. "how many sentences had this label at least once")?
# - test on real data
# - counts of label confusions?
| {
"content_hash": "d70eac4b1aeb09b79dccde8b2c11ced2",
"timestamp": "",
"source": "github",
"line_count": 785,
"max_line_length": 188,
"avg_line_length": 52.58471337579618,
"alnum_prop": 0.5577654497444221,
"repo_name": "nschneid/pyutil",
"id": "6fed039c6dee07d9e60e0eb28651e45d4eff9a38",
"size": "41304",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "chunkeval.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "162185"
}
],
"symlink_target": ""
} |
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    # Alters Feature.aliases so its default is the `list` callable rather than
    # a shared value; each new row gets its own fresh empty list.

    dependencies = [
        ("resolwe_bio_kb", "0007_feature_fullname_350"),
    ]
    operations = [
        migrations.AlterField(
            model_name="feature",
            name="aliases",
            # ArrayField of up-to-256-char strings; optional (blank=True),
            # unbounded length (size=None)
            field=django.contrib.postgres.fields.ArrayField(
                base_field=models.CharField(max_length=256),
                blank=True,
                default=list,
                size=None,
            ),
        ),
    ]
| {
"content_hash": "79451e33f7fec35ffc8321fae4947769",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 60,
"avg_line_length": 25.318181818181817,
"alnum_prop": 0.5547576301615799,
"repo_name": "genialis/resolwe-bio",
"id": "1ccf65123e4c59d8de263941570aa3167f8563a0",
"size": "604",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "resolwe_bio/kb/migrations/0008_callable_defaults.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "10553"
},
{
"name": "PLpgSQL",
"bytes": "4491"
},
{
"name": "Python",
"bytes": "1729619"
},
{
"name": "R",
"bytes": "20619"
},
{
"name": "Shell",
"bytes": "6713"
}
],
"symlink_target": ""
} |
import IECore
import Gaffer
import GafferUI
QtCore = GafferUI._qtImport( "QtCore" )
QtGui = GafferUI._qtImport( "QtGui" )
## The Timeline presents a time slider which edits the frame
# entry of a context.
# NOTE(review): widget construction below is order-sensitive (children are
# parented to the innermost `with` container), so it is documented in place.
class Timeline( GafferUI.EditorWidget ) :
	def __init__( self, scriptNode, **kw ) :
		# Horizontal row holding all of the timeline's widgets.
		self.__row = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal, borderWidth = 4, spacing = 2 )
		GafferUI.EditorWidget.__init__( self, self.__row, scriptNode, **kw )
		with self.__row :
			# cycles how much of the range UI is visible (see __visibilityButtonClicked)
			self.__visibilityButton = GafferUI.Button( image="timeline3.png", hasFrame=False )
			self.__visibilityButtonClickedConnection = self.__visibilityButton.clickedSignal().connect( Gaffer.WeakMethod( self.__visibilityButtonClicked ) )
			# editable script frame range start (bound to the plug)
			self.__scriptRangeStart = GafferUI.NumericPlugValueWidget( scriptNode["frameRange"]["start"] )
			self.__scriptRangeStart.numericWidget().setFixedCharacterWidth( 4 )
			self.__scriptRangeStart.setToolTip( self.__scriptRangeStart.getPlug().fullName() )
			# slider range start -- clamped to the script range in __sliderRangeChanged
			self.__sliderRangeStart = GafferUI.NumericWidget( scriptNode["frameRange"]["start"].getValue() )
			self.__sliderRangeStart.setFixedCharacterWidth( 4 )
			self.__sliderRangeStart.setToolTip( "Slider minimum" )
			self.__sliderRangeStartChangedConnection = self.__sliderRangeStart.editingFinishedSignal().connect( Gaffer.WeakMethod( self.__sliderRangeChanged ) )
			# the frame scrubber itself
			self.__slider = GafferUI.NumericSlider(
				value = self.getContext().getFrame(),
				min = float( scriptNode["frameRange"]["start"].getValue() ),
				max = float( scriptNode["frameRange"]["end"].getValue() ),
				parenting = { "expand" : True },
			)
			self.__slider.setPositionIncrement( 0 ) # disable so the slider doesn't mask our global frame increment shortcut
			self.__sliderValueChangedConnection = self.__slider.valueChangedSignal().connect( Gaffer.WeakMethod( self.__valueChanged ) )
			# transport controls: go-to-start, play/pause, go-to-end
			self.__startButton = GafferUI.Button( image = "timelineStart.png", hasFrame=False )
			self.__startButtonClickedConnection = self.__startButton.clickedSignal().connect( Gaffer.WeakMethod( self.__startOrEndButtonClicked ) )
			self.__playPause = GafferUI.Button( image = "timelinePlay.png", hasFrame=False )
			self.__playPauseClickedConnection = self.__playPause.clickedSignal().connect( Gaffer.WeakMethod( self.__playPauseClicked ) )
			self.__endButton = GafferUI.Button( image = "timelineEnd.png", hasFrame=False )
			self.__endButtonClickedConnection = self.__endButton.clickedSignal().connect( Gaffer.WeakMethod( self.__startOrEndButtonClicked ) )
			# numeric entry for the current frame
			self.__frame = GafferUI.NumericWidget( self.getContext().getFrame() )
			self.__frame.setFixedCharacterWidth( 5 )
			self.__frame.setToolTip( "Current frame" )
			self.__frameChangedConnection = self.__frame.valueChangedSignal().connect( Gaffer.WeakMethod( self.__valueChanged ) )
			# slider and script range end widgets, mirroring the start widgets
			self.__sliderRangeEnd = GafferUI.NumericWidget( scriptNode["frameRange"]["end"].getValue() )
			self.__sliderRangeEnd.setFixedCharacterWidth( 4 )
			self.__sliderRangeEnd.setToolTip( "Slider maximum" )
			self.__sliderRangeEndChangedConnection = self.__sliderRangeEnd.editingFinishedSignal().connect( Gaffer.WeakMethod( self.__sliderRangeChanged ) )
			self.__scriptRangeEnd = GafferUI.NumericPlugValueWidget( scriptNode["frameRange"]["end"] )
			self.__scriptRangeEnd.numericWidget().setFixedCharacterWidth( 4 )
			self.__scriptRangeEnd.setToolTip( self.__scriptRangeEnd.getPlug().fullName() )
		# track script frame-range edits so the slider range stays clamped
		self.__scriptNodePlugSetConnection = scriptNode.plugSetSignal().connect( Gaffer.WeakMethod( self.__scriptNodePlugSet ) )
		# keyboard shortcuts for stepping a frame at a time
		frameIncrementShortcut = QtGui.QShortcut( QtGui.QKeySequence( "Right" ), self._qtWidget() )
		frameIncrementShortcut.activated.connect( Gaffer.WeakMethod( self.__incrementFrame ) )
		frameDecrementShortcut = QtGui.QShortcut( QtGui.QKeySequence( "Left" ), self._qtWidget() )
		frameDecrementShortcut.activated.connect( IECore.curry( Gaffer.WeakMethod( self.__incrementFrame ), -1 ) )
		# acquired lazily in _updateFromContext()
		self.__playback = None
		self._updateFromContext( set() )
	# Reacts to context changes: (re)acquires the Playback object and keeps the
	# frame field and slider in sync with the context's current frame.
	def _updateFromContext( self, modifiedItems ) :
		if self.__playback is None or not self.__playback.context().isSame( self.getContext() ) :
			self.__playback = GafferUI.Playback.acquire( self.getContext() )
			self.__playback.setFrameRange( self.__sliderRangeStart.getValue(), self.__sliderRangeEnd.getValue() )
			self.__playbackStateChangedConnection = self.__playback.stateChangedSignal().connect( Gaffer.WeakMethod( self.__playbackStateChanged ) )
			self.__playbackFrameRangeChangedConnection = self.__playback.frameRangeChangedSignal().connect( Gaffer.WeakMethod( self.__playbackFrameRangeChanged ) )
		if "frame" not in modifiedItems :
			return
		# update the frame counter and slider position, blocking our own
		# handlers so the change doesn't echo back into the context
		with Gaffer.BlockedConnection( [ self.__frameChangedConnection, self.__sliderValueChangedConnection ] ) :
			self.__frame.setValue( self.getContext().getFrame() )
			self.__slider.setValue( self.getContext().getFrame() )
	# Handles edits to either slider-range field, clamping to the script range
	# and keeping min <= max.
	def __sliderRangeChanged( self, widget ) :
		assert( widget is self.__sliderRangeStart or widget is self.__sliderRangeEnd )
		# clamp value within range specified by script
		value = widget.getValue()
		value = max( self.scriptNode()["frameRange"]["start"].getValue(), value )
		value = min( self.scriptNode()["frameRange"]["end"].getValue(), value )
		# move the other end of the range if necessary
		if widget is self.__sliderRangeStart :
			minValue = value
			maxValue = max( value, self.__sliderRangeEnd.getValue() )
		else :
			maxValue = value
			minValue = min( value, self.__sliderRangeStart.getValue() )
		self.__playback.setFrameRange( minValue, maxValue )
	# this is connected to the valueChangedSignal on both the slider and the frame field
	def __valueChanged( self, widget, reason ) :
		assert( widget is self.__slider or widget is self.__frame )
		if widget is self.__slider :
			## \todo Have the rounding come from NumericSlider, and allow the shift
			# modifier to choose fractional frame values.
			frame = int( self.__slider.getValue() )
		else :
			frame = self.__frame.getValue()
		# clamp the frame to the script's frame range
		frame = float( max( frame, self.scriptNode()["frameRange"]["start"].getValue() ) )
		frame = float( min( frame, self.scriptNode()["frameRange"]["end"].getValue() ) )
		if reason == reason.DragBegin :
			self.__playback.setState( self.__playback.State.Scrubbing )
		elif reason == reason.DragEnd :
			self.__playback.setState( self.__playback.State.Stopped )
		if widget is self.__frame :
			# if frame was set outside the range, the actual value in the context
			# may not change, so we need to update the value in the frame field manually
			self.__frame.setValue( frame )
		self.getContext().setFrame( frame )
	# Keeps the slider range clamped when the script's frame range plugs change.
	def __scriptNodePlugSet( self, plug ) :
		combineFunction = None
		if plug.isSame( self.scriptNode()["frameRange"]["start"] ) :
			combineFunction = max
		elif plug.isSame( self.scriptNode()["frameRange"]["end"] ) :
			combineFunction = min
		if combineFunction is not None :
			self.__playback.setFrameRange(
				combineFunction( plug.getValue(), self.__sliderRangeStart.getValue() ),
				combineFunction( plug.getValue(), self.__sliderRangeEnd.getValue() ),
			)
	# Cycles through three visibility states: everything shown, slider range
	# only, or neither range shown (the button image reflects the state).
	def __visibilityButtonClicked( self, button ) :
		assert( button is self.__visibilityButton )
		if self.__scriptRangeStart.getVisible() :
			self.__scriptRangeStart.setVisible( False )
			self.__scriptRangeEnd.setVisible( False )
			self.__visibilityButton.setImage( "timeline2.png" )
		elif self.__sliderRangeStart.getVisible() :
			self.__sliderRangeStart.setVisible( False )
			self.__sliderRangeEnd.setVisible( False )
			self.__visibilityButton.setImage( "timeline1.png" )
		else :
			self.__scriptRangeStart.setVisible( True )
			self.__scriptRangeEnd.setVisible( True )
			self.__sliderRangeStart.setVisible( True )
			self.__sliderRangeEnd.setVisible( True )
			self.__visibilityButton.setImage( "timeline3.png" )
	# Toggles between stopped and playing-forwards.
	def __playPauseClicked( self, button ) :
		assert( button is self.__playPause )
		if self.__playback.getState() == self.__playback.State.Stopped :
			self.__playback.setState( self.__playback.State.PlayingForwards )
		else :
			self.__playback.setState( self.__playback.State.Stopped )
	# Stops playback and jumps to the start or end of the slider range.
	def __startOrEndButtonClicked( self, button ) :
		self.__playback.setState( self.__playback.State.Stopped )
		if button is self.__startButton :
			self.getContext().setFrame( self.__sliderRangeStart.getValue() )
		else :
			self.getContext().setFrame( self.__sliderRangeEnd.getValue() )
	# Updates the play/pause button image to reflect the playback state.
	def __playbackStateChanged( self, playback ) :
		if playback.getState() in ( playback.State.PlayingForwards, playback.State.PlayingBackwards ) :
			self.__playPause.setImage( "timelinePause.png" )
		else :
			self.__playPause.setImage( "timelinePlay.png" )
	# Mirrors a playback frame-range change into the slider and range fields,
	# blocking our own edit handlers to avoid feedback.
	def __playbackFrameRangeChanged( self, playback ) :
		minValue, maxValue = playback.getFrameRange()
		with Gaffer.BlockedConnection( ( self.__sliderRangeStartChangedConnection, self.__sliderRangeEndChangedConnection ) ) :
			self.__slider.setRange( minValue, maxValue )
			self.__sliderRangeStart.setValue( minValue )
			self.__sliderRangeEnd.setValue( maxValue )
	# Steps the current frame (negative increments step backwards).
	def __incrementFrame( self, increment = 1 ) :
		self.__playback.incrementFrame( increment )
	def __repr__( self ) :
		return "GafferUI.Timeline( scriptNode )"
# Register this editor under the name "Timeline" with the EditorWidget factory.
GafferUI.EditorWidget.registerType( "Timeline", Timeline )
| {
"content_hash": "d4b4885555f02391d68cd8ae5a40ea20",
"timestamp": "",
"source": "github",
"line_count": 213,
"max_line_length": 154,
"avg_line_length": 43.948356807511736,
"alnum_prop": 0.7212904604208952,
"repo_name": "davidsminor/gaffer",
"id": "9e44c909a3726b01f2c8170f9c123071592240c7",
"size": "11235",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/GafferUI/Timeline.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "9286"
},
{
"name": "C++",
"bytes": "3358250"
},
{
"name": "COBOL",
"bytes": "64449"
},
{
"name": "CSS",
"bytes": "28027"
},
{
"name": "Python",
"bytes": "3267354"
},
{
"name": "Shell",
"bytes": "7055"
},
{
"name": "Slash",
"bytes": "35200"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
from codecs import open
import os
here = os.path.abspath(os.path.dirname(__file__))
# Get the long description from the README file
# Prefer pypandoc (Markdown -> reStructuredText, which PyPI renders);
# fall back to the raw Markdown if pypandoc or the conversion is unavailable.
try:
    import pypandoc
    long_description = pypandoc.convert('README.md', format='md', to='rst')
except(IOError, ImportError):
    with open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
        long_description = f.read()
# get the dependencies and installs
with open(os.path.join(here, 'requirements.txt'), encoding='utf-8') as f:
all_reqs = f.read().split('\n')
install_requires = [x.strip() for x in all_reqs if ('git+' not in x) and
(not x.startswith('#')) and (not x.startswith('-'))]
dependency_links = [x.strip().replace('git+', '') for x in all_reqs if 'git+' not in x]
setup(
name='gcdt-lookups',
version='0.0.26',
description='Plugin (gcdt-lookups) for gcdt',
long_description=long_description,
license='MIT',
classifiers=[
'Natural Language :: English',
'Intended Audience :: Developers',
'Environment :: Console',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
],
keywords='',
packages=find_packages(exclude=['docs', 'tests*']),
include_package_data=True,
author='glomex SRE Team',
install_requires=install_requires,
dependency_links=dependency_links,
author_email='mark.fink@glomex.com',
entry_points={
'gcdt10': [
'lookups=gcdt_lookups.lookups',
],
}
)
| {
"content_hash": "d167c44587545506f1de6a8e5de8bb78",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 87,
"avg_line_length": 32.12,
"alnum_prop": 0.6270236612702366,
"repo_name": "glomex/gcdt-lookups",
"id": "9983fb57f86a0bb0b5090fd2304b1bd482f9007b",
"size": "1606",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "40485"
}
],
"symlink_target": ""
} |
def info():
    # Print a short identification banner for the package.
    print('cgi-wsgi')
class CGIContext:
    """Accumulates a CGI-style response, splitting headers from body.

    Lines fed through writeln() are parsed as "Key: value" header lines
    until the first empty line; every subsequent line belongs to the body.
    """

    def __init__(self):
        self.headers = {}
        self.body = []
        self.writing_headers = True

    def append_body(self, lineOrLinesOrNil):
        # A None value still contributes an (empty) body line.
        self.body.append(lineOrLinesOrNil or '')

    def add_header(self, line):
        # Split only on the first ": " so header values may contain it.
        key, value = line.split(': ', 1)
        self.headers[key] = value

    def writeln(self, line=''):
        # One call may carry several newline-separated lines.
        for part in line.split("\n"):
            self.process_line(part)

    def process_line(self, line):
        if not self.writing_headers:
            self.append_body(line)
            return
        if line:
            self.add_header(line)
        else:
            # The first blank line ends the header section.
            self.writing_headers = False
class CGIApp:
def __call__(self, environ, start_response):
status = '200 OK'
context = CGIContext()
self.response(environ, context.writeln)
# print context.headers
start_response(status, context.headers.items())
return context.body
def response(self, environ, writeln):
writeln('Content-type: text/plain')
writeln()
writeln("""You should really implement your own response.
e.g.
class %s(cgi_wsgi.CGIApp):
def response(self, environ, writeln):
writeln('Content-type: text/html')
writeln()
writeln('<!DOCTYPE html>')
writeln('<p>Some response required</p>')
""" % self.__class__.__name__
) | {
"content_hash": "f8af7b68525210a83376e0736667f279",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 65,
"avg_line_length": 26.49122807017544,
"alnum_prop": 0.5668874172185431,
"repo_name": "timdiggins/cgi_wsgi",
"id": "65eb7a2b0021bb4a8c336fe1397a551f82f45b60",
"size": "1510",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cgi_wsgi/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5636"
}
],
"symlink_target": ""
} |
import unittest
from ...worksheet import Worksheet
class TestRangeReturnValues(unittest.TestCase):
    """
    Test the return value for various functions that handle 1 or 2D ranges.
    """

    def test_range_return_values(self):
        """Test writing a worksheet with data out of bounds."""
        worksheet = Worksheet()

        max_row = 1048576
        max_col = 16384
        bound_error = -1

        # Every cell writer should reject an out-of-bounds (row, col).
        for row, col in ((max_row, 0), (0, max_col), (max_row, max_col)):
            self.assertEqual(worksheet.write_string(row, col, 'Foo'), bound_error)
            self.assertEqual(worksheet.write_number(row, col, 123), bound_error)
            self.assertEqual(worksheet.write_blank(row, col, None, 'format'), bound_error)
            self.assertEqual(worksheet.write_formula(row, col, '=A1'), bound_error)

        # Range writers should reject a range with any corner out of bounds.
        array_ranges = (
            (0, 0, 0, max_col),
            (0, 0, max_row, 0),
            (0, max_col, 0, 0),
            (max_row, 0, 0, 0),
            (max_row, max_col, max_row, max_col),
        )
        for first_row, first_col, last_row, last_col in array_ranges:
            got = worksheet.write_array_formula(
                first_row, first_col, last_row, last_col, '=A1')
            self.assertEqual(got, bound_error)

        merge_ranges = (
            (0, 0, 0, max_col),
            (0, 0, max_row, 0),
            (0, max_col, 0, 0),
            (max_row, 0, 0, 0),
        )
        for first_row, first_col, last_row, last_col in merge_ranges:
            got = worksheet.merge_range(
                first_row, first_col, last_row, last_col, 'Foo')
            self.assertEqual(got, bound_error)

        # Column out of bounds.
        self.assertEqual(worksheet.set_column(6, max_col, 17), bound_error)
        self.assertEqual(worksheet.set_column(max_col, 6, 17), bound_error)

        # Row out of bounds (return value intentionally unchecked).
        worksheet.set_row(max_row, 30)

        # Reversed max and min column numbers are accepted.
        worksheet.set_column(0, 3, 17)
| {
"content_hash": "2cae7ea32834c17069c51243b9c09847",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 86,
"avg_line_length": 33.02150537634409,
"alnum_prop": 0.6125040703353957,
"repo_name": "jmcnamara/XlsxWriter",
"id": "3df12fb192d59d6c6bdb43a83d8f9c9e215bcbe8",
"size": "3284",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "xlsxwriter/test/worksheet/test_range_return_values.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5113"
},
{
"name": "CSS",
"bytes": "16544"
},
{
"name": "HTML",
"bytes": "13100"
},
{
"name": "Makefile",
"bytes": "7748"
},
{
"name": "Perl",
"bytes": "3503"
},
{
"name": "Python",
"bytes": "2807230"
},
{
"name": "Shell",
"bytes": "7964"
}
],
"symlink_target": ""
} |
import os
from data_utils import load_data
from keras.layers.core import Dense, Activation, Dropout, Flatten, Merge
from keras.models import Sequential
from numpy import array
from keras.callbacks import CSVLogger
from tensorflow.python.ops import control_flow_ops
from keras.callbacks import ModelCheckpoint, EarlyStopping
import tensorflow as tf
# Compatibility shim: this Keras version looks up control_flow_ops on the
# tf.python namespace, which this TensorFlow build no longer exposes.
tf.python.control_flow_ops = tf
from keras import backend as K
# CSV files holding the training / validation datasets.
training_data_path = 'data_training.csv'
valid_data_path = 'data_valid.csv'
def custom_objective(y_true, y_pred):
    """Sum-of-squared-errors loss (unnormalised squared L2 distance)."""
    residual = y_true - y_pred
    return tf.reduce_sum(tf.square(residual))
def main():
    """Build, train and save a dense regression network.

    Reads training/validation pairs from data_training.csv and
    data_valid.csv, trains for 500 epochs with Adam and the custom
    sum-of-squared-errors loss, checkpointing the best model (by
    validation loss) to quant_model.h5 and logging epochs to training.log.
    """
    model = Sequential()
    # input dim 64: Re(U) represented as a vector, SU(8) length 64
    # output dim 36, single step
    model.add(Dense(output_dim=2000,input_dim=64))
    model.add(Dense(4000,activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(4000,activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(input_dim=4000,output_dim=36))
    train_input, train_output = load_data(training_data_path)
    valid_input, valid_output = load_data(valid_data_path)
    # Keep only the best-so-far weights on disk; log per-epoch metrics.
    checkpoint = ModelCheckpoint('quant_model.h5', monitor='val_loss', verbose=0, save_best_only=True, save_weights_only=False, mode='auto', period=1)
    csv_logger = CSVLogger('training.log',separator=',', append=False)
    model.compile(optimizer='adam',loss=custom_objective)
    model.summary()
    model.fit(train_input, train_output, validation_data=(valid_input,valid_output), nb_epoch=500, batch_size=64, callbacks=[checkpoint,csv_logger])
    # NOTE(review): this final save overwrites the best checkpoint (same
    # path) with the last-epoch weights — confirm that is intended.
    model.save('quant_model.h5')
# Script entry point: train only when run directly, not on import.
if __name__ == '__main__':
    main()
| {
"content_hash": "e2f38b93b621028c3e96156ba98b2e5e",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 150,
"avg_line_length": 29.310344827586206,
"alnum_prop": 0.7047058823529412,
"repo_name": "Swaddle/nnQcompiler",
"id": "6aca27cfa46982bd52f77f84187fe17bfd48c366",
"size": "1701",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "su8/single_segment/train.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Mathematica",
"bytes": "161906"
},
{
"name": "Python",
"bytes": "7065"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from bcc import BPF
import argparse
from time import strftime
from socket import inet_ntop, AF_INET, AF_INET6
from struct import pack
from time import sleep
from bcc import tcp
# arguments (tcpdrop takes no options beyond the hidden --ebpf dump flag)
examples = """examples:
./tcpdrop # trace kernel TCP drops
"""
parser = argparse.ArgumentParser(
    description="Trace TCP drops by the kernel",
    formatter_class=argparse.RawDescriptionHelpFormatter,
    epilog=examples)
parser.add_argument("--ebpf", action="store_true",
    help=argparse.SUPPRESS)  # hidden: print the generated BPF C and exit
args = parser.parse_args()
debug = 0  # set to 1 to always print the generated BPF C
# define BPF program (C): attached as a kprobe on the kernel's tcp_drop().
# For each dropped skb it records pid, addresses/ports, socket state, TCP
# flags and a kernel stack id, emitted via a per-IP-version perf buffer.
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <uapi/linux/tcp.h>
#include <uapi/linux/ip.h>
#include <net/sock.h>
#include <bcc/proto.h>
BPF_STACK_TRACE(stack_traces, 1024);
// separate data structs for ipv4 and ipv6
struct ipv4_data_t {
u32 pid;
u64 ip;
u32 saddr;
u32 daddr;
u16 sport;
u16 dport;
u8 state;
u8 tcpflags;
u32 stack_id;
};
BPF_PERF_OUTPUT(ipv4_events);
struct ipv6_data_t {
u32 pid;
u64 ip;
unsigned __int128 saddr;
unsigned __int128 daddr;
u16 sport;
u16 dport;
u8 state;
u8 tcpflags;
u32 stack_id;
};
BPF_PERF_OUTPUT(ipv6_events);
static struct tcphdr *skb_to_tcphdr(const struct sk_buff *skb)
{
// unstable API. verify logic in tcp_hdr() -> skb_transport_header().
return (struct tcphdr *)(skb->head + skb->transport_header);
}
static inline struct iphdr *skb_to_iphdr(const struct sk_buff *skb)
{
// unstable API. verify logic in ip_hdr() -> skb_network_header().
return (struct iphdr *)(skb->head + skb->network_header);
}
// from include/net/tcp.h:
#ifndef tcp_flag_byte
#define tcp_flag_byte(th) (((u_int8_t *)th)[13])
#endif
int trace_tcp_drop(struct pt_regs *ctx, struct sock *sk, struct sk_buff *skb)
{
if (sk == NULL)
return 0;
u32 pid = bpf_get_current_pid_tgid();
// pull in details from the packet headers and the sock struct
u16 family = sk->__sk_common.skc_family;
char state = sk->__sk_common.skc_state;
u16 sport = 0, dport = 0;
struct tcphdr *tcp = skb_to_tcphdr(skb);
struct iphdr *ip = skb_to_iphdr(skb);
u8 tcpflags = ((u_int8_t *)tcp)[13];
sport = tcp->source;
dport = tcp->dest;
sport = ntohs(sport);
dport = ntohs(dport);
if (family == AF_INET) {
struct ipv4_data_t data4 = {};
data4.pid = pid;
data4.ip = 4;
data4.saddr = ip->saddr;
data4.daddr = ip->daddr;
data4.dport = dport;
data4.sport = sport;
data4.state = state;
data4.tcpflags = tcpflags;
data4.stack_id = stack_traces.get_stackid(ctx, 0);
ipv4_events.perf_submit(ctx, &data4, sizeof(data4));
} else if (family == AF_INET6) {
struct ipv6_data_t data6 = {};
data6.pid = pid;
data6.ip = 6;
bpf_probe_read(&data6.saddr, sizeof(data6.saddr),
sk->__sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32);
bpf_probe_read(&data6.daddr, sizeof(data6.daddr),
sk->__sk_common.skc_v6_daddr.in6_u.u6_addr32);
data6.dport = dport;
data6.sport = sport;
data6.state = state;
data6.tcpflags = tcpflags;
data6.stack_id = stack_traces.get_stackid(ctx, 0);
ipv6_events.perf_submit(ctx, &data6, sizeof(data6));
}
// else drop
return 0;
}
"""
# Debug aid: dump the generated C, and exit early when --ebpf was given.
if debug or args.ebpf:
    print(bpf_text)
    if args.ebpf:
        exit()
# process event
def print_ipv4_event(cpu, data, size):
    """Perf-buffer callback: print one dropped IPv4 packet and its kernel stack."""
    event = b["ipv4_events"].event(data)
    src = "%s:%d" % (inet_ntop(AF_INET, pack('I', event.saddr)), event.sport)
    dst = "%s:%s" % (inet_ntop(AF_INET, pack('I', event.daddr)), event.dport)
    print("%-8s %-6d %-2d %-20s > %-20s %s (%s)" % (
        strftime("%H:%M:%S"), event.pid, event.ip, src, dst,
        tcp.tcpstate[event.state], tcp.flags2str(event.tcpflags)))
    # Symbolise and print the kernel stack that led to the drop.
    for addr in stack_traces.walk(event.stack_id):
        print("\t%s" % b.ksym(addr, show_offset=True))
    print("")
def print_ipv6_event(cpu, data, size):
    """Perf-buffer callback: print one dropped IPv6 packet and its kernel stack."""
    event = b["ipv6_events"].event(data)
    src = "%s:%d" % (inet_ntop(AF_INET6, event.saddr), event.sport)
    dst = "%s:%d" % (inet_ntop(AF_INET6, event.daddr), event.dport)
    print("%-8s %-6d %-2d %-20s > %-20s %s (%s)" % (
        strftime("%H:%M:%S"), event.pid, event.ip, src, dst,
        tcp.tcpstate[event.state], tcp.flags2str(event.tcpflags)))
    # Symbolise and print the kernel stack that led to the drop.
    for addr in stack_traces.walk(event.stack_id):
        print("\t%s" % b.ksym(addr, show_offset=True))
    print("")
# initialize BPF and attach to the kernel's tcp_drop() function; bail out
# on kernels where it is absent or not traceable.
b = BPF(text=bpf_text)
if b.get_kprobe_functions(b"tcp_drop"):
    b.attach_kprobe(event="tcp_drop", fn_name="trace_tcp_drop")
else:
    print("ERROR: tcp_drop() kernel function not found or traceable. "
        "Older kernel versions not supported.")
    exit()
stack_traces = b.get_table("stack_traces")

# header
print("%-8s %-6s %-2s %-20s > %-20s %s (%s)" % ("TIME", "PID", "IP",
    "SADDR:SPORT", "DADDR:DPORT", "STATE", "FLAGS"))

# read events: block in perf_buffer_poll() until interrupted with Ctrl-C.
b["ipv4_events"].open_perf_buffer(print_ipv4_event)
b["ipv6_events"].open_perf_buffer(print_ipv6_event)
while 1:
    try:
        b.perf_buffer_poll()
    except KeyboardInterrupt:
        exit()
| {
"content_hash": "cfdf89e8037034978bfb43f0dc964c5f",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 77,
"avg_line_length": 29.072222222222223,
"alnum_prop": 0.6124593923179821,
"repo_name": "mcaleavya/bcc",
"id": "bf8634df68b88dca4762530fed1a9685fb96a688",
"size": "5839",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/tcpdrop.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "321768"
},
{
"name": "C++",
"bytes": "920975"
},
{
"name": "CMake",
"bytes": "38841"
},
{
"name": "HTML",
"bytes": "2979"
},
{
"name": "LLVM",
"bytes": "4379"
},
{
"name": "Limbo",
"bytes": "6069"
},
{
"name": "Lua",
"bytes": "298149"
},
{
"name": "Makefile",
"bytes": "1481"
},
{
"name": "P4",
"bytes": "9242"
},
{
"name": "Python",
"bytes": "1206933"
},
{
"name": "Shell",
"bytes": "17023"
},
{
"name": "Yacc",
"bytes": "19817"
}
],
"symlink_target": ""
} |
def get_total_seconds(timedelta):
    """
    Backported for Python < 2.7
    See http://docs.python.org/library/datetime.html.
    """
    # Express the whole delta in microseconds, then convert back to float
    # seconds, mirroring timedelta.total_seconds() from Python 2.7+.
    whole_seconds = timedelta.seconds + timedelta.days * 24 * 60 * 60
    total_microseconds = timedelta.microseconds + whole_seconds * 10**6
    return total_microseconds / float(10**6)
| {
"content_hash": "b37218838aecafbb615431e0aba87f4e",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 53,
"avg_line_length": 26.09090909090909,
"alnum_prop": 0.5574912891986062,
"repo_name": "lukaszb/porunga",
"id": "f33fdcebe1dc6c3a1b82959ba163aad5704f0dbc",
"size": "288",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "porunga/utils/backports.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "28729"
},
{
"name": "Shell",
"bytes": "280"
}
],
"symlink_target": ""
} |
from .control import *
# Little demo on how it could be used.
if __name__ == '__main__':
    controller = I2PController()
    print(controller.get_network_settings())
    info = controller.get_router_info()
    message = ''.join([
        'You are running i2p version ', str(info['i2p.router.version']), '. ',
        'It has been up for ', str(info['i2p.router.uptime']), 'ms. ',
        'Your router knows ', str(info['i2p.router.netdb.knownpeers']), ' peers.'
    ])
    print(message)
| {
"content_hash": "836827ea1985e3b8a86cec66ac4bfcf3",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 80,
"avg_line_length": 37,
"alnum_prop": 0.5743243243243243,
"repo_name": "chris-barry/i2py",
"id": "d557ccdccd6be77ab81dea49cfe1b0b64419a27e",
"size": "545",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "i2py/control/__main__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "69781"
}
],
"symlink_target": ""
} |
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Data(_BaseLayoutHierarchyType):
# class properties
# --------------------
_parent_path_str = "layout.template"
_path_str = "layout.template.data"
_valid_props = {
"bar",
"barpolar",
"box",
"candlestick",
"carpet",
"choropleth",
"choroplethmapbox",
"cone",
"contour",
"contourcarpet",
"densitymapbox",
"funnel",
"funnelarea",
"heatmap",
"heatmapgl",
"histogram",
"histogram2d",
"histogram2dcontour",
"icicle",
"image",
"indicator",
"isosurface",
"mesh3d",
"ohlc",
"parcats",
"parcoords",
"pie",
"pointcloud",
"sankey",
"scatter",
"scatter3d",
"scattercarpet",
"scattergeo",
"scattergl",
"scattermapbox",
"scatterpolar",
"scatterpolargl",
"scattersmith",
"scatterternary",
"splom",
"streamtube",
"sunburst",
"surface",
"table",
"treemap",
"violin",
"volume",
"waterfall",
}
# barpolar
# --------
@property
def barpolar(self):
"""
The 'barpolar' property is a tuple of instances of
Barpolar that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Barpolar
- A list or tuple of dicts of string/value properties that
will be passed to the Barpolar constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Barpolar]
"""
return self["barpolar"]
@barpolar.setter
def barpolar(self, val):
self["barpolar"] = val
# bar
# ---
@property
def bar(self):
"""
The 'bar' property is a tuple of instances of
Bar that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Bar
- A list or tuple of dicts of string/value properties that
will be passed to the Bar constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Bar]
"""
return self["bar"]
@bar.setter
def bar(self, val):
self["bar"] = val
# box
# ---
@property
def box(self):
"""
The 'box' property is a tuple of instances of
Box that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Box
- A list or tuple of dicts of string/value properties that
will be passed to the Box constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Box]
"""
return self["box"]
@box.setter
def box(self, val):
self["box"] = val
# candlestick
# -----------
@property
def candlestick(self):
"""
The 'candlestick' property is a tuple of instances of
Candlestick that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Candlestick
- A list or tuple of dicts of string/value properties that
will be passed to the Candlestick constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Candlestick]
"""
return self["candlestick"]
@candlestick.setter
def candlestick(self, val):
self["candlestick"] = val
# carpet
# ------
@property
def carpet(self):
"""
The 'carpet' property is a tuple of instances of
Carpet that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Carpet
- A list or tuple of dicts of string/value properties that
will be passed to the Carpet constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Carpet]
"""
return self["carpet"]
@carpet.setter
def carpet(self, val):
self["carpet"] = val
# choroplethmapbox
# ----------------
@property
def choroplethmapbox(self):
"""
The 'choroplethmapbox' property is a tuple of instances of
Choroplethmapbox that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Choroplethmapbox
- A list or tuple of dicts of string/value properties that
will be passed to the Choroplethmapbox constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Choroplethmapbox]
"""
return self["choroplethmapbox"]
@choroplethmapbox.setter
def choroplethmapbox(self, val):
self["choroplethmapbox"] = val
# choropleth
# ----------
@property
def choropleth(self):
"""
The 'choropleth' property is a tuple of instances of
Choropleth that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Choropleth
- A list or tuple of dicts of string/value properties that
will be passed to the Choropleth constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Choropleth]
"""
return self["choropleth"]
@choropleth.setter
def choropleth(self, val):
self["choropleth"] = val
# cone
# ----
@property
def cone(self):
"""
The 'cone' property is a tuple of instances of
Cone that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Cone
- A list or tuple of dicts of string/value properties that
will be passed to the Cone constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Cone]
"""
return self["cone"]
@cone.setter
def cone(self, val):
self["cone"] = val
# contourcarpet
# -------------
@property
def contourcarpet(self):
"""
The 'contourcarpet' property is a tuple of instances of
Contourcarpet that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Contourcarpet
- A list or tuple of dicts of string/value properties that
will be passed to the Contourcarpet constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Contourcarpet]
"""
return self["contourcarpet"]
@contourcarpet.setter
def contourcarpet(self, val):
self["contourcarpet"] = val
# contour
# -------
@property
def contour(self):
"""
The 'contour' property is a tuple of instances of
Contour that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Contour
- A list or tuple of dicts of string/value properties that
will be passed to the Contour constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Contour]
"""
return self["contour"]
@contour.setter
def contour(self, val):
self["contour"] = val
# densitymapbox
# -------------
@property
def densitymapbox(self):
"""
The 'densitymapbox' property is a tuple of instances of
Densitymapbox that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Densitymapbox
- A list or tuple of dicts of string/value properties that
will be passed to the Densitymapbox constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Densitymapbox]
"""
return self["densitymapbox"]
@densitymapbox.setter
def densitymapbox(self, val):
self["densitymapbox"] = val
# funnelarea
# ----------
@property
def funnelarea(self):
"""
The 'funnelarea' property is a tuple of instances of
Funnelarea that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Funnelarea
- A list or tuple of dicts of string/value properties that
will be passed to the Funnelarea constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Funnelarea]
"""
return self["funnelarea"]
@funnelarea.setter
def funnelarea(self, val):
self["funnelarea"] = val
# funnel
# ------
@property
def funnel(self):
"""
The 'funnel' property is a tuple of instances of
Funnel that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Funnel
- A list or tuple of dicts of string/value properties that
will be passed to the Funnel constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Funnel]
"""
return self["funnel"]
@funnel.setter
def funnel(self, val):
self["funnel"] = val
# heatmapgl
# ---------
@property
def heatmapgl(self):
"""
The 'heatmapgl' property is a tuple of instances of
Heatmapgl that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Heatmapgl
- A list or tuple of dicts of string/value properties that
will be passed to the Heatmapgl constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Heatmapgl]
"""
return self["heatmapgl"]
@heatmapgl.setter
def heatmapgl(self, val):
self["heatmapgl"] = val
# heatmap
# -------
@property
def heatmap(self):
"""
The 'heatmap' property is a tuple of instances of
Heatmap that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Heatmap
- A list or tuple of dicts of string/value properties that
will be passed to the Heatmap constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Heatmap]
"""
return self["heatmap"]
@heatmap.setter
def heatmap(self, val):
self["heatmap"] = val
# histogram2dcontour
# ------------------
@property
def histogram2dcontour(self):
"""
The 'histogram2dcontour' property is a tuple of instances of
Histogram2dContour that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Histogram2dContour
- A list or tuple of dicts of string/value properties that
will be passed to the Histogram2dContour constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Histogram2dContour]
"""
return self["histogram2dcontour"]
@histogram2dcontour.setter
def histogram2dcontour(self, val):
self["histogram2dcontour"] = val
# histogram2d
# -----------
@property
def histogram2d(self):
"""
The 'histogram2d' property is a tuple of instances of
Histogram2d that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Histogram2d
- A list or tuple of dicts of string/value properties that
will be passed to the Histogram2d constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Histogram2d]
"""
return self["histogram2d"]
@histogram2d.setter
def histogram2d(self, val):
self["histogram2d"] = val
# histogram
# ---------
@property
def histogram(self):
"""
The 'histogram' property is a tuple of instances of
Histogram that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Histogram
- A list or tuple of dicts of string/value properties that
will be passed to the Histogram constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Histogram]
"""
return self["histogram"]
@histogram.setter
def histogram(self, val):
self["histogram"] = val
# icicle
# ------
@property
def icicle(self):
"""
The 'icicle' property is a tuple of instances of
Icicle that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Icicle
- A list or tuple of dicts of string/value properties that
will be passed to the Icicle constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Icicle]
"""
return self["icicle"]
@icicle.setter
def icicle(self, val):
self["icicle"] = val
# image
# -----
@property
def image(self):
"""
The 'image' property is a tuple of instances of
Image that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Image
- A list or tuple of dicts of string/value properties that
will be passed to the Image constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Image]
"""
return self["image"]
@image.setter
def image(self, val):
self["image"] = val
# indicator
# ---------
@property
def indicator(self):
"""
The 'indicator' property is a tuple of instances of
Indicator that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Indicator
- A list or tuple of dicts of string/value properties that
will be passed to the Indicator constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Indicator]
"""
return self["indicator"]
@indicator.setter
def indicator(self, val):
self["indicator"] = val
# isosurface
# ----------
@property
def isosurface(self):
"""
The 'isosurface' property is a tuple of instances of
Isosurface that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Isosurface
- A list or tuple of dicts of string/value properties that
will be passed to the Isosurface constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Isosurface]
"""
return self["isosurface"]
@isosurface.setter
def isosurface(self, val):
self["isosurface"] = val
# mesh3d
# ------
@property
def mesh3d(self):
"""
The 'mesh3d' property is a tuple of instances of
Mesh3d that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Mesh3d
- A list or tuple of dicts of string/value properties that
will be passed to the Mesh3d constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Mesh3d]
"""
return self["mesh3d"]
@mesh3d.setter
def mesh3d(self, val):
self["mesh3d"] = val
# ohlc
# ----
@property
def ohlc(self):
"""
The 'ohlc' property is a tuple of instances of
Ohlc that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Ohlc
- A list or tuple of dicts of string/value properties that
will be passed to the Ohlc constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Ohlc]
"""
return self["ohlc"]
@ohlc.setter
def ohlc(self, val):
self["ohlc"] = val
# parcats
# -------
@property
def parcats(self):
"""
The 'parcats' property is a tuple of instances of
Parcats that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Parcats
- A list or tuple of dicts of string/value properties that
will be passed to the Parcats constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Parcats]
"""
return self["parcats"]
@parcats.setter
def parcats(self, val):
self["parcats"] = val
# parcoords
# ---------
@property
def parcoords(self):
"""
The 'parcoords' property is a tuple of instances of
Parcoords that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Parcoords
- A list or tuple of dicts of string/value properties that
will be passed to the Parcoords constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Parcoords]
"""
return self["parcoords"]
@parcoords.setter
def parcoords(self, val):
self["parcoords"] = val
# pie
# ---
@property
def pie(self):
"""
The 'pie' property is a tuple of instances of
Pie that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Pie
- A list or tuple of dicts of string/value properties that
will be passed to the Pie constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Pie]
"""
return self["pie"]
@pie.setter
def pie(self, val):
self["pie"] = val
# pointcloud
# ----------
@property
def pointcloud(self):
"""
The 'pointcloud' property is a tuple of instances of
Pointcloud that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Pointcloud
- A list or tuple of dicts of string/value properties that
will be passed to the Pointcloud constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Pointcloud]
"""
return self["pointcloud"]
@pointcloud.setter
def pointcloud(self, val):
self["pointcloud"] = val
# sankey
# ------
@property
def sankey(self):
"""
The 'sankey' property is a tuple of instances of
Sankey that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Sankey
- A list or tuple of dicts of string/value properties that
will be passed to the Sankey constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Sankey]
"""
return self["sankey"]
@sankey.setter
def sankey(self, val):
self["sankey"] = val
# scatter3d
# ---------
@property
def scatter3d(self):
"""
The 'scatter3d' property is a tuple of instances of
Scatter3d that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Scatter3d
- A list or tuple of dicts of string/value properties that
will be passed to the Scatter3d constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Scatter3d]
"""
return self["scatter3d"]
@scatter3d.setter
def scatter3d(self, val):
self["scatter3d"] = val
# scattercarpet
# -------------
@property
def scattercarpet(self):
"""
The 'scattercarpet' property is a tuple of instances of
Scattercarpet that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Scattercarpet
- A list or tuple of dicts of string/value properties that
will be passed to the Scattercarpet constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Scattercarpet]
"""
return self["scattercarpet"]
@scattercarpet.setter
def scattercarpet(self, val):
self["scattercarpet"] = val
# scattergeo
# ----------
@property
def scattergeo(self):
"""
The 'scattergeo' property is a tuple of instances of
Scattergeo that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Scattergeo
- A list or tuple of dicts of string/value properties that
will be passed to the Scattergeo constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Scattergeo]
"""
return self["scattergeo"]
@scattergeo.setter
def scattergeo(self, val):
self["scattergeo"] = val
# scattergl
# ---------
@property
def scattergl(self):
"""
The 'scattergl' property is a tuple of instances of
Scattergl that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Scattergl
- A list or tuple of dicts of string/value properties that
will be passed to the Scattergl constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Scattergl]
"""
return self["scattergl"]
@scattergl.setter
def scattergl(self, val):
self["scattergl"] = val
# scattermapbox
# -------------
@property
def scattermapbox(self):
"""
The 'scattermapbox' property is a tuple of instances of
Scattermapbox that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Scattermapbox
- A list or tuple of dicts of string/value properties that
will be passed to the Scattermapbox constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Scattermapbox]
"""
return self["scattermapbox"]
@scattermapbox.setter
def scattermapbox(self, val):
self["scattermapbox"] = val
# scatterpolargl
# --------------
@property
def scatterpolargl(self):
"""
The 'scatterpolargl' property is a tuple of instances of
Scatterpolargl that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Scatterpolargl
- A list or tuple of dicts of string/value properties that
will be passed to the Scatterpolargl constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Scatterpolargl]
"""
return self["scatterpolargl"]
@scatterpolargl.setter
def scatterpolargl(self, val):
self["scatterpolargl"] = val
# scatterpolar
# ------------
@property
def scatterpolar(self):
"""
The 'scatterpolar' property is a tuple of instances of
Scatterpolar that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Scatterpolar
- A list or tuple of dicts of string/value properties that
will be passed to the Scatterpolar constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Scatterpolar]
"""
return self["scatterpolar"]
@scatterpolar.setter
def scatterpolar(self, val):
self["scatterpolar"] = val
# scatter
# -------
@property
def scatter(self):
"""
The 'scatter' property is a tuple of instances of
Scatter that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Scatter
- A list or tuple of dicts of string/value properties that
will be passed to the Scatter constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Scatter]
"""
return self["scatter"]
@scatter.setter
def scatter(self, val):
self["scatter"] = val
# scattersmith
# ------------
@property
def scattersmith(self):
"""
The 'scattersmith' property is a tuple of instances of
Scattersmith that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Scattersmith
- A list or tuple of dicts of string/value properties that
will be passed to the Scattersmith constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Scattersmith]
"""
return self["scattersmith"]
@scattersmith.setter
def scattersmith(self, val):
self["scattersmith"] = val
# scatterternary
# --------------
@property
def scatterternary(self):
"""
The 'scatterternary' property is a tuple of instances of
Scatterternary that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Scatterternary
- A list or tuple of dicts of string/value properties that
will be passed to the Scatterternary constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Scatterternary]
"""
return self["scatterternary"]
@scatterternary.setter
def scatterternary(self, val):
self["scatterternary"] = val
# splom
# -----
@property
def splom(self):
"""
The 'splom' property is a tuple of instances of
Splom that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Splom
- A list or tuple of dicts of string/value properties that
will be passed to the Splom constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Splom]
"""
return self["splom"]
@splom.setter
def splom(self, val):
self["splom"] = val
# streamtube
# ----------
@property
def streamtube(self):
"""
The 'streamtube' property is a tuple of instances of
Streamtube that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Streamtube
- A list or tuple of dicts of string/value properties that
will be passed to the Streamtube constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Streamtube]
"""
return self["streamtube"]
@streamtube.setter
def streamtube(self, val):
self["streamtube"] = val
# sunburst
# --------
@property
def sunburst(self):
"""
The 'sunburst' property is a tuple of instances of
Sunburst that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Sunburst
- A list or tuple of dicts of string/value properties that
will be passed to the Sunburst constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Sunburst]
"""
return self["sunburst"]
@sunburst.setter
def sunburst(self, val):
self["sunburst"] = val
# surface
# -------
@property
def surface(self):
"""
The 'surface' property is a tuple of instances of
Surface that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Surface
- A list or tuple of dicts of string/value properties that
will be passed to the Surface constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Surface]
"""
return self["surface"]
@surface.setter
def surface(self, val):
self["surface"] = val
# table
# -----
@property
def table(self):
"""
The 'table' property is a tuple of instances of
Table that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Table
- A list or tuple of dicts of string/value properties that
will be passed to the Table constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Table]
"""
return self["table"]
@table.setter
def table(self, val):
self["table"] = val
# treemap
# -------
@property
def treemap(self):
"""
The 'treemap' property is a tuple of instances of
Treemap that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Treemap
- A list or tuple of dicts of string/value properties that
will be passed to the Treemap constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Treemap]
"""
return self["treemap"]
@treemap.setter
def treemap(self, val):
self["treemap"] = val
# violin
# ------
@property
def violin(self):
"""
The 'violin' property is a tuple of instances of
Violin that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Violin
- A list or tuple of dicts of string/value properties that
will be passed to the Violin constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Violin]
"""
return self["violin"]
@violin.setter
def violin(self, val):
self["violin"] = val
# volume
# ------
@property
def volume(self):
"""
The 'volume' property is a tuple of instances of
Volume that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Volume
- A list or tuple of dicts of string/value properties that
will be passed to the Volume constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Volume]
"""
return self["volume"]
@volume.setter
def volume(self, val):
self["volume"] = val
# waterfall
# ---------
@property
def waterfall(self):
"""
The 'waterfall' property is a tuple of instances of
Waterfall that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Waterfall
- A list or tuple of dicts of string/value properties that
will be passed to the Waterfall constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Waterfall]
"""
return self["waterfall"]
@waterfall.setter
def waterfall(self, val):
self["waterfall"] = val
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        """Plain-text description of every supported property, one entry
        per trace type, returned as a single indented string."""
        return """\
        barpolar
            A tuple of :class:`plotly.graph_objects.Barpolar`
            instances or dicts with compatible properties
        bar
            A tuple of :class:`plotly.graph_objects.Bar` instances
            or dicts with compatible properties
        box
            A tuple of :class:`plotly.graph_objects.Box` instances
            or dicts with compatible properties
        candlestick
            A tuple of :class:`plotly.graph_objects.Candlestick`
            instances or dicts with compatible properties
        carpet
            A tuple of :class:`plotly.graph_objects.Carpet`
            instances or dicts with compatible properties
        choroplethmapbox
            A tuple of
            :class:`plotly.graph_objects.Choroplethmapbox`
            instances or dicts with compatible properties
        choropleth
            A tuple of :class:`plotly.graph_objects.Choropleth`
            instances or dicts with compatible properties
        cone
            A tuple of :class:`plotly.graph_objects.Cone` instances
            or dicts with compatible properties
        contourcarpet
            A tuple of :class:`plotly.graph_objects.Contourcarpet`
            instances or dicts with compatible properties
        contour
            A tuple of :class:`plotly.graph_objects.Contour`
            instances or dicts with compatible properties
        densitymapbox
            A tuple of :class:`plotly.graph_objects.Densitymapbox`
            instances or dicts with compatible properties
        funnelarea
            A tuple of :class:`plotly.graph_objects.Funnelarea`
            instances or dicts with compatible properties
        funnel
            A tuple of :class:`plotly.graph_objects.Funnel`
            instances or dicts with compatible properties
        heatmapgl
            A tuple of :class:`plotly.graph_objects.Heatmapgl`
            instances or dicts with compatible properties
        heatmap
            A tuple of :class:`plotly.graph_objects.Heatmap`
            instances or dicts with compatible properties
        histogram2dcontour
            A tuple of
            :class:`plotly.graph_objects.Histogram2dContour`
            instances or dicts with compatible properties
        histogram2d
            A tuple of :class:`plotly.graph_objects.Histogram2d`
            instances or dicts with compatible properties
        histogram
            A tuple of :class:`plotly.graph_objects.Histogram`
            instances or dicts with compatible properties
        icicle
            A tuple of :class:`plotly.graph_objects.Icicle`
            instances or dicts with compatible properties
        image
            A tuple of :class:`plotly.graph_objects.Image`
            instances or dicts with compatible properties
        indicator
            A tuple of :class:`plotly.graph_objects.Indicator`
            instances or dicts with compatible properties
        isosurface
            A tuple of :class:`plotly.graph_objects.Isosurface`
            instances or dicts with compatible properties
        mesh3d
            A tuple of :class:`plotly.graph_objects.Mesh3d`
            instances or dicts with compatible properties
        ohlc
            A tuple of :class:`plotly.graph_objects.Ohlc` instances
            or dicts with compatible properties
        parcats
            A tuple of :class:`plotly.graph_objects.Parcats`
            instances or dicts with compatible properties
        parcoords
            A tuple of :class:`plotly.graph_objects.Parcoords`
            instances or dicts with compatible properties
        pie
            A tuple of :class:`plotly.graph_objects.Pie` instances
            or dicts with compatible properties
        pointcloud
            A tuple of :class:`plotly.graph_objects.Pointcloud`
            instances or dicts with compatible properties
        sankey
            A tuple of :class:`plotly.graph_objects.Sankey`
            instances or dicts with compatible properties
        scatter3d
            A tuple of :class:`plotly.graph_objects.Scatter3d`
            instances or dicts with compatible properties
        scattercarpet
            A tuple of :class:`plotly.graph_objects.Scattercarpet`
            instances or dicts with compatible properties
        scattergeo
            A tuple of :class:`plotly.graph_objects.Scattergeo`
            instances or dicts with compatible properties
        scattergl
            A tuple of :class:`plotly.graph_objects.Scattergl`
            instances or dicts with compatible properties
        scattermapbox
            A tuple of :class:`plotly.graph_objects.Scattermapbox`
            instances or dicts with compatible properties
        scatterpolargl
            A tuple of :class:`plotly.graph_objects.Scatterpolargl`
            instances or dicts with compatible properties
        scatterpolar
            A tuple of :class:`plotly.graph_objects.Scatterpolar`
            instances or dicts with compatible properties
        scatter
            A tuple of :class:`plotly.graph_objects.Scatter`
            instances or dicts with compatible properties
        scattersmith
            A tuple of :class:`plotly.graph_objects.Scattersmith`
            instances or dicts with compatible properties
        scatterternary
            A tuple of :class:`plotly.graph_objects.Scatterternary`
            instances or dicts with compatible properties
        splom
            A tuple of :class:`plotly.graph_objects.Splom`
            instances or dicts with compatible properties
        streamtube
            A tuple of :class:`plotly.graph_objects.Streamtube`
            instances or dicts with compatible properties
        sunburst
            A tuple of :class:`plotly.graph_objects.Sunburst`
            instances or dicts with compatible properties
        surface
            A tuple of :class:`plotly.graph_objects.Surface`
            instances or dicts with compatible properties
        table
            A tuple of :class:`plotly.graph_objects.Table`
            instances or dicts with compatible properties
        treemap
            A tuple of :class:`plotly.graph_objects.Treemap`
            instances or dicts with compatible properties
        violin
            A tuple of :class:`plotly.graph_objects.Violin`
            instances or dicts with compatible properties
        volume
            A tuple of :class:`plotly.graph_objects.Volume`
            instances or dicts with compatible properties
        waterfall
            A tuple of :class:`plotly.graph_objects.Waterfall`
            instances or dicts with compatible properties
        """
def __init__(
self,
arg=None,
barpolar=None,
bar=None,
box=None,
candlestick=None,
carpet=None,
choroplethmapbox=None,
choropleth=None,
cone=None,
contourcarpet=None,
contour=None,
densitymapbox=None,
funnelarea=None,
funnel=None,
heatmapgl=None,
heatmap=None,
histogram2dcontour=None,
histogram2d=None,
histogram=None,
icicle=None,
image=None,
indicator=None,
isosurface=None,
mesh3d=None,
ohlc=None,
parcats=None,
parcoords=None,
pie=None,
pointcloud=None,
sankey=None,
scatter3d=None,
scattercarpet=None,
scattergeo=None,
scattergl=None,
scattermapbox=None,
scatterpolargl=None,
scatterpolar=None,
scatter=None,
scattersmith=None,
scatterternary=None,
splom=None,
streamtube=None,
sunburst=None,
surface=None,
table=None,
treemap=None,
violin=None,
volume=None,
waterfall=None,
**kwargs,
):
"""
Construct a new Data object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.template.Data`
barpolar
A tuple of :class:`plotly.graph_objects.Barpolar`
instances or dicts with compatible properties
bar
A tuple of :class:`plotly.graph_objects.Bar` instances
or dicts with compatible properties
box
A tuple of :class:`plotly.graph_objects.Box` instances
or dicts with compatible properties
candlestick
A tuple of :class:`plotly.graph_objects.Candlestick`
instances or dicts with compatible properties
carpet
A tuple of :class:`plotly.graph_objects.Carpet`
instances or dicts with compatible properties
choroplethmapbox
A tuple of
:class:`plotly.graph_objects.Choroplethmapbox`
instances or dicts with compatible properties
choropleth
A tuple of :class:`plotly.graph_objects.Choropleth`
instances or dicts with compatible properties
cone
A tuple of :class:`plotly.graph_objects.Cone` instances
or dicts with compatible properties
contourcarpet
A tuple of :class:`plotly.graph_objects.Contourcarpet`
instances or dicts with compatible properties
contour
A tuple of :class:`plotly.graph_objects.Contour`
instances or dicts with compatible properties
densitymapbox
A tuple of :class:`plotly.graph_objects.Densitymapbox`
instances or dicts with compatible properties
funnelarea
A tuple of :class:`plotly.graph_objects.Funnelarea`
instances or dicts with compatible properties
funnel
A tuple of :class:`plotly.graph_objects.Funnel`
instances or dicts with compatible properties
heatmapgl
A tuple of :class:`plotly.graph_objects.Heatmapgl`
instances or dicts with compatible properties
heatmap
A tuple of :class:`plotly.graph_objects.Heatmap`
instances or dicts with compatible properties
histogram2dcontour
A tuple of
:class:`plotly.graph_objects.Histogram2dContour`
instances or dicts with compatible properties
histogram2d
A tuple of :class:`plotly.graph_objects.Histogram2d`
instances or dicts with compatible properties
histogram
A tuple of :class:`plotly.graph_objects.Histogram`
instances or dicts with compatible properties
icicle
A tuple of :class:`plotly.graph_objects.Icicle`
instances or dicts with compatible properties
image
A tuple of :class:`plotly.graph_objects.Image`
instances or dicts with compatible properties
indicator
A tuple of :class:`plotly.graph_objects.Indicator`
instances or dicts with compatible properties
isosurface
A tuple of :class:`plotly.graph_objects.Isosurface`
instances or dicts with compatible properties
mesh3d
A tuple of :class:`plotly.graph_objects.Mesh3d`
instances or dicts with compatible properties
ohlc
A tuple of :class:`plotly.graph_objects.Ohlc` instances
or dicts with compatible properties
parcats
A tuple of :class:`plotly.graph_objects.Parcats`
instances or dicts with compatible properties
parcoords
A tuple of :class:`plotly.graph_objects.Parcoords`
instances or dicts with compatible properties
pie
A tuple of :class:`plotly.graph_objects.Pie` instances
or dicts with compatible properties
pointcloud
A tuple of :class:`plotly.graph_objects.Pointcloud`
instances or dicts with compatible properties
sankey
A tuple of :class:`plotly.graph_objects.Sankey`
instances or dicts with compatible properties
scatter3d
A tuple of :class:`plotly.graph_objects.Scatter3d`
instances or dicts with compatible properties
scattercarpet
A tuple of :class:`plotly.graph_objects.Scattercarpet`
instances or dicts with compatible properties
scattergeo
A tuple of :class:`plotly.graph_objects.Scattergeo`
instances or dicts with compatible properties
scattergl
A tuple of :class:`plotly.graph_objects.Scattergl`
instances or dicts with compatible properties
scattermapbox
A tuple of :class:`plotly.graph_objects.Scattermapbox`
instances or dicts with compatible properties
scatterpolargl
A tuple of :class:`plotly.graph_objects.Scatterpolargl`
instances or dicts with compatible properties
scatterpolar
A tuple of :class:`plotly.graph_objects.Scatterpolar`
instances or dicts with compatible properties
scatter
A tuple of :class:`plotly.graph_objects.Scatter`
instances or dicts with compatible properties
scattersmith
A tuple of :class:`plotly.graph_objects.Scattersmith`
instances or dicts with compatible properties
scatterternary
A tuple of :class:`plotly.graph_objects.Scatterternary`
instances or dicts with compatible properties
splom
A tuple of :class:`plotly.graph_objects.Splom`
instances or dicts with compatible properties
streamtube
A tuple of :class:`plotly.graph_objects.Streamtube`
instances or dicts with compatible properties
sunburst
A tuple of :class:`plotly.graph_objects.Sunburst`
instances or dicts with compatible properties
surface
A tuple of :class:`plotly.graph_objects.Surface`
instances or dicts with compatible properties
table
A tuple of :class:`plotly.graph_objects.Table`
instances or dicts with compatible properties
treemap
A tuple of :class:`plotly.graph_objects.Treemap`
instances or dicts with compatible properties
violin
A tuple of :class:`plotly.graph_objects.Violin`
instances or dicts with compatible properties
volume
A tuple of :class:`plotly.graph_objects.Volume`
instances or dicts with compatible properties
waterfall
A tuple of :class:`plotly.graph_objects.Waterfall`
instances or dicts with compatible properties
Returns
-------
Data
"""
super(Data, self).__init__("data")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.layout.template.Data
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.template.Data`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("barpolar", None)
_v = barpolar if barpolar is not None else _v
if _v is not None:
self["barpolar"] = _v
_v = arg.pop("bar", None)
_v = bar if bar is not None else _v
if _v is not None:
self["bar"] = _v
_v = arg.pop("box", None)
_v = box if box is not None else _v
if _v is not None:
self["box"] = _v
_v = arg.pop("candlestick", None)
_v = candlestick if candlestick is not None else _v
if _v is not None:
self["candlestick"] = _v
_v = arg.pop("carpet", None)
_v = carpet if carpet is not None else _v
if _v is not None:
self["carpet"] = _v
_v = arg.pop("choroplethmapbox", None)
_v = choroplethmapbox if choroplethmapbox is not None else _v
if _v is not None:
self["choroplethmapbox"] = _v
_v = arg.pop("choropleth", None)
_v = choropleth if choropleth is not None else _v
if _v is not None:
self["choropleth"] = _v
_v = arg.pop("cone", None)
_v = cone if cone is not None else _v
if _v is not None:
self["cone"] = _v
_v = arg.pop("contourcarpet", None)
_v = contourcarpet if contourcarpet is not None else _v
if _v is not None:
self["contourcarpet"] = _v
_v = arg.pop("contour", None)
_v = contour if contour is not None else _v
if _v is not None:
self["contour"] = _v
_v = arg.pop("densitymapbox", None)
_v = densitymapbox if densitymapbox is not None else _v
if _v is not None:
self["densitymapbox"] = _v
_v = arg.pop("funnelarea", None)
_v = funnelarea if funnelarea is not None else _v
if _v is not None:
self["funnelarea"] = _v
_v = arg.pop("funnel", None)
_v = funnel if funnel is not None else _v
if _v is not None:
self["funnel"] = _v
_v = arg.pop("heatmapgl", None)
_v = heatmapgl if heatmapgl is not None else _v
if _v is not None:
self["heatmapgl"] = _v
_v = arg.pop("heatmap", None)
_v = heatmap if heatmap is not None else _v
if _v is not None:
self["heatmap"] = _v
_v = arg.pop("histogram2dcontour", None)
_v = histogram2dcontour if histogram2dcontour is not None else _v
if _v is not None:
self["histogram2dcontour"] = _v
_v = arg.pop("histogram2d", None)
_v = histogram2d if histogram2d is not None else _v
if _v is not None:
self["histogram2d"] = _v
_v = arg.pop("histogram", None)
_v = histogram if histogram is not None else _v
if _v is not None:
self["histogram"] = _v
_v = arg.pop("icicle", None)
_v = icicle if icicle is not None else _v
if _v is not None:
self["icicle"] = _v
_v = arg.pop("image", None)
_v = image if image is not None else _v
if _v is not None:
self["image"] = _v
_v = arg.pop("indicator", None)
_v = indicator if indicator is not None else _v
if _v is not None:
self["indicator"] = _v
_v = arg.pop("isosurface", None)
_v = isosurface if isosurface is not None else _v
if _v is not None:
self["isosurface"] = _v
_v = arg.pop("mesh3d", None)
_v = mesh3d if mesh3d is not None else _v
if _v is not None:
self["mesh3d"] = _v
_v = arg.pop("ohlc", None)
_v = ohlc if ohlc is not None else _v
if _v is not None:
self["ohlc"] = _v
_v = arg.pop("parcats", None)
_v = parcats if parcats is not None else _v
if _v is not None:
self["parcats"] = _v
_v = arg.pop("parcoords", None)
_v = parcoords if parcoords is not None else _v
if _v is not None:
self["parcoords"] = _v
_v = arg.pop("pie", None)
_v = pie if pie is not None else _v
if _v is not None:
self["pie"] = _v
_v = arg.pop("pointcloud", None)
_v = pointcloud if pointcloud is not None else _v
if _v is not None:
self["pointcloud"] = _v
_v = arg.pop("sankey", None)
_v = sankey if sankey is not None else _v
if _v is not None:
self["sankey"] = _v
_v = arg.pop("scatter3d", None)
_v = scatter3d if scatter3d is not None else _v
if _v is not None:
self["scatter3d"] = _v
_v = arg.pop("scattercarpet", None)
_v = scattercarpet if scattercarpet is not None else _v
if _v is not None:
self["scattercarpet"] = _v
_v = arg.pop("scattergeo", None)
_v = scattergeo if scattergeo is not None else _v
if _v is not None:
self["scattergeo"] = _v
_v = arg.pop("scattergl", None)
_v = scattergl if scattergl is not None else _v
if _v is not None:
self["scattergl"] = _v
_v = arg.pop("scattermapbox", None)
_v = scattermapbox if scattermapbox is not None else _v
if _v is not None:
self["scattermapbox"] = _v
_v = arg.pop("scatterpolargl", None)
_v = scatterpolargl if scatterpolargl is not None else _v
if _v is not None:
self["scatterpolargl"] = _v
_v = arg.pop("scatterpolar", None)
_v = scatterpolar if scatterpolar is not None else _v
if _v is not None:
self["scatterpolar"] = _v
_v = arg.pop("scatter", None)
_v = scatter if scatter is not None else _v
if _v is not None:
self["scatter"] = _v
_v = arg.pop("scattersmith", None)
_v = scattersmith if scattersmith is not None else _v
if _v is not None:
self["scattersmith"] = _v
_v = arg.pop("scatterternary", None)
_v = scatterternary if scatterternary is not None else _v
if _v is not None:
self["scatterternary"] = _v
_v = arg.pop("splom", None)
_v = splom if splom is not None else _v
if _v is not None:
self["splom"] = _v
_v = arg.pop("streamtube", None)
_v = streamtube if streamtube is not None else _v
if _v is not None:
self["streamtube"] = _v
_v = arg.pop("sunburst", None)
_v = sunburst if sunburst is not None else _v
if _v is not None:
self["sunburst"] = _v
_v = arg.pop("surface", None)
_v = surface if surface is not None else _v
if _v is not None:
self["surface"] = _v
_v = arg.pop("table", None)
_v = table if table is not None else _v
if _v is not None:
self["table"] = _v
_v = arg.pop("treemap", None)
_v = treemap if treemap is not None else _v
if _v is not None:
self["treemap"] = _v
_v = arg.pop("violin", None)
_v = violin if violin is not None else _v
if _v is not None:
self["violin"] = _v
_v = arg.pop("volume", None)
_v = volume if volume is not None else _v
if _v is not None:
self["volume"] = _v
_v = arg.pop("waterfall", None)
_v = waterfall if waterfall is not None else _v
if _v is not None:
self["waterfall"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
| {
"content_hash": "112c18a60aaaaf92e54cd96fb91bdf6f",
"timestamp": "",
"source": "github",
"line_count": 1760,
"max_line_length": 101,
"avg_line_length": 32.39545454545455,
"alnum_prop": 0.5829416304195314,
"repo_name": "plotly/plotly.py",
"id": "42a910e0c02f83f53cb668f433dc428205ee7554",
"size": "57016",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/graph_objs/layout/template/_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
import argparse
import os
import sys # exit
import shutil # copyfile
import math # pi
import subprocess # Popen
# Hard-coded location of the RAPter checkout; edit to match the local setup.
rapterRoot = "/home/bontius/workspace/RAPter/";
# Full path to the compiled `rapter` binary inside the Release build tree.
rapterExec = os.path.join( rapterRoot, "RAPter", "build", "Release", "bin", "rapter" );
def show( primitivesPath, associationsPath, title, args ):
    """Launch the rapterVis viewer asynchronously for the given primitives.

    Parameters
    ----------
    primitivesPath : str
        Primitives CSV file to display.
    associationsPath : str
        Point-to-primitive association CSV file.
    title : str
        Window title handed to the viewer.
    args : argparse.Namespace
        Parsed CLI options; ``flag3D``, ``scale``, ``popLimit``, ``cloud``
        and ``angleGensStr`` are read.
    """
    # NOTE(review): os.path.join splices ".." onto the *whole* command
    # string, yielding "../rapterVis ..."; the unquoted %s substitutions
    # assume paths and titles contain no shell-special characters.
    cmd = os.path.join("..","rapterVis --show%s --scale %f --pop-limit %d -p %s -a %s --cloud %s --title %s --angle-gens %s --use-tags --no-clusters --statuses -1,1 --no-pop --dir-colours --no-rel --no-scale --bg-colour 1.,1.,1. --no-rel" \
            % ( args.flag3D, args.scale, args.popLimit, primitivesPath, associationsPath, args.cloud, title, args.angleGensStr ) );
    print(cmd)  # was a Python-2-only `print cmd` statement
    subprocess.Popen( cmd, shell=True );  # fire-and-forget: the caller does not wait
def call( cmd, dry = True, noExit = False ):
    """Echo *cmd* and, unless *dry*, execute it through ``os.system``.

    Parameters
    ----------
    cmd : str
        Shell command line to run.
    dry : bool
        When True only print the command (tagged "DRY") without running it.
    noExit : bool
        When True a non-zero exit status is returned to the caller instead
        of terminating the whole process via ``sys.exit``.

    Returns
    -------
    int or None
        The command's exit status when executed; None on a dry run.
    """
    print("%s" % (cmd))
    if dry:
        print("DRY")   # was a Python-2-only `print "DRY"` statement
    else:
        print("RUN")
    if not dry:
        # os.system returns the raw wait status; shift to get the exit code.
        ret = os.system(cmd) >> 8
        if ret != 0:
            if not noExit:
                # Single formatted string: the old multi-argument form
                # printed a tuple repr under Python 2.
                print("call returned error %s, aborting" % ret)
                sys.exit()
        return ret
def runRepr( rprPrims, rprAssoc, rprIter, args, angleGens, keepSingles ):
print( "Running with prims: %s, assoc: %s, iteration: %d" % (rprPrims,rprAssoc,rprIter) );
rprRepr = "representatives_it%d.csv" % rprIter # representatives output, contains one patch for each dId
rprReprAssoc = "points_representatives_it%d.csv" % rprIter # representatives output, contains associations for representative primitives only
rprCands = "candidates_representatives_it%d.csv" % rprIter # candidates generated from representatives
rprReprOpt = "representatives_it%d.bonmin.csv" % rprIter # new representatives chosen from candidates
rprPrimBak = "%s.lvl1.csv" % os.path.splitext(rprPrims)[0] # "`cutExt $rprPrims`".lvl1.csv
rprNextId = rprIter + 1; #`expr $c + 1`; #candidates will output here automatically...so we need to know
rprPw = args.pw
rprAngLimit = args.angleLimit
# representatives
cmd = "%s --represent%s -p %s -a %s -sc %f --cloud %s --angle-gens %s" \
% (args.rapterExec, args.flag3D, rprPrims, rprAssoc, args.scale, args.cloud, angleGens );
#my_exec "$executable --represent$flag3D -p $rprPrims -a $rprAssoc -sc $scale --cloud cloud.ply --angle-gens $anglegens"
call( cmd, args.dry );
if not args.dry:
#echo "mv representatives.csv $rprRepr"
#mv representatives.csv $rprRepr
shutil.move("representatives.csv",rprRepr);
#echo "mv points_representatives.csv $rprReprAssoc"
#mv points_representatives.csv $rprReprAssoc
shutil.move("points_representatives.csv",rprReprAssoc);
# ShowRepr
#cmd="../globOptVis --show$flag3D--scale $scale --pop-limit $poplimit --title \"Representatives\" --angle-gens $angleGens --use-tags --no-clusters --statuses -1,1 --no-pop --dir-colours --no-scale --bg-colour .9,.9,.9 --ids --no-rel -p $repr -a $assoc $"
#my_exec "../globOptVis --show$flag3D --scale $scale --pop-limit $poplimit -p $rprRepr -a $rprReprAssoc --title \"Representatives\" $visdefparam &"
# Generate from Repr
if not args.dry:
#echo "mv candidates_it${rprNextId}.csv candidates_it${rprNextId}_tmp.csv" # move tmp out of the way
#mv candidates_it${rprNextId}.csv candidates_it${rprNextId}_tmp.csv # move tmp out of the way
if os.path.isfile( "candidates_it%d.csv" % rprNextId ):
shutil.move( "candidates_it%d.csv" % rprNextId, "candidates_it%d_tmp.csv" % rprNextId );
#cmd = "$executable --generate$flag3D $tripletSafe -sc $scale -al $rprAngLimit -ald ${cand_anglediv} --small-mode 0 --patch-pop-limit $poplimit --angle-gens $candAngleGens --small-thresh-mult $smallThresh -p $rprRepr --assoc $rprReprAssoc --keep-singles"
#my_exec "%s --generate%s -sc $scale -al $rprAngLimit -ald 1.0 --patch-pop-limit $poplimit -p $rprRepr --assoc $rprReprAssoc --angle-gens $candAngleGens --small-thresh-mult %f --small-mode 0 %s %s"
cmd = "%s --generate%s -sc %f --cloud %s -al %f -ald 1.0 --patch-pop-limit %d -p %s --assoc %s --angle-gens %s --small-thresh-mult %f --small-mode 0 %s %s" \
% ( args.rapterExec, args.flag3D, args.scale, args.cloud, rprAngLimit, args.popLimit, rprRepr, rprReprAssoc, candAngleGens, \
args.smallThreshMult, args.tripletSafe, keepSingles );
call( cmd, args.dry );
if not args.dry:
# echo "mv candidates_it${rprNextId}.csv $rprCands"
# mv candidates_it${rprNextId}.csv $rprCands
shutil.move( "candidates_it%d.csv" % rprNextId, rprCands );
# echo "mv candidates_it${rprNextId}_tmp.csv candidates_it${rprNextId}.csv"
# mv candidates_it${rprNextId}_tmp.csv candidates_it${rprNextId}.csv
if os.path.isfile( "candidates_it%d_tmp.csv" % rprNextId ):
shutil.move( "candidates_it%d_tmp.csv" % rprNextId, "candidates_it%d.csv" % rprNextId ); # move back tmp
# Show candidates
#my_exec "../globOptVis --show$flag3D --scale $scale --pop-limit $poplimit -p $rprCands -a $rprReprAssoc --title \"GlobOpt-repr_candidates\" $visdefparam &"
# Formulate
# my_exec "$executable --formulate$flag3D $formParams --scale $scale --cloud cloud.ply --unary $unary --pw $rprPw --cmp $cmp --constr-mode patch --dir-bias $dirbias --patch-pop-limit $poplimit --angle-gens $anglegens --candidates $rprCands -a $rprReprAssoc --freq-weight $freqweight --cost-fn $pwCostFunc"
cmd = "%s --formulate%s --scale %f --unary %f --pw %f --spat-weight %f --spat-dist-mult 2. --patch-pop-limit %d --angle-gens %s --cloud %s --candidates %s -a %s --collapse-angle-deg %f --trunc-angle %f --constr-mode patch --dir-bias 0 --no-clusters --cmp 0 --freq-weight 0 --cost-fn spatsqrt" \
% ( args.rapterExec, args.flag3D, args.scale, args.data, rprPw, args.spatial, args.popLimit, angleGens, args.cloud, rprCands, rprReprAssoc, collapseThreshDeg, args.angleLimit)
call( cmd, args.dry );
rprDiagF = "diag_it%d.gv" % rprIter;
rprDiagFTmp = "%s%s" % (rprDiagF,"RprTmp");
if not args.dry:
# echo "cp primitives_it${rprIter}.bonmin.csv primitives_it${rprIter}_rprtmp.csv"
# cp primitives_it${rprIter}.bonmin.csv primitives_it${rprIter}_rprtmp.csv
shutil.copyfile( "primitives_it%d.bonmin.csv" % rprIter, "primitives_it%d_rprtmp.csv" % rprIter );
if os.path.isfile( rprDiagF ): # backup diag_itx.gv
#echo "mv $rprDiagF $rprDiagFTmp";
# mv $rprDiagF "$rprDiagFTmp"
shutil.move( rprDiagF, rprDiagFTmp );
# my_exec "$executable --solver$flag3D bonmin --problem problem -v --time -1 --angle-gens $anglegens --bmode $algCode --candidates $rprCands"
cmd = "%s --solver%s bonmin --problem problem -v --time -1 --angle-gens %s --bmode %d --candidates %s" \
% (args.rapterExec, args.flag3D, angleGens, args.algCode, rprCands )
call (cmd, args.dry );
if not args.dry:
# echo "cp primitives_it${rprIter}.bonmin.csv $rprReprOpt"
# cp primitives_it${rprIter}.bonmin.csv $rprReprOpt
shutil.copyfile( "primitives_it%d.bonmin.csv" % rprIter, rprReprOpt );
# echo "cp primitives_it${rprIter}_rprtmp.csv primitives_it${rprIter}.bonmin.csv"
# cp primitives_it${rprIter}_rprtmp.csv primitives_it${rprIter}.bonmin.csv
shutil.copyfile( "primitives_it%d_rprtmp.csv" % rprIter, "primitives_it%d.bonmin.csv" % rprIter );
# echo "mv $rprDiagF diag_it${rprIter}.lvl2.gv"
# mv $rprDiagF diag_it${rprIter}.lvl2.gv
shutil.move( rprDiagF, "diag_it%d.lvl2.gv" % rprIter );
# restore diag_itx.gv
if os.path.isfile( rprDiagFTmp ):
# echo "mv $rprDiagFTmp $rprDiagF"
# mv "$rprDiagFTmp" $rprDiagF
shutil.move( rprDiagFTmp, rprDiagF );
# rm "$rprDiagFTmp";
#os.remove( rprDiagFTmp );
#my_exec "../globOptVis --show$flag3D -p $rprReprOpt -a $rprReprAssoc --title \"GlobOpt-RepresentativesOptimized\" --scale $scale --pop-limit $poplimit $visdefparam &"
# apply representatives - outputs subs.csv
#my_exec "$executable --representBack$flag3D --repr $rprReprOpt -p $rprPrims -a $rprAssoc -sc $scale --cloud cloud.ply --angle-gens $anglegens"
cmd = "%s --representBack%s --repr %s -p %s -a %s -sc %f --cloud %s --angle-gens %s" \
% (args.rapterExec, args.flag3D, rprReprOpt, rprPrims, rprAssoc, args.scale, args.cloud, angleGens );
call( cmd, args.dry );
if not args.dry:
# echo "mv $rprPrims $rprPrimBak"
# mv $rprPrims $rprPrimBak
shutil.move( rprPrims, rprPrimBak );
# echo "mv subs.csv $rprPrims" #substitue for input
# mv subs.csv $rprPrims
shutil.move( "subs.csv", rprPrims );
# ---------------------------------------------------------------------------
# Command-line interface.
# Fixes in this revision:
#   * "--no-vis" previously used action="store_false" together with
#     default=False, so passing the flag could never change args.noVis and
#     visualization could not actually be disabled.  It is now store_true:
#     default False == visualization on, which matches the
#     "if not args.noVis: show(...)" checks used later in this script.
#   * the help text of "--angle-gens" was a copy/paste of the pairwise help.
parser = argparse.ArgumentParser()

suggestedGroup = parser.add_argument_group('suggested')
suggestedGroup.add_argument("-s", "--scale", dest="scale", type=float, default=0.05, help="Scale (rho) parameter, the smallest feature size to preserve [0.001..0.05]", required=True)
suggestedGroup.add_argument("--al", "--angle-limit", dest="angleLimit", type=float, default=15, help="Angle threshlod (tau) parameter in degrees [5..45]")
suggestedGroup.add_argument("--pw", "--pairwise", dest="pw", type=float, default=1.0, help="Weight of pairwise term [0.1..10^6]")
suggestedGroup.add_argument("-t", "--area-thresh-start", dest="smallThreshMult", type=float, default=4., help="Start with planes, that are scale * smallThreshMult large. Increase this, if optimisation too slow. [powers of 2].")

optionalGroup = parser.add_argument_group('optional')
# NOTE(review): action="append" on top of a non-empty default means user
# supplied values are appended to [0, 90] rather than replacing it --
# presumably intended (kept as-is), but verify against callers.
optionalGroup.add_argument("--ag", "--angle-gens", dest="angleGens", type=float, default=[0, 90], help="Desired angle generators in degrees [0,90]", action="append")
optionalGroup.add_argument("--it", "--iterations", dest="nbExtraIterations", type=int, default=15, help="How many iterations to run [5..20]")
optionalGroup.add_argument("--cl", "--cloud", dest="cloud", type=str, default="cloud.ply", help="Pointcloud in ply format [cloud.ply]")
optionalGroup.add_argument("-l", "--lines", dest="lines", action="store_true", help="Work in 2D with lines instead of planes.")

runOptGroup = parser.add_argument_group('run options')
runOptGroup.add_argument("--dry", action="store_true", help="Show the calls, but don't run.")
# FIX: was action="store_false" with default=False, i.e. a no-op flag.
runOptGroup.add_argument("--no-vis", dest="noVis", action="store_true", default=False, help="Disable visualization (enabled by default)")

optionalGroup.add_argument("--pl", "--popLimit", dest="popLimit", type=int, default=5, help="Filters primitives having less than this many points assigned [3..100]")
optionalGroup.add_argument("--sp", "--spatial", dest="spatial", type=float, help="Weight of spatial term [0.1, pw/10., pw/5., pw/2.]")
optionalGroup.add_argument("--vl", "--var-limit", dest="variableLimit", type=int, default=1000, help="Maximum number of variables (primitives) for the optimisation. [500..3000]")
optionalGroup.add_argument("-d", "--data", dest="data", type=float, default=1e5, help="Weight of data term [10^5, 10^6]")
optionalGroup.add_argument("-p", "--primitives", dest="primitives", type=str, help="Input primitives, e.g. existing segmentation segments.csv")
optionalGroup.add_argument("-a", "--assoc", dest="associations", type=str, help="Input point-primitive associations, e.g. existing segmentation's points_segments.csv")
optionalGroup.add_argument("--segment-scale-mult", dest="segmentScaleMultiplier", type=float, default=1.0, help="Multiply scale by this value for the segmentation step. [0.5, 1.0, 2.0]")
optionalGroup.add_argument("--ald", "--angle-limit-divisor", dest="angleLimitDivisor", type=float, default=1.0, help="Divide angle threshold (tau) by this number for candidate generation. [2.0, 1.0, 0.5]")
optionalGroup.add_argument("--alg-code", dest="algCode", type=int, default=0, help="Bonmin algorithm enum codes. 0: B_BB, 1: OA, 2: QG, 3: Hyb, 4: ECP, 5: IFP. [0]")
# Parse the command line and derive the remaining run configuration.
args = parser.parse_args()

# The point cloud file is a hard requirement for every pipeline stage below.
if not os.path.isfile(args.cloud):
    print("Need \"%s\" to exist, assuming it's the pointcloud" % args.cloud );
    sys.exit(1);

# if not args.scale:
#     print("Need scale -s, --scale")
#     exit
# if not args.angleLimit:
#     print("Need angleLimit! Set using '-al' or '--angle-limit'!")
#     exit

# convert to radians
args.angleLimit = args.angleLimit / 180.0 * math.pi
# Comma-joined generator list, the format the rapter executable expects.
args.angleGensStr = ",".join( str(e) for e in args.angleGens )

print( "--popLimit %d \twill keep all primitives, that have more than this number of assigned points" % (args.popLimit) );

# Default the spatial weight relative to the pairwise weight.
# NOTE(review): "not args.spatial" also fires for an explicit "--sp 0";
# presumably intended, but verify.
if not args.spatial:
    args.spatial = args.pw / 10.
print( "--spatial %.3f" % (args.spatial) );

# Stash the executable path on args so helpers can reach it.
setattr( args, "rapterExec", rapterExec );

# Mode selection: 3D (planes) vs 2D (lines).
if not args.lines:
    setattr( args, "flag3D", "3D" );
    setattr( args, "tripletSafe", "--triplet-safe" ); # Values: ["", "--triplet-safe"]
    useAllGens = min(5, args.nbExtraIterations-1 ); # start with parallel generation only
else:
    setattr( args, "flag3D", "" );
    setattr( args, "tripletSafe", "" );
    useAllGens = 0
########################################################################################################################
# Segmentation: produce an initial over-segmentation of the point cloud,
# unless the caller supplied both existing primitives (-p) and
# point-primitive associations (-a).
# FIX: the condition previously read "args.associaitons" (typo) -- the
# argparse dest is "associations", so the old code raised AttributeError
# whenever --primitives was given.
if not args.primitives or not args.associations:
    cmd = "%s --segment%s --scale %f --angle-limit %f --angle-gens %s --patch-pop-limit %d --dist-limit-mult %f --cloud %s" \
        % ( rapterExec, args.flag3D, args.scale, args.angleLimit, args.angleGensStr, args.popLimit, args.segmentScaleMultiplier, args.cloud )
    call( cmd, args.dry );

    # Save output: back up any previous segmentation, then promote the fresh
    # "patches.csv" / "points_primitives.csv" to the canonical file names.
    if ( os.path.isfile("patches.csv") and os.path.isfile("points_primitives.csv") ):
        if os.path.isfile("segments.csv"):
            shutil.copyfile( "segments.csv", "segments.csv.bak" );
        shutil.copyfile( "patches.csv", "segments.csv" )

        if os.path.isfile("points_segments.csv"):
            shutil.copyfile( "points_segments.csv", "points_segments.csv.bak" );
        shutil.copyfile( "points_primitives.csv", "points_segments.csv" )

    if not args.noVis:
        show( "segments.csv", "points_segments.csv", "\"RAPter - Segmentation\"", args );
########################################################################################################################
########################################################################################################################
# State variables for the main optimisation loop.
angleGens = "0"                           # generators fed to formulate/solve ("0" == parallel only, until the 90-deg round)
candAngleGens = "0" # used to mirror anglegens, but keep const "0" for generate
primitives = "patches.csv"                # current input primitives (replaced by merged output after iteration 0)
associations = "points_primitives.csv"    # current point-primitive associations
keepSingles = "--keep-singles"            # generate flag; cleared after iteration 1
allowPromoted = "--allow-promoted"        # generate flag; cleared after iteration 3
smallThreshDiv = 2. # area threshold stepsize
smallThreshLimit = 0. # when to stop decreasing area threshold
promRem = 0 # remaining primitives to promote
collapseThreshDeg = 0.4 # initialize optimisation with the closest two orientations merged, if their difference is < colleapseThreshDeg degrees.
adopt = 0                                 # --adopt value for the merge step; switched to "1" once all patches are promoted
adoptChanged = False                      # guard so the "all promoted" branch runs only once
decreaseLevel = False                     # whether to shrink the area threshold at the top of the next iteration
iteration = 0                             # loop counter
# Main generate -> formulate -> solve -> merge loop.
# FIX: the "remaining smalls" report was a Python 2 print *statement*; it is
# now the function form used everywhere else in this script (also makes the
# file Python 3 compatible on this line).
while iteration <= args.nbExtraIterations:
    # Decrease the working area threshold, unless there is more to do on the
    # current level (small patches still waiting to be promoted).
    if decreaseLevel:
        args.smallThreshMult = float(int(args.smallThreshMult / smallThreshDiv));

    # If we reached the bottom working scale (ideally 0):
    if args.smallThreshMult <= smallThreshLimit:
        args.smallThreshMult = int(smallThreshLimit) # make sure it's integer
        if decreaseLevel: # if we don't have to promote any more patches on this level
            adopt = "1" # if we promoted all patches, we can allow points to get re-assigned
            if not adoptChanged:
                adoptChanged = True # only enter here once
                useAllGens = iteration + 2 # if we promoted all patches in the scene, do a 90 round
                args.nbExtraIterations = max(args.nbExtraIterations, useAllGens + 3) # do k more rounds after the 90 round

    # Reset to True, meaning we will continue decreasing, unless generate flips it again below.
    decreaseLevel = True

    print( "smallThreshMult: %d" % args.smallThreshMult );
    print( "__________________________________________________________" );
    print( "Start iteration %d" % iteration );

    prevId = iteration - 1;
    nextId = iteration + 1;

    # From the second iteration on, feed the previous iteration's merged output back in.
    if iteration > 0:
        primitives = "primitives_merged_it%d.csv" % prevId;
        associations = "points_primitives_it%d.csv" % prevId;

    # (2) Generate - generate candidates
    cmd = "%s --generate%s -sc %f -al %f -ald %f --patch-pop-limit %d -p %s --assoc %s --cloud %s --angle-gens %s --small-thresh-mult %f --var-limit %d --small-mode 0 %s %s %s" \
          % (rapterExec, args.flag3D, args.scale, args.angleLimit, args.angleLimitDivisor, args.popLimit, primitives, associations, args.cloud, candAngleGens, \
             args.smallThreshMult, args.variableLimit, \
             args.tripletSafe, keepSingles, allowPromoted );
    promRem = call( cmd, args.dry, True );
    print( "[rapter.py] Remaining smalls to promote: %s" % promRem )

    # Don't decrease the area threshold until no more candidates to promote.
    if promRem != 0:
        decreaseLevel = False; # set to true each iteration

    # (3) Formulate - create optimisation problem
    cmd = "%s --formulate%s --scale %f --unary %f --pw %f --spat-weight %f --spat-dist-mult 2. --patch-pop-limit %d --angle-gens %s --cloud %s --candidates candidates_it%d.csv -a %s --collapse-angle-deg %f --trunc-angle %f --constr-mode patch --dir-bias 0 --no-clusters --cmp 0 --freq-weight 0 --cost-fn spatsqrt" \
          % ( rapterExec, args.flag3D, args.scale, args.data, args.pw, args.spatial, args.popLimit, angleGens, args.cloud, iteration, associations, collapseThreshDeg, args.angleLimit)
    call( cmd, args.dry );

    # (4) Solve
    cmd = "%s --solver%s bonmin --problem problem -v --time -1 --bmode %d --angle-gens %s --candidates candidates_it%d.csv --cloud %s" \
          % ( rapterExec, args.flag3D, args.algCode, angleGens, iteration, args.cloud );
    call( cmd, args.dry )

    if not args.noVis:
        show( "primitives_it%d.bonmin.csv" % iteration, associations, "\"RAPter - Iteration%d\"" % iteration, args );

    # Once all patches were promoted, switch to the full angle generator set
    # and run the representatives pass once.
    # NOTE(review): indentation of the runRepr call was lost in extraction --
    # it is reconstructed as part of this branch; confirm against upstream.
    if iteration == useAllGens:
        angleGens = ','.join( str(e) for e in args.angleGens );
        candAngleGens = angleGens;
        # TODO
        runRepr( "primitives_it%d.bonmin.csv" % iteration, associations, iteration, args, angleGens, keepSingles );

    # (6) CoPlanarity
    cmd = "%s --merge%s --scale %f --adopt %s --prims primitives_it%d.bonmin.csv -a %s --angle-gens %s --patch-pop-limit %d --cloud %s" \
          % ( rapterExec, args.flag3D, args.scale, adopt, iteration, associations, angleGens, args.popLimit, args.cloud );
    call( cmd, args.dry )

    # Don't copy promoted patches' directions to other patches after 4 iterations (c==3),
    # since they are not reliable anymore.
    if iteration == 3:
        allowPromoted = ""

    # Don't throw away single directions before the 3rd (c==1) iteration.
    # This will keep large patches, even if they don't copy to anywhere for later.
    if iteration == 1:
        keepSingles = ""

    # If we are still promoting small patches on this working scale, make sure to run more iterations.
    if iteration == args.nbExtraIterations and promRem != 0:
        args.nbExtraIterations += 1;

    # Increment iteration counter (while loop)
    iteration += 1;
| {
"content_hash": "2888dd05bb3914ffe8d7c8e5657e1c22",
"timestamp": "",
"source": "github",
"line_count": 334,
"max_line_length": 315,
"avg_line_length": 59.961077844311376,
"alnum_prop": 0.6277026014879912,
"repo_name": "amonszpart/globOpt",
"id": "2666d4bf45ef8be080a38f801b513a332114d4bd",
"size": "20046",
"binary": false,
"copies": "1",
"ref": "refs/heads/spatially_smooth",
"path": "RAPter/scripts/rapter.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "79972"
},
{
"name": "C++",
"bytes": "1358703"
},
{
"name": "CMake",
"bytes": "31244"
},
{
"name": "CSS",
"bytes": "950"
},
{
"name": "HTML",
"bytes": "35161"
},
{
"name": "M",
"bytes": "353"
},
{
"name": "Mathematica",
"bytes": "2707"
},
{
"name": "Matlab",
"bytes": "96797"
},
{
"name": "Objective-C",
"bytes": "250"
},
{
"name": "Python",
"bytes": "132051"
},
{
"name": "Shell",
"bytes": "64248"
},
{
"name": "TeX",
"bytes": "27525"
}
],
"symlink_target": ""
} |
import os
import random
class Subject:
    """One subject directory in the dataset, plus its pipeline state."""

    def __init__(self, name):
        # name: the subject's directory name under the dataset root.
        self.name = name
        # Fresh, all-False state; filled in later by LoadSubjects.
        self.state = PipelineState()
class PipelineState:
    """Tracks which pipeline outputs exist on disk for one subject."""

    # Existence flags, all False until LoadSubjects inspects the filesystem.
    _FLAGS = ("has_metric", "has_initial_map", "has_final_map")

    def __init__(self):
        for flag in self._FLAGS:
            setattr(self, flag, False)
def MapsDir(base, source, target, protocol):
    """Return the maps directory for *source* -> *target* under *protocol*."""
    components = (base, source, 'protocols', protocol, 'maps', target)
    return os.path.join(*components)
def StatsDir(base, source, target, protocol):
    """Return the stats subdirectory of the corresponding maps directory."""
    maps_dir = MapsDir(base, source, target, protocol)
    return maps_dir + os.sep + 'stats'
def ReadFloatFromFile(filename):
    """Read and return the float on the first line of *filename*.

    Raises ValueError if the first line is not parseable as a float, and
    whatever ``open`` raises if the file is missing.

    FIX: uses a ``with`` block so the file handle is closed even when
    ``float()`` raises (the original left it open in that case).
    """
    with open(filename, 'r') as fp:
        return float(fp.readline().strip())
def LoadSubjects(path, protocol=None, target=None):
    """Scan *path* for subject directories and return a list of Subject objects.

    Every subject gets a distinct random ordinal in ``subject.random``.
    When *protocol* is given, ``state.has_metric`` reflects whether the
    subject's metric file exists.  When *target* is given,
    ``state.has_initial_map`` / ``state.has_final_map`` and the energy
    fields are populated from the *target* -> subject maps/stats directories
    (this path presumably requires *protocol* as well — the directory layout
    embeds it; verify with callers).

    FIX: the original assigned ``res = map(...)`` and then indexed ``res[i]``,
    which raises TypeError under Python 3 because ``map`` returns an
    iterator; a list comprehension is used instead.
    """
    path = os.path.expanduser(path)
    names = os.listdir(path)
    res = [Subject(name) for name in names]

    # Give each subject a unique random ordinal in [0, len(res)).
    random_numbers = random.sample(range(len(res)), len(res))
    for subject, number in zip(res, random_numbers):
        subject.random = number

    if protocol is not None:
        for subject in res:
            metric_filename = os.path.join(
                path, subject.name, 'protocols', protocol, 'metric.u'
            )
            subject.state.has_metric = os.path.exists(metric_filename)

    if target is not None:
        for subject in res:
            maps_dir = MapsDir(path, target, subject.name, protocol)
            subject.state.has_initial_map = os.path.exists(
                maps_dir + os.sep + 'initial.map'
            )
            subject.state.has_final_map = os.path.exists(
                maps_dir + os.sep + 'final.map'
            )

            stats_dir = StatsDir(path, target, subject.name, protocol)
            # Energies default to the string sentinel '-1' (kept for backward
            # compatibility) and become floats only when the file exists.
            harmonic_energy_name = stats_dir + os.sep + 'final.energy'
            subject.state.harmonic_energy = '-1'
            if os.path.exists(harmonic_energy_name):
                subject.state.harmonic_energy = ReadFloatFromFile(harmonic_energy_name)

            elastic_energy_name = (
                stats_dir + os.sep + 'elastic_energy' + os.sep + 'total_energy.txt'
            )
            subject.state.elastic_energy = '-1'
            if os.path.exists(elastic_energy_name):
                subject.state.elastic_energy = ReadFloatFromFile(elastic_energy_name)
    return res
| {
"content_hash": "38048385f48300aba12aed6482965fd9",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 90,
"avg_line_length": 37.95161290322581,
"alnum_prop": 0.5898852528686783,
"repo_name": "alextsui05/mcmp",
"id": "04ed97e181a91f33f1a64cbf9bf073ef9322501c",
"size": "2353",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mcmp/Subject.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "181503"
},
{
"name": "Python",
"bytes": "4007"
}
],
"symlink_target": ""
} |
from datetime import datetime
from decimal import Decimal
import numpy as np
import pytest
from pandas._libs import lib
from pandas.compat import IS64
from pandas.errors import (
PerformanceWarning,
SpecificationError,
)
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Grouper,
Index,
MultiIndex,
RangeIndex,
Series,
Timedelta,
Timestamp,
date_range,
to_datetime,
)
import pandas._testing as tm
from pandas.core.arrays import BooleanArray
import pandas.core.common as com
from pandas.core.groupby.base import maybe_normalize_deprecated_kernels
from pandas.tests.groupby import get_groupby_method_args
def test_repr():
    # GH18203: Grouper must echo its constructor arguments in its repr.
    grouper = Grouper(key="A", level="B")
    expected = "Grouper(key='A', level='B', axis=0, sort=False, dropna=True)"
    assert repr(grouper) == expected
@pytest.mark.parametrize("dtype", ["int64", "int32", "float64", "float32"])
def test_basic(dtype):
    """Smoke-test core Series.groupby behaviour across dtypes: iteration,
    aggregate/agg/mean/sum equivalence, transform vs apply, grouping by a
    Series, list-of-functions aggregation, and the rejected cases (dict
    renamer, non-aggregating function)."""
    data = Series(np.arange(9) // 3, index=np.arange(9), dtype=dtype)

    # Shuffle so groups are not contiguous in the index.
    index = np.arange(9)
    np.random.shuffle(index)
    data = data.reindex(index)

    grouped = data.groupby(lambda x: x // 3, group_keys=False)

    # Three groups of three elements each.
    for k, v in grouped:
        assert len(v) == 3

    agged = grouped.aggregate(np.mean)
    assert agged[1] == 1

    tm.assert_series_equal(agged, grouped.agg(np.mean))  # shorthand
    tm.assert_series_equal(agged, grouped.mean())
    tm.assert_series_equal(grouped.agg(np.sum), grouped.sum())

    # A non-reducing function: transform and apply must agree.
    expected = grouped.apply(lambda x: x * x.sum())
    transformed = grouped.transform(lambda x: x * x.sum())
    assert transformed[7] == 12
    tm.assert_series_equal(transformed, expected)

    # Grouping by the data itself (Series key).
    value_grouped = data.groupby(data)
    tm.assert_series_equal(
        value_grouped.aggregate(np.mean), agged, check_index_type=False
    )

    # complex agg
    agged = grouped.aggregate([np.mean, np.std])

    # Dict-of-funcs renaming was removed; must raise.
    msg = r"nested renamer is not supported"
    with pytest.raises(SpecificationError, match=msg):
        grouped.aggregate({"one": np.mean, "two": np.std})

    # The lambda can see the group name via x.name.
    group_constants = {0: 10, 1: 20, 2: 30}
    agged = grouped.agg(lambda x: group_constants[x.name] + x.mean())
    assert agged[1] == 21

    # corner cases: agg with a non-aggregating function must raise
    msg = "Must produce aggregated value"
    # exception raised is type Exception
    with pytest.raises(Exception, match=msg):
        grouped.aggregate(lambda x: x * 2)
def test_groupby_nonobject_dtype(mframe, df_mixed_floats):
    # Grouping by an integer ndarray key must match grouping by the same
    # key cast to object dtype.
    codes = mframe.index.codes[0]
    result = mframe.groupby(codes).sum()
    expected = mframe.groupby(codes.astype("O")).sum()
    tm.assert_frame_equal(result, expected)

    # GH 3911: apply() on a mixed frame must not convert column dtypes.
    mixed = df_mixed_floats.copy()
    mixed["value"] = range(len(mixed))

    def max_value(group):
        return group.loc[group["value"].idxmax()]

    applied = mixed.groupby("A").apply(max_value)
    tm.assert_series_equal(applied.dtypes, mixed.dtypes)
def test_groupby_return_type():
    """GH2893: applying a Series-returning function with squeeze=True must
    reduce to a Series; GH3596: squeeze=False keeps a DataFrame.

    NOTE: ``squeeze`` is deprecated — each call is wrapped in
    assert_produces_warning(FutureWarning) to pin that deprecation.
    """
    # GH2893, return a reduced type
    df1 = DataFrame(
        [
            {"val1": 1, "val2": 20},
            {"val1": 1, "val2": 19},
            {"val1": 2, "val2": 27},
            {"val1": 2, "val2": 12},
        ]
    )

    def func(dataf):
        return dataf["val2"] - dataf["val2"].mean()

    with tm.assert_produces_warning(FutureWarning):
        result = df1.groupby("val1", squeeze=True).apply(func)
    assert isinstance(result, Series)

    df2 = DataFrame(
        [
            {"val1": 1, "val2": 20},
            {"val1": 1, "val2": 19},
            {"val1": 1, "val2": 27},
            {"val1": 1, "val2": 12},
        ]
    )

    def func(dataf):
        return dataf["val2"] - dataf["val2"].mean()

    with tm.assert_produces_warning(FutureWarning):
        result = df2.groupby("val1", squeeze=True).apply(func)
    assert isinstance(result, Series)

    # GH3596, return a consistent type (regression in 0.11 from 0.10.1)
    df = DataFrame([[1, 1], [1, 1]], columns=["X", "Y"])
    with tm.assert_produces_warning(FutureWarning):
        result = df.groupby("X", squeeze=False).count()
    assert isinstance(result, DataFrame)
def test_inconsistent_return_type():
    """GH5592: apply() where the function returns None for one group must
    yield NaN/NaT rows for that group, for frame, datetime, and scalar
    outputs alike."""
    # GH5592
    # inconsistent return type
    df = DataFrame(
        {
            "A": ["Tiger", "Tiger", "Tiger", "Lamb", "Lamb", "Pony", "Pony"],
            "B": Series(np.arange(7), dtype="int64"),
            "C": date_range("20130101", periods=7),
        }
    )

    # Baseline: first row of every group.
    def f(grp):
        return grp.iloc[0]

    expected = df.groupby("A").first()[["B"]]
    result = df.groupby("A").apply(f)[["B"]]
    tm.assert_frame_equal(result, expected)

    # None for one group -> NaN row for that group.
    def f(grp):
        if grp.name == "Tiger":
            return None
        return grp.iloc[0]

    result = df.groupby("A").apply(f)[["B"]]
    e = expected.copy()
    e.loc["Tiger"] = np.nan
    tm.assert_frame_equal(result, e)

    def f(grp):
        if grp.name == "Pony":
            return None
        return grp.iloc[0]

    result = df.groupby("A").apply(f)[["B"]]
    e = expected.copy()
    e.loc["Pony"] = np.nan
    tm.assert_frame_equal(result, e)

    # 5592 revisited, with datetimes: the missing group becomes NaT.
    def f(grp):
        if grp.name == "Pony":
            return None
        return grp.iloc[0]

    result = df.groupby("A").apply(f)[["C"]]
    e = df.groupby("A").first()[["C"]]
    e.loc["Pony"] = pd.NaT
    tm.assert_frame_equal(result, e)

    # scalar outputs: missing group becomes NaN and the name is dropped.
    def f(grp):
        if grp.name == "Pony":
            return None
        return grp.iloc[0].loc["C"]

    result = df.groupby("A").apply(f)
    e = df.groupby("A").first()["C"].copy()
    e.loc["Pony"] = np.nan
    e.name = None
    tm.assert_series_equal(result, e)
def test_pass_args_kwargs(ts, tsframe):
    """agg/apply/transform must forward positional and keyword arguments to
    the aggregating function, matching the equivalent quantile calls, for
    both SeriesGroupBy and DataFrameGroupBy (with and without as_index)."""
    def f(x, q=None, axis=0):
        return np.percentile(x, q, axis=axis)

    g = lambda x: np.percentile(x, 80, axis=0)

    # Series: positional forwarding.
    ts_grouped = ts.groupby(lambda x: x.month)
    agg_result = ts_grouped.agg(np.percentile, 80, axis=0)
    apply_result = ts_grouped.apply(np.percentile, 80, axis=0)
    trans_result = ts_grouped.transform(np.percentile, 80, axis=0)

    agg_expected = ts_grouped.quantile(0.8)
    trans_expected = ts_grouped.transform(g)

    tm.assert_series_equal(apply_result, agg_expected)
    tm.assert_series_equal(agg_result, agg_expected)
    tm.assert_series_equal(trans_result, trans_expected)

    # Series: keyword forwarding.
    agg_result = ts_grouped.agg(f, q=80)
    apply_result = ts_grouped.apply(f, q=80)
    trans_result = ts_grouped.transform(f, q=80)
    tm.assert_series_equal(agg_result, agg_expected)
    tm.assert_series_equal(apply_result, agg_expected)
    tm.assert_series_equal(trans_result, trans_expected)

    # DataFrame: same checks under both as_index settings.
    for as_index in [True, False]:
        df_grouped = tsframe.groupby(lambda x: x.month, as_index=as_index)
        agg_result = df_grouped.agg(np.percentile, 80, axis=0)
        apply_result = df_grouped.apply(DataFrame.quantile, 0.8)
        expected = df_grouped.quantile(0.8)
        tm.assert_frame_equal(apply_result, expected, check_names=False)
        tm.assert_frame_equal(agg_result, expected)

        # List-valued quantile argument.
        apply_result = df_grouped.apply(DataFrame.quantile, [0.4, 0.8])
        expected_seq = df_grouped.quantile([0.4, 0.8])
        tm.assert_frame_equal(apply_result, expected_seq, check_names=False)

        # Keyword forwarding.
        agg_result = df_grouped.agg(f, q=80)
        apply_result = df_grouped.apply(DataFrame.quantile, q=0.8)
        tm.assert_frame_equal(agg_result, expected)
        tm.assert_frame_equal(apply_result, expected, check_names=False)
@pytest.mark.parametrize("as_index", [True, False])
def test_pass_args_kwargs_duplicate_columns(tsframe, as_index):
    # go through _aggregate_frame with self.axis == 0 and duplicate columns
    tsframe.columns = ["A", "B", "A", "C"]
    grouped = tsframe.groupby(lambda ts: ts.month, as_index=as_index)

    result = grouped.agg(np.percentile, 80, axis=0)

    expected = DataFrame(
        {month: tsframe[tsframe.index.month == month].quantile(0.8) for month in (1, 2)}
    ).T
    if not as_index:
        # TODO: try to get this more consistent?
        expected.index = Index(range(2))

    tm.assert_frame_equal(result, expected)
def test_len():
    df = tm.makeTimeDataFrame()

    # Daily index grouped by (year, month, day): one group per row.
    by_day = df.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day])
    assert len(by_day) == len(df)

    # Grouped by (year, month): one group per distinct pair in the index.
    by_month = df.groupby([lambda x: x.year, lambda x: x.month])
    assert len(by_month) == len({(ts.year, ts.month) for ts in df.index})

    # issue 11016: all-NaN keys yield no groups at all.
    df = DataFrame({"a": [np.nan] * 3, "b": [1, 2, 3]})
    assert len(df.groupby("a")) == 0
    assert len(df.groupby("b")) == 3
    assert len(df.groupby(["a", "b"])) == 3
def test_basic_regression():
    # Regression: grouping a float Series by a (longer, index-aligned)
    # random float Series and aggregating must not raise.
    values = Series([float(v) for v in list(range(1, 10)) * 10])
    keys = Series(np.random.random(1100) * 10.0)
    values.groupby(keys).mean()
@pytest.mark.parametrize(
    "dtype", ["float64", "float32", "int64", "int32", "int16", "int8"]
)
def test_with_na_groups(dtype):
    # NaN labels must be dropped from the grouping entirely.
    idx = Index(np.arange(10))
    values = Series(np.ones(10), idx, dtype=dtype)
    labels = Series(
        [np.nan, "foo", "bar", "bar", np.nan, np.nan, "bar", "bar", np.nan, "foo"],
        index=idx,
    )

    grouped = values.groupby(labels)

    # this SHOULD be an int
    counts = grouped.agg(len)
    tm.assert_series_equal(
        counts, Series([4, 2], index=["bar", "foo"]), check_dtype=False
    )
    # assert issubclass(counts.dtype.type, np.integer)

    # explicitly return a float from my function
    def float_len(x):
        return float(len(x))

    sizes = grouped.agg(float_len)
    tm.assert_series_equal(sizes, Series([4.0, 2.0], index=["bar", "foo"]))
def test_indices_concatenation_order():
    """GH 2808: when apply() returns frames with MultiIndexes, all groups must
    produce indices with the same number of levels; mismatches raise
    AssertionError during concatenation."""
    # GH 2808
    # f1: empty groups yield a 2-level MultiIndex matching the non-empty case.
    def f1(x):
        y = x[(x.b % 2) == 1] ** 2
        if y.empty:
            multiindex = MultiIndex(levels=[[]] * 2, codes=[[]] * 2, names=["b", "c"])
            res = DataFrame(columns=["a"], index=multiindex)
            return res
        else:
            y = y.set_index(["b", "c"])
            return y

    # f2: empty groups yield a flat empty frame -> level count mismatch.
    def f2(x):
        y = x[(x.b % 2) == 1] ** 2
        if y.empty:
            return DataFrame()
        else:
            y = y.set_index(["b", "c"])
            return y

    # f3: empty groups yield a MultiIndex with *different* names/shape.
    def f3(x):
        y = x[(x.b % 2) == 1] ** 2
        if y.empty:
            multiindex = MultiIndex(
                levels=[[]] * 2, codes=[[]] * 2, names=["foo", "bar"]
            )
            res = DataFrame(columns=["a", "b"], index=multiindex)
            return res
        else:
            return y

    # df has an all-even group for a == 1; df2 does not.
    df = DataFrame({"a": [1, 2, 2, 2], "b": range(4), "c": range(5, 9)})

    df2 = DataFrame({"a": [3, 2, 2, 2], "b": range(4), "c": range(5, 9)})

    # correct result
    result1 = df.groupby("a").apply(f1)
    result2 = df2.groupby("a").apply(f1)
    tm.assert_frame_equal(result1, result2)

    # should fail (not the same number of levels)
    msg = "Cannot concat indices that do not have the same number of levels"
    with pytest.raises(AssertionError, match=msg):
        df.groupby("a").apply(f2)
    with pytest.raises(AssertionError, match=msg):
        df2.groupby("a").apply(f2)

    # should fail (incorrect shape)
    with pytest.raises(AssertionError, match=msg):
        df.groupby("a").apply(f3)
    with pytest.raises(AssertionError, match=msg):
        df2.groupby("a").apply(f3)
def test_attr_wrapper(ts):
    """Attribute access on a SeriesGroupBy dispatches to the groups (std,
    describe, dtype), and unknown attributes raise AttributeError."""
    grouped = ts.groupby(lambda x: x.weekday())

    result = grouped.std()
    expected = grouped.agg(lambda x: np.std(x, ddof=1))
    tm.assert_series_equal(result, expected)

    # this is pretty cool
    result = grouped.describe()
    expected = {name: gp.describe() for name, gp in grouped}
    expected = DataFrame(expected).T
    tm.assert_frame_equal(result, expected)

    # get attribute
    result = grouped.dtype
    expected = grouped.agg(lambda x: x.dtype)
    tm.assert_series_equal(result, expected)

    # make sure raises error
    msg = "'SeriesGroupBy' object has no attribute 'foo'"
    with pytest.raises(AttributeError, match=msg):
        getattr(grouped, "foo")
def test_frame_groupby(tsframe):
    """DataFrame.groupby by weekday: aggregate shape, string-key equivalence,
    transform (reducing and broadcasting), iteration order, and the
    groups/indices mappings."""
    grouped = tsframe.groupby(lambda x: x.weekday())

    # aggregate: one row per weekday, all four columns preserved
    aggregated = grouped.aggregate(np.mean)
    assert len(aggregated) == 5
    assert len(aggregated.columns) == 4

    # by string: grouping by a weekday column must give the same result
    tscopy = tsframe.copy()
    tscopy["weekday"] = [x.weekday() for x in tscopy.index]
    stragged = tscopy.groupby("weekday").aggregate(np.mean)
    tm.assert_frame_equal(stragged, aggregated, check_names=False)

    # transform: demeaning keeps the original shape
    grouped = tsframe.head(30).groupby(lambda x: x.weekday())
    transformed = grouped.transform(lambda x: x - x.mean())
    assert len(transformed) == 30
    assert len(transformed.columns) == 4

    # transform propagate: a reducing function broadcasts back to each row
    transformed = grouped.transform(lambda x: x.mean())
    for name, group in grouped:
        mean = group.mean()
        for idx in group.index:
            tm.assert_series_equal(transformed.xs(idx), mean, check_names=False)

    # iterate: each chunk's rows all share the group's weekday
    for weekday, group in grouped:
        assert group.index[0].weekday() == weekday

    # groups / group_indices must describe the same partition
    groups = grouped.groups
    indices = grouped.indices

    for k, v in groups.items():
        samething = tsframe.index.take(indices[k])
        assert (samething == v).all()
def test_frame_groupby_columns(tsframe):
    col_mapping = {"A": 0, "B": 0, "C": 1, "D": 1}
    grouped = tsframe.groupby(col_mapping, axis=1)

    # aggregate: rows preserved, columns collapsed to the two groups
    aggregated = grouped.aggregate(np.mean)
    assert len(aggregated) == len(tsframe)
    assert len(aggregated.columns) == 2

    # transform: grouping columns equals grouping rows of the transpose
    demean = lambda x: x - x.mean()
    transposed = tsframe.T.groupby(col_mapping, axis=0)
    tm.assert_frame_equal(transposed.transform(demean).T, grouped.transform(demean))

    # iterate: every chunk carries exactly two columns
    for _, chunk in grouped:
        assert len(chunk.columns) == 2
def test_frame_set_name_single(df):
    """The grouping key name must propagate to the result index name for
    mean/agg variants (and not with as_index=False); the removed dict-renamer
    API must raise.  The numeric_only default deprecation is pinned via
    FutureWarning matching."""
    grouped = df.groupby("A")

    msg = "The default value of numeric_only"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = grouped.mean()
    assert result.index.name == "A"

    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = df.groupby("A", as_index=False).mean()
    assert result.index.name != "A"

    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = grouped.agg(np.mean)
    assert result.index.name == "A"

    result = grouped.agg({"C": np.mean, "D": np.std})
    assert result.index.name == "A"

    result = grouped["C"].mean()
    assert result.index.name == "A"
    result = grouped["C"].agg(np.mean)
    assert result.index.name == "A"
    result = grouped["C"].agg([np.mean, np.std])
    assert result.index.name == "A"

    # dict-of-funcs renaming was removed; must raise.
    msg = r"nested renamer is not supported"
    with pytest.raises(SpecificationError, match=msg):
        grouped["C"].agg({"foo": np.mean, "bar": np.std})
def test_multi_func(df):
    """Grouping by a list of callables (column getters) must match grouping
    by the column names; empty groups must not break multi-key agg."""
    col1 = df["A"]
    col2 = df["B"]

    # Callables as keys: Series.get applied to each index label.
    grouped = df.groupby([col1.get, col2.get])

    msg = "The default value of numeric_only"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        agged = grouped.mean()
    expected = df.groupby(["A", "B"]).mean()

    # TODO groupby get drops names
    tm.assert_frame_equal(
        agged.loc[:, ["C", "D"]], expected.loc[:, ["C", "D"]], check_names=False
    )

    # some "groups" with no data
    df = DataFrame(
        {
            "v1": np.random.randn(6),
            "v2": np.random.randn(6),
            "k1": np.array(["b", "b", "b", "a", "a", "a"]),
            "k2": np.array(["1", "1", "1", "2", "2", "2"]),
        },
        index=["one", "two", "three", "four", "five", "six"],
    )
    # only verify that it works for now
    grouped = df.groupby(["k1", "k2"])
    grouped.agg(np.sum)
def test_multi_key_multiple_functions(df):
    # A list of functions on a multi-key SeriesGroupBy must match computing
    # each aggregate separately and assembling the columns by hand.
    gb = df.groupby(["A", "B"])["C"]

    result = gb.agg([np.mean, np.std])
    expected = DataFrame(
        {name: gb.agg(func) for name, func in (("mean", np.mean), ("std", np.std))}
    )
    tm.assert_frame_equal(result, expected)
def test_frame_multi_key_function_list():
    """A list of functions over a two-key groupby produces a MultiIndex
    column result matching per-column aggregation; the non-numeric column
    "C" fails to aggregate and is dropped with a warning."""
    data = DataFrame(
        {
            "A": [
                "foo",
                "foo",
                "foo",
                "foo",
                "bar",
                "bar",
                "bar",
                "bar",
                "foo",
                "foo",
                "foo",
            ],
            "B": [
                "one",
                "one",
                "one",
                "two",
                "one",
                "one",
                "one",
                "two",
                "two",
                "two",
                "one",
            ],
            "C": [
                "dull",
                "dull",
                "shiny",
                "dull",
                "dull",
                "shiny",
                "shiny",
                "dull",
                "shiny",
                "shiny",
                "shiny",
            ],
            "D": np.random.randn(11),
            "E": np.random.randn(11),
            "F": np.random.randn(11),
        }
    )
    grouped = data.groupby(["A", "B"])
    funcs = [np.mean, np.std]
    with tm.assert_produces_warning(
        FutureWarning, match=r"\['C'\] did not aggregate successfully"
    ):
        agged = grouped.agg(funcs)
    expected = pd.concat(
        [grouped["D"].agg(funcs), grouped["E"].agg(funcs), grouped["F"].agg(funcs)],
        keys=["D", "E", "F"],
        axis=1,
    )
    assert isinstance(agged.index, MultiIndex)
    assert isinstance(expected.index, MultiIndex)
    tm.assert_frame_equal(agged, expected)
@pytest.mark.parametrize("op", [lambda x: x.sum(), lambda x: x.mean()])
def test_groupby_multiple_columns(df, op):
    """Two-key reductions agree with manually nested single-key groupbys,
    both column-wise and for a selected Series."""
    data = df
    grouped = data.groupby(["A", "B"])
    result1 = op(grouped)
    keys = []
    values = []
    for n1, gp1 in data.groupby("A"):
        for n2, gp2 in gp1.groupby("B"):
            keys.append((n1, n2))
            values.append(op(gp2.loc[:, ["C", "D"]]))
    mi = MultiIndex.from_tuples(keys, names=["A", "B"])
    expected = pd.concat(values, axis=1).T
    expected.index = mi
    # a little bit crude
    for col in ["C", "D"]:
        result_col = op(grouped[col])
        pivoted = result1[col]
        exp = expected[col]
        tm.assert_series_equal(result_col, exp)
        tm.assert_series_equal(pivoted, exp)
    # test single series works the same
    result = data["C"].groupby([data["A"], data["B"]]).mean()
    expected = data.groupby(["A", "B"]).mean()["C"]
    tm.assert_series_equal(result, expected)
def test_as_index_select_column():
    """get_group and apply work on a column selected from an as_index=False
    groupby (GH 5764)."""
    df = DataFrame([[1, 2], [1, 4], [5, 6]], columns=["A", "B"])
    result = df.groupby("A", as_index=False)["B"].get_group(1)
    expected = Series([2, 4], name="B")
    tm.assert_series_equal(result, expected)
    result = df.groupby("A", as_index=False, group_keys=True)["B"].apply(
        lambda x: x.cumsum()
    )
    # group_keys with as_index=False prepends positional group numbers
    expected = Series(
        [2, 6, 6], name="B", index=MultiIndex.from_tuples([(0, 0), (0, 1), (1, 2)])
    )
    tm.assert_series_equal(result, expected)
def test_groupby_as_index_select_column_sum_empty_df():
    """Summing a selected column of an empty frame keeps the grouping-key
    column in the result (GH 35246)."""
    df = DataFrame(columns=Index(["A", "B", "C"], name="alpha"))
    left = df.groupby(by="A", as_index=False)["B"].sum(numeric_only=False)
    expected = DataFrame(columns=df.columns[:2], index=range(0))
    tm.assert_frame_equal(left, expected)
def test_groupby_as_index_agg(df):
    """as_index=False aggregation matches the equivalent direct reductions
    for single and multiple keys; nested-dict renaming must raise; grouping
    by a Series/ndarray behaves the same under both as_index settings."""
    grouped = df.groupby("A", as_index=False)
    # single-key
    msg = "The default value of numeric_only"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = grouped.agg(np.mean)
        expected = grouped.mean()
    tm.assert_frame_equal(result, expected)
    result2 = grouped.agg({"C": np.mean, "D": np.sum})
    with tm.assert_produces_warning(FutureWarning, match=msg):
        expected2 = grouped.mean()
        expected2["D"] = grouped.sum()["D"]
    tm.assert_frame_equal(result2, expected2)
    grouped = df.groupby("A", as_index=True)
    msg = r"nested renamer is not supported"
    with pytest.raises(SpecificationError, match=msg):
        grouped["C"].agg({"Q": np.sum})
    # multi-key
    grouped = df.groupby(["A", "B"], as_index=False)
    result = grouped.agg(np.mean)
    expected = grouped.mean()
    tm.assert_frame_equal(result, expected)
    result2 = grouped.agg({"C": np.mean, "D": np.sum})
    expected2 = grouped.mean()
    expected2["D"] = grouped.sum()["D"]
    tm.assert_frame_equal(result2, expected2)
    expected3 = grouped["C"].sum()
    expected3 = DataFrame(expected3).rename(columns={"C": "Q"})
    result3 = grouped["C"].agg({"Q": np.sum})
    tm.assert_frame_equal(result3, expected3)
    # GH7115 & GH8112 & GH8582
    df = DataFrame(np.random.randint(0, 100, (50, 3)), columns=["jim", "joe", "jolie"])
    ts = Series(np.random.randint(5, 10, 50), name="jim")
    gr = df.groupby(ts)
    gr.nth(0)  # invokes set_selection_from_grouper internally
    tm.assert_frame_equal(gr.apply(sum), df.groupby(ts).apply(sum))
    for attr in ["mean", "max", "count", "idxmax", "cumsum", "all"]:
        gr = df.groupby(ts, as_index=False)
        left = getattr(gr, attr)()
        gr = df.groupby(ts.values, as_index=True)
        right = getattr(gr, attr)().reset_index(drop=True)
        tm.assert_frame_equal(left, right)
def test_ops_not_as_index(reduction_func):
    """Using as_index=False must not modify the grouped column; the result
    equals the as_index=True result after reset_index (GH 10355, 21090)."""
    if reduction_func in ("corrwith", "nth", "ngroup"):
        pytest.skip(f"GH 5755: Test not applicable for {reduction_func}")
    warn = FutureWarning if reduction_func == "mad" else None
    df = DataFrame(np.random.randint(0, 5, size=(100, 2)), columns=["a", "b"])
    with tm.assert_produces_warning(warn, match="The 'mad' method is deprecated"):
        expected = getattr(df.groupby("a"), reduction_func)()
    if reduction_func == "size":
        expected = expected.rename("size")
    expected = expected.reset_index()
    if reduction_func != "size":
        # 32 bit compat -> groupby preserves dtype whereas reset_index casts to int64
        expected["a"] = expected["a"].astype(df["a"].dtype)
    g = df.groupby("a", as_index=False)
    with tm.assert_produces_warning(warn, match="The 'mad' method is deprecated"):
        result = getattr(g, reduction_func)()
    tm.assert_frame_equal(result, expected)
    with tm.assert_produces_warning(warn, match="The 'mad' method is deprecated"):
        result = g.agg(reduction_func)
    tm.assert_frame_equal(result, expected)
    with tm.assert_produces_warning(warn, match="The 'mad' method is deprecated"):
        result = getattr(g["b"], reduction_func)()
    tm.assert_frame_equal(result, expected)
    with tm.assert_produces_warning(warn, match="The 'mad' method is deprecated"):
        result = g["b"].agg(reduction_func)
    tm.assert_frame_equal(result, expected)
def test_as_index_series_return_frame(df):
    """With as_index=False, aggregating a selected column returns a frame
    that includes the grouping-key columns, not a bare Series."""
    grouped = df.groupby("A", as_index=False)
    grouped2 = df.groupby(["A", "B"], as_index=False)
    msg = "The default value of numeric_only"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = grouped["C"].agg(np.sum)
        expected = grouped.agg(np.sum).loc[:, ["A", "C"]]
    assert isinstance(result, DataFrame)
    tm.assert_frame_equal(result, expected)
    result2 = grouped2["C"].agg(np.sum)
    expected2 = grouped2.agg(np.sum).loc[:, ["A", "B", "C"]]
    assert isinstance(result2, DataFrame)
    tm.assert_frame_equal(result2, expected2)
    result = grouped["C"].sum()
    with tm.assert_produces_warning(FutureWarning, match=msg):
        expected = grouped.sum().loc[:, ["A", "C"]]
    assert isinstance(result, DataFrame)
    tm.assert_frame_equal(result, expected)
    result2 = grouped2["C"].sum()
    expected2 = grouped2.sum().loc[:, ["A", "B", "C"]]
    assert isinstance(result2, DataFrame)
    tm.assert_frame_equal(result2, expected2)
def test_as_index_series_column_slice_raises(df):
    """Indexing into an already-selected column must raise (GH 15072)."""
    grouped = df.groupby("A", as_index=False)
    msg = r"Column\(s\) C already selected"
    with pytest.raises(IndexError, match=msg):
        grouped["C"].__getitem__("D")
def test_groupby_as_index_cython(df):
    """Cython-path mean() with as_index=False prepends the key column(s)
    and uses a positional RangeIndex."""
    data = df
    # single-key
    grouped = data.groupby("A", as_index=False)
    msg = "The default value of numeric_only"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = grouped.mean()
        expected = data.groupby(["A"]).mean()
    expected.insert(0, "A", expected.index)
    expected.index = np.arange(len(expected))
    tm.assert_frame_equal(result, expected)
    # multi-key
    grouped = data.groupby(["A", "B"], as_index=False)
    result = grouped.mean()
    expected = data.groupby(["A", "B"]).mean()
    arrays = list(zip(*expected.index.values))
    expected.insert(0, "A", arrays[0])
    expected.insert(1, "B", arrays[1])
    expected.index = np.arange(len(expected))
    tm.assert_frame_equal(result, expected)
def test_groupby_as_index_series_scalar(df):
    """Selecting a column with as_index=False matches the frame-level agg
    restricted to the key and selected columns (GH #421)."""
    grouped = df.groupby(["A", "B"], as_index=False)
    result = grouped["C"].agg(len)
    expected = grouped.agg(len).loc[:, ["A", "B", "C"]]
    tm.assert_frame_equal(result, expected)
def test_groupby_as_index_corner(df, ts):
    """as_index=False is rejected for Series groupby and for axis=1."""
    msg = "as_index=False only valid with DataFrame"
    with pytest.raises(TypeError, match=msg):
        ts.groupby(lambda x: x.weekday(), as_index=False)
    msg = "as_index=False only valid for axis=0"
    with pytest.raises(ValueError, match=msg):
        df.groupby(lambda x: x.lower(), as_index=False, axis=1)
def test_groupby_multiple_key():
    """Grouping a daily frame by year/month/day callables yields singleton
    groups, so sums reconstruct the original values on both axes."""
    df = tm.makeTimeDataFrame()
    grouped = df.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day])
    agged = grouped.sum()
    tm.assert_almost_equal(df.values, agged.values)
    grouped = df.T.groupby(
        [lambda x: x.year, lambda x: x.month, lambda x: x.day], axis=1
    )
    agged = grouped.agg(lambda x: x.sum())
    tm.assert_index_equal(agged.index, df.columns)
    tm.assert_almost_equal(df.T.values, agged.values)
    agged = grouped.agg(lambda x: x.sum())
    tm.assert_almost_equal(df.T.values, agged.values)
def test_groupby_multi_corner(df):
# test that having an all-NA column doesn't mess you up
df = df.copy()
df["bad"] = np.nan
agged = df.groupby(["A", "B"]).mean()
expected = df.groupby(["A", "B"]).mean()
expected["bad"] = np.nan
tm.assert_frame_equal(agged, expected)
def test_omit_nuisance(df):
    """Nuisance (non-numeric) columns are dropped with a warning for axis=0
    reductions, but axis=1 aggregation over mixed dtypes must raise."""
    grouped = df.groupby("A")
    msg = "The default value of numeric_only"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        agged = grouped.agg(np.mean)
        exp = grouped.mean()
    tm.assert_frame_equal(agged, exp)
    df = df.loc[:, ["A", "C", "D"]]
    df["E"] = datetime.now()
    grouped = df.groupby("A")
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = grouped.agg(np.sum)
        expected = grouped.sum()
    tm.assert_frame_equal(result, expected)
    # won't work with axis = 1
    grouped = df.groupby({"A": 0, "C": 0, "D": 1, "E": 1}, axis=1)
    msg = "does not support reduction 'sum'"
    with pytest.raises(TypeError, match=msg):
        grouped.agg(lambda x: x.sum(0, numeric_only=False))
@pytest.mark.parametrize(
    "agg_function",
    ["max", "min"],
)
def test_keep_nuisance_agg(df, agg_function):
    """max/min keep object (nuisance) columns instead of dropping them
    (GH 38815)."""
    grouped = df.groupby("A")
    result = getattr(grouped, agg_function)()
    expected = result.copy()
    expected.loc["bar", "B"] = getattr(df.loc[df["A"] == "bar", "B"], agg_function)()
    expected.loc["foo", "B"] = getattr(df.loc[df["A"] == "foo", "B"], agg_function)()
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    "agg_function",
    ["sum", "mean", "prod", "std", "var", "sem", "median"],
)
@pytest.mark.parametrize("numeric_only", [lib.no_default, True, False])
def test_omit_nuisance_agg(df, agg_function, numeric_only):
    """For each reduction and numeric_only setting, check whether nuisance
    columns are dropped, kept, or cause a raise (GH 38774, GH 38815)."""
    if numeric_only is lib.no_default or (not numeric_only and agg_function != "sum"):
        # sum doesn't drop strings
        warn = FutureWarning
    else:
        warn = None
    grouped = df.groupby("A")
    if agg_function in ("var", "std", "sem") and numeric_only is False:
        # Added numeric_only as part of GH#46560; these do not drop nuisance
        # columns when numeric_only is False
        klass = TypeError if agg_function == "var" else ValueError
        with pytest.raises(klass, match="could not convert string to float"):
            getattr(grouped, agg_function)(numeric_only=numeric_only)
    else:
        if numeric_only is lib.no_default:
            msg = (
                f"The default value of numeric_only in DataFrameGroupBy.{agg_function}"
            )
        else:
            msg = "Dropping invalid columns"
        with tm.assert_produces_warning(warn, match=msg):
            result = getattr(grouped, agg_function)(numeric_only=numeric_only)
        if (
            (numeric_only is lib.no_default or not numeric_only)
            # These methods drop non-numeric columns even when numeric_only is False
            and agg_function not in ("mean", "prod", "median")
        ):
            columns = ["A", "B", "C", "D"]
        else:
            columns = ["A", "C", "D"]
        if agg_function == "sum" and numeric_only is False:
            # sum doesn't drop nuisance string columns
            warn = None
        elif agg_function in ("sum", "std", "var", "sem") and numeric_only is not True:
            warn = FutureWarning
        else:
            warn = None
        msg = "The default value of numeric_only"
        with tm.assert_produces_warning(warn, match=msg):
            expected = getattr(df.loc[:, columns].groupby("A"), agg_function)(
                numeric_only=numeric_only
            )
        tm.assert_frame_equal(result, expected)
def test_omit_nuisance_warnings(df):
    """skew() warns while silently dropping non-numeric columns (GH 38815)."""
    with tm.assert_produces_warning(FutureWarning, filter_level="always"):
        grouped = df.groupby("A")
        result = grouped.skew()
    expected = df.loc[:, ["A", "C", "D"]].groupby("A").skew()
    tm.assert_frame_equal(result, expected)
def test_omit_nuisance_python_multiple(three_group):
    """Python-level agg over two keys drops non-numeric columns like mean()."""
    grouped = three_group.groupby(["A", "B"])
    msg = "The default value of numeric_only"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        agged = grouped.agg(np.mean)
        exp = grouped.mean()
    tm.assert_frame_equal(agged, exp)
def test_empty_groups_corner(mframe):
    """Groups with no data aggregate cleanly; a level-groupby over a 2-row
    slice applies per-group means with the level name preserved."""
    # handle empty groups
    df = DataFrame(
        {
            "k1": np.array(["b", "b", "b", "a", "a", "a"]),
            "k2": np.array(["1", "1", "1", "2", "2", "2"]),
            "k3": ["foo", "bar"] * 3,
            "v1": np.random.randn(6),
            "v2": np.random.randn(6),
        }
    )
    grouped = df.groupby(["k1", "k2"])
    msg = "The default value of numeric_only"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = grouped.agg(np.mean)
        expected = grouped.mean()
    tm.assert_frame_equal(result, expected)
    grouped = mframe[3:5].groupby(level=0)
    agged = grouped.apply(lambda x: x.mean())
    agged_A = grouped["A"].apply(np.mean)
    tm.assert_series_equal(agged["A"], agged_A)
    assert agged.index.name == "first"
def test_nonsense_func():
    """A grouping callable that raises must surface the original TypeError."""
    df = DataFrame([0])
    msg = r"unsupported operand type\(s\) for \+: 'int' and 'str'"
    with pytest.raises(TypeError, match=msg):
        df.groupby(lambda x: x + "foo")
def test_wrap_aggregated_output_multindex(mframe):
    """Aggregated output keeps MultiIndex columns; a column whose custom agg
    raises TypeError is dropped with a deprecation warning."""
    df = mframe.T
    df["baz", "two"] = "peekaboo"
    keys = [np.array([0, 0, 1]), np.array([0, 0, 1])]
    msg = "The default value of numeric_only"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        agged = df.groupby(keys).agg(np.mean)
    assert isinstance(agged.columns, MultiIndex)
    def aggfun(ser):
        if ser.name == ("foo", "one"):
            raise TypeError
        else:
            return ser.sum()
    with tm.assert_produces_warning(FutureWarning, match="Dropping invalid columns"):
        agged2 = df.groupby(keys).aggregate(aggfun)
    assert len(agged2.columns) + 1 == len(df.columns)
def test_groupby_level_apply(mframe):
result = mframe.groupby(level=0).count()
assert result.index.name == "first"
result = mframe.groupby(level=1).count()
assert result.index.name == "second"
result = mframe["A"].groupby(level=0).count()
assert result.index.name == "first"
def test_groupby_level_mapper(mframe):
deleveled = mframe.reset_index()
mapper0 = {"foo": 0, "bar": 0, "baz": 1, "qux": 1}
mapper1 = {"one": 0, "two": 0, "three": 1}
result0 = mframe.groupby(mapper0, level=0).sum()
result1 = mframe.groupby(mapper1, level=1).sum()
mapped_level0 = np.array([mapper0.get(x) for x in deleveled["first"]])
mapped_level1 = np.array([mapper1.get(x) for x in deleveled["second"]])
expected0 = mframe.groupby(mapped_level0).sum()
expected1 = mframe.groupby(mapped_level1).sum()
expected0.index.name, expected1.index.name = "first", "second"
tm.assert_frame_equal(result0, expected0)
tm.assert_frame_equal(result1, expected1)
def test_groupby_level_nonmulti():
    """level= on a flat index accepts 0/-1 (scalar or single-element list)
    and rejects anything else (GH 1313, GH 13901)."""
    s = Series([1, 2, 3, 10, 4, 5, 20, 6], Index([1, 2, 3, 1, 4, 5, 2, 6], name="foo"))
    expected = Series([11, 22, 3, 4, 5, 6], Index(range(1, 7), name="foo"))
    result = s.groupby(level=0).sum()
    tm.assert_series_equal(result, expected)
    result = s.groupby(level=[0]).sum()
    tm.assert_series_equal(result, expected)
    result = s.groupby(level=-1).sum()
    tm.assert_series_equal(result, expected)
    result = s.groupby(level=[-1]).sum()
    tm.assert_series_equal(result, expected)
    msg = "level > 0 or level < -1 only valid with MultiIndex"
    with pytest.raises(ValueError, match=msg):
        s.groupby(level=1)
    with pytest.raises(ValueError, match=msg):
        s.groupby(level=-2)
    msg = "No group keys passed!"
    with pytest.raises(ValueError, match=msg):
        s.groupby(level=[])
    msg = "multiple levels only valid with MultiIndex"
    with pytest.raises(ValueError, match=msg):
        s.groupby(level=[0, 0])
    with pytest.raises(ValueError, match=msg):
        s.groupby(level=[0, 1])
    msg = "level > 0 or level < -1 only valid with MultiIndex"
    with pytest.raises(ValueError, match=msg):
        s.groupby(level=[1])
def test_groupby_complex():
    """Complex values sum correctly by level; Series.sum(level=...) is the
    deprecated alias for the same operation (GH 12902)."""
    a = Series(data=np.arange(4) * (1 + 2j), index=[0, 0, 1, 1])
    expected = Series((1 + 2j, 5 + 10j))
    result = a.groupby(level=0).sum()
    tm.assert_series_equal(result, expected)
    with tm.assert_produces_warning(FutureWarning):
        result = a.sum(level=0)
    tm.assert_series_equal(result, expected)
def test_groupby_complex_numbers():
    """Complex-valued group keys are supported; sort=True orders groups by
    the magnitude of the complex numbers (GH 17927)."""
    df = DataFrame(
        [
            {"a": 1, "b": 1 + 1j},
            {"a": 1, "b": 1 + 2j},
            {"a": 4, "b": 1},
        ]
    )
    expected = DataFrame(
        np.array([1, 1, 1], dtype=np.int64),
        index=Index([(1 + 1j), (1 + 2j), (1 + 0j)], name="b"),
        columns=Index(["a"], dtype="object"),
    )
    result = df.groupby("b", sort=False).count()
    tm.assert_frame_equal(result, expected)
    # Sorted by the magnitude of the complex numbers
    expected.index = Index([(1 + 0j), (1 + 1j), (1 + 2j)], name="b")
    result = df.groupby("b", sort=True).count()
    tm.assert_frame_equal(result, expected)
def test_groupby_series_indexed_differently():
    """Grouping one Series by another aligns the grouper on the grouped
    Series' index before forming groups."""
    values = Series(
        [5.0, -9.0, 4.0, 100.0, -5.0, 55.0, 6.7],
        index=Index(["a", "b", "c", "d", "e", "f", "g"]),
    )
    keys = Series(
        [1.0, 1.0, 4.0, 5.0, 5.0, 7.0], index=Index(["a", "b", "d", "f", "g", "h"])
    )
    result = values.groupby(keys).mean()
    # Aligning the key Series by hand must give the same answer.
    expected = values.groupby(keys.reindex(values.index).get).mean()
    tm.assert_series_equal(result, expected)
def test_groupby_with_hier_columns():
    """Level-groupbys on both axes preserve hierarchical columns; adding a
    nuisance column drops it (with a warning) from mean()."""
    tuples = list(
        zip(
            *[
                ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
                ["one", "two", "one", "two", "one", "two", "one", "two"],
            ]
        )
    )
    index = MultiIndex.from_tuples(tuples)
    columns = MultiIndex.from_tuples(
        [("A", "cat"), ("B", "dog"), ("B", "cat"), ("A", "dog")]
    )
    df = DataFrame(np.random.randn(8, 4), index=index, columns=columns)
    result = df.groupby(level=0).mean()
    tm.assert_index_equal(result.columns, columns)
    result = df.groupby(level=0, axis=1).mean()
    tm.assert_index_equal(result.index, df.index)
    result = df.groupby(level=0).agg(np.mean)
    tm.assert_index_equal(result.columns, columns)
    result = df.groupby(level=0).apply(lambda x: x.mean())
    tm.assert_index_equal(result.columns, columns)
    result = df.groupby(level=0, axis=1).agg(lambda x: x.mean(1))
    tm.assert_index_equal(result.columns, Index(["A", "B"]))
    tm.assert_index_equal(result.index, df.index)
    # add a nuisance column
    sorted_columns, _ = columns.sortlevel(0)
    df["A", "foo"] = "bar"
    msg = "The default value of numeric_only"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = df.groupby(level=0).mean()
    tm.assert_index_equal(result.columns, df.columns[:-1])
def test_grouping_ndarray(df):
    """Grouping by a raw ndarray equals grouping by the column, modulo names."""
    grouped = df.groupby(df["A"].values)
    msg = "The default value of numeric_only"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = grouped.sum()
        expected = df.groupby("A").sum()
    tm.assert_frame_equal(
        result, expected, check_names=False
    )  # Note: no names when grouping by value
def test_groupby_wrong_multi_labels():
    """agg(np.mean) over several key columns matches the built-in mean()."""
    idx = Index([0, 1, 2, 3, 4], name="index")
    frame = DataFrame(
        {
            "foo": ["foo1", "foo1", "foo2", "foo1", "foo3"],
            "bar": ["bar1", "bar2", "bar2", "bar1", "bar1"],
            "baz": ["baz1", "baz1", "baz1", "baz2", "baz2"],
            "spam": ["spam2", "spam3", "spam2", "spam1", "spam1"],
            "data": [20, 30, 40, 50, 60],
        },
        index=idx,
    )
    gb = frame.groupby(["foo", "bar", "baz", "spam"])
    tm.assert_frame_equal(gb.agg(np.mean), gb.mean())
def test_groupby_series_with_name(df):
    """Grouping by named Series propagates their names to the result index
    (as_index=True) or result columns (as_index=False)."""
    msg = "The default value of numeric_only"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = df.groupby(df["A"]).mean()
        result2 = df.groupby(df["A"], as_index=False).mean()
    assert result.index.name == "A"
    assert "A" in result2
    result = df.groupby([df["A"], df["B"]]).mean()
    result2 = df.groupby([df["A"], df["B"]], as_index=False).mean()
    assert result.index.names == ("A", "B")
    assert "A" in result2
    assert "B" in result2
def test_seriesgroupby_name_attr(df):
# GH 6265
result = df.groupby("A")["C"]
assert result.count().name == "C"
assert result.mean().name == "C"
testFunc = lambda x: np.sum(x) * 2
assert result.agg(testFunc).name == "C"
def test_consistency_name():
    """Series.groupby(Series).count() matches frame-level groupby (GH 12363)."""
    frame = DataFrame(
        {
            "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
            "B": ["one", "one", "two", "two", "two", "two", "one", "two"],
            "C": np.random.randn(8) + 1.0,
            "D": np.arange(8),
        }
    )
    expected = frame.groupby(["A"])["B"].count()
    result = frame["B"].groupby(frame["A"]).count()
    tm.assert_series_equal(result, expected)
def test_groupby_name_propagation(df):
# GH 6124
def summarize(df, name=None):
return Series({"count": 1, "mean": 2, "omissions": 3}, name=name)
def summarize_random_name(df):
# Provide a different name for each Series. In this case, groupby
# should not attempt to propagate the Series name since they are
# inconsistent.
return Series({"count": 1, "mean": 2, "omissions": 3}, name=df.iloc[0]["A"])
metrics = df.groupby("A").apply(summarize)
assert metrics.columns.name is None
metrics = df.groupby("A").apply(summarize, "metrics")
assert metrics.columns.name == "metrics"
metrics = df.groupby("A").apply(summarize_random_name)
assert metrics.columns.name is None
def test_groupby_nonstring_columns():
    """Grouping by an integer column label behaves like grouping by the
    column's values."""
    frame = DataFrame([np.arange(10) for _ in range(10)])
    result = frame.groupby(0).mean()
    expected = frame.groupby(frame[0]).mean()
    tm.assert_frame_equal(result, expected)
def test_groupby_mixed_type_columns():
    """first()/sum() work when column labels mix strings and ints (GH 13432)."""
    frame = DataFrame([[0, 1, 2]], columns=["A", "B", 0])
    expected = DataFrame([[1, 2]], columns=["B", 0], index=Index([0], name="A"))
    tm.assert_frame_equal(frame.groupby("A").first(), expected)
    tm.assert_frame_equal(frame.groupby("A").sum(), expected)
# TODO: Ensure warning isn't emitted in the first place
@pytest.mark.filterwarnings("ignore:Mean of:RuntimeWarning")
def test_cython_grouper_series_bug_noncontig():
    """Aggregating a non-contiguous all-NaN Series must stay all-NaN."""
    data = np.empty((100, 100))
    data.fill(np.nan)
    # A column slice of a 2-D array gives non-contiguous values.
    ser = Series(data[:, 0])
    labels = np.tile(range(10), 10)
    result = ser.groupby(labels).agg(Series.median)
    assert result.isna().all()
def test_series_grouper_noncontig_index():
    """Aggregations that touch index elements of a Series built on a
    non-contiguous index slice must not segfault."""
    index = Index(tm.rands_array(10, 100))
    values = Series(np.random.randn(50), index=index[::2])
    labels = np.random.randint(0, 5, 50)
    # it works!
    grouped = values.groupby(labels)
    # accessing the index elements causes segfault
    f = lambda x: len(set(map(id, x.index)))
    grouped.agg(f)
def test_convert_objects_leave_decimal_alone():
    """Object-dtype Decimal aggregation results must not be coerced to float,
    on both the fast and the forced pure-Python aggregation paths."""
    s = Series(range(5))
    labels = np.array(["a", "b", "c", "d", "e"], dtype="O")
    def convert_fast(x):
        return Decimal(str(x.mean()))
    def convert_force_pure(x):
        # NOTE(review): a non-empty .base means x.values is a view into the
        # original data, i.e. the pure-Python path was taken — confirm.
        assert len(x.values.base) > 0
        return Decimal(str(x.mean()))
    grouped = s.groupby(labels)
    result = grouped.agg(convert_fast)
    assert result.dtype == np.object_
    assert isinstance(result[0], Decimal)
    result = grouped.agg(convert_force_pure)
    assert result.dtype == np.object_
    assert isinstance(result[0], Decimal)
def test_groupby_dtype_inference_empty():
    """An empty frame keeps column dtypes through groupby().first() (GH 6733)."""
    df = DataFrame({"x": [], "range": np.arange(0, dtype="int64")})
    assert df["x"].dtype == np.float64
    result = df.groupby("x").first()
    exp_index = Index([], name="x", dtype=np.float64)
    expected = DataFrame({"range": Series([], index=exp_index, dtype="int64")})
    tm.assert_frame_equal(result, expected, by_blocks=True)
def test_groupby_unit64_float_conversion():
    """uint64-range values must not be cast to float by groupby (GH 30859)."""
    frame = DataFrame({"first": [1], "second": [1], "value": [16148277970000000000]})
    result = frame.groupby(["first", "second"])["value"].max()
    exp_index = MultiIndex.from_product([[1], [1]], names=["first", "second"])
    expected = Series([16148277970000000000], exp_index, name="value")
    tm.assert_series_equal(result, expected)
def test_groupby_list_infer_array_like(df):
    """A plain list whose length matches the frame is treated as an
    array-like grouper, not as a list of column labels; a short list must
    raise, and matching column labels take precedence over values."""
    msg = "The default value of numeric_only"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = df.groupby(list(df["A"])).mean()
        expected = df.groupby(df["A"]).mean()
    tm.assert_frame_equal(result, expected, check_names=False)
    # A list of the wrong length cannot be an array-like grouper, so it is
    # interpreted as column labels and must raise KeyError.
    with pytest.raises(KeyError, match=r"^'foo'$"):
        df.groupby(list(df["A"][:-1]))
    # pathological case of ambiguity: the list entries are also column labels
    df = DataFrame({"foo": [0, 1], "bar": [3, 4], "val": np.random.randn(2)})
    result = df.groupby(["foo", "bar"]).mean()
    expected = df.groupby([df["foo"], df["bar"]]).mean()[["val"]]
    # BUG FIX: result/expected were computed but never compared, so the
    # ambiguity case previously asserted nothing.
    tm.assert_frame_equal(result, expected)
def test_groupby_keys_same_size_as_index():
    """A Grouper whose freq matches the index spacing, combined with a
    column key, groups one row per (timestamp, label) pair (GH 11185)."""
    freq = "s"
    idx = date_range(
        start=Timestamp("2015-09-29T11:34:44-0700"), periods=2, freq=freq
    )
    frame = DataFrame([["A", 10], ["B", 15]], columns=["metric", "values"], index=idx)
    result = frame.groupby([Grouper(level=0, freq=freq), "metric"]).mean()
    expected = frame.set_index([frame.index, "metric"]).astype(float)
    tm.assert_frame_equal(result, expected)
def test_groupby_one_row():
    """Grouping by a missing column label raises KeyError for both one-row
    and multi-row frames (GH 11741)."""
    msg = r"^'Z'$"
    df1 = DataFrame(np.random.randn(1, 4), columns=list("ABCD"))
    with pytest.raises(KeyError, match=msg):
        df1.groupby("Z")
    df2 = DataFrame(np.random.randn(2, 4), columns=list("ABCD"))
    with pytest.raises(KeyError, match=msg):
        df2.groupby("Z")
def test_groupby_nat_exclude():
    """NaT keys are excluded from groups, indices, and get_group (GH 6992)."""
    df = DataFrame(
        {
            "values": np.random.randn(8),
            "dt": [
                np.nan,
                Timestamp("2013-01-01"),
                np.nan,
                Timestamp("2013-02-01"),
                np.nan,
                Timestamp("2013-02-01"),
                np.nan,
                Timestamp("2013-01-01"),
            ],
            "str": [np.nan, "a", np.nan, "a", np.nan, "a", np.nan, "b"],
        }
    )
    grouped = df.groupby("dt")
    expected = [Index([1, 7]), Index([3, 5])]
    keys = sorted(grouped.groups.keys())
    assert len(keys) == 2
    for k, e in zip(keys, expected):
        # grouped.groups keys are np.datetime64 with system tz
        # not to be affected by tz, only compare values
        tm.assert_index_equal(grouped.groups[k], e)
    # confirm obj is not filtered
    tm.assert_frame_equal(grouped.grouper.groupings[0].obj, df)
    assert grouped.ngroups == 2
    expected = {
        Timestamp("2013-01-01 00:00:00"): np.array([1, 7], dtype=np.intp),
        Timestamp("2013-02-01 00:00:00"): np.array([3, 5], dtype=np.intp),
    }
    for k in grouped.indices:
        tm.assert_numpy_array_equal(grouped.indices[k], expected[k])
    tm.assert_frame_equal(grouped.get_group(Timestamp("2013-01-01")), df.iloc[[1, 7]])
    tm.assert_frame_equal(grouped.get_group(Timestamp("2013-02-01")), df.iloc[[3, 5]])
    with pytest.raises(KeyError, match=r"^NaT$"):
        grouped.get_group(pd.NaT)
    # all-missing keys produce zero groups rather than raising
    nan_df = DataFrame(
        {"nan": [np.nan, np.nan, np.nan], "nat": [pd.NaT, pd.NaT, pd.NaT]}
    )
    assert nan_df["nan"].dtype == "float64"
    assert nan_df["nat"].dtype == "datetime64[ns]"
    for key in ["nan", "nat"]:
        grouped = nan_df.groupby(key)
        assert grouped.groups == {}
        assert grouped.ngroups == 0
        assert grouped.indices == {}
        with pytest.raises(KeyError, match=r"^nan$"):
            grouped.get_group(np.nan)
        with pytest.raises(KeyError, match=r"^NaT$"):
            grouped.get_group(pd.NaT)
def test_groupby_two_group_keys_all_nan():
    """All-NaN group keys produce no groups instead of raising (GH 36842)."""
    frame = DataFrame({"a": [np.nan, np.nan], "b": [np.nan, np.nan], "c": [1, 2]})
    assert frame.groupby(["a", "b"]).indices == {}
def test_groupby_2d_malformed():
    """mean() on a frame built column-by-column keeps only the numeric
    columns, in insertion order."""
    d = DataFrame(index=range(2))
    d["group"] = ["g1", "g2"]
    d["zeros"] = [0, 0]
    d["ones"] = [1, 1]
    d["label"] = ["l1", "l2"]
    msg = "The default value of numeric_only"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        tmp = d.groupby(["group"]).mean()
    res_values = np.array([[0.0, 1.0], [0.0, 1.0]])
    tm.assert_index_equal(tmp.columns, Index(["zeros", "ones"]))
    tm.assert_numpy_array_equal(tmp.values, res_values)
def test_int32_overflow():
    """Grouping with enough key combinations to overflow int32 group codes
    must produce the same group count regardless of key order."""
    B = np.concatenate((np.arange(10000), np.arange(10000), np.arange(5000)))
    A = np.arange(25000)
    frame = DataFrame({"A": A, "B": B, "C": A, "D": B, "E": np.random.randn(25000)})
    forward = frame.groupby(["A", "B", "C", "D"]).sum()
    backward = frame.groupby(["D", "C", "B", "A"]).sum()
    assert len(forward) == len(backward)
def test_groupby_sort_multi():
    """sort=True orders the result by the group-key tuples regardless of
    which order the key columns are given in."""
    df = DataFrame(
        {
            "a": ["foo", "bar", "baz"],
            "b": [3, 2, 1],
            "c": [0, 1, 2],
            "d": np.random.randn(3),
        }
    )
    tups = [tuple(row) for row in df[["a", "b", "c"]].values]
    tups = com.asarray_tuplesafe(tups)
    result = df.groupby(["a", "b", "c"], sort=True).sum()
    tm.assert_numpy_array_equal(result.index.values, tups[[1, 2, 0]])
    tups = [tuple(row) for row in df[["c", "a", "b"]].values]
    tups = com.asarray_tuplesafe(tups)
    result = df.groupby(["c", "a", "b"], sort=True).sum()
    tm.assert_numpy_array_equal(result.index.values, tups)
    tups = [tuple(x) for x in df[["b", "c", "a"]].values]
    tups = com.asarray_tuplesafe(tups)
    result = df.groupby(["b", "c", "a"], sort=True).sum()
    tm.assert_numpy_array_equal(result.index.values, tups[[2, 1, 0]])
    df = DataFrame(
        {"a": [0, 1, 2, 0, 1, 2], "b": [0, 0, 0, 1, 1, 1], "d": np.random.randn(6)}
    )
    grouped = df.groupby(["a", "b"])["d"]
    result = grouped.sum()
    def _check_groupby(df, result, keys, field, f=lambda x: x.sum()):
        # compare against grouping by the equivalent tuple array
        tups = [tuple(row) for row in df[keys].values]
        tups = com.asarray_tuplesafe(tups)
        expected = f(df.groupby(tups)[field])
        for k, v in expected.items():
            assert result[k] == v
    _check_groupby(df, result, ["a", "b"], "d")
def test_dont_clobber_name_column():
    """A column literally called "name" must survive a pass-through apply."""
    frame = DataFrame(
        {"key": ["a", "a", "a", "b", "b", "b"], "name": ["foo", "bar", "baz"] * 2}
    )
    result = frame.groupby("key", group_keys=False).apply(lambda g: g)
    tm.assert_frame_equal(result, frame)
def test_skip_group_keys():
    """group_keys=False keeps the original index on apply() results for
    both frame and series groupbys."""
    tsf = tm.makeTimeDataFrame()
    grouped = tsf.groupby(lambda x: x.month, group_keys=False)
    result = grouped.apply(lambda x: x.sort_values(by="A")[:3])
    pieces = [group.sort_values(by="A")[:3] for key, group in grouped]
    expected = pd.concat(pieces)
    tm.assert_frame_equal(result, expected)
    grouped = tsf["A"].groupby(lambda x: x.month, group_keys=False)
    result = grouped.apply(lambda x: x.sort_values()[:3])
    pieces = [group.sort_values()[:3] for key, group in grouped]
    expected = pd.concat(pieces)
    tm.assert_series_equal(result, expected)
def test_no_nonsense_name(float_frame):
# GH #995
s = float_frame["C"].copy()
s.name = None
result = s.groupby(float_frame["A"]).agg(np.sum)
assert result.name is None
def test_multifunc_sum_bug():
    """A dict of {column: func} must keep float columns float (GH #1065)."""
    frame = DataFrame(np.arange(9).reshape(3, 3))
    frame["test"] = 0
    frame["fl"] = [1.3, 1.5, 1.6]
    result = frame.groupby("test").agg({"fl": "sum", 2: "size"})
    assert result["fl"].dtype == np.float64
def test_handle_dict_return_value(df):
def f(group):
return {"max": group.max(), "min": group.min()}
def g(group):
return Series({"max": group.max(), "min": group.min()})
result = df.groupby("A")["C"].apply(f)
expected = df.groupby("A")["C"].apply(g)
assert isinstance(result, Series)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("grouper", ["A", ["A", "B"]])
def test_set_group_name(df, grouper):
    """group.name must be set for apply/aggregate/transform on the frame,
    on a selected column, and inside list/dict aggregations alike."""
    def f(group):
        assert group.name is not None
        return group
    def freduce(group):
        assert group.name is not None
        return group.sum()
    def foo(x):
        return freduce(x)
    grouped = df.groupby(grouper, group_keys=False)
    # make sure all these work
    grouped.apply(f)
    grouped.aggregate(freduce)
    grouped.aggregate({"C": freduce, "D": freduce})
    grouped.transform(f)
    grouped["C"].apply(f)
    grouped["C"].aggregate(freduce)
    grouped["C"].aggregate([freduce, foo])
    grouped["C"].transform(f)
def test_group_name_available_in_inference_pass():
    """group.name is set even during the fast-path inference call, and each
    group is visited exactly once (gh-15062)."""
    frame = DataFrame({"a": [0, 0, 1, 1, 2, 2], "b": np.arange(6)})
    seen = []
    def record(group):
        seen.append(group.name)
        return group.copy()
    frame.groupby("a", sort=False, group_keys=False).apply(record)
    assert seen == [0, 1, 2]
def test_no_dummy_key_names(df):
    """Grouping by raw ndarrays yields unnamed result index levels (gh-1291)."""
    msg = "The default value of numeric_only"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = df.groupby(df["A"].values).sum()
    assert result.index.name is None
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = df.groupby([df["A"].values, df["B"].values]).sum()
    assert result.index.names == (None, None)
def test_groupby_sort_multiindex_series():
    """sort=False must be honoured when grouping a Series by MultiIndex
    levels (GH 9444: sort was not passed through _compress_group_index)."""
    mi = MultiIndex(
        levels=[[1, 2], [1, 2]],
        codes=[[0, 0, 0, 0, 1, 1], [1, 1, 0, 0, 0, 0]],
        names=["a", "b"],
    )
    ser = Series([0, 1, 2, 3, 4, 5], index=mi)
    exp_index = MultiIndex(
        levels=[[1, 2], [1, 2]], codes=[[0, 0, 1], [1, 0, 0]], names=["a", "b"]
    )
    expected = Series([0, 2, 4], index=exp_index)
    result = ser.groupby(level=["a", "b"], sort=False).first()
    tm.assert_series_equal(result, expected)
    result = ser.groupby(level=["a", "b"], sort=True).first()
    tm.assert_series_equal(result, expected.sort_index())
def test_groupby_reindex_inside_function():
    """A closure that indexes into its group inside agg() must not corrupt
    the aggregation result (reindex-inside-apply regression)."""
    periods = 1000
    ind = date_range(start="2012/1/1", freq="5min", periods=periods)
    df = DataFrame({"high": np.arange(periods), "low": np.arange(periods)}, index=ind)
    def agg_before(func, fix=False):
        """
        Run an aggregate func on the subset of data.
        """
        def _func(data):
            d = data.loc[data.index.map(lambda x: x.hour < 11)].dropna()
            if fix:
                data[data.index[0]]
            if len(d) == 0:
                return None
            return func(d)
        return _func
    grouped = df.groupby(lambda x: datetime(x.year, x.month, x.day))
    closure_bad = grouped.agg({"high": agg_before(np.max)})
    closure_good = grouped.agg({"high": agg_before(np.max, True)})
    tm.assert_frame_equal(closure_bad, closure_good)
def test_groupby_multiindex_missing_pair():
    # GH9049
    """Aggregating over both MultiIndex levels must not invent rows for
    level combinations that never occur in the data."""
    frame = DataFrame(
        {
            "group1": ["a", "a", "a", "b"],
            "group2": ["c", "c", "d", "c"],
            "value": [1, 1, 1, 5],
        }
    ).set_index(["group1", "group2"])

    got = frame.groupby(level=["group1", "group2"], sort=True).agg("sum")

    # only the three observed (group1, group2) pairs may appear; ("b", "d")
    # never occurs and must be absent
    observed_pairs = MultiIndex.from_tuples(
        [("a", "c"), ("a", "d"), ("b", "c")], names=["group1", "group2"]
    )
    want = DataFrame([[2], [1], [5]], index=observed_pairs, columns=["value"])
    tm.assert_frame_equal(got, want)
def test_groupby_multiindex_not_lexsorted():
    # GH 11640
    """Grouping a frame whose MultiIndex columns are not lexsorted must warn
    (PerformanceWarning) but produce the same result as the lexsorted
    equivalent; transforms must be order-insensitive regardless of ``sort``."""
    # define the lexsorted version
    lexsorted_mi = MultiIndex.from_tuples(
        [("a", ""), ("b1", "c1"), ("b2", "c2")], names=["b", "c"]
    )
    lexsorted_df = DataFrame([[1, 3, 4]], columns=lexsorted_mi)
    assert lexsorted_df.columns._is_lexsorted()
    # define the non-lexsorted version
    not_lexsorted_df = DataFrame(
        columns=["a", "b", "c", "d"], data=[[1, "b1", "c1", 3], [1, "b2", "c2", 4]]
    )
    not_lexsorted_df = not_lexsorted_df.pivot_table(
        index="a", columns=["b", "c"], values="d"
    )
    not_lexsorted_df = not_lexsorted_df.reset_index()
    assert not not_lexsorted_df.columns._is_lexsorted()
    # compare the results
    tm.assert_frame_equal(lexsorted_df, not_lexsorted_df)
    expected = lexsorted_df.groupby("a").mean()
    with tm.assert_produces_warning(PerformanceWarning):
        result = not_lexsorted_df.groupby("a").mean()
    tm.assert_frame_equal(expected, result)
    # a transforming function should work regardless of sort
    # GH 14776
    df = DataFrame(
        {"x": ["a", "a", "b", "a"], "y": [1, 1, 2, 2], "z": [1, 2, 3, 4]}
    ).set_index(["x", "y"])
    assert not df.index._is_lexsorted()
    for level in [0, 1, [0, 1]]:
        for sort in [False, True]:
            result = df.groupby(level=level, sort=sort, group_keys=False).apply(
                DataFrame.drop_duplicates
            )
            expected = df
            tm.assert_frame_equal(expected, result)
            result = (
                df.sort_index()
                .groupby(level=level, sort=sort, group_keys=False)
                .apply(DataFrame.drop_duplicates)
            )
            expected = df.sort_index()
            tm.assert_frame_equal(expected, result)
def test_index_label_overlaps_location():
    """``filter`` must select rows positionally even when the axis labels
    collide with positional values (GH5375)."""
    keys = list("ababb")

    frame = DataFrame(list("ABCDE"), index=[2, 0, 2, 1, 1])
    actual = frame.groupby(keys).filter(lambda grp: len(grp) > 2)
    tm.assert_frame_equal(actual, frame.iloc[[1, 3, 4]])

    col = frame[0]
    actual = col.groupby(keys).filter(lambda grp: len(grp) > 2)
    tm.assert_series_equal(actual, col.take([1, 3, 4]))

    # repeat with a generic float index to rule out integer-label fastpaths
    frame.index = frame.index.astype(float)
    actual = frame.groupby(keys).filter(lambda grp: len(grp) > 2)
    tm.assert_frame_equal(actual, frame.iloc[[1, 3, 4]])

    col = frame[0]
    actual = col.groupby(keys).filter(lambda grp: len(grp) > 2)
    tm.assert_series_equal(actual, col.take([1, 3, 4]))
def test_transform_doesnt_clobber_ints():
    # GH 7972
    """transform("mean") on integer group columns must match the result of the
    same computation on an all-float frame (no precision lost to int casts)."""
    count = 6
    seq = np.arange(count)

    int_frame = DataFrame({"a": seq // 2, "b": 2.0 * seq, "c": 3.0 * seq})
    float_frame = DataFrame({"a": seq // 2 * 1.0, "b": 2.0 * seq, "c": 3.0 * seq})

    got = int_frame.groupby("a").transform("mean")
    want = float_frame.groupby("a").transform("mean")
    tm.assert_frame_equal(got, want)
@pytest.mark.parametrize(
    "sort_column",
    ["ints", "floats", "strings", ["ints", "floats"], ["ints", "strings"]],
)
@pytest.mark.parametrize(
    "group_column", ["int_groups", "string_groups", ["int_groups", "string_groups"]]
)
def test_groupby_preserves_sort(sort_column, group_column):
    # Test to ensure that groupby always preserves sort order of original
    # object. Issue #8588 and #9651
    """Within each group, rows must keep the order they had in the (sorted)
    original frame, for every combination of sort key and group key dtype."""
    df = DataFrame(
        {
            "int_groups": [3, 1, 0, 1, 0, 3, 3, 3],
            "string_groups": ["z", "a", "z", "a", "a", "g", "g", "g"],
            "ints": [8, 7, 4, 5, 2, 9, 1, 1],
            "floats": [2.3, 5.3, 6.2, -2.4, 2.2, 1.1, 1.1, 5],
            "strings": ["z", "d", "a", "e", "word", "word2", "42", "47"],
        }
    )
    # Try sorting on different types and with different group types
    df = df.sort_values(by=sort_column)
    g = df.groupby(group_column)
    def test_sort(x):
        # each group must already be sorted by sort_column
        tm.assert_frame_equal(x, x.sort_values(by=sort_column))
    g.apply(test_sort)
def test_pivot_table_values_key_error():
    # This test is designed to replicate the error in issue #14938
    """pivot_table with a nonexistent ``values`` column must raise a KeyError
    naming that column, not an opaque internal error."""
    df = DataFrame(
        {
            "eventDate": date_range(datetime.today(), periods=20, freq="M").tolist(),
            "thename": range(0, 20),
        }
    )
    df["year"] = df.set_index("eventDate").index.year
    df["month"] = df.set_index("eventDate").index.month
    with pytest.raises(KeyError, match="'badname'"):
        df.reset_index().pivot_table(
            index="year", columns="month", values="badname", aggfunc="count"
        )
@pytest.mark.parametrize("columns", ["C", ["C"]])
@pytest.mark.parametrize("keys", [["A"], ["A", "B"]])
@pytest.mark.parametrize(
    "values",
    [
        [True],
        [0],
        [0.0],
        ["a"],
        Categorical([0]),
        [to_datetime(0)],
        date_range(0, 1, 1, tz="US/Eastern"),
        pd.array([0], dtype="Int64"),
        pd.array([0], dtype="Float64"),
        pd.array([False], dtype="boolean"),
    ],
    ids=[
        "bool",
        "int",
        "float",
        "str",
        "cat",
        "dt64",
        "dt64tz",
        "Int64",
        "Float64",
        "boolean",
    ],
)
@pytest.mark.parametrize("method", ["attr", "agg", "apply"])
@pytest.mark.parametrize(
    "op", ["idxmax", "idxmin", "mad", "min", "max", "sum", "prod", "skew"]
)
@pytest.mark.filterwarnings("ignore:Dropping invalid columns:FutureWarning")
@pytest.mark.filterwarnings("ignore:.*Select only valid:FutureWarning")
def test_empty_groupby(columns, keys, values, method, op, request, using_array_manager):
    # GH8093 & GH26411
    """Exhaustive matrix test: every reduction op on an *empty* groupby, for
    SeriesGroupBy and DataFrameGroupBy, across dtypes.  The long elif chain
    marks known-broken (dtype, op) combinations as xfail; the branches below
    pin the expected shape/dtype of each empty result."""
    override_dtype = None
    if (
        isinstance(values, Categorical)
        and not isinstance(columns, list)
        and op in ["sum", "prod", "skew", "mad"]
    ):
        # handled below GH#41291
        if using_array_manager and op == "mad":
            right_msg = "Cannot interpret 'CategoricalDtype.* as a data type"
            msg = "Regex pattern \"'Categorical' does not implement.*" + right_msg
            mark = pytest.mark.xfail(raises=AssertionError, match=msg)
            request.node.add_marker(mark)
    elif (
        isinstance(values, Categorical)
        and len(keys) == 1
        and op in ["idxmax", "idxmin"]
    ):
        mark = pytest.mark.xfail(
            raises=ValueError, match="attempt to get arg(min|max) of an empty sequence"
        )
        request.node.add_marker(mark)
    elif (
        isinstance(values, Categorical)
        and len(keys) == 1
        and not isinstance(columns, list)
    ):
        mark = pytest.mark.xfail(
            raises=TypeError, match="'Categorical' does not implement"
        )
        request.node.add_marker(mark)
    elif isinstance(values, Categorical) and len(keys) == 1 and op in ["sum", "prod"]:
        mark = pytest.mark.xfail(
            raises=AssertionError, match="(DataFrame|Series) are different"
        )
        request.node.add_marker(mark)
    elif (
        isinstance(values, Categorical)
        and len(keys) == 2
        and op in ["min", "max", "sum"]
    ):
        mark = pytest.mark.xfail(
            raises=AssertionError, match="(DataFrame|Series) are different"
        )
        request.node.add_marker(mark)
    elif (
        op == "mad"
        and not isinstance(columns, list)
        and isinstance(values, pd.DatetimeIndex)
        and values.tz is not None
        and using_array_manager
    ):
        mark = pytest.mark.xfail(
            raises=TypeError,
            match=r"Cannot interpret 'datetime64\[ns, US/Eastern\]' as a data type",
        )
        request.node.add_marker(mark)
    elif isinstance(values, BooleanArray) and op in ["sum", "prod"]:
        # We expect to get Int64 back for these
        override_dtype = "Int64"
    if isinstance(values[0], bool) and op in ("prod", "sum"):
        # sum/product of bools is an integer
        override_dtype = "int64"
    df = DataFrame({"A": values, "B": values, "C": values}, columns=list("ABC"))
    if hasattr(values, "dtype"):
        # check that we did the construction right
        assert (df.dtypes == values.dtype).all()
    # empty frame, but with the column dtypes above preserved
    df = df.iloc[:0]
    gb = df.groupby(keys, group_keys=False)[columns]
    def get_result():
        # `mad` is deprecated; tolerate (only) that warning when calling it
        warn = FutureWarning if op == "mad" else None
        with tm.assert_produces_warning(
            warn, match="The 'mad' method is deprecated", raise_on_extra_warnings=False
        ):
            if method == "attr":
                return getattr(gb, op)()
            else:
                return getattr(gb, method)(op)
    if columns == "C":
        # i.e. SeriesGroupBy
        if op in ["prod", "sum", "skew"]:
            # ops that require more than just ordered-ness
            if df.dtypes[0].kind == "M":
                # GH#41291
                # datetime64 -> prod and sum are invalid
                if op == "skew":
                    msg = "does not support reduction 'skew'"
                else:
                    msg = "datetime64 type does not support"
                with pytest.raises(TypeError, match=msg):
                    get_result()
                return
        if op in ["prod", "sum", "skew", "mad"]:
            if isinstance(values, Categorical):
                # GH#41291
                if op == "mad":
                    # mad calls mean, which Categorical doesn't implement
                    msg = "does not support reduction 'mean'"
                elif op == "skew":
                    msg = f"does not support reduction '{op}'"
                else:
                    msg = "category type does not support"
                with pytest.raises(TypeError, match=msg):
                    get_result()
                return
    else:
        # ie. DataFrameGroupBy
        if op in ["prod", "sum"]:
            # ops that require more than just ordered-ness
            if df.dtypes[0].kind == "M":
                # GH#41291
                # datetime64 -> prod and sum are invalid
                result = get_result()
                # with numeric_only=True, these are dropped, and we get
                # an empty DataFrame back
                expected = df.set_index(keys)[[]]
                tm.assert_equal(result, expected)
                return
            elif isinstance(values, Categorical):
                # GH#41291
                # Categorical doesn't implement sum or prod
                result = get_result()
                # with numeric_only=True, these are dropped, and we get
                # an empty DataFrame back
                expected = df.set_index(keys)[[]]
                if len(keys) != 1 and op == "prod":
                    # TODO: why just prod and not sum?
                    # Categorical is special without 'observed=True'
                    lev = Categorical([0], dtype=values.dtype)
                    mi = MultiIndex.from_product([lev, lev], names=["A", "B"])
                    expected = DataFrame([], columns=[], index=mi)
                tm.assert_equal(result, expected)
                return
            elif df.dtypes[0] == object:
                # FIXME: the test is actually wrong here, xref #41341
                result = get_result()
                # In this case we have list-of-list, will raise TypeError,
                # and subsequently be dropped as nuisance columns
                expected = df.set_index(keys)[[]]
                tm.assert_equal(result, expected)
                return
        if (
            op in ["mad", "min", "max", "skew"]
            and isinstance(values, Categorical)
            and len(keys) == 1
        ):
            # Categorical doesn't implement, so with numeric_only=True
            # these are dropped and we get an empty DataFrame back
            result = get_result()
            expected = df.set_index(keys)[[]]
            # with numeric_only=True, these are dropped, and we get
            # an empty DataFrame back
            if len(keys) != 1:
                # Categorical is special without 'observed=True'
                lev = Categorical([0], dtype=values.dtype)
                mi = MultiIndex.from_product([lev, lev], names=keys)
                expected = DataFrame([], columns=[], index=mi)
            else:
                # all columns are dropped, but we end up with one row
                # Categorical is special without 'observed=True'
                lev = Categorical([0], dtype=values.dtype)
                ci = Index(lev, name=keys[0])
                expected = DataFrame([], columns=[], index=ci)
            # expected = df.set_index(keys)[columns]
            tm.assert_equal(result, expected)
            return
    # default case: empty result with the original column selection/dtype
    result = get_result()
    expected = df.set_index(keys)[columns]
    if override_dtype is not None:
        expected = expected.astype(override_dtype)
    if len(keys) == 1:
        expected.index.name = keys[0]
    tm.assert_equal(result, expected)
def test_empty_groupby_apply_nonunique_columns():
    # GH#44417
    """apply on an empty frame with duplicated column labels must preserve
    the per-column dtypes."""
    frame = DataFrame(np.random.randn(0, 4))
    frame[3] = frame[3].astype(np.int64)
    frame.columns = [0, 1, 2, 0]

    out = frame.groupby(frame[1], group_keys=False).apply(lambda grp: grp)
    assert (out.dtypes == frame.dtypes).all()
def test_tuple_as_grouping():
    # https://github.com/pandas-dev/pandas/issues/18314
    """A tuple key must refer to a single (tuple-labeled) column, never be
    unpacked into a multi-column grouping."""
    df = DataFrame(
        {
            ("a", "b"): [1, 1, 1, 1],
            "a": [2, 2, 2, 2],
            "b": [2, 2, 2, 2],
            "c": [1, 1, 1, 1],
        }
    )
    # Selecting the plain "a"/"b"/"c" columns drops the ("a", "b") column, so
    # grouping the subset by the tuple must raise KeyError.  The parentheses
    # are escaped: unescaped they act as a regex group, silently matching the
    # weaker string `'a', 'b'` without the surrounding parens.
    with pytest.raises(KeyError, match=r"\('a', 'b'\)"):
        df[["a", "b", "c"]].groupby(("a", "b"))
    result = df.groupby(("a", "b"))["c"].sum()
    expected = Series([4], name="c", index=Index([1], name=("a", "b")))
    tm.assert_series_equal(result, expected)
def test_tuple_correct_keyerror():
    # https://github.com/pandas-dev/pandas/issues/18798
    """Grouping by a tuple absent from MultiIndex columns must raise KeyError
    showing the full tuple, not just one element."""
    df = DataFrame(1, index=range(3), columns=MultiIndex.from_product([[1, 2], [3, 4]]))
    with pytest.raises(KeyError, match=r"^\(7, 8\)$"):
        df.groupby((7, 8)).mean()
def test_groupby_agg_ohlc_non_first():
    # GH 21716
    """``ohlc`` must expand into open/high/low/close columns even when it is
    not the first aggregation in the list."""
    df = DataFrame(
        [[1], [1]],
        columns=Index(["foo"], name="mycols"),
        index=date_range("2018-01-01", periods=2, freq="D", name="dti"),
    )
    expected = DataFrame(
        [[1, 1, 1, 1, 1], [1, 1, 1, 1, 1]],
        columns=MultiIndex.from_tuples(
            (
                ("foo", "sum", "foo"),
                ("foo", "ohlc", "open"),
                ("foo", "ohlc", "high"),
                ("foo", "ohlc", "low"),
                ("foo", "ohlc", "close"),
            ),
            names=["mycols", None, None],
        ),
        index=date_range("2018-01-01", periods=2, freq="D", name="dti"),
    )
    result = df.groupby(Grouper(freq="D")).agg(["sum", "ohlc"])
    tm.assert_frame_equal(result, expected)
def test_groupby_multiindex_nat():
    # GH 9236
    """Grouping on a non-NaT level must keep rows whose *other* level is NaT."""
    tuples = [
        (pd.NaT, "a"),
        (datetime(2012, 1, 2), "a"),
        (datetime(2012, 1, 2), "b"),
        (datetime(2012, 1, 3), "a"),
    ]
    ser = Series(
        [3, 2, 2.5, 4],
        index=MultiIndex.from_tuples(tuples, names=["date", None]),
    )

    got = ser.groupby(level=1).mean()
    # "a" averages 3, 2 and 4; "b" has the single value 2.5
    tm.assert_series_equal(got, Series([3.0, 2.5], index=["a", "b"]))
def test_groupby_empty_list_raises():
    # GH 5289
    """Grouping by a nested empty list must raise a clear length-mismatch
    ValueError rather than crash."""
    values = zip(range(10), range(10))
    df = DataFrame(values, columns=["apple", "b"])
    msg = "Grouper and axis must be same length"
    with pytest.raises(ValueError, match=msg):
        df.groupby([[]])
def test_groupby_multiindex_series_keys_len_equal_group_axis():
    # GH 25704
    """A list of level *names* whose length equals the series length must be
    treated as level names, not as raw grouping values."""
    full_index = MultiIndex.from_arrays(
        [["x", "x"], ["a", "b"], ["k", "k"]],
        names=["first", "second", "third"],
    )
    ser = Series(data=[1, 2], index=full_index)

    got = ser.groupby(["first", "third"]).sum()

    reduced_index = MultiIndex.from_arrays([["x"], ["k"]], names=["first", "third"])
    tm.assert_series_equal(got, Series([3], index=reduced_index))
def test_groupby_groups_in_BaseGrouper():
    # GH 26326
    """Mixing a pandas.Grouper level spec with a plain level name must yield
    the same ``.groups`` as naming both levels directly, in either order."""
    mi = MultiIndex.from_product([["A", "B"], ["C", "D"]], names=["alpha", "beta"])
    df = DataFrame({"foo": [1, 2, 1, 2], "bar": [1, 2, 3, 4]}, index=mi)

    for mixed_keys, plain_keys in (
        ([Grouper(level="alpha"), "beta"], ["alpha", "beta"]),
        (["beta", Grouper(level="alpha")], ["beta", "alpha"]),
    ):
        assert df.groupby(mixed_keys).groups == df.groupby(plain_keys).groups
@pytest.mark.parametrize("group_name", ["x", ["x"]])
def test_groupby_axis_1(group_name):
    # GH 27614
    """groupby(axis=1) must equal transpose -> groupby -> transpose, for a
    scalar or list key, on both flat and MultiIndex columns."""
    df = DataFrame(
        np.arange(12).reshape(3, 4), index=[0, 1, 0], columns=[10, 20, 10, 20]
    )
    df.index.name = "y"
    df.columns.name = "x"
    results = df.groupby(group_name, axis=1).sum()
    expected = df.T.groupby(group_name).sum().T
    tm.assert_frame_equal(results, expected)
    # test on MI column
    iterables = [["bar", "baz", "foo"], ["one", "two"]]
    mi = MultiIndex.from_product(iterables=iterables, names=["x", "x1"])
    df = DataFrame(np.arange(18).reshape(3, 6), index=[0, 1, 0], columns=mi)
    results = df.groupby(group_name, axis=1).sum()
    expected = df.T.groupby(group_name).sum().T
    tm.assert_frame_equal(results, expected)
@pytest.mark.parametrize(
    "op, expected",
    [
        (
            "shift",
            {
                "time": [
                    None,
                    None,
                    Timestamp("2019-01-01 12:00:00"),
                    Timestamp("2019-01-01 12:30:00"),
                    None,
                    None,
                ]
            },
        ),
        (
            "bfill",
            {
                "time": [
                    Timestamp("2019-01-01 12:00:00"),
                    Timestamp("2019-01-01 12:30:00"),
                    Timestamp("2019-01-01 14:00:00"),
                    Timestamp("2019-01-01 14:30:00"),
                    Timestamp("2019-01-01 14:00:00"),
                    Timestamp("2019-01-01 14:30:00"),
                ]
            },
        ),
        (
            "ffill",
            {
                "time": [
                    Timestamp("2019-01-01 12:00:00"),
                    Timestamp("2019-01-01 12:30:00"),
                    Timestamp("2019-01-01 12:00:00"),
                    Timestamp("2019-01-01 12:30:00"),
                    Timestamp("2019-01-01 14:00:00"),
                    Timestamp("2019-01-01 14:30:00"),
                ]
            },
        ),
    ],
)
def test_shift_bfill_ffill_tz(tz_naive_fixture, op, expected):
    # GH19995, GH27992: Check that timezone does not drop in shift, bfill, and ffill
    """shift/bfill/ffill on a grouped tz-aware datetime column must keep the
    timezone; both input and expected are localized with the same tz fixture."""
    tz = tz_naive_fixture
    data = {
        "id": ["A", "B", "A", "B", "A", "B"],
        "time": [
            Timestamp("2019-01-01 12:00:00"),
            Timestamp("2019-01-01 12:30:00"),
            None,
            None,
            Timestamp("2019-01-01 14:00:00"),
            Timestamp("2019-01-01 14:30:00"),
        ],
    }
    df = DataFrame(data).assign(time=lambda x: x.time.dt.tz_localize(tz))
    grouped = df.groupby("id")
    result = getattr(grouped, op)()
    expected = DataFrame(expected).assign(time=lambda x: x.time.dt.tz_localize(tz))
    tm.assert_frame_equal(result, expected)
def test_groupby_only_none_group():
    # see GH21624
    """A frame whose only group key is None used to crash inside transform
    ("ValueError: Length of passed values is 1, index implies 0")."""
    frame = DataFrame({"g": [None], "x": 1})
    got = frame.groupby("g")["x"].transform("sum")
    # the lone None key is dropped, so the single row transforms to NaN
    tm.assert_series_equal(got, Series([np.nan], name="x"))
def test_groupby_duplicate_index():
    # GH#29189 the groupby call here used to raise
    """groupby(level=0) must aggregate rows that share a duplicated label."""
    ser = Series([2, 5, 6, 8], index=[2.0, 4.0, 4.0, 5.0])

    got = ser.groupby(level=0).mean()

    # the two rows labeled 4.0 average to 5.5
    tm.assert_series_equal(got, Series([2, 5.5, 8], index=[2.0, 4.0, 5.0]))
@pytest.mark.filterwarnings("ignore:.*is deprecated.*:FutureWarning")
def test_group_on_empty_multiindex(transformation_func, request):
    # GH 47787
    # With one row, those are transforms so the schema should be the same
    """Transforms on an empty MultiIndex-indexed frame/series must return the
    same schema (columns, dtypes) as the non-empty case truncated to 0 rows."""
    if transformation_func == "tshift":
        mark = pytest.mark.xfail(raises=NotImplementedError)
        request.node.add_marker(mark)
    df = DataFrame(
        data=[[1, Timestamp("today"), 3, 4]],
        columns=["col_1", "col_2", "col_3", "col_4"],
    )
    df["col_3"] = df["col_3"].astype(int)
    df["col_4"] = df["col_4"].astype(int)
    df = df.set_index(["col_1", "col_2"])
    if transformation_func == "fillna":
        args = ("ffill",)
    elif transformation_func == "tshift":
        args = (1, "D")
    else:
        args = ()
    result = df.iloc[:0].groupby(["col_1"]).transform(transformation_func, *args)
    expected = df.groupby(["col_1"]).transform(transformation_func, *args).iloc[:0]
    if transformation_func in ("diff", "shift"):
        # diff/shift upcast to float on the non-empty path; align dtypes
        expected = expected.astype(int)
    tm.assert_equal(result, expected)
    result = (
        df["col_3"].iloc[:0].groupby(["col_1"]).transform(transformation_func, *args)
    )
    expected = (
        df["col_3"].groupby(["col_1"]).transform(transformation_func, *args).iloc[:0]
    )
    if transformation_func in ("diff", "shift"):
        expected = expected.astype(int)
    tm.assert_equal(result, expected)
@pytest.mark.parametrize(
    "idx",
    [
        Index(["a", "a"], name="foo"),
        MultiIndex.from_tuples((("a", "a"), ("a", "a")), names=["foo", "bar"]),
    ],
)
@pytest.mark.filterwarnings("ignore:tshift is deprecated:FutureWarning")
def test_dup_labels_output_shape(groupby_func, idx):
    """Every groupby kernel must preserve duplicated column labels: a (1, 2)
    frame with two identically-labeled columns stays (1, 2)."""
    if groupby_func in {"size", "ngroup", "cumcount"}:
        pytest.skip(f"Not applicable for {groupby_func}")
    # TODO(2.0) Remove after pad/backfill deprecation enforced
    groupby_func = maybe_normalize_deprecated_kernels(groupby_func)
    warn = FutureWarning if groupby_func in ("mad", "tshift") else None
    df = DataFrame([[1, 1]], columns=idx)
    grp_by = df.groupby([0])
    if groupby_func == "tshift":
        # tshift needs a datetime-like index
        df.index = [Timestamp("today")]
        # args.extend([1, "D"])
    args = get_groupby_method_args(groupby_func, df)
    with tm.assert_produces_warning(warn, match="is deprecated"):
        result = getattr(grp_by, groupby_func)(*args)
    assert result.shape == (1, 2)
    tm.assert_index_equal(result.columns, idx)
def test_groupby_crash_on_nunique(axis):
    # Fix following 30253
    """nunique on a level-0 axis groupby must not crash, for either axis and
    for empty selections."""
    dti = date_range("2016-01-01", periods=2, name="foo")
    df = DataFrame({("A", "B"): [1, 2], ("A", "C"): [1, 3], ("D", "B"): [0, 0]})
    df.columns.names = ("bar", "baz")
    df.index = dti
    axis_number = df._get_axis_number(axis)
    if not axis_number:
        # orient the frame so the grouped level ends up on the requested axis
        df = df.T
    gb = df.groupby(axis=axis_number, level=0)
    result = gb.nunique()
    expected = DataFrame({"A": [1, 2], "D": [1, 1]}, index=dti)
    expected.columns.name = "bar"
    if not axis_number:
        expected = expected.T
    tm.assert_frame_equal(result, expected)
    if axis_number == 0:
        # same thing, but empty columns
        gb2 = df[[]].groupby(axis=axis_number, level=0)
        exp = expected[[]]
    else:
        # same thing, but empty rows
        gb2 = df.loc[[]].groupby(axis=axis_number, level=0)
        # default for empty when we can't infer a dtype is float64
        exp = expected.loc[[]].astype(np.float64)
    res = gb2.nunique()
    tm.assert_frame_equal(res, exp)
def test_groupby_list_level():
    # GH 9790
    """``level`` given as a one-element list must behave like a scalar level;
    every row here is its own group, so the mean is an identity."""
    frame = DataFrame(np.arange(0, 9).reshape(3, 3), dtype=float)
    tm.assert_frame_equal(frame.groupby(level=[0]).mean(), frame)
@pytest.mark.parametrize(
    "max_seq_items, expected",
    [
        (5, "{0: [0], 1: [1], 2: [2], 3: [3], 4: [4]}"),
        (4, "{0: [0], 1: [1], 2: [2], 3: [3], ...}"),
        (1, "{0: [0], ...}"),
    ],
)
def test_groups_repr_truncates(max_seq_items, expected):
    # GH 1135
    """repr of ``.groups`` must honor display.max_seq_items and truncate
    with an ellipsis, for both column-name and array group keys."""
    df = DataFrame(np.random.randn(5, 1))
    df["a"] = df.index
    with pd.option_context("display.max_seq_items", max_seq_items):
        result = df.groupby("a").groups.__repr__()
        assert result == expected
        result = df.groupby(np.array(df.a)).groups.__repr__()
        assert result == expected
def test_group_on_two_row_multiindex_returns_one_tuple_key():
    # GH 18451
    """Two rows with identical MultiIndex labels must collapse into a single
    tuple key in ``.indices``."""
    frame = DataFrame(
        [{"a": 1, "b": 2, "c": 99}, {"a": 1, "b": 2, "c": 88}]
    ).set_index(["a", "b"])

    indices = frame.groupby(["a", "b"]).indices

    assert len(indices) == 1
    assert (indices[(1, 2)] == np.array([0, 1], dtype=np.int64)).all()
@pytest.mark.parametrize(
    "klass, attr, value",
    [
        (DataFrame, "level", "a"),
        (DataFrame, "as_index", False),
        (DataFrame, "sort", False),
        (DataFrame, "group_keys", False),
        (DataFrame, "squeeze", True),
        (DataFrame, "observed", True),
        (DataFrame, "dropna", False),
        pytest.param(
            Series,
            "axis",
            1,
            marks=pytest.mark.xfail(
                reason="GH 35443: Attribute currently not passed on to series"
            ),
        ),
        (Series, "level", "a"),
        (Series, "as_index", False),
        (Series, "sort", False),
        (Series, "group_keys", False),
        (Series, "squeeze", True),
        (Series, "observed", True),
        (Series, "dropna", False),
    ],
)
@pytest.mark.filterwarnings(
    "ignore:The `squeeze` parameter is deprecated:FutureWarning"
)
def test_subsetting_columns_keeps_attrs(klass, attr, value):
    # GH 9959 - When subsetting columns, don't drop attributes
    """Column subsetting of a GroupBy (``gb[["b"]]`` / ``gb["b"]``) must carry
    over every groupby configuration attribute."""
    df = DataFrame({"a": [1], "b": [2], "c": [3]})
    if attr != "axis":
        df = df.set_index("a")
    expected = df.groupby("a", **{attr: value})
    result = expected[["b"]] if klass is DataFrame else expected["b"]
    assert getattr(result, attr) == getattr(expected, attr)
def test_subsetting_columns_axis_1():
    # GH 37725
    """Column subsetting must be rejected for axis=1 groupbys (the columns
    themselves are the grouping axis)."""
    g = DataFrame({"A": [1], "B": [2], "C": [3]}).groupby([0, 0, 1], axis=1)
    match = "Cannot subset columns when using axis=1"
    with pytest.raises(ValueError, match=match):
        g[["A", "B"]].sum()
@pytest.mark.parametrize("func", ["sum", "any", "shift"])
def test_groupby_column_index_name_lost(func):
    # GH: 29764 groupby loses index sometimes
    """The columns' Index name must survive reductions and transforms."""
    expected = Index(["a"], name="idx")
    df = DataFrame([[1]], columns=expected)
    df_grouped = df.groupby([1])
    result = getattr(df_grouped, func)().columns
    tm.assert_index_equal(result, expected)
def test_groupby_duplicate_columns():
# GH: 31735
df = DataFrame(
{"A": ["f", "e", "g", "h"], "B": ["a", "b", "c", "d"], "C": [1, 2, 3, 4]}
).astype(object)
df.columns = ["A", "B", "B"]
result = df.groupby([0, 0, 0, 0]).min()
expected = DataFrame([["e", "a", 1]], columns=["A", "B", "B"])
tm.assert_frame_equal(result, expected)
def test_groupby_series_with_tuple_name():
    # GH 37755
    """Tuple-valued Series and index names must survive a level-0 groupby."""
    ser = Series([1, 2, 3, 4], index=[1, 1, 2, 2], name=("a", "a"))
    ser.index.name = ("b", "b")

    got = ser.groupby(level=0).last()

    want = Series([2, 4], index=[1, 2], name=("a", "a"))
    want.index.name = ("b", "b")
    tm.assert_series_equal(got, want)
@pytest.mark.xfail(not IS64, reason="GH#38778: fail on 32-bit system")
@pytest.mark.parametrize(
    "func, values", [("sum", [97.0, 98.0]), ("mean", [24.25, 24.5])]
)
def test_groupby_numerical_stability_sum_mean(func, values):
    # GH#38778
    """sum/mean must be numerically stable when large values cancel: each
    group contains +-5e15/1e16 terms that cancel to leave the small values."""
    data = [1e16, 1e16, 97, 98, -5e15, -5e15, -5e15, -5e15]
    df = DataFrame({"group": [1, 2] * 4, "a": data, "b": data})
    result = getattr(df.groupby("group"), func)()
    expected = DataFrame({"a": values, "b": values}, index=Index([1, 2], name="group"))
    tm.assert_frame_equal(result, expected)
@pytest.mark.xfail(not IS64, reason="GH#38778: fail on 32-bit system")
def test_groupby_numerical_stability_cumsum():
    # GH#38934
    """cumsum must be exact (check_exact=True) across partial sums where large
    magnitudes cancel step by step."""
    data = [1e16, 1e16, 97, 98, -5e15, -5e15, -5e15, -5e15]
    df = DataFrame({"group": [1, 2] * 4, "a": data, "b": data})
    result = df.groupby("group").cumsum()
    exp_data = (
        [1e16] * 2 + [1e16 + 96, 1e16 + 98] + [5e15 + 97, 5e15 + 98] + [97.0, 98.0]
    )
    expected = DataFrame({"a": exp_data, "b": exp_data})
    tm.assert_frame_equal(result, expected, check_exact=True)
def test_groupby_cumsum_skipna_false():
    # GH#46216 don't propagate np.nan above the diagonal
    """Grouped cumsum(skipna=False) must match the ungrouped DataFrame cumsum:
    NaN propagates downward from first occurrence, never upward."""
    data = np.random.randn(5, 5)
    frame = DataFrame(data)
    # place one NaN per column, on the diagonal
    for pos in range(5):
        frame.iloc[pos, pos] = np.nan
    frame["A"] = 1

    got = frame.groupby("A").cumsum(skipna=False)
    want = frame[[0, 1, 2, 3, 4]].cumsum(skipna=False)
    tm.assert_frame_equal(got, want)
def test_groupby_cumsum_timedelta64():
    # GH#46216 don't ignore is_datetimelike in libgroupby.group_cumsum
    """Grouped cumsum on timedelta64 must treat NaT correctly: skipped when
    skipna=True, propagated when skipna=False."""
    dti = date_range("2016-01-01", periods=5)
    ser = Series(dti) - dti[0]
    ser[2] = pd.NaT
    df = DataFrame({"A": 1, "B": ser})
    gb = df.groupby("A")
    res = gb.cumsum(numeric_only=False, skipna=True)
    # NaT stays NaT in place but does not poison later cumulative values
    exp = DataFrame({"B": [ser[0], ser[1], pd.NaT, ser[4], ser[4] * 2]})
    tm.assert_frame_equal(res, exp)
    res = gb.cumsum(numeric_only=False, skipna=False)
    exp = DataFrame({"B": [ser[0], ser[1], pd.NaT, pd.NaT, pd.NaT]})
    tm.assert_frame_equal(res, exp)
def test_groupby_mean_duplicate_index(rand_series_with_duplicate_datetimeindex):
dups = rand_series_with_duplicate_datetimeindex
result = dups.groupby(level=0).mean()
expected = dups.groupby(dups.index).mean()
tm.assert_series_equal(result, expected)
def test_groupby_all_nan_groups_drop():
    # GH 15036
    """When every group key is NaN all rows are dropped; the result is empty
    but must keep the values' int64 dtype (and a float64 index)."""
    ser = Series([1, 2, 3], [np.nan, np.nan, np.nan])

    got = ser.groupby(ser.index).sum()

    want = Series([], index=Index([], dtype=np.float64), dtype=np.int64)
    tm.assert_series_equal(got, want)
@pytest.mark.parametrize("numeric_only", [True, False])
def test_groupby_empty_multi_column(as_index, numeric_only):
    # GH 15106 & GH 41998
    """sum on an empty two-key groupby must return the correct (empty) index
    type and column set for every as_index/numeric_only combination."""
    df = DataFrame(data=[], columns=["A", "B", "C"])
    gb = df.groupby(["A", "B"], as_index=as_index)
    result = gb.sum(numeric_only=numeric_only)
    if as_index:
        index = MultiIndex([[], []], [[], []], names=["A", "B"])
        # object column "C" is dropped when numeric_only=True
        columns = ["C"] if not numeric_only else []
    else:
        index = RangeIndex(0)
        columns = ["A", "B", "C"] if not numeric_only else ["A", "B"]
    expected = DataFrame([], columns=columns, index=index)
    tm.assert_frame_equal(result, expected)
def test_groupby_aggregation_non_numeric_dtype():
    # GH #43108
    """sum on an object column holding lists must concatenate the lists per
    group rather than dropping the column."""
    df = DataFrame(
        [["M", [1]], ["M", [1]], ["W", [10]], ["W", [20]]], columns=["MW", "v"]
    )
    expected = DataFrame(
        {
            "v": [[1, 1], [10, 20]],
        },
        index=Index(["M", "W"], dtype="object", name="MW"),
    )
    gb = df.groupby(by=["MW"])
    result = gb.sum()
    tm.assert_frame_equal(result, expected)
def test_groupby_aggregation_multi_non_numeric_dtype():
    # GH #42395
    """sum() must aggregate multiple timedelta64 columns without dropping any
    of them as "non-numeric"."""
    frame = DataFrame(
        {
            "x": [1, 0, 1, 1, 0],
            "y": [Timedelta(i, "days") for i in range(1, 6)],
            "z": [Timedelta(i * 10, "days") for i in range(1, 6)],
        }
    )

    got = frame.groupby(by=["x"]).sum()

    # x=0 rows are days 2+5 (=7) / 20+50; x=1 rows are days 1+3+4 (=8) / 10+30+40
    want = DataFrame(
        {
            "y": [Timedelta(i, "days") for i in range(7, 9)],
            "z": [Timedelta(i * 10, "days") for i in range(7, 9)],
        },
        index=Index([0, 1], dtype="int64", name="x"),
    )
    tm.assert_frame_equal(got, want)
def test_groupby_aggregation_numeric_with_non_numeric_dtype():
    # GH #43108
    """With the deprecated numeric_only default, sum must warn and drop the
    timedelta column, keeping only the numeric one."""
    df = DataFrame(
        {
            "x": [1, 0, 1, 1, 0],
            "y": [Timedelta(i, "days") for i in range(1, 6)],
            "z": list(range(1, 6)),
        }
    )
    expected = DataFrame(
        {"z": [7, 8]},
        index=Index([0, 1], dtype="int64", name="x"),
    )
    gb = df.groupby(by=["x"])
    # NOTE(review): tied to the numeric_only deprecation; update when enforced
    msg = "The default value of numeric_only"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = gb.sum()
    tm.assert_frame_equal(result, expected)
def test_groupby_filtered_df_std():
    # GH 16174
    """std on a boolean-mask-filtered frame must compute over the filtered
    rows only (bool columns give std 0, floats give the sample std)."""
    dicts = [
        {"filter_col": False, "groupby_col": True, "bool_col": True, "float_col": 10.5},
        {"filter_col": True, "groupby_col": True, "bool_col": True, "float_col": 20.5},
        {"filter_col": True, "groupby_col": True, "bool_col": True, "float_col": 30.5},
    ]
    df = DataFrame(dicts)
    df_filter = df[df["filter_col"] == True]  # noqa:E712
    dfgb = df_filter.groupby("groupby_col")
    result = dfgb.std()
    expected = DataFrame(
        [[0.0, 0.0, 7.071068]],
        columns=["filter_col", "bool_col", "float_col"],
        index=Index([True], name="groupby_col"),
    )
    tm.assert_frame_equal(result, expected)
def test_datetime_categorical_multikey_groupby_indices():
    # GH 26859
    """``.indices`` on a multi-key groupby with a categorical datetime key
    must map each observed key tuple to its row positions (no crash, no
    spurious keys for the -1/NaN categorical code)."""
    df = DataFrame(
        {
            "a": Series(list("abc")),
            "b": Series(
                to_datetime(["2018-01-01", "2018-02-01", "2018-03-01"]),
                dtype="category",
            ),
            "c": Categorical.from_codes([-1, 0, 1], categories=[0, 1]),
        }
    )
    result = df.groupby(["a", "b"]).indices
    expected = {
        ("a", Timestamp("2018-01-01 00:00:00")): np.array([0]),
        ("b", Timestamp("2018-02-01 00:00:00")): np.array([1]),
        ("c", Timestamp("2018-03-01 00:00:00")): np.array([2]),
    }
    # length-1 arrays make the elementwise dict comparison unambiguous
    assert result == expected
def test_rolling_wrong_param_min_period():
    # GH34037
    """A misspelled rolling kwarg ('min_period' vs 'min_periods') must raise a
    TypeError naming the bad keyword."""
    name_l = ["Alice"] * 5 + ["Bob"] * 5
    val_l = [np.nan, np.nan, 1, 2, 3] + [np.nan, 1, 2, 3, 4]
    test_df = DataFrame([name_l, val_l]).T
    test_df.columns = ["name", "val"]
    # NOTE(review): newer Pythons prefix the message with the class name
    # ("Rolling.__init__() got ..."); re.search still matches this pattern.
    result_error_msg = r"__init__\(\) got an unexpected keyword argument 'min_period'"
    with pytest.raises(TypeError, match=result_error_msg):
        test_df.groupby("name")["val"].rolling(window=2, min_period=1).sum()
def test_pad_backfill_deprecation():
    # GH 33396
    """The pad/backfill groupby aliases must emit FutureWarning.
    NOTE(review): this test must be removed once the aliases are dropped in
    favor of ffill/bfill."""
    s = Series([1, 2, 3])
    with tm.assert_produces_warning(FutureWarning, match="backfill"):
        s.groupby(level=0).backfill()
    with tm.assert_produces_warning(FutureWarning, match="pad"):
        s.groupby(level=0).pad()
def test_by_column_values_with_same_starting_value():
    # GH29635
    """Group keys sharing a prefix ("Thomas" vs "Thomas John") must stay
    separate groups; mixed-type aggregation (mode + sum) must work."""
    frame = DataFrame(
        {
            "Name": ["Thomas", "Thomas", "Thomas John"],
            "Credit": [1200, 1300, 900],
            "Mood": ["sad", "happy", "happy"],
        }
    )

    got = frame.groupby(["Name"]).agg({"Mood": Series.mode, "Credit": "sum"})

    want = DataFrame(
        {
            "Mood": [["happy", "sad"], "happy"],
            "Credit": [2500, 900],
            "Name": ["Thomas", "Thomas John"],
        }
    ).set_index("Name")
    tm.assert_frame_equal(got, want)
def test_groupby_none_in_first_mi_level():
    # GH#47348
    """Rows whose first MultiIndex level is None must be dropped from the
    grouped result."""
    level_values = [[None, 1, 0, 1], [2, 3, 2, 3]]
    ser = Series(1, index=MultiIndex.from_arrays(level_values, names=["a", "b"]))

    got = ser.groupby(level=[0, 1]).sum()

    # (None, 2) is dropped; (0, 2) has one row, (1, 3) has two
    want = Series(
        [1, 2], MultiIndex.from_tuples([(0.0, 2), (1.0, 3)], names=["a", "b"])
    )
    tm.assert_series_equal(got, want)
def test_groupby_none_column_name():
    # GH#47348
    """A column literally named ``None`` must be usable as a groupby key."""
    frame = DataFrame({None: [1, 1, 2, 2], "b": [1, 1, 2, 3], "c": [4, 5, 6, 7]})

    got = frame.groupby(by=[None]).sum()

    want = DataFrame({"b": [2, 5], "c": [9, 13]}, index=Index([1, 2], name=None))
    tm.assert_frame_equal(got, want)
def test_single_element_list_grouping():
    # GH 42795
    """Iterating a groupby keyed by a length-1 list must warn about the
    upcoming switch to length-1 tuple keys."""
    df = DataFrame(
        {"a": [np.nan, 1], "b": [np.nan, 5], "c": [np.nan, 2]}, index=["x", "y"]
    )
    msg = (
        "In a future version of pandas, a length 1 "
        "tuple will be returned when iterating over "
        "a groupby with a grouper equal to a list of "
        "length 1. Don't supply a list with a single grouper "
        "to avoid this warning."
    )
    with tm.assert_produces_warning(FutureWarning, match=msg):
        values, _ = next(iter(df.groupby(["a"])))
@pytest.mark.parametrize("func", ["sum", "cumsum"])
def test_groupby_sum_avoid_casting_to_float(func):
    # GH#37493
    """sum/cumsum of an int64 near the 2**63 boundary must stay exact (no
    lossy intermediate cast to float)."""
    val = 922337203685477580
    df = DataFrame({"a": 1, "b": [val]})
    result = getattr(df.groupby("a"), func)() - val
    expected = DataFrame({"b": [0]}, index=Index([1], name="a"))
    if func == "cumsum":
        # cumsum is a transform: positional index, no group index
        expected = expected.reset_index(drop=True)
    tm.assert_frame_equal(result, expected)
def test_groupby_sum_support_mask(any_numeric_ea_dtype):
# GH#37493
df = DataFrame({"a": 1, "b": [1, 2, pd.NA]}, dtype=any_numeric_ea_dtype)
result = df.groupby("a").sum()
expected = DataFrame(
{"b": [3]},
index=Index([1], name="a", dtype=any_numeric_ea_dtype),
dtype=any_numeric_ea_dtype,
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("val, dtype", [(111, "int"), (222, "uint")])
def test_groupby_overflow(val, dtype):
    # GH#37493
    """sum/cumsum on narrow (u)int8 must upcast to 64-bit instead of
    overflowing."""
    df = DataFrame({"a": 1, "b": [val, val]}, dtype=f"{dtype}8")
    result = df.groupby("a").sum()
    expected = DataFrame(
        {"b": [val * 2]},
        index=Index([1], name="a", dtype=f"{dtype}64"),
        dtype=f"{dtype}64",
    )
    tm.assert_frame_equal(result, expected)
    result = df.groupby("a").cumsum()
    expected = DataFrame({"b": [val, val * 2]}, dtype=f"{dtype}64")
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("skipna, val", [(True, 3), (False, pd.NA)])
def test_groupby_cumsum_mask(any_numeric_ea_dtype, skipna, val):
    # GH#37493: cumsum on masked dtypes must honor the ``skipna`` flag.
    dtype = any_numeric_ea_dtype
    frame = DataFrame({"a": 1, "b": [1, pd.NA, 2]}, dtype=dtype)
    cumulative = frame.groupby("a").cumsum(skipna=skipna)
    expected = DataFrame({"b": [1, pd.NA, val]}, dtype=dtype)
    tm.assert_frame_equal(cumulative, expected)
| {
"content_hash": "66a1fd499c420435e0dfb8c17ad7548e",
"timestamp": "",
"source": "github",
"line_count": 2900,
"max_line_length": 88,
"avg_line_length": 32.072758620689655,
"alnum_prop": 0.5703518938620163,
"repo_name": "datapythonista/pandas",
"id": "1af94434ca1fa37af34d078edbddb5bc0e56c012",
"size": "93012",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pandas/tests/groupby/test_groupby.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "131"
},
{
"name": "C",
"bytes": "355524"
},
{
"name": "CSS",
"bytes": "1662"
},
{
"name": "Cython",
"bytes": "1178139"
},
{
"name": "Dockerfile",
"bytes": "1933"
},
{
"name": "HTML",
"bytes": "456449"
},
{
"name": "Makefile",
"bytes": "505"
},
{
"name": "Python",
"bytes": "19048364"
},
{
"name": "Shell",
"bytes": "10511"
},
{
"name": "Smarty",
"bytes": "8486"
},
{
"name": "XSLT",
"bytes": "1196"
}
],
"symlink_target": ""
} |
import random
import tensorflow as tf
from rqsa.agent import Agent
from rqsa.environment import GymEnvironment, SimpleGymEnvironment
from config import get_config
# Command-line flag definitions (TF1-style tf.app.flags).
flags = tf.app.flags
# Model
flags.DEFINE_string('model', 'm1', 'Type of model')
flags.DEFINE_boolean('dueling', False, 'Whether to use dueling deep q-network')
flags.DEFINE_boolean('double_q', False, 'Whether to use double q-learning')
# Environment
flags.DEFINE_string('env_name', 'Breakout-v0', 'The name of gym environment to use')
flags.DEFINE_integer('action_repeat', 4, 'The number of action to be repeated')
# Etc
flags.DEFINE_boolean('use_gpu', True, 'Whether to use gpu or not')
flags.DEFINE_string('gpu_fraction', '1/1', 'idx / # of gpu fraction e.g. 1/3, 2/3, 3/3')
flags.DEFINE_boolean('display', False, 'Whether to do display the game screen or not')
flags.DEFINE_boolean('is_train', True, 'Whether to do training or testing')
flags.DEFINE_integer('random_seed', 123, 'Value of random seed')
FLAGS = flags.FLAGS
# Set random seed
# Seed both TensorFlow's and Python's RNGs so runs are reproducible.
tf.set_random_seed(FLAGS.random_seed)
random.seed(FLAGS.random_seed)
# Fail fast at import time if the GPU-fraction flag was explicitly blanked.
if FLAGS.gpu_fraction == '':
  raise ValueError("--gpu_fraction should be defined")
def calc_gpu_fraction(fraction_string):
  """Convert an 'idx/num' flag value into a per-process GPU memory fraction.

  The fraction grows as idx approaches num: '1/3' -> 1/3, '2/3' -> 1/2,
  '3/3' -> 1 (this matches the original 1 / (num - idx + 1) formula).

  Args:
    fraction_string: string of the form 'idx/num', e.g. '1/3'.

  Returns:
    float: fraction of GPU memory this process may claim.
  """
  idx, num = fraction_string.split('/')
  idx, num = float(idx), float(num)
  fraction = 1 / (num - idx + 1)
  # Parenthesized print works identically under Python 2 and 3; the original
  # bare `print` statement was a SyntaxError on Python 3.
  print(" [*] GPU : %.4f" % fraction)
  return fraction
def main(_):
  """Entry point: build a session, environment and agent, then train or play."""
  fraction = calc_gpu_fraction(FLAGS.gpu_fraction)
  gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=fraction)
  session_config = tf.ConfigProto(gpu_options=gpu_options)
  with tf.Session(config=session_config) as sess:
    config = get_config(FLAGS) or FLAGS
    if config.env_type == 'simple':
      env = SimpleGymEnvironment(config)
    else:
      env = GymEnvironment(config)
    if not FLAGS.use_gpu:
      config.cnn_format = 'NHWC'
    agent = Agent(config, env, sess)
    if FLAGS.is_train:
      agent.train()
    else:
      agent.play()
if __name__ == '__main__':
  tf.app.run()  # Parses command-line flags, then dispatches to main().
| {
"content_hash": "1699aba2b9193f1e8f04515df72b5ad5",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 88,
"avg_line_length": 29.53030303030303,
"alnum_prop": 0.694202154951257,
"repo_name": "jonathanmei/rqsa",
"id": "86142d5f1b375dd95db23b5f5ccf39aa5b6321c8",
"size": "1949",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "69810"
}
],
"symlink_target": ""
} |
import datetime
from decimal import Decimal
import warnings
try:
import unittest2 as unittest
except ImportError:
import unittest
from agate import Table
from agate.aggregations import *
from agate.data_types import *
from agate.exceptions import *
from agate.warns import NullCalculationWarning
class TestSimpleAggregation(unittest.TestCase):
    """Exercises the simple (non-statistical) aggregations: Summary,
    HasNulls, Any, All and the Count variants."""
    def setUp(self):
        """Build a small three-column table; column 'one' contains a null."""
        self.rows = (
            (1, 2, 'a'),
            (2, 3, 'b'),
            (None, 4, 'c')
        )
        self.number_type = Number()
        self.text_type = Text()
        self.column_names = ['one', 'two', 'three']
        self.column_types = [self.number_type, self.number_type, self.text_type]
        self.table = Table(self.rows, self.column_names, self.column_types)
    def test_summary(self):
        """A custom Summary aggregation runs its callable over a column."""
        summary = Summary('one', Boolean(), lambda c: 2 in c)
        self.assertIsInstance(summary.get_aggregate_data_type(None), Boolean)
        summary.validate(self.table)
        self.assertEqual(summary.run(self.table), True)
    def test_has_nulls(self):
        """HasNulls reports True for the column containing None."""
        has_nulls = HasNulls('one')
        self.assertIsInstance(has_nulls.get_aggregate_data_type(None), Boolean)
        has_nulls.validate(self.table)
        self.assertEqual(has_nulls.run(self.table), True)
    def test_any(self):
        """Any on a non-boolean column requires an explicit predicate."""
        with self.assertRaises(ValueError):
            Any('one').validate(self.table)
        Any('one', lambda d: d).validate(self.table)
        self.assertIsInstance(Any('one').get_aggregate_data_type(None), Boolean)
        self.assertEqual(Any('one', lambda d: d == 2).run(self.table), True)
        self.assertEqual(Any('one', lambda d: d == 5).run(self.table), False)
    def test_all(self):
        """All on a non-boolean column requires an explicit predicate."""
        with self.assertRaises(ValueError):
            All('one').validate(self.table)
        All('one', lambda d: d).validate(self.table)
        self.assertIsInstance(All('one').get_aggregate_data_type(None), Boolean)
        self.assertEqual(All('one', lambda d: d != 5).run(self.table), True)
        self.assertEqual(All('one', lambda d: d == 2).run(self.table), False)
    def test_count(self):
        """Bare Count() counts all rows, nulls included."""
        rows = (
            (1, 2, 'a'),
            (2, 3, 'b'),
            (None, 4, 'c'),
            (1, 2, 'a'),
            (1, 2, 'a')
        )
        table = Table(rows, self.column_names, self.column_types)
        self.assertIsInstance(Count().get_aggregate_data_type(table), Number)
        # NOTE(review): validate() is called with self.table while run() uses
        # the local 5-row table, and the run assertion is duplicated — looks
        # like copy-paste; confirm intent.
        Count().validate(self.table)
        self.assertEqual(Count().run(table), 5)
        self.assertEqual(Count().run(table), 5)
    def test_count_column(self):
        """Count('col') excludes nulls in that column."""
        rows = (
            (1, 2, 'a'),
            (2, 3, 'b'),
            (None, 4, 'c'),
            (1, 2, 'a'),
            (1, 2, 'a')
        )
        table = Table(rows, self.column_names, self.column_types)
        self.assertIsInstance(Count('one').get_aggregate_data_type(table), Number)
        Count('one').validate(self.table)
        self.assertEqual(Count('one').run(table), 4)
        self.assertEqual(Count('two').run(table), 5)
    def test_count_value(self):
        """Count('col', value) counts occurrences of a specific value,
        including None."""
        rows = (
            (1, 2, 'a'),
            (2, 3, 'b'),
            (None, 4, 'c'),
            (1, 2, 'a'),
            (1, 2, 'a')
        )
        table = Table(rows, self.column_names, self.column_types)
        self.assertIsInstance(Count('one', 1).get_aggregate_data_type(table), Number)
        Count('one', 1).validate(self.table)
        self.assertEqual(Count('one', 1).run(table), 3)
        self.assertEqual(Count('one', 4).run(table), 0)
        self.assertEqual(Count('one', None).run(table), 1)
class TestBooleanAggregation(unittest.TestCase):
    """Verify Any/All aggregations over boolean columns containing nulls."""
    def _bool_table(self, values):
        """Build a single-column boolean table named 'test' from *values*."""
        return Table([[value] for value in values], ['test'], [Boolean()])
    def test_any(self):
        table = self._bool_table([True, False, None])
        Any('test').validate(table)
        self.assertEqual(Any('test').run(table), True)
        table = self._bool_table([False, False, None])
        Any('test').validate(table)
        self.assertEqual(Any('test').run(table), False)
    def test_all(self):
        table = self._bool_table([True, True, None])
        All('test').validate(table)
        self.assertEqual(All('test').run(table), False)
        table = self._bool_table([True, True, True])
        All('test').validate(table)
        self.assertEqual(All('test').run(table), True)
class TestDateTimeAggregation(unittest.TestCase):
    """Verify Min/Max aggregations over a datetime column."""
    def _stamp_table(self):
        """Build a one-column table of three close-together timestamps."""
        stamps = [
            datetime.datetime(1994, 3, 3, 6, 31),
            datetime.datetime(1994, 3, 3, 6, 30, 30),
            datetime.datetime(1994, 3, 3, 6, 30),
        ]
        return Table([[stamp] for stamp in stamps], ['test'], [DateTime()])
    def test_min(self):
        table = self._stamp_table()
        self.assertIsInstance(Min('test').get_aggregate_data_type(table), DateTime)
        Min('test').validate(table)
        self.assertEqual(Min('test').run(table), datetime.datetime(1994, 3, 3, 6, 30))
    def test_max(self):
        table = self._stamp_table()
        self.assertIsInstance(Max('test').get_aggregate_data_type(table), DateTime)
        Max('test').validate(table)
        self.assertEqual(Max('test').run(table), datetime.datetime(1994, 3, 3, 6, 31))
class TestNumberAggregation(unittest.TestCase):
    """Exercises the numeric aggregations (sum, min/max, statistics and the
    percentile/quartile/quintile/decile families) on Decimal-valued tables.

    Several tests mutate the process-wide warnings filter via
    warnings.simplefilter; NOTE(review): that state leaks across tests —
    confirm test isolation is acceptable here.
    """
    def setUp(self):
        """Build a three-column table; column 'one' contains a null."""
        self.rows = (
            (Decimal('1.1'), Decimal('2.19'), 'a'),
            (Decimal('2.7'), Decimal('3.42'), 'b'),
            (None, Decimal('4.1'), 'c'),
            (Decimal('2.7'), Decimal('3.42'), 'c')
        )
        self.number_type = Number()
        self.text_type = Text()
        self.column_names = ['one', 'two', 'three']
        self.column_types = [self.number_type, self.number_type, self.text_type]
        self.table = Table(self.rows, self.column_names, self.column_types)
    def test_max_precision(self):
        """MaxPrecision rejects text columns and reports decimal places."""
        with self.assertRaises(DataTypeError):
            MaxPrecision('three').validate(self.table)
        self.assertIsInstance(MaxPrecision('one').get_aggregate_data_type(self.table), Number)
        MaxPrecision('one').validate(self.table)
        self.assertEqual(MaxPrecision('one').run(self.table), 1)
        self.assertEqual(MaxPrecision('two').run(self.table), 2)
    def test_sum(self):
        """Sum rejects text columns and skips nulls."""
        with self.assertRaises(DataTypeError):
            Sum('three').validate(self.table)
        Sum('one').validate(self.table)
        self.assertEqual(Sum('one').run(self.table), Decimal('6.5'))
        self.assertEqual(Sum('two').run(self.table), Decimal('13.13'))
    def test_min(self):
        with self.assertRaises(DataTypeError):
            Min('three').validate(self.table)
        Min('one').validate(self.table)
        self.assertEqual(Min('one').run(self.table), Decimal('1.1'))
        self.assertEqual(Min('two').run(self.table), Decimal('2.19'))
    def test_max(self):
        with self.assertRaises(DataTypeError):
            Max('three').validate(self.table)
        Max('one').validate(self.table)
        self.assertEqual(Max('one').run(self.table), Decimal('2.7'))
        self.assertEqual(Max('two').run(self.table), Decimal('4.1'))
    def test_mean(self):
        """Mean warns on a column with nulls; the filter turns it into an
        exception so it can be asserted."""
        warnings.simplefilter('error')
        with self.assertRaises(NullCalculationWarning):
            Mean('one').validate(self.table)
        with self.assertRaises(DataTypeError):
            Mean('three').validate(self.table)
        Mean('two').validate(self.table)
        self.assertEqual(Mean('two').run(self.table), Decimal('3.2825'))
    def test_mean_with_nulls(self):
        """With the warning suppressed, Mean ignores nulls in the column."""
        warnings.simplefilter('ignore')
        Mean('one').validate(self.table)
        self.assertAlmostEqual(Mean('one').run(self.table), Decimal('2.16666666'))
    def test_median(self):
        warnings.simplefilter('error')
        with self.assertRaises(NullCalculationWarning):
            Median('one').validate(self.table)
        with self.assertRaises(DataTypeError):
            Median('three').validate(self.table)
        Median('two').validate(self.table)
        self.assertIsInstance(Median('two').get_aggregate_data_type(self.table), Number)
        self.assertEqual(Median('two').run(self.table), Decimal('3.42'))
    def test_mode(self):
        warnings.simplefilter('error')
        with self.assertRaises(NullCalculationWarning):
            Mode('one').validate(self.table)
        with self.assertRaises(DataTypeError):
            Mode('three').validate(self.table)
        Mode('two').validate(self.table)
        self.assertIsInstance(Mode('two').get_aggregate_data_type(self.table), Number)
        self.assertEqual(Mode('two').run(self.table), Decimal('3.42'))
    def test_iqr(self):
        warnings.simplefilter('error')
        with self.assertRaises(NullCalculationWarning):
            IQR('one').validate(self.table)
        with self.assertRaises(DataTypeError):
            IQR('three').validate(self.table)
        IQR('two').validate(self.table)
        self.assertIsInstance(IQR('two').get_aggregate_data_type(self.table), Number)
        self.assertEqual(IQR('two').run(self.table), Decimal('0.955'))
    def test_variance(self):
        """Sample variance (n - 1 denominator)."""
        warnings.simplefilter('error')
        with self.assertRaises(NullCalculationWarning):
            Variance('one').validate(self.table)
        with self.assertRaises(DataTypeError):
            Variance('three').validate(self.table)
        Variance('two').validate(self.table)
        self.assertIsInstance(Variance('two').get_aggregate_data_type(self.table), Number)
        self.assertEqual(
            Variance('two').run(self.table).quantize(Decimal('0.0001')),
            Decimal('0.6332')
        )
    def test_population_variance(self):
        """Population variance (n denominator)."""
        warnings.simplefilter('error')
        with self.assertRaises(NullCalculationWarning):
            PopulationVariance('one').validate(self.table)
        with self.assertRaises(DataTypeError):
            PopulationVariance('three').validate(self.table)
        PopulationVariance('two').validate(self.table)
        self.assertIsInstance(PopulationVariance('two').get_aggregate_data_type(self.table), Number)
        self.assertEqual(
            PopulationVariance('two').run(self.table).quantize(Decimal('0.0001')),
            Decimal('0.4749')
        )
    def test_stdev(self):
        warnings.simplefilter('error')
        with self.assertRaises(NullCalculationWarning):
            StDev('one').validate(self.table)
        with self.assertRaises(DataTypeError):
            StDev('three').validate(self.table)
        StDev('two').validate(self.table)
        self.assertIsInstance(StDev('two').get_aggregate_data_type(self.table), Number)
        self.assertAlmostEqual(
            StDev('two').run(self.table).quantize(Decimal('0.0001')),
            Decimal('0.7958')
        )
    def test_population_stdev(self):
        warnings.simplefilter('error')
        with self.assertRaises(NullCalculationWarning):
            PopulationStDev('one').validate(self.table)
        with self.assertRaises(DataTypeError):
            PopulationStDev('three').validate(self.table)
        PopulationStDev('two').validate(self.table)
        self.assertIsInstance(PopulationStDev('two').get_aggregate_data_type(self.table), Number)
        self.assertAlmostEqual(
            PopulationStDev('two').run(self.table).quantize(Decimal('0.0001')),
            Decimal('0.6891')
        )
    def test_mad(self):
        """Median absolute deviation."""
        warnings.simplefilter('error')
        with self.assertRaises(NullCalculationWarning):
            MAD('one').validate(self.table)
        with self.assertRaises(DataTypeError):
            MAD('three').validate(self.table)
        MAD('two').validate(self.table)
        self.assertIsInstance(MAD('two').get_aggregate_data_type(self.table), Number)
        self.assertAlmostEqual(MAD('two').run(self.table), Decimal('0'))
    def test_percentiles(self):
        """Percentile values over 1..1000 hit the expected interpolations."""
        warnings.simplefilter('error')
        with self.assertRaises(NullCalculationWarning):
            Percentiles('one').validate(self.table)
        with self.assertRaises(DataTypeError):
            Percentiles('three').validate(self.table)
        Percentiles('two').validate(self.table)
        rows = [(n,) for n in range(1, 1001)]
        table = Table(rows, ['ints'], [self.number_type])
        percentiles = Percentiles('ints').run(table)
        self.assertEqual(percentiles[0], Decimal('1'))
        self.assertEqual(percentiles[25], Decimal('250.5'))
        self.assertEqual(percentiles[50], Decimal('500.5'))
        self.assertEqual(percentiles[75], Decimal('750.5'))
        self.assertEqual(percentiles[99], Decimal('990.5'))
        self.assertEqual(percentiles[100], Decimal('1000'))
    def test_percentiles_locate(self):
        """locate() maps a value back to its percentile; out-of-range raises."""
        rows = [(n,) for n in range(1, 1001)]
        table = Table(rows, ['ints'], [self.number_type])
        percentiles = Percentiles('ints').run(table)
        self.assertEqual(percentiles.locate(251), Decimal('25'))
        self.assertEqual(percentiles.locate(260), Decimal('25'))
        self.assertEqual(percentiles.locate(261), Decimal('26'))
        with self.assertRaises(ValueError):
            percentiles.locate(0)
        with self.assertRaises(ValueError):
            percentiles.locate(1012)
    def test_quartiles(self):
        """
        CDF quartile tests from:
        http://www.amstat.org/publications/jse/v14n3/langford.html#Parzen1979
        """
        warnings.simplefilter('error')
        with self.assertRaises(NullCalculationWarning):
            Quartiles('one').validate(self.table)
        with self.assertRaises(DataTypeError):
            Quartiles('three').validate(self.table)
        Quartiles('two').validate(self.table)
        # N = 4
        rows = [(n,) for n in [1, 2, 3, 4]]
        table = Table(rows, ['ints'], [self.number_type])
        quartiles = Quartiles('ints').run(table)
        for i, v in enumerate(['1', '1.5', '2.5', '3.5', '4']):
            self.assertEqual(quartiles[i], Decimal(v))
        # N = 5
        rows = [(n,) for n in [1, 2, 3, 4, 5]]
        table = Table(rows, ['ints'], [self.number_type])
        quartiles = Quartiles('ints').run(table)
        for i, v in enumerate(['1', '2', '3', '4', '5']):
            self.assertEqual(quartiles[i], Decimal(v))
        # N = 6
        rows = [(n,) for n in [1, 2, 3, 4, 5, 6]]
        table = Table(rows, ['ints'], [self.number_type])
        quartiles = Quartiles('ints').run(table)
        for i, v in enumerate(['1', '2', '3.5', '5', '6']):
            self.assertEqual(quartiles[i], Decimal(v))
        # N = 7
        rows = [(n,) for n in [1, 2, 3, 4, 5, 6, 7]]
        table = Table(rows, ['ints'], [self.number_type])
        quartiles = Quartiles('ints').run(table)
        for i, v in enumerate(['1', '2', '4', '6', '7']):
            self.assertEqual(quartiles[i], Decimal(v))
        # N = 8 (doubled)
        rows = [(n,) for n in [1, 1, 2, 2, 3, 3, 4, 4]]
        table = Table(rows, ['ints'], [self.number_type])
        quartiles = Quartiles('ints').run(table)
        for i, v in enumerate(['1', '1.5', '2.5', '3.5', '4']):
            self.assertEqual(quartiles[i], Decimal(v))
        # N = 10 (doubled)
        rows = [(n,) for n in [1, 1, 2, 2, 3, 3, 4, 4, 5, 5]]
        table = Table(rows, ['ints'], [self.number_type])
        quartiles = Quartiles('ints').run(table)
        for i, v in enumerate(['1', '2', '3', '4', '5']):
            self.assertEqual(quartiles[i], Decimal(v))
        # N = 12 (doubled)
        rows = [(n,) for n in [1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6]]
        table = Table(rows, ['ints'], [self.number_type])
        quartiles = Quartiles('ints').run(table)
        for i, v in enumerate(['1', '2', '3.5', '5', '6']):
            self.assertEqual(quartiles[i], Decimal(v))
        # N = 14 (doubled)
        rows = [(n,) for n in [1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7]]
        table = Table(rows, ['ints'], [self.number_type])
        quartiles = Quartiles('ints').run(table)
        for i, v in enumerate(['1', '2', '4', '6', '7']):
            self.assertEqual(quartiles[i], Decimal(v))
    def test_quartiles_locate(self):
        """
        CDF quartile tests from:
        http://www.amstat.org/publications/jse/v14n3/langford.html#Parzen1979
        """
        # N = 10
        rows = [(n,) for n in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]
        table = Table(rows, ['ints'], [self.number_type])
        quartiles = Quartiles('ints').run(table)
        self.assertEqual(quartiles.locate(2), Decimal('0'))
        self.assertEqual(quartiles.locate(4), Decimal('1'))
        self.assertEqual(quartiles.locate(6), Decimal('2'))
        self.assertEqual(quartiles.locate(8), Decimal('3'))
        with self.assertRaises(ValueError):
            quartiles.locate(0)
        with self.assertRaises(ValueError):
            quartiles.locate(11)
    def test_quintiles(self):
        """Quintiles validates null/text columns; result itself is unchecked."""
        warnings.simplefilter('error')
        with self.assertRaises(NullCalculationWarning):
            Quintiles('one').validate(self.table)
        with self.assertRaises(DataTypeError):
            Quintiles('three').validate(self.table)
        Quintiles('two').validate(self.table)
        rows = [(n,) for n in range(1, 1001)]
        table = Table(rows, ['ints'], [self.number_type])
        quintiles = Quintiles('ints').run(table)  # noqa
    def test_deciles(self):
        """Deciles validates null/text columns; result itself is unchecked."""
        warnings.simplefilter('error')
        with self.assertRaises(NullCalculationWarning):
            Deciles('one').validate(self.table)
        with self.assertRaises(DataTypeError):
            Deciles('three').validate(self.table)
        Deciles('two').validate(self.table)
        rows = [(n,) for n in range(1, 1001)]
        table = Table(rows, ['ints'], [self.number_type])
        deciles = Deciles('ints').run(table)  # noqa
class TestTextAggregation(unittest.TestCase):
    """Verify the MaxLength aggregation and its column-type validation."""
    def test_max_length(self):
        words = ['a', 'gobble', 'w']
        table = Table([[word] for word in words], ['test'], [Text()])
        MaxLength('test').validate(table)
        self.assertEqual(MaxLength('test').run(table), 6)
    def test_max_length_invalid(self):
        table = Table([[n] for n in (1, 2, 3)], ['test'], [Number()])
        with self.assertRaises(DataTypeError):
            MaxLength('test').validate(table)
| {
"content_hash": "3707ac84006e74ade474466cfe41deb5",
"timestamp": "",
"source": "github",
"line_count": 603,
"max_line_length": 100,
"avg_line_length": 31.22553897180763,
"alnum_prop": 0.5732646449625578,
"repo_name": "JoeGermuska/agate",
"id": "5b1e805ffb69dd7d85c2f23e1e3fad6edff9211b",
"size": "18852",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_aggregations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "8380"
},
{
"name": "Python",
"bytes": "374977"
}
],
"symlink_target": ""
} |
"""Commands used for debugging purposes."""
import imp
import sys
import sublime
import sublime_plugin
class BtReloadModules(sublime_plugin.WindowCommand):
    """Reloads every loaded BehaveToolkit module in place."""
    def run(self):
        """Reload all BehaveToolkit submodules, then re-signal plugin load."""
        targets = [name for name in sys.modules.keys()
                   if name.startswith('BehaveToolkit')]
        # Two passes — presumably so modules importing each other end up
        # referencing freshly reloaded copies.
        for _ in range(2):
            for name in targets:
                print('[BehaveToolkit] Reloading submodule: ', name)
                imp.reload(sys.modules[name])
        sublime.sublime_api.plugin_host_ready()
    def is_visible(self):
        """This command is only visible during debug mode."""
        prefs = sublime.load_settings('BehaveToolkit.sublime-settings')
        return prefs.get('debug')
| {
"content_hash": "edd67cae9d5f2179d494930abc14d307",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 74,
"avg_line_length": 28.25925925925926,
"alnum_prop": 0.6330275229357798,
"repo_name": "mixxorz/BehaveToolkit",
"id": "63ff834d1ea2e446a26d569c68ed0e83c00b9872",
"size": "763",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "behave_toolkit/commands/debug.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cucumber",
"bytes": "721"
},
{
"name": "JavaScript",
"bytes": "3391"
},
{
"name": "Python",
"bytes": "26892"
}
],
"symlink_target": ""
} |
import configparser
from typing import (List)
from ..lib import (look_up)
runners = [
# ======================================================================= #
# Single threaded runners
# ======================================================================= #
{
'name': 'single',
'features': [],
'description': """
Run a workflow in a single thread. This is the absolute minimal
runner, consisting of a single queue for jobs and a worker running
jobs every time a result is pulled.""",
'command': 'noodles.run.single',
'arguments': {}
},
{
'name': 'single',
'features': ['display'],
'description': """
Adds a display to the single runner. Everything still runs in a
single thread. Every time a job is pulled by the worker, a message
goes to the display routine; when the job is finished the result is
sent to the display routine.""",
'command': 'noodles.run.single_with_display',
'arguments': {
'display': {
'default': 'noodles.display.NCDisplay',
'reader': 'look-up',
'help': 'the display routine'
}
}
},
# ======================================================================= #
# Multi-threaded runners
# ======================================================================= #
{
'name': 'parallel',
'features': [],
'command': 'noodles.run.parallel',
'arguments': {
'n_threads': {
'default': '1',
'reader': 'integer'
}
}
},
{
'name': 'parallel',
'features': ['display'],
'command': 'noodles.run.parallel_with_display',
'arguments': {
'n_threads': {
'default': '1',
'reader': 'integer'
},
'display': {
'default': 'noodles.display.NCDisplay',
'reader': 'look-up'
}
}
},
{
'name': 'parallel',
'features': ['prov', 'display'],
'description': """
Run a workflow in `n_threads` parallel threads. Now we replaced the
single worker with a thread-pool of workers.
This version works with the JobDB to cache results; however we only
store the jobs that are hinted with the 'store' keyword, unless
`cache_all` is set to `True`.""",
'command': 'noodles.run.run_with_prov.run_parallel_opt',
'arguments': {
'n_threads': {
'default': '1',
'reader': 'integer',
'help': 'the number of threads to run'
},
'registry': {
'default': 'noodles.serial.base',
'reader': 'look-up',
'help': 'the serialisation registry to use'
},
'display': {
'default': 'noodles.display.NCDisplay',
'reader': 'look-up',
'help': 'the display to use'
},
'database': {
'default': 'TinyDB',
'help': 'the database backend for the job cache'
},
'cache_file': {
'default': 'cache.json',
'help': 'the file used to store the job cache'
},
'cache_all': {
'default': 'False',
'reader': 'boolean',
'help': 'set this if you want to store all jobs in cache'
}
}
},
{
'name': 'xenon',
'features': ['prov', 'display'],
'command': 'noodles.run.xenon.run_xenon_prov',
'arguments': {
}
},
{
'name': 'process',
'features': ['msgpack'],
'command': 'noodles.run.process.run_process',
'arguments': {
}
}
]
def find_runner(name: str, features: List[str]) -> dict:
    """Select the runner entry matching *name* that supports all *features*.

    Among matching entries the one with the fewest feature tags wins, i.e.
    the most minimal runner that still covers the request.

    Args:
        name: runner name, e.g. 'parallel'.
        features: required feature tags, e.g. ['prov', 'display']. Any
            iterable is accepted; it is materialised once, so one-shot
            iterators (callers pass a ``map`` object) are handled correctly.

    Returns:
        The matching runner description dict from ``runners``.

    Raises:
        ValueError: if no runner matches *name* and *features*.
    """
    # Materialise: the original evaluated a lazy iterable once per candidate,
    # so a generator/map argument was exhausted after the first entry.
    required = list(features)
    candidates = [
        runner for runner in runners
        if runner['name'] == name
        and all(feature in runner['features'] for feature in required)
    ]
    if not candidates:
        raise ValueError(
            "no runner named %r supporting features %r" % (name, required))
    # BUG FIX: the original called min(iterable, lambda) — the key function
    # was passed positionally, which raises TypeError; it must be `key=`.
    return min(candidates, key=lambda runner: len(runner['features']))
def run_with_config(config_file, workflow, machine=None):
    """Run *workflow* using the runner selected by an ini-style config file.

    Falls back to the single-threaded local runner when no machine is
    configured. Returns whatever the selected runner returns.
    """
    config = configparser.ConfigParser(
        interpolation=configparser.ExtendedInterpolation())
    config.read(config_file)
    # The explicit `machine` argument only applies when the config file does
    # not name one in its 'default' section.
    machine = config.get('default', 'machine', fallback=machine)
    if machine is None:
        print("No machine given, running local in single thread.")
        runner = find_runner(name='single', features=[])
        settings = {}
    else:
        M = config['Machines']
        # NOTE(review): SectionProxy.get(option, fallback) is being called
        # with the machine name as the option and 'runner'/'features' as the
        # fallback values — this looks like it was meant to read per-machine
        # sections instead; confirm against a real config file.
        runner_name = M.get(machine, 'runner')
        # NOTE(review): this is a lazy map object; find_runner iterates it
        # once per candidate — confirm it is materialised downstream.
        features = map(str.strip, M.get(machine, 'features').split(','))
        runner = find_runner(name=runner_name, features=features)
        # Everything else in the machine section is forwarded to the runner.
        settings = dict(M[machine])
        del settings['runner']
        del settings['features']
        if 'user' in settings:
            # Expand a user reference into the full [Users] record.
            settings['user'] = dict(config['Users'][settings['user']])
    run = look_up(runner['command'])
    return run(workflow, **settings)
| {
"content_hash": "127c6c1fbed6f0d3ea93ec45eda90e6a",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 79,
"avg_line_length": 31.634730538922156,
"alnum_prop": 0.4688623887942457,
"repo_name": "NLeSC/noodles",
"id": "d735c1b641264f55e86a1f8a7645ee750aa5052a",
"size": "5283",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "noodles/run/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "77"
},
{
"name": "Jupyter Notebook",
"bytes": "261943"
},
{
"name": "Lua",
"bytes": "4645"
},
{
"name": "Python",
"bytes": "244648"
},
{
"name": "Shell",
"bytes": "4923"
}
],
"symlink_target": ""
} |
"""Tests the gdal_rasterize commandline application."""
import os
import subprocess
import tempfile
from osgeo import gdal
import gflags as flags
import unittest
from autotest2.gcore import gcore_util
from autotest2.gdrivers import gdrivers_util
FLAGS = flags.FLAGS  # Module-level handle to the parsed command-line flags.
@gdrivers_util.SkipIfDriverMissing(gdrivers_util.GTIFF_DRIVER)
class GdalRasterizeTest(gdrivers_util.DriverTestCase):
  """End-to-end test of the gdal_rasterize command-line tool via subprocess."""
  def setUp(self):
    """Configure the GeoTIFF driver harness with a .tif output extension."""
    self._ext = '.tif'
    super(GdalRasterizeTest, self).setUp(gdrivers_util.GTIFF_DRIVER,
                                         self._ext)
  def testRasterizeShapefile(self):
    """Rasterize poly.shp at 1km resolution and spot-check the output."""
    inputpath = gcore_util.GetTestFilePath('poly.shp')
    _, outputpath = tempfile.mkstemp(dir=FLAGS.test_tmpdir,
                                     suffix=self._ext)
    # NOTE(review): the binary path still contains a TODO placeholder, so
    # this subprocess call cannot succeed until the location is filled in.
    binary = os.path.join('TODO(schwehr): Where?',
                          'gdal_rasterize')
    # NOTE(review): gdal_rasterize's '-ot' takes a data type (e.g.
    # 'Float64'); 'gtiff' looks like it was meant for '-of' — confirm.
    cmd = [binary,
           '-burn', '1',
           '-ot', 'gtiff',
           '-tr', '1000', '1000',
           inputpath, outputpath]
    subprocess.check_call(cmd)
    # Checks some information about the output.
    self.CheckOpen(outputpath)
    self.CheckGeoTransform(
        (477815.53125, 1000.0, 0.0, 4766110.5, 0.0, -1000.0))
    self.CheckBand(1, 3, gdal_type=gdal.GDT_Float64)
if __name__ == '__main__':
  unittest.main()  # Allow running this test module directly.
| {
"content_hash": "a9baa0d46b4582f1b45d87689efc81bf",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 68,
"avg_line_length": 28.17391304347826,
"alnum_prop": 0.6311728395061729,
"repo_name": "schwehr/gdal-autotest2",
"id": "28290c4264cb376abbbb69f2716a728deaee54fb",
"size": "1893",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/apps/gdal_rasterize_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "725676"
},
{
"name": "Python",
"bytes": "1073608"
}
],
"symlink_target": ""
} |
class Templates(object):
    """Loads HTML templates from disk once and serves them flattened onto a
    single line (newlines stripped)."""
    def __init__(self):
        self._filepath = 'core/templates/'
        # Load eagerly so later property access is a cheap attribute lookup.
        self._switch_template = self.get_template('switch')
    def get_template(self, template):
        """Read ``<template>.html`` from the template directory and return
        its contents with all newlines removed."""
        path = '%s%s.html' % (self._filepath, template)
        with open(path) as handle:
            contents = handle.read()
        return contents.replace('\n', '')
    @property
    def switch(self):
        """
        Builds a switch template from switch.html.
        Returns:
            template (str): string of switch template
        """
        return self._switch_template
ffTemplates = Templates()  # Module-level singleton; reads template files at import time.
| {
"content_hash": "ccafd3065693d0f801ad30406865c667",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 73,
"avg_line_length": 24.952380952380953,
"alnum_prop": 0.6469465648854962,
"repo_name": "zpriddy/Firefly",
"id": "52e89a7791cce96d11edfbfa2740462a6184ed42",
"size": "688",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Firefly/core/templates/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "189198"
},
{
"name": "HTML",
"bytes": "58797"
},
{
"name": "JavaScript",
"bytes": "189763"
},
{
"name": "Python",
"bytes": "280837"
},
{
"name": "Shell",
"bytes": "15470"
}
],
"symlink_target": ""
} |
import sys
from argparse import ArgumentParser, Namespace
from contextlib import contextmanager
from typing import IO, Iterator
from idb.cli import ClientCommand
from idb.common.types import Client
class ScreenshotCommand(ClientCommand):
    """CLI command that captures a screenshot from the connected target."""
    @property
    def name(self) -> str:
        return "screenshot"
    @property
    def description(self) -> str:
        return "Take a Screenshot of the Target"
    def add_parser_arguments(self, parser: ArgumentParser) -> None:
        """Register the positional destination-path argument."""
        parser.add_argument(
            "dest_path",
            type=str,
            help="The destination file path to write to or - (dash) to write to stdout",
        )
        super().add_parser_arguments(parser)
    async def run_with_client(self, args: Namespace, client: Client) -> None:
        """Fetch the screenshot bytes and write them to the requested sink."""
        image = await client.screenshot()
        with screenshot_file(args.dest_path) as out:
            out.write(image)
@contextmanager
def screenshot_file(path: str) -> Iterator[IO[bytes]]:
    """Yield a writable binary stream for *path*; "-" selects stdout."""
    if path == "-":
        # Write straight to the process's binary stdout; nothing to close.
        yield sys.stdout.buffer
    else:
        with open(path, "wb") as stream:
            yield stream
| {
"content_hash": "52565a71921457a182a431865ea8f6e9",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 88,
"avg_line_length": 27.375,
"alnum_prop": 0.6493150684931507,
"repo_name": "facebook/FBSimulatorControl",
"id": "457337d88bb9610b74d046b9eaa6bc5b491cf875",
"size": "1296",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "idb/cli/commands/screenshot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2096"
},
{
"name": "CSS",
"bytes": "1682"
},
{
"name": "Dockerfile",
"bytes": "148"
},
{
"name": "JavaScript",
"bytes": "5329"
},
{
"name": "Objective-C",
"bytes": "2849557"
},
{
"name": "Objective-C++",
"bytes": "51006"
},
{
"name": "Python",
"bytes": "302699"
},
{
"name": "Shell",
"bytes": "15491"
},
{
"name": "Swift",
"bytes": "206775"
}
],
"symlink_target": ""
} |
"""read presence from redis and write to push services, gcm, apple"""
from pymongo import MongoClient
import syslog
import requests
import redis
import json
import time
import datetime
from argparse import ArgumentParser
DIST = 1609 * 120;  # Search radius in meters: 120 miles at ~1609 m per mile.
GCM_SERVER_URL = 'https://android.googleapis.com/gcm/send'
HEADERS = {'Content-Type': 'application/json',
           'Authorization': 'key=XXXXXXXXXX'}  # NOTE: placeholder API key.
INQNAME = "oemap_push_worker_in_queue"  # Redis list this worker blocks on.
class PushWorker():
    """Consumes presence records from a Redis queue and pushes them to
    nearby users' devices via Google Cloud Messaging (Python 2 code)."""
    def __init__ (self):
        """Parse the worker-instance id and fix Redis connection settings."""
        parser = ArgumentParser()
        parser.add_argument('-n', '--job', dest='job', action='store',
                help='worker instance id')
        self.args = parser.parse_args()
        self.rhost = "127.0.0.1"
        self.rport = 6379
    def push_gcm(self, rec, rid_list):
        """POST *rec* to the GCM endpoint for the given registration ids."""
        payload = {
            "registration_ids": rid_list,
            "delay_while_idle": False,
            "data": rec,
            "restricted_package_name": "com.onextent.oemap"
        }
        R = requests.post(GCM_SERVER_URL, data=json.dumps(payload), headers=HEADERS)
        self.log_debug("%s %s" % (R.status_code, R.text))
    def handle_gcm(self, rec):
        """Push *rec* to up to 50 real-time (rtp=1) presences in the same
        space within DIST meters, excluding the sender itself."""
        lon = rec['location']['coordinates'][0];
        lat = rec['location']['coordinates'][1];
        rid_list = []
        for p in self.db.presences.find(
            { 'space': rec['space'],
              'location': {
                 '$near': {
                    '$geometry': {
                       'type': "Point",
                       'coordinates': [lon, lat]
                    },
                    '$maxDistance': DIST
                 },
              },
              'rtp': 1
            }
        ).limit(50):
            if p['rid'] == rec['rid']: # skip self
                continue
            else:
                rid_list.append(p['rid'])
        if rid_list:
            self.push_gcm(rec, rid_list)
    def handle_apple(self, rec):
        """Apple push is not implemented yet."""
        pass
    def handle(self, rec):
        """Dispatch a presence record to every push backend."""
        self.handle_gcm(rec)
        self.handle_apple(rec)
    def run (self):
        """Block on the Redis queue forever, reconnecting after any error."""
        while True:
            try:
                self.log_notice('%s Python impl starting queue %s' % ("test", INQNAME))
                rdis = redis.Redis(host=self.rhost, port=self.rport)
                client = MongoClient()
                self.db = client.oemap_test
                while True:
                    # NOTE(review): on timeout brpop returns None (not a
                    # tuple), so this unpacking raises and the outer handler
                    # restarts the loop — confirm that is the intent.
                    (_, msg) = rdis.brpop(keys=[INQNAME], timeout=600)
                    if msg == None:
                        continue
                    rec = json.loads(msg)
                    self.handle(rec)
            except Exception:
                self.handle_exception()
                time.sleep(1)
            except: # catch *all* exceptions
                self.handle_exception()
                time.sleep(1)
    def log_debug (self, msg):
        """Write a debug line tagged with this worker's job id to syslog."""
        syslog.syslog(syslog.LOG_DEBUG, "%s %s" % (self.args.job, msg))
    def log_notice (self, msg):
        """Write a notice line tagged with this worker's job id to syslog."""
        syslog.syslog(syslog.LOG_NOTICE, "%s %s" % (self.args.job, msg))
    def log_error (self, msg):
        """Write an error line tagged with this worker's job id to syslog."""
        syslog.syslog(syslog.LOG_ERR, "%s %s" % (self.args.job, msg))
    def handle_exception(self):
        """Log the current exception's traceback to syslog, line by line."""
        import traceback
        formatted_lines = traceback.format_exc().splitlines()
        for line in formatted_lines:
            self.log_error(line)
# Design notes: for each incoming record, get all presence recs for the space;
#   if remote_id_type is gcm, push via GCM;
#   if remote_id_type is apple, push via Apple (not implemented);
#   else noop.
if __name__ == "__main__":
    # run() loops forever; this process is intended to be supervised externally.
    PushWorker().run()
| {
"content_hash": "a95f0ed9216c4a29ccff17dc952e0e6e",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 87,
"avg_line_length": 28.576923076923077,
"alnum_prop": 0.47806191117092867,
"repo_name": "navicore/oemap",
"id": "6bb0715e0aa9bb232fc620e4b1fc7ed42b5eac32",
"size": "3734",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "modules/push_worker/PushWorker.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "241273"
},
{
"name": "JavaScript",
"bytes": "9939"
},
{
"name": "Python",
"bytes": "19649"
},
{
"name": "Shell",
"bytes": "2843"
}
],
"symlink_target": ""
} |
from interage.api.config import APISettings
from interage.api.exceptions import HttpNotFoundError
from interage.api.utils.models import json_to_instance_list
class APIResult(object):
    """Wrapper around one page of a paginated API response.

    Exposes the page's items (raw JSON or model instances) plus
    navigation to the neighbouring pages through the owning client.
    """

    def __init__(self, **args):
        super(APIResult, self).__init__()
        self.client = args.get('client')
        self.model_class = args.get('model_class')
        self._load_from_response(args.get('response'))

    def _load_from_response(self, response):
        # Cache pagination metadata from the raw response dict.
        self._count = response.get('count', 0)
        self._results = response.get('results', [])
        self._next = response.get('next', None)
        self._previous = response.get('previous', None)

    def _get_result_object(self, result):
        """Build a sibling APIResult sharing this one's client and model."""
        return APIResult(
            response = result,
            client = self.client,
            model_class = self.model_class,
        )

    def has_next(self):
        return self._next is not None

    def has_previous(self):
        return self._previous is not None

    def next(self):
        """Fetch and wrap the next page; raise HttpNotFoundError if none.

        BUG FIX: has_next is a method -- the original tested the bound
        method object itself (``if(self.has_next):``), which is always
        truthy, so a request was issued even past the last page.
        """
        if self.has_next():
            result = self.client.request(self.next_url)
            return self._get_result_object(result)
        raise HttpNotFoundError()

    def previous(self):
        """Fetch and wrap the previous page; raise HttpNotFoundError if none."""
        # BUG FIX: same always-truthy bound-method test as next().
        if self.has_previous():
            result = self.client.request(self.previous_url)
            return self._get_result_object(result)
        raise HttpNotFoundError()

    @property
    def next_url(self):
        return self._next

    @property
    def previous_url(self):
        return self._previous

    @property
    def count(self):
        return self._count

    def json(self):
        """Return this page's items as raw JSON dicts."""
        return self.results(as_json = True)

    def objects(self):
        """Return this page's items converted to model_class instances."""
        return self.results(as_json = False)

    def results(self, as_json):
        if as_json:
            return self._results
        return json_to_instance_list(self.model_class, self._results)
"content_hash": "181d0be3875af829fbf5951ce04f8511",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 69,
"avg_line_length": 28.074626865671643,
"alnum_prop": 0.6018075491759702,
"repo_name": "IntMed/interage_python_sdk",
"id": "564ad8e6d48841cece1d113fc60fd21feb17b7fb",
"size": "1881",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "interage/api/models/result.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21616"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import traitlets
from cesiumpy.base import _CesiumObject
import cesiumpy.entities.cartesian as cartesian
import cesiumpy.util.common as com
class Camera(_CesiumObject):
    """The viewer camera: holds a fly-to destination and orientation."""

    _props = ['destination', 'orientation']

    destination = traitlets.Instance(klass=cartesian._Cartesian, allow_none=True)

    def __init__(self, widget):
        self.widget = widget
        self.destination = None
        self.orientation = None

    def __repr__(self):
        name = self.__class__.__name__
        if self.destination is None:
            return "{klass}(destination=default)".format(klass=name)
        return "{klass}(destination={destination})".format(
            klass=name, destination=self.destination)

    def flyTo(self, destination, orientation=None):
        """Aim the camera at *destination* (entity, coordinates, or address)."""
        from cesiumpy.entities.entity import _CesiumEntity
        import cesiumpy.extension.geocode as geocode

        # An entity carrying a single position flies to that position.
        if isinstance(destination, _CesiumEntity) and destination.position is not None:
            destination = destination.position

        destination = geocode._maybe_geocode(destination, height=100000)

        # Four scalars describe a Rectangle; anything else a Cartesian3.
        if com.is_listlike(destination) and len(destination) == 4:
            target = cartesian.Rectangle.maybe(destination)
        else:
            target = cartesian.Cartesian3.maybe(destination, degrees=True)

        self.destination = target
        self.orientation = com.notimplemented(orientation)
        return self
| {
"content_hash": "7b9c408b355e3fe6eb054ba45109634e",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 81,
"avg_line_length": 33.9375,
"alnum_prop": 0.6488643339472069,
"repo_name": "sinhrks/cesiumpy",
"id": "27ddcf9e5690684dde940892016bf9b49df8d685",
"size": "1668",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cesiumpy/camera.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "334917"
}
],
"symlink_target": ""
} |
import numpy as np
import theano
import logging
from numpy.testing import assert_allclose
from theano import tensor, function
from blocks import initialization
from blocks.bricks import Identity, Linear
from blocks.bricks.recurrent import SimpleRecurrent, LSTM
from blocks.initialization import Constant
from blocks_contrib.bricks.recurrent import DelayLine
from blocks_contrib.bricks.recurrent import Unfolder, UnfolderLSTM
logger = logging.getLogger(__name__)
def test_constant_input_lstm():
    """Unfold an LSTM from a constant input and check the output shape.

    NOTE(review): legacy Python 2 syntax (print statements); depends on
    theano/blocks and is not runnable on Python 3 as-is.
    """
    x = tensor.matrix('x')
    proto = LSTM(activation=Identity(), dim=1,
                 weights_init=Constant(1/4.),
                 biases_init=Constant(0.))
    proto.initialize()
    # The flagger's output decides when unfolding stops.
    flagger = Linear(input_dim=1, output_dim=1,
                     weights_init=Constant(1./2.),
                     biases_init=Constant(0.))
    flagger.initialize()
    # output_dim=4: presumably one slice per LSTM gate -- TODO confirm.
    inp2hid = Linear(input_dim=1, output_dim=4,
                     weights_init=Constant(1/4.),
                     biases_init=Constant(0))
    inp2hid.initialize()
    rnn = UnfolderLSTM(proto, flagger)
    rnn.initialize()
    h = inp2hid.apply(x)
    y = rnn.apply(inputs=h, n_steps=10, batch_size=5)
    F = function([x],y)
    X = np.ones((5,1)).astype(theano.config.floatX)
    H = function([x], flagger.apply(x))
    T = H(X)
    #print T
    Y = F(X)
    print Y
    print Y[0].shape
    # Only the (steps, batch, dim) shape is asserted, not the values.
    assert Y[0].shape == (4,5,1)
    #target = np.cumsum(np.ones((6,1,1)),axis=0)
    #assert_allclose(Y, target)
def test_constant_input():
    """Unfold a SimpleRecurrent from a constant input; with identity
    activation and identity weights the outputs are a cumulative sum.

    NOTE(review): legacy Python 2 syntax (print statements); depends on
    theano/blocks and is not runnable on Python 3 as-is.
    """
    x = tensor.matrix('x')
    proto = SimpleRecurrent(activation=Identity(), dim=1,
                            weights_init=initialization.Identity(1.))
    proto.initialize()
    # The flagger's output decides when unfolding stops.
    flagger = Linear(input_dim=1, output_dim=1,
                     weights_init=Constant(1/10.),
                     biases_init=Constant(0.))
    flagger.initialize()
    rnn = Unfolder(proto, flagger)
    rnn.initialize()
    y = rnn.apply(inputs=x, n_steps=10, batch_size=1)
    F = function([x],y)
    X = np.ones((1,1)).astype(theano.config.floatX)
    H = function([x], flagger.apply(x))
    T = H(X)
    print T
    Y = F(X)
    print Y
    # Expected: running sum of ones over the 5 unfolded steps.
    target = np.cumsum(np.ones((5,1,1)),axis=0)
    assert_allclose(Y[0], target)
def test_delay_line():
    """Push a ramp through a DelayLine and check the output shape.

    NOTE(review): legacy Python 2 syntax (print statement); depends on
    theano/blocks_contrib and is not runnable on Python 3 as-is.
    """
    x = tensor.tensor3('x')
    input_dim = 1
    batch_size = 1
    memory_size = 3
    time_len = 4
    delay_line = DelayLine(input_dim, memory_size,
                           weights_init=Constant(1.))
    delay_line.initialize()
    y = delay_line.apply(x, iterate=True)
    func = function([x], y)
    # Input is a (time, batch, dim) ramp 0,1,2,3 on the single feature.
    x_val = np.zeros((time_len, batch_size, input_dim))
    val = np.arange(4)
    #val = np.tile(x_val[np.newaxis], batch_size).T
    x_val[:,0,0] = val.astype(theano.config.floatX)
    x_val = x_val.astype(theano.config.floatX)
    y_val = func(x_val).astype(theano.config.floatX)
    print y_val
    # Output widens the feature axis to memory_size taps.
    assert y_val.shape == (4,1,3)
if __name__ == '__main__':
    # Run the tests directly (outside a test runner); delay-line test disabled.
    #test_delay_line()
    test_constant_input()
    test_constant_input_lstm()
| {
"content_hash": "4a3c9ace83ff3f726ee9b9aca81b6dfa",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 66,
"avg_line_length": 27.238532110091743,
"alnum_prop": 0.6116537554732233,
"repo_name": "EderSantana/blocks_contrib",
"id": "6623af11c50602dc12e7122c73748535b2946bee",
"size": "2969",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_recurrent_contrib.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "75727"
},
{
"name": "Shell",
"bytes": "56"
}
],
"symlink_target": ""
} |
from twilio.rest import Client

# Your Account Sid and Auth Token from twilio.com/user/account
account = "ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
token = "your_auth_token"
client = Client(account, token)
# Update the body of an existing Chat message, addressed by
# service SID -> channel SID -> message SID (all placeholders here).
message = client.chat \
                .services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                .channels("CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                .messages("IMXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                .update(body="MESSAGE")
# Echo the updated body returned by the API.
print(message.body)
| {
"content_hash": "a425056ac65c935318f5324fb2263c8a",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 65,
"avg_line_length": 34.57142857142857,
"alnum_prop": 0.6921487603305785,
"repo_name": "teoreteetik/api-snippets",
"id": "4e8dc86c72763e0007b9c510e6810ae900f18991",
"size": "557",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ip-messaging/rest/messages/update-messages/update-messages.6.x.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "643369"
},
{
"name": "HTML",
"bytes": "335"
},
{
"name": "Java",
"bytes": "943336"
},
{
"name": "JavaScript",
"bytes": "539577"
},
{
"name": "M",
"bytes": "117"
},
{
"name": "Mathematica",
"bytes": "93"
},
{
"name": "Objective-C",
"bytes": "46198"
},
{
"name": "PHP",
"bytes": "538312"
},
{
"name": "Python",
"bytes": "467248"
},
{
"name": "Ruby",
"bytes": "470316"
},
{
"name": "Shell",
"bytes": "1564"
},
{
"name": "Swift",
"bytes": "36563"
}
],
"symlink_target": ""
} |
import logging
from .taskqueue import app as celery_app
# Module-level logger for the config package.
logger = logging.getLogger(__name__)
# Emitted at import time so startup logs show the package was loaded.
logger.info("Config package was imported")
| {
"content_hash": "08d7ba4c0ba5d2f2c796e0d3b3ebed75",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 42,
"avg_line_length": 23,
"alnum_prop": 0.7681159420289855,
"repo_name": "v-for-vincent/sight-reading",
"id": "10ce1b72323a98541554643c1702453b978772fb",
"size": "138",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sight_reading/config/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1210"
},
{
"name": "JavaScript",
"bytes": "2385"
},
{
"name": "Python",
"bytes": "38334"
},
{
"name": "Shell",
"bytes": "5109"
}
],
"symlink_target": ""
} |
import csv
from datetime import datetime
import pygal
import requests
import signal
import sys
import time
def getDeltaFromRequest(req):
    """Return the request's elapsed time in milliseconds (float).

    BUG FIX: ``timedelta.microseconds`` holds only the sub-second
    component (0..999999), so any response slower than one second was
    under-reported; ``total_seconds()`` covers the whole duration.
    """
    reqDelta = req.elapsed
    milliDelta = reqDelta.total_seconds() * 1000.0
    return milliDelta
# TODO: Allow automatically stopping after some time
# TODO: Allow using a premade CSV file, generating chart and exiting.
# TODO: Split into a separate file or library.
class RequestTracker:
    """Periodically GET a URL and record the response latency.

    Samples can be streamed to a CSV file and/or rendered to a pygal SVG
    line graph.  Each output must be configured (filename set, feature
    enabled) before start(); toggles are rejected while a run is active.
    """

    # Class-level defaults; the real state is assigned per instance.
    url = None
    interval = 30
    working = False
    dataPoints = 0

    csvEnabled = False
    csvFile = None
    csvFilename = None
    csvWriter = None

    graphEnabled = False
    graphFilename = None
    graph = None

    verbosityEnabled = False

    def __init__(self, url, interval=30):
        # TODO: Validate URL here or later
        self.url = url
        self.interval = interval
        # BUG FIX: graphData used to be a class attribute, so every
        # instance shared (and appended to) the same list.
        self.graphData = []

    def enableVerbosity(self):
        """Turn on debug logging to stdout."""
        self.verbosityEnabled = True
        self.verbose('DBG : Verbosity enabled')

    def disableVerbosity(self):
        self.verbosityEnabled = False

    # TODO: Create .dbg, .info, .warn, .err functions based on this
    def verbose(self, str):
        # NOTE: parameter name shadows builtin str; kept for compatibility.
        if self.verbosityEnabled:
            print(str)

    def midrunError(self, str):
        """Report that a feature cannot be toggled while a run is active."""
        self.verbose('ERR : Cannot toggle ' + str + ' during a run.')
        self.verbose('ERR : Stop the run, toggle ' + str + ' and start again.')

    def enableGraphs(self):
        """Enable SVG graph output; requires setGraphFilename() first."""
        if self.working:
            self.midrunError('graphs')
            return
        if self.graphEnabled:
            return
        if self.graphFilename is None:
            self.verbose('ERR : No graph filename set yet')
            return
        self.graphEnabled = True
        self.verbose('DBG : Graph will be output at end of run')

    def disableGraphs(self):
        if self.working:
            self.midrunError('graphs')
            return
        if not self.graphEnabled:
            return
        self.graphEnabled = False
        self.verbose('DBG : Graphs will not be output at end of run')

    def setGraphFilename(self, name):
        self.graphFilename = name
        self.verbose('DBG : Graph filename set to ' + name)

    def enableCSV(self):
        """Enable CSV output; requires setCSVFilename() first."""
        if self.working:
            self.midrunError('CSV')
            return
        if self.csvEnabled:
            return
        if self.csvFilename is None:
            self.verbose('ERR : No CSV filename set yet')
            return
        self.csvEnabled = True
        self.verbose('DBG : CSV file will be output throughout run')

    def disableCSV(self):
        if self.working:
            self.midrunError('CSV')
            return
        if not self.csvEnabled:
            return
        self.csvEnabled = False
        self.verbose('DBG : CSV wil not be output throughout run')

    def setCSVFilename(self, name):
        self.csvFilename = name
        self.verbose('DBG : Graph filename set to ' + name)

    def setInterval(self, interval):
        """Set the polling interval in seconds; must be > 0 and not mid-run."""
        if self.working:
            self.midrunError('interval')
            return
        if interval <= 0:
            self.verbose('DBG : Cannot set an interval <= 0.')
            return
        self.interval = interval

    def start(self):
        """Initialize the configured outputs and begin polling (blocks)."""
        if self.working:
            return
        self.working = True
        self.verbose('INFO: Starting run on URL ' + self.url)
        if self.graphEnabled:
            newGraph = pygal.Line()
            newGraph.title = 'Load delay over time for ' + self.url
            newGraph.x_labels = []
            self.graph = newGraph
            self.graph.add('Delay', self.graphData)
            self.verbose('DBG : Created graph')
        if self.csvEnabled:
            fields = ['timestamp', 'delay']
            self.csvFile = open(self.csvFilename, 'w', newline='')
            self.csvWriter = csv.DictWriter(self.csvFile, fieldnames=fields)
            self.csvWriter.writeheader()
            self.verbose('DBG : Created CSV file')
        self.work()

    def stop(self):
        """Stop polling and flush/close all outputs."""
        if not self.working:
            return
        self.working = False
        self.closeCSV()
        self.finishGraph()
        self.verbose('INFO: Stopping run on URL ' + self.url)

    def closeCSV(self):
        if not self.csvEnabled:
            return
        self.csvFile.close()
        self.csvFile = None

    def dumpGraph(self):
        """Render the graph to its SVG file (no-op when graphs disabled)."""
        if not self.graphEnabled:
            return
        self.graph.render_to_file(self.graphFilename)

    def finishGraph(self):
        self.dumpGraph()
        self.graph = None
        self.graphData = []

    def processGraphData(self, timestamp, delay):
        if not self.graphEnabled:
            return
        szDatetime = str(datetime.fromtimestamp(timestamp))
        self.graph.x_labels.append(szDatetime)
        self.graphData.append(delay)

    def processCSVData(self, timestamp, delay):
        if not self.csvEnabled:
            return
        formattedData = {'timestamp': timestamp, 'delay': delay}
        self.csvWriter.writerow(formattedData)
        self.csvFile.flush()

    def processData(self, timestamp, delay):
        """Record one sample to every enabled output."""
        self.processGraphData(timestamp, delay)
        self.processCSVData(timestamp, delay)

    def work(self):
        """Poll the URL until stop(), recording each response delay."""
        self.verbose('INFO: Work started')
        while self.working:
            req = requests.get(self.url)
            timestamp = time.time()
            if req is None:
                # Defensive only: requests.get raises on failure rather
                # than returning None.
                # BUG FIX: was "+ time" (the module), which raised TypeError.
                print('INFO: Server failed to respond. Timestamp: ' + str(timestamp))
                self.processCSVData(timestamp, 'Nonresponse')
                self.processGraphData(timestamp, 0)
            elif req.ok is not True:
                # BUG FIX: requests.Response has no getcode(); use status_code.
                print('INFO: Server failing with code ' + str(req.status_code))
                self.processCSVData(timestamp, 'HTTP ' + str(req.status_code))
                self.processGraphData(timestamp, 0)
            else:
                milliDelta = getDeltaFromRequest(req)
                self.processData(timestamp, milliDelta)
                szDelta = str(milliDelta)
                szTime = str(timestamp)
                self.verbose('DBG : Delay=' + szDelta + ' Time=' + szTime)
            self.dataPoints += 1
            # FIXME: remove later?
            self.dumpGraph()
            time.sleep(self.interval)
# TODO: argv -- the URL, interval, and output filenames below are hard-coded
# and should eventually come from command-line arguments.
worker = None
# SIGINT handler: cleanly stop the tracker (flush CSV, render graph), then exit.
# NOTE(review): the 'signal' parameter shadows the signal module inside the
# handler; harmless here since the module is not used in the body.
def sigint(signal, frame):
    if worker is not None:
        worker.verbose('WARN: SIGINT, shutting down')
        worker.stop()
        points = str(worker.dataPoints)
        print('Successfully stopped tracker with ' + points + ' data points')
    sys.exit(0)
signal.signal(signal.SIGINT, sigint)
# Script entry: track decisions.mit.edu every 60s with SVG and CSV output.
worker = RequestTracker('http://decisions.mit.edu', 60)
worker.enableVerbosity()
worker.setGraphFilename('graph.svg')
worker.enableGraphs()
worker.setCSVFilename('decisions.csv')
worker.enableCSV()
# Blocks forever; interrupted only by the SIGINT handler above.
worker.start()
| {
"content_hash": "5b098b42886a2e19948de8b542356ff3",
"timestamp": "",
"source": "github",
"line_count": 262,
"max_line_length": 79,
"avg_line_length": 26.11832061068702,
"alnum_prop": 0.5903843343562765,
"repo_name": "nsgomez/RequestTracker",
"id": "4959c8d4597d229df31ed39e4686641e442e38eb",
"size": "6862",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "request_tracker.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6862"
}
],
"symlink_target": ""
} |
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._premium_messaging_regions_operations import build_list_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PremiumMessagingRegionsOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.mgmt.servicebus.v2017_04_01.aio.ServiceBusManagementClient`'s
        :attr:`premium_messaging_regions` attribute.
    """
    models = _models

    def __init__(self, *args, **kwargs) -> None:
        # Client, config, serializer and deserializer are injected
        # positionally by the generated service client, or by keyword
        # when wired up manually.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")

    @distributed_trace
    def list(
        self,
        **kwargs: Any
    ) -> AsyncIterable[_models.PremiumMessagingRegionsListResult]:
        """Gets the available premium messaging regions for servicebus.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either PremiumMessagingRegionsListResult or the result of
         cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.servicebus.v2017_04_01.models.PremiumMessagingRegionsListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop('api_version', _params.pop('api-version', "2017-04-01"))  # type: str
        cls = kwargs.pop('cls', None)  # type: ClsType[_models.PremiumMessagingRegionsListResult]

        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}) or {})

        def prepare_request(next_link=None):
            # First page hits the operation's templated URL; subsequent pages
            # follow the next_link returned by the service.
            if not next_link:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=self.list.metadata['url'],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore

            else:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=next_link,
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
                request.method = "GET"
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page and hand AsyncItemPaged the next link.
            deserialized = self._deserialize("PremiumMessagingRegionsListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(  # pylint: disable=protected-access
                request,
                stream=False,
                **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response


        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.ServiceBus/premiumMessagingRegions"}  # type: ignore
| {
"content_hash": "abfc6ae5b1c18947fb2b672137c4f546",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 133,
"avg_line_length": 44.1965811965812,
"alnum_prop": 0.6372075033842584,
"repo_name": "Azure/azure-sdk-for-python",
"id": "a9cf8690163e598bf3e8986361a7174b6f7ed418",
"size": "5671",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/servicebus/azure-mgmt-servicebus/azure/mgmt/servicebus/v2017_04_01/aio/operations/_premium_messaging_regions_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from djangoappengine.settings_base import *
import os
# Activate django-dbindexer for the default database
DATABASES['native'] = DATABASES['default']
DATABASES['default'] = {'ENGINE': 'dbindexer', 'TARGET': 'native'}
AUTOLOAD_SITECONF = 'indexes'
# NOTE(review): secret key committed to source control -- rotate and move to
# an environment variable before deploying anywhere real.
SECRET_KEY = '=r-$b*8hglm+858&9t043hlm6-&6-3d3vfc4((7yd0dbrakhvi'
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.contenttypes',
    'django.contrib.auth',
    'django.contrib.sessions',
    'djangotoolbox',
    'autoload',
    'dbindexer',
    'blogengine',
    # djangoappengine should come last, so it can override a few manage.py commands
    'djangoappengine',
)
MIDDLEWARE_CLASSES = (
    # This loads the index definitions, so it has to come first
    'autoload.middleware.AutoloadMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.request',
    'django.core.context_processors.media',
)
# This test runner captures stdout and associates tracebacks with their
# corresponding output. Helps a lot with print-debugging.
TEST_RUNNER = 'djangotoolbox.test.CapturingTestSuiteRunner'
ADMIN_MEDIA_PREFIX = '/media/admin/'
TEMPLATE_DIRS = (os.path.join(os.path.dirname(__file__), 'templates'),)
ROOT_URLCONF = 'urls'
# Site presentation constants used by the blogengine templates.
SITE_ROOT = '/'
SITE_TITLE = 'Django non-rel blog engine on Google App Engine'
MEDIA_URL = '/static/'
| {
"content_hash": "620055fe433372c2f660e6dd28c1377b",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 83,
"avg_line_length": 28.75925925925926,
"alnum_prop": 0.7263361236316807,
"repo_name": "rickhurst/Django-non-rel-blog",
"id": "bf6ff134e85df4cfad1e8ea13fe82c2cc18f8abc",
"size": "1743",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "92014"
},
{
"name": "Python",
"bytes": "4213739"
}
],
"symlink_target": ""
} |
from configparser import RawConfigParser
def createsavefile(username, userclass, chapter):
    """Create (overwrite) userdata.cfg with the player's starting state.

    Fixes vs. the original:
    - the 'Character' section is added before writing to it (configparser
      raised NoSectionError otherwise);
    - the file is opened in text mode ('w'); ConfigParser.write() emits
      str, so 'wb' raised TypeError on Python 3;
    - values are stringified (Python 3 configparser only accepts str);
    - the chapter key is spelled 'Current_Chapter' to match getchapter()
      (the original wrote 'Current_Chaper', so reads always failed).
    """
    config = RawConfigParser()
    config.add_section('Main')
    config.add_section('Character')
    config.set('Main', 'Class', str(userclass))
    config.set('Main', 'Username', str(username))
    config.set('Main', 'Current_Chapter', str(chapter))
    config.set('Character', 'Health', '100')
    with open('userdata.cfg', 'w') as configfile:
        config.write(configfile)
def getchapter():
    """Return the saved chapter (as a string) from userdata.cfg."""
    parser = RawConfigParser()
    parser.read('userdata.cfg')
    return parser.get('Main', 'Current_Chapter')
def getusername():
    """Return the saved player name from userdata.cfg."""
    parser = RawConfigParser()
    parser.read('userdata.cfg')
    return parser.get('Main', 'Username')
def getclass():
    """Return the saved character class from userdata.cfg."""
    parser = RawConfigParser()
    parser.read('userdata.cfg')
    return parser.get('Main', 'Class')
def updatehitpoints(hitpoints):
    """Persist a new Health value to userdata.cfg.

    Fixes vs. the original: the value is stringified (Python 3
    configparser requires str) and the file is written back -- the
    original mutated the in-memory parser and then discarded the change.
    """
    config = RawConfigParser()
    config.read('userdata.cfg')
    config.set('Character', 'Health', str(hitpoints))
    with open('userdata.cfg', 'w') as configfile:
        config.write(configfile)
"content_hash": "ea4dad0d12e631dd7e44e9d3b5b1d056",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 51,
"avg_line_length": 34.6,
"alnum_prop": 0.661849710982659,
"repo_name": "KaptainKaleb/Adventure-Adventure-Adventure",
"id": "68e918c7ede0fe2d43d6cb435431c7cbb1c99b02",
"size": "1038",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "save.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "7216"
},
{
"name": "Python",
"bytes": "7059"
}
],
"symlink_target": ""
} |
from __future__ import print_function, unicode_literals, absolute_import
'''Cursor control and color for the .NET console.
'''
#
# Ironpython requires a patch to work do:
#
# In file PythonCommandLine.cs patch line:
# class PythonCommandLine
# {
# to:
# public class PythonCommandLine
# {
#
#
#
# primitive debug printing that won't interfere with the screen
import clr,sys
clr.AddReferenceToFileAndPath(sys.executable)
import IronPythonConsole
import sys
import re
import os
import System
from .event import Event
from pyreadline.logger import log
from pyreadline.keysyms import \
make_keysym, make_keyinfo, make_KeyPress, make_KeyPress_from_keydescr
from pyreadline.console.ansi import AnsiState
color = System.ConsoleColor

# Map ANSI SGR color codes ("<intensity>;<color>") to .NET ConsoleColor values.
ansicolor={"0;30": color.Black,
           "0;31": color.DarkRed,
           "0;32": color.DarkGreen,
           "0;33": color.DarkYellow,
           "0;34": color.DarkBlue,
           "0;35": color.DarkMagenta,
           "0;36": color.DarkCyan,
           "0;37": color.DarkGray,
           "1;30": color.Gray,
           "1;31": color.Red,
           "1;32": color.Green,
           "1;33": color.Yellow,
           "1;34": color.Blue,
           "1;35": color.Magenta,
           "1;36": color.Cyan,
           "1;37": color.White
          }

# Map lowercase .NET color names to Win32 console attribute nibbles
# (low 3 bits = base color, +8 = intensity bit).
winattr = {"black" : 0, "darkgray" : 0+8,
           "darkred" : 4, "red" : 4+8,
           "darkgreen" : 2, "green" : 2+8,
           "darkyellow" : 6, "yellow" : 6+8,
           "darkblue" : 1, "blue" : 1+8,
           "darkmagenta" : 5, "magenta" : 5+8,
           "darkcyan" : 3, "cyan" : 3+8,
           "gray" : 7, "white" : 7+8}
class Console(object):
'''Console driver for Windows.
'''
def __init__(self, newbuffer=0):
'''Initialize the Console object.
newbuffer=1 will allocate a new buffer so the old content will be restored
on exit.
'''
self.serial = 0
self.attr = System.Console.ForegroundColor
self.saveattr = winattr[str(System.Console.ForegroundColor).lower()]
self.savebg = System.Console.BackgroundColor
log('initial attr=%s' % self.attr)
def _get(self):
top = System.Console.WindowTop
log("WindowTop:%s"%top)
return top
def _set(self, value):
top = System.Console.WindowTop
log("Set WindowTop:old:%s,new:%s"%(top, value))
WindowTop = property(_get, _set)
del _get, _set
def __del__(self):
'''Cleanup the console when finished.'''
# I don't think this ever gets called
pass
def pos(self, x=None, y=None):
'''Move or query the window cursor.'''
if x is not None:
System.Console.CursorLeft=x
else:
x = System.Console.CursorLeft
if y is not None:
System.Console.CursorTop=y
else:
y = System.Console.CursorTop
return x, y
def home(self):
'''Move to home.'''
self.pos(0, 0)
# Map ANSI color escape sequences into Windows Console Attributes
terminal_escape = re.compile('(\001?\033\\[[0-9;]*m\002?)')
escape_parts = re.compile('\001?\033\\[([0-9;]*)m\002?')
# This pattern should match all characters that change the cursor position differently
# than a normal character.
motion_char_re = re.compile('([\n\r\t\010\007])')
def write_scrolling(self, text, attr=None):
'''write text at current cursor position while watching for scrolling.
If the window scrolls because you are at the bottom of the screen
buffer, all positions that you are storing will be shifted by the
scroll amount. For example, I remember the cursor position of the
prompt so that I can redraw the line but if the window scrolls,
the remembered position is off.
This variant of write tries to keep track of the cursor position
so that it will know when the screen buffer is scrolled. It
returns the number of lines that the buffer scrolled.
'''
x, y = self.pos()
w, h = self.size()
scroll = 0 # the result
# split the string into ordinary characters and funny characters
chunks = self.motion_char_re.split(text)
for chunk in chunks:
n = self.write_color(chunk, attr)
if len(chunk) == 1: # the funny characters will be alone
if chunk[0] == '\n': # newline
x = 0
y += 1
elif chunk[0] == '\r': # carriage return
x = 0
elif chunk[0] == '\t': # tab
x = 8 * (int(x / 8) + 1)
if x > w: # newline
x -= w
y += 1
elif chunk[0] == '\007': # bell
pass
elif chunk[0] == '\010':
x -= 1
if x < 0:
y -= 1 # backed up 1 line
else: # ordinary character
x += 1
if x == w: # wrap
x = 0
y += 1
if y == h: # scroll
scroll += 1
y = h - 1
else: # chunk of ordinary characters
x += n
l = int(x / w) # lines we advanced
x = x % w # new x value
y += l
if y >= h: # scroll
scroll += y - h + 1
y = h - 1
return scroll
trtable = {0 : color.Black, 4 : color.DarkRed, 2 : color.DarkGreen,
6 : color.DarkYellow, 1 : color.DarkBlue, 5 : color.DarkMagenta,
3 : color.DarkCyan, 7 : color.Gray, 8 : color.DarkGray,
4+8 : color.Red, 2+8 : color.Green, 6+8 : color.Yellow,
1+8 : color.Blue, 5+8 : color.Magenta,3+8 : color.Cyan,
7+8 : color.White}
def write_color(self, text, attr=None):
'''write text at current cursor position and interpret color escapes.
return the number of characters written.
'''
log('write_color("%s", %s)' % (text, attr))
chunks = self.terminal_escape.split(text)
log('chunks=%s' % repr(chunks))
bg = self.savebg
n = 0 # count the characters we actually write, omitting the escapes
if attr is None:#use attribute from initial console
attr = self.attr
try:
fg = self.trtable[(0x000f&attr)]
bg = self.trtable[(0x00f0&attr)>>4]
except TypeError:
fg = attr
for chunk in chunks:
m = self.escape_parts.match(chunk)
if m:
log(m.group(1))
attr = ansicolor.get(m.group(1), self.attr)
n += len(chunk)
System.Console.ForegroundColor = fg
System.Console.BackgroundColor = bg
System.Console.Write(chunk)
return n
def write_plain(self, text, attr=None):
'''write text at current cursor position.'''
log('write("%s", %s)' %(text, attr))
if attr is None:
attr = self.attr
n = c_int(0)
self.SetConsoleTextAttribute(self.hout, attr)
self.WriteConsoleA(self.hout, text, len(text), byref(n), None)
return len(text)
if "EMACS" in os.environ:
def write_color(self, text, attr=None):
junk = c_int(0)
self.WriteFile(self.hout, text, len(text), byref(junk), None)
return len(text)
write_plain = write_color
# make this class look like a file object
def write(self, text):
log('write("%s")' % text)
return self.write_color(text)
#write = write_scrolling
def isatty(self):
return True
def flush(self):
pass
def page(self, attr=None, fill=' '):
'''Fill the entire screen.'''
System.Console.Clear()
def text(self, x, y, text, attr=None):
'''Write text at the given position.'''
self.pos(x, y)
self.write_color(text, attr)
def clear_to_end_of_window(self):
oldtop = self.WindowTop
lastline = self.WindowTop+System.Console.WindowHeight
pos = self.pos()
w, h = self.size()
length = w - pos[0] + min((lastline - pos[1] - 1), 5) * w - 1
self.write_color(length * " ")
self.pos(*pos)
self.WindowTop = oldtop
def rectangle(self, rect, attr=None, fill=' '):
'''Fill Rectangle.'''
oldtop = self.WindowTop
oldpos = self.pos()
#raise NotImplementedError
x0, y0, x1, y1 = rect
if attr is None:
attr = self.attr
if fill:
rowfill = fill[:1] * abs(x1 - x0)
else:
rowfill = ' ' * abs(x1 - x0)
for y in range(y0, y1):
System.Console.SetCursorPosition(x0, y)
self.write_color(rowfill, attr)
self.pos(*oldpos)
    def scroll(self, rect, dx, dy, attr=None, fill=' '):
        '''Scroll a rectangle.'''
        # Not supported on the .NET console backend; callers must not rely
        # on rectangle scrolling here.
        raise NotImplementedError
def scroll_window(self, lines):
'''Scroll the window by the indicated number of lines.'''
top = self.WindowTop + lines
if top < 0:
top = 0
if top + System.Console.WindowHeight > System.Console.BufferHeight:
top = System.Console.BufferHeight
self.WindowTop = top
    def getkeypress(self):
        '''Return next key press event from the queue, ignoring others.'''
        # NOTE(review): ck is assigned but never used; the branches below
        # reference System.ConsoleKey directly.
        ck = System.ConsoleKey
        while 1:
            e = System.Console.ReadKey(True)
            # Page keys scroll the window and are consumed (loop continues).
            if e.Key == System.ConsoleKey.PageDown: #PageDown
                self.scroll_window(12)
            elif e.Key == System.ConsoleKey.PageUp:#PageUp
                self.scroll_window(-12)
            elif str(e.KeyChar) == "\000":#Drop deadkeys
                # NOTE(review): despite the "drop" comment, this branch logs
                # and then returns the event just like the else branch --
                # deadkeys are NOT actually dropped.  Confirm intent before
                # changing.
                log("Deadkey: %s"%e)
                return event(self, e)
            else:
                return event(self, e)
def title(self, txt=None):
'''Set/get title.'''
if txt:
System.Console.Title = txt
else:
return System.Console.Title
    def size(self, width=None, height=None):
        '''Set/get window size.'''
        sc = System.Console
        if width is not None and height is not None:
            sc.BufferWidth, sc.BufferHeight = width,height
        else:
            # Getter path returns the BUFFER size, not the window size.
            return sc.BufferWidth, sc.BufferHeight
        if width is not None and height is not None:
            sc.WindowWidth, sc.WindowHeight = width,height
        else:
            # NOTE(review): this branch is unreachable -- when width/height
            # are None the first else above has already returned.  The
            # docstring suggests the window size below may have been the
            # intended getter result; confirm before changing behavior.
            return sc.WindowWidth - 1, sc.WindowHeight - 1
    def cursor(self, visible=True, size=None):
        '''Set cursor on or off.'''
        # size is accepted for interface compatibility but ignored on .NET.
        System.Console.CursorVisible = visible
    def bell(self):
        '''Sound the console bell.'''
        System.Console.Beep()
def next_serial(self):
'''Get next event serial number.'''
self.serial += 1
return self.serial
class event(Event):
    '''Represent events from the console.'''
    def __init__(self, console, input):
        '''Initialize an event from the Windows input structure.

        console supplies next_serial(); input is a .NET ConsoleKeyInfo-like
        object exposing KeyChar, Key and Modifiers.
        NOTE(review): the parameter name ``input`` shadows the builtin.
        '''
        # Placeholder type; overwritten with "KeyRelease" below.
        self.type = '??'
        self.serial = console.next_serial()
        # Geometry fields kept for interface parity with other event types.
        self.width = 0
        self.height = 0
        self.x = 0
        self.y = 0
        self.char = str(input.KeyChar)
        self.keycode = input.Key
        self.state = input.Modifiers
        log("%s,%s,%s"%(input.Modifiers, input.Key, input.KeyChar))
        self.type = "KeyRelease"
        self.keysym = make_keysym(self.keycode)
        self.keyinfo = make_KeyPress(self.char, self.state, self.keycode)
def make_event_from_keydescr(keydescr):
    '''Build an event object from a textual key description (e.g.
    "Control-c") rather than from a live console key press.'''
    # A throwaway function object doubles as both the fake console and the
    # fake input record: attributes are attached to it and next_serial is
    # pointed back at the function itself (which returns 1 when called).
    def input():
        return 1
    input.KeyChar = "a"
    input.Key = System.ConsoleKey.A
    input.Modifiers = System.ConsoleModifiers.Shift
    input.next_serial = input
    e = event(input,input)
    del input.next_serial
    # Replace the placeholder keyinfo with one parsed from the description.
    keyinfo = make_KeyPress_from_keydescr(keydescr)
    e.keyinfo = keyinfo
    return e
# Pre-built Ctrl-C event used to signal keyboard interrupts.
CTRL_C_EVENT=make_event_from_keydescr("Control-c")
def install_readline(hook):
    '''Install ``hook`` as the line reader of the IronPython console.

    The hook is wrapped so that EOF maps to None and a trailing newline is
    stripped, then registered via an IConsole adapter.
    '''
    def hook_wrap():
        try:
            res = hook()
        except KeyboardInterrupt as x: #this exception does not seem to be caught
            res = ""
        except EOFError:
            return None
        # NOTE(review): if hook() itself returns None this slicing raises
        # TypeError -- presumably hooks always return a string; confirm.
        if res[-1:] == "\n":
            return res[:-1]
        else:
            return res
    class IronPythonWrapper(IronPythonConsole.IConsole):
        # Adapter implementing the IronPython IConsole interface; style
        # arguments are ignored and output goes straight to System.Console.
        def ReadLine(self, autoIndentSize):
            return hook_wrap()
        def Write(self, text, style):
            System.Console.Write(text)
        def WriteLine(self, text, style):
            System.Console.WriteLine(text)
    IronPythonConsole.PythonCommandLine.MyConsole = IronPythonWrapper()
# Manual smoke test: redirects stdout/stderr to the console object, writes
# some text and echoes ten key presses.
if __name__ == '__main__':
    import time, sys
    c = Console(0)
    sys.stdout = c
    sys.stderr = c
    c.page()
    c.pos(5, 10)
    c.write('hi there')
    c.title("Testing console")
#    c.bell()
    print()
    print("size", c.size())
    print(' some printed output')
    for i in range(10):
        e = c.getkeypress()
        # NOTE(review): event instances define char/keycode/state, not
        # Key/KeyChar/Modifiers -- this print looks like it would raise
        # AttributeError; confirm before relying on this demo loop.
        print(e.Key, chr(e.KeyChar), ord(e.KeyChar), e.Modifiers)
    del c
    System.Console.Clear()
| {
"content_hash": "8eff2be2519122862b8a77d8fe959b05",
"timestamp": "",
"source": "github",
"line_count": 418,
"max_line_length": 90,
"avg_line_length": 32.844497607655505,
"alnum_prop": 0.5245829994901304,
"repo_name": "tanium/pytan",
"id": "aa7f706fe05f2fda06394dea77df13444c2bc297",
"size": "14177",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "winlib/pyreadline/console/ironpython_console.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "13251"
},
{
"name": "CSS",
"bytes": "32442"
},
{
"name": "HTML",
"bytes": "1232764"
},
{
"name": "JavaScript",
"bytes": "375167"
},
{
"name": "Makefile",
"bytes": "4287"
},
{
"name": "Python",
"bytes": "2541262"
},
{
"name": "Shell",
"bytes": "3194"
}
],
"symlink_target": ""
} |
import pickle
import doc2vec
# Load the previously built article graph from its pickled form.
# NOTE(review): pickle.load is only safe on trusted data -- this assumes the
# file was produced by this project's own build step.
with open('data/simple/models/graph_final.pickle', 'rb') as handle:
    gf = pickle.load(handle)
# count category vectors from articles
doc2vec.doc2vec(gf)
| {
"content_hash": "7e255f4a11aa2b7a349dbffe84608dde",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 67,
"avg_line_length": 23.375,
"alnum_prop": 0.7540106951871658,
"repo_name": "Humblehound/WikiSpatialTree",
"id": "13d0b8ee39431ca6527ba79d9a19ec4ab889eb8e",
"size": "187",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "runDoc2Vec.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "50230"
}
],
"symlink_target": ""
} |
import vb2py.extensions as extensions
# TODO: This class can be removed
class TranslateAttributes(extensions.SystemPluginREPlugin):
    """Plugin to convert attribute names from VB to Pythoncard.

    There are attributes like 'Text' and 'Visible' which are in lower
    case in Pythoncard and others are simply different. We do the conversion
    here.
    Note that this means we will convert these names even if they don't belong
    to controls - this is unfortunate but still safe as we do the conversion
    consistently.
    """
    name = "PlugInAttributeNames"
    __enabled = 0 # If false the plugin will not be called
    # All translation patterns are currently commented out, so the plugin is
    # effectively a no-op even when enabled (hence the removal TODO above).
    post_process_patterns = (
    #		(r"\.Text\b", ".text"),
    #		(r"\.Caption\b", ".text"),
    #		(r"\.Visible\b", ".visible"),
    #		(r"\.Enabled\b", ".enabled"),
    #		(r"\.BackColor\b", ".backgroundColor"),
    #		(r"\.ToolTipText\b", ".ToolTipText"),
    #		(r"\.AddItem\b", ".append"),
    )
| {
"content_hash": "6145d5d1a5cb49cfbc0ff0913816b0ba",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 78,
"avg_line_length": 34.964285714285715,
"alnum_prop": 0.6179775280898876,
"repo_name": "mvz/vb2py",
"id": "cc62b2c1325c5a3920fd2d8f2411ef5048f495ee",
"size": "979",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vb2py/plugins/attributenames.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "38050"
},
{
"name": "Python",
"bytes": "630966"
},
{
"name": "Shell",
"bytes": "40"
},
{
"name": "Visual Basic",
"bytes": "100566"
}
],
"symlink_target": ""
} |
from lxml import etree
from tempest.common import rest_client
from tempest.common import xml_utils as common
from tempest import config
CONF = config.CONF
class BaseExtensionsClientXML(rest_client.RestClient):
    """XML REST client for the volume extensions API."""
    TYPE = "xml"
    def __init__(self, auth_provider):
        """Point the client at the volume entry of the service catalog."""
        super(BaseExtensionsClientXML, self).__init__(auth_provider)
        self.service = CONF.volume.catalog_type
    def _parse_array(self, node):
        """Convert every child element of ``node`` into a dict."""
        return [common.xml_to_json(child) for child in node]
    def list_extensions(self):
        """Return (response, parsed extension list) from the endpoint."""
        resp, raw = self.get('extensions')
        parsed = self._parse_array(etree.fromstring(raw))
        return resp, parsed
class ExtensionsClientXML(BaseExtensionsClientXML):
    """
    Volume V1 extensions client.

    Inherits all behaviour unchanged from BaseExtensionsClientXML; exists so
    the v1 API has a distinctly named client class.
    """
| {
"content_hash": "31b2c43839d45709c434e1fe06372585",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 68,
"avg_line_length": 24.90909090909091,
"alnum_prop": 0.6593673965936739,
"repo_name": "Mirantis/tempest",
"id": "fe8b7cb1865b39b7ac3cb1039f247d66ff64d1de",
"size": "1458",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tempest/services/volume/xml/extensions_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3297127"
},
{
"name": "Shell",
"bytes": "8663"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('users', '0003_auto_20170414_1807'),
]
operations = [
migrations.AlterField(
model_name='manager',
name='reports_to',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='users.Manager'),
),
]
| {
"content_hash": "9501b51f204294ae67c0051bb6b73e91",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 124,
"avg_line_length": 25.68421052631579,
"alnum_prop": 0.6372950819672131,
"repo_name": "raghuraju/Simple-Project-Management",
"id": "ae76a9c52ed003155448f0db8ed7db13cf753d1f",
"size": "559",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/users/migrations/0004_auto_20170414_1809.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "35187"
}
],
"symlink_target": ""
} |
"""
Bootstrap the app-template. This module disables itself after execution.
(This is meant to be run BEFORE changing $NEW_PROJECT_SLUG.
Once $NEW_PROJECT_SLUG is changed, bootstrap will no longer run.
See __init__.py for details.)
"""
import app_config
import json
import os
import subprocess
import uuid
import webbrowser
from . import utils
from distutils.spawn import find_executable
from fabric.api import execute, local, prompt, task
from oauth import get_credentials
from time import sleep
# Google Drive API endpoint for copying a file by key, and the user-facing
# spreadsheet view URL (both take the file key via %s).
SPREADSHEET_COPY_URL_TEMPLATE = 'https://www.googleapis.com/drive/v2/files/%s/copy'
SPREADSHEET_VIEW_TEMPLATE = 'https://docs.google.com/spreadsheet/ccc?key=%s#gid=1'
@task(default=True)
def go(github_username=app_config.GITHUB_USERNAME, repository_name=None):
    """
    Execute the bootstrap tasks for a new project.

    Rewrites template placeholders, copies the COPY spreadsheet, re-creates
    the git repository and pushes the initial commit to GitHub.  Destructive:
    deletes the existing .git directory.
    """
    check_credentials()
    # Files containing $NEW_* placeholders to be rewritten in place.
    config_files = ' '.join(['PROJECT_README.md', 'app_config.py', 'crontab'])
    config = {}
    # Project slug defaults to the name of the working directory.
    config['$NEW_PROJECT_SLUG'] = os.getcwd().split('/')[-1]
    config['$NEW_REPOSITORY_NAME'] = repository_name or config['$NEW_PROJECT_SLUG']
    config['$NEW_PROJECT_FILENAME'] = config['$NEW_PROJECT_SLUG'].replace('-', '_')
    utils.confirm("Have you created a Github repository named \"%s\"?" % config['$NEW_REPOSITORY_NAME'])
    # Create the spreadsheet
    title = '%s COPY' % config['$NEW_PROJECT_SLUG']
    new_spreadsheet_key = create_spreadsheet(title)
    if new_spreadsheet_key:
        # The template's old spreadsheet key is used as the placeholder to
        # be sed-replaced with the freshly copied sheet's key.
        config[app_config.COPY_GOOGLE_DOC_KEY] = new_spreadsheet_key
    else:
        print('No spreadsheet created, you will need to update COPY_GOOGLE_DOC_KEY manually.')
    # In-place substitution of every placeholder in the config files
    # (BSD sed syntax: -i "" takes an empty backup suffix).
    for k, v in config.items():
        local('sed -i "" \'s|%s|%s|g\' %s' % (k, v, config_files))
    # Start a fresh repository history for the new project.
    local('rm -rf .git')
    local('git init')
    local('mv PROJECT_README.md README.md')
    local('rm -f *.pyc')
    local('rm -f LICENSE')
    local('git add .')
    # local('git add -f www/assets/assetsignore')
    local('git commit -am "Initial import from app-template."')
    local('git remote add origin git@github.com:%s/%s.git' % (github_username, config['$NEW_REPOSITORY_NAME']))
    local('git push -u origin main')
    # Update app data
    execute('update')
    if new_spreadsheet_key:
        print('You can view your COPY spreadsheet at:')
        print(SPREADSHEET_VIEW_TEMPLATE % new_spreadsheet_key)
def check_credentials():
    """
    Check credentials and spawn server and browser if not
    """
    credentials = get_credentials()
    # Re-authenticate when credentials are missing or lack the Drive scope.
    if not credentials or 'https://www.googleapis.com/auth/drive' not in credentials.config['google']['scope']:
        try:
            with open(os.devnull, 'w') as fnull:
                print('Credentials were not found or permissions were not correct. Automatically opening a browser to authenticate with Google.')
                # Run the local OAuth callback app silently in the background
                # and send the user's browser to it.
                gunicorn = find_executable('gunicorn')
                process = subprocess.Popen([gunicorn, '-b', '127.0.0.1:8888', 'app:wsgi_app'], stdout=fnull, stderr=fnull)
                webbrowser.open_new('http://127.0.0.1:8888/oauth')
                print('Waiting...')
                # Poll once a second until the OAuth flow writes credentials.
                while not credentials:
                    try:
                        credentials = get_credentials()
                        sleep(1)
                    except ValueError:
                        # Credentials file not ready/parsable yet; keep polling.
                        continue
                print('Successfully authenticated!')
                process.terminate()
        except KeyboardInterrupt:
            print('\nCtrl-c pressed. Later, skater!')
            exit()
def create_spreadsheet(title):
    """
    Copy the COPY spreadsheet.

    Issues a Drive v2 files/copy request for the template spreadsheet and
    returns the new file's key, or None on failure.
    """
    kwargs = {
        'credentials': get_credentials(),
        'url': SPREADSHEET_COPY_URL_TEMPLATE % app_config.COPY_GOOGLE_DOC_KEY,
        'method': 'POST',
        'headers': {'Content-Type': 'application/json'},
        'body': json.dumps({
            'title': title,
        }),
    }
    resp = app_config.authomatic.access(**kwargs)
    if resp.status == 200:
        spreadsheet_key = resp.data['id']
        print('New spreadsheet created with key %s' % spreadsheet_key)
        return spreadsheet_key
    else:
        print('Error creating spreadsheet (status code %s) with message %s' % (resp.status, resp.reason))
        return None
| {
"content_hash": "9feb679bbdd1a3404d36c1f407d63d21",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 145,
"avg_line_length": 36.51724137931034,
"alnum_prop": 0.6251180358829084,
"repo_name": "PostDispatchInteractive/app-template",
"id": "f32ac083efc0f97f3b913afd638a583517b6dd98",
"size": "4260",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "fabfile/bootstrap.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "238"
},
{
"name": "HTML",
"bytes": "24530"
},
{
"name": "JavaScript",
"bytes": "196068"
},
{
"name": "Less",
"bytes": "36224"
},
{
"name": "Python",
"bytes": "77763"
},
{
"name": "Shell",
"bytes": "327"
}
],
"symlink_target": ""
} |
from flask import blueprints, request, session, url_for, render_template
from werkzeug.utils import redirect
from models.users.user import User
import models.users.errors as UserErrors
import models.users.decorators as user_decorators
user_blueprint = blueprints.Blueprint('users', __name__)
@user_blueprint.route('/login', methods=['GET', 'POST'])
def login_user():
    """Render the login form; on POST, validate credentials and log in."""
    if request.method == 'POST':
        email = request.form['email']
        password = request.form['password']
        try:
            valid = User.is_login_valid(email, password)
        except UserErrors.UserError as e:
            # Validation errors surface their message directly to the user.
            return e.message
        if valid:
            session['email'] = email
            return redirect(url_for(".user_alerts"))
    # GET request, or a POST whose credentials did not validate.
    return render_template("users/login.html")
@user_blueprint.route('/register', methods=['GET', 'POST'])
def register_user():
    """Render the signup form; on POST, create the account and log in."""
    if request.method == 'POST':
        email = request.form['email']
        password = request.form['password']
        try:
            created = User.register_user(email, password)
        except UserErrors.UserError as e:
            # Registration errors surface their message directly to the user.
            return e.message
        if created:
            session['email'] = email
            return redirect(url_for(".user_alerts"))
    # GET request, or a POST that did not result in a new account.
    return render_template("users/register.html")
@user_blueprint.route('/alerts')
@user_decorators.requires_login
def user_alerts():
    """Show the logged-in user's price alerts."""
    user = User.find_by_email(session['email'])
    alerts = user.get_alerts()
    return render_template('users/alerts.html', alerts=alerts)
@user_blueprint.route('/logout')
def logout_user():
    """Clear the login marker and return to the home page."""
    # NOTE(review): sets the key to None rather than popping it; presumably
    # requires_login treats a None email as logged out -- confirm.
    session['email'] = None
    return redirect(url_for('home'))
@user_blueprint.route('/check_alerts/<string:user_id>')
def check_user_alerts(user_id):
    # TODO: unimplemented stub -- intended to trigger alert checks for the
    # given user id.
    pass
"content_hash": "366ca049db34a9ba062f4a7ec7a9582d",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 72,
"avg_line_length": 25.934426229508198,
"alnum_prop": 0.7098609355246523,
"repo_name": "asimonia/pricing-alerts",
"id": "0ccd78e889ef061cd64815c236385cb0d81d4320",
"size": "1582",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/models/users/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "201"
},
{
"name": "HTML",
"bytes": "12208"
},
{
"name": "Python",
"bytes": "17700"
}
],
"symlink_target": ""
} |
"""
Unit test for ClassSelector parameters.
"""
from numbers import Number
import param
from . import API1TestCase
class TestClassSelectorParameters(API1TestCase):
    """Validate ClassSelector acceptance/rejection for instance and type
    values, with single and multiple allowed classes."""

    def setUp(self):
        super(TestClassSelectorParameters, self).setUp()
        class P(param.Parameterized):
            # e/g accept instances; f/h accept the classes themselves.
            e = param.ClassSelector(default=1,class_=int)
            f = param.ClassSelector(default=int,class_=Number, is_instance=False)
            g = param.ClassSelector(default=1,class_=(int,str))
            h = param.ClassSelector(default=int,class_=(int,str), is_instance=False)

        self.P = P

    def test_single_class_instance_constructor(self):
        p = self.P(e=6)
        self.assertEqual(p.e, 6)

    def test_single_class_instance_error(self):
        exception = "Parameter 'e' value must be an instance of int, not 'a'"
        with self.assertRaisesRegexp(ValueError, exception):
            self.P(e='a')

    def test_single_class_type_constructor(self):
        p = self.P(f=float)
        self.assertEqual(p.f, float)

    def test_single_class_type_error(self):
        exception = "Parameter 'str' must be a subclass of Number, not 'type'"
        with self.assertRaisesRegexp(ValueError, exception):
            self.P(f=str)

    def test_multiple_class_instance_constructor1(self):
        p = self.P(g=1)
        self.assertEqual(p.g, 1)

    def test_multiple_class_instance_constructor2(self):
        p = self.P(g='A')
        self.assertEqual(p.g, 'A')

    def test_multiple_class_instance_error(self):
        # Raw string: the parentheses are regex-escaped for assertRaisesRegexp;
        # a plain literal here triggers invalid-escape DeprecationWarnings.
        exception = r"Parameter 'g' value must be an instance of \(int, str\), not '3.0'"
        with self.assertRaisesRegexp(ValueError, exception):
            self.P(g=3.0)

    def test_multiple_class_type_constructor1(self):
        p = self.P(h=int)
        self.assertEqual(p.h, int)

    def test_multiple_class_type_constructor2(self):
        p = self.P(h=str)
        self.assertEqual(p.h, str)

    def test_multiple_class_type_error(self):
        # Raw string for the same reason as above.
        exception = r"Parameter 'float' must be a subclass of \(int, str\), not 'type'"
        with self.assertRaisesRegexp(ValueError, exception):
            self.P(h=float)
class TestDictParameters(API1TestCase):
    """Validate that Dict parameters accept dict defaults and reject
    non-dict assignments."""

    def test_valid_dict_parameter(self):
        valid_dict = {1:2, 3:3}
        # Declaring the class is the assertion: it must not raise.
        class Test(param.Parameterized):
            items = param.Dict(default=valid_dict)

    def test_valid_dict_parameter_positional(self):
        valid_dict = {1:2, 3:3}
        # Same as above, but passing the default positionally.
        class Test(param.Parameterized):
            items = param.Dict(valid_dict)

    def test_dict_invalid_set(self):
        valid_dict = {1:2, 3:3}
        class Test(param.Parameterized):
            items = param.Dict(valid_dict)

        test = Test()
        exception = "Parameter 'items' value must be an instance of dict, not '3'"
        with self.assertRaisesRegexp(ValueError, exception):
            test.items = 3
| {
"content_hash": "8a2d12063be01a57575217f53b6984db",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 88,
"avg_line_length": 31.747252747252748,
"alnum_prop": 0.6285912080304603,
"repo_name": "ioam/param",
"id": "e8aaf7d6d2db0fcc9f17b7769ff8e2fe2a9765e7",
"size": "2889",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/API1/testclassselector.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "387425"
}
],
"symlink_target": ""
} |
import uuid
import boto3
import pytest
from botocore.exceptions import ClientError
from moto import mock_sagemaker
from moto.core import DEFAULT_ACCOUNT_ID as ACCOUNT_ID
TEST_REGION_NAME = "us-east-1"
@mock_sagemaker
def test_create__trial_component():
    # Creating a trial component should make it listable with the expected
    # name and a correctly formed ARN.
    client = boto3.client("sagemaker", region_name=TEST_REGION_NAME)
    trial_component_name = "some-trial-component-name"
    resp = client.create_trial_component(TrialComponentName=trial_component_name)
    assert resp["ResponseMetadata"]["HTTPStatusCode"] == 200
    resp = client.list_trial_components()
    assert len(resp["TrialComponentSummaries"]) == 1
    assert (
        resp["TrialComponentSummaries"][0]["TrialComponentName"] == trial_component_name
    )
    assert (
        resp["TrialComponentSummaries"][0]["TrialComponentArn"]
        == f"arn:aws:sagemaker:{TEST_REGION_NAME}:{ACCOUNT_ID}:experiment-trial-component/{trial_component_name}"
    )
@mock_sagemaker
def test_list_trial_components():
    # Pagination: 10 components fetched as pages of 1, 2 and the remaining 7;
    # the final page must not return a NextToken.
    client = boto3.client("sagemaker", region_name=TEST_REGION_NAME)
    trial_component_names = [f"some-trial-component-name-{i}" for i in range(10)]
    for trial_component_name in trial_component_names:
        resp = client.create_trial_component(TrialComponentName=trial_component_name)
    resp = client.list_trial_components(MaxResults=1)
    assert len(resp["TrialComponentSummaries"]) == 1
    next_token = resp["NextToken"]
    resp = client.list_trial_components(MaxResults=2, NextToken=next_token)
    assert len(resp["TrialComponentSummaries"]) == 2
    next_token = resp["NextToken"]
    resp = client.list_trial_components(NextToken=next_token)
    assert len(resp["TrialComponentSummaries"]) == 7
    assert resp.get("NextToken") is None
@mock_sagemaker
def test_delete__trial_component():
    # Deleting the only trial component should leave the listing empty.
    client = boto3.client("sagemaker", region_name=TEST_REGION_NAME)
    trial_component_name = "some-trial-component-name"
    resp = client.create_trial_component(TrialComponentName=trial_component_name)
    resp = client.delete_trial_component(TrialComponentName=trial_component_name)
    assert resp["ResponseMetadata"]["HTTPStatusCode"] == 200
    resp = client.list_trial_components()
    assert len(resp["TrialComponentSummaries"]) == 0
@mock_sagemaker
def test_add_tags_to_trial_component():
    # Tags added via the component ARN must come back from list_tags.
    client = boto3.client("sagemaker", region_name=TEST_REGION_NAME)
    trial_component_name = "some-trial-component-name"
    resp = client.create_trial_component(TrialComponentName=trial_component_name)
    resp = client.describe_trial_component(TrialComponentName=trial_component_name)
    arn = resp["TrialComponentArn"]
    tags = [{"Key": "name", "Value": "value"}]
    client.add_tags(ResourceArn=arn, Tags=tags)
    # NOTE(review): resp here is still the describe response, so this only
    # re-checks the describe status code, not the add_tags call.
    assert resp["ResponseMetadata"]["HTTPStatusCode"] == 200
    resp = client.list_tags(ResourceArn=arn)
    assert resp["Tags"] == tags
@mock_sagemaker
def test_delete_tags_to_trial_component():
    # Deleting all previously added tags must leave the tag list empty.
    client = boto3.client("sagemaker", region_name=TEST_REGION_NAME)
    trial_component_name = "some-trial-component-name"
    resp = client.create_trial_component(TrialComponentName=trial_component_name)
    resp = client.describe_trial_component(TrialComponentName=trial_component_name)
    arn = resp["TrialComponentArn"]
    tags = [{"Key": "name", "Value": "value"}]
    client.add_tags(ResourceArn=arn, Tags=tags)
    client.delete_tags(ResourceArn=arn, TagKeys=[i["Key"] for i in tags])
    # NOTE(review): as in the add-tags test, this re-checks the describe
    # response's status code rather than the delete_tags call.
    assert resp["ResponseMetadata"]["HTTPStatusCode"] == 200
    resp = client.list_tags(ResourceArn=arn)
    assert resp["Tags"] == []
@mock_sagemaker
def test_list_trial_component_tags():
    # Tag listing paginates at 50 entries: 80 tags come back as 50 + 30.
    client = boto3.client("sagemaker", region_name=TEST_REGION_NAME)
    trial_component_name = "some-trial-component-name"
    client.create_trial_component(TrialComponentName=trial_component_name)
    resp = client.describe_trial_component(TrialComponentName=trial_component_name)
    resource_arn = resp["TrialComponentArn"]
    tags = []
    for _ in range(80):
        tags.append({"Key": str(uuid.uuid4()), "Value": "myValue"})
    response = client.add_tags(ResourceArn=resource_arn, Tags=tags)
    assert response["ResponseMetadata"]["HTTPStatusCode"] == 200
    response = client.list_tags(ResourceArn=resource_arn)
    assert len(response["Tags"]) == 50
    assert response["Tags"] == tags[:50]
    response = client.list_tags(
        ResourceArn=resource_arn, NextToken=response["NextToken"]
    )
    assert len(response["Tags"]) == 30
    assert response["Tags"] == tags[50:]
@mock_sagemaker
def test_associate_trial_component():
    # Associating a component with a trial must make each visible from the
    # other's listing; associating against missing resources must 404.
    client = boto3.client("sagemaker", region_name=TEST_REGION_NAME)
    experiment_name = "some-experiment-name"
    resp = client.create_experiment(ExperimentName=experiment_name)
    trial_name = "some-trial-name"
    resp = client.create_trial(ExperimentName=experiment_name, TrialName=trial_name)
    trial_component_name = "some-trial-component-name"
    resp = client.create_trial_component(TrialComponentName=trial_component_name)
    resp = client.associate_trial_component(
        TrialComponentName=trial_component_name, TrialName=trial_name
    )
    assert resp["ResponseMetadata"]["HTTPStatusCode"] == 200
    assert (
        resp["TrialComponentArn"]
        == f"arn:aws:sagemaker:{TEST_REGION_NAME}:{ACCOUNT_ID}:experiment-trial-component/{trial_component_name}"
    )
    assert (
        resp["TrialArn"]
        == f"arn:aws:sagemaker:{TEST_REGION_NAME}:{ACCOUNT_ID}:experiment-trial/{trial_name}"
    )
    resp = client.list_trial_components(TrialName=trial_name)
    assert resp["ResponseMetadata"]["HTTPStatusCode"] == 200
    assert (
        resp["TrialComponentSummaries"][0]["TrialComponentName"] == trial_component_name
    )
    resp = client.list_trials(TrialComponentName=trial_component_name)
    assert resp["ResponseMetadata"]["HTTPStatusCode"] == 200
    assert resp["TrialSummaries"][0]["TrialName"] == trial_name
    with pytest.raises(ClientError) as ex:
        resp = client.associate_trial_component(
            TrialComponentName="does-not-exist", TrialName="does-not-exist"
        )
    # Plain asserts instead of sure's `.should.equal(...)`: consistent with
    # the rest of this module and removes the implicit `sure` dependency.
    assert ex.value.response["Error"]["Code"] == "ResourceNotFound"
    assert (
        ex.value.response["Error"]["Message"]
        == f"Trial 'arn:aws:sagemaker:{TEST_REGION_NAME}:{ACCOUNT_ID}:experiment-trial/does-not-exist' does not exist."
    )
    assert ex.value.response["ResponseMetadata"]["HTTPStatusCode"] == 400
@mock_sagemaker
def test_disassociate_trial_component():
    # Disassociation clears both directions of the trial<->component link,
    # and (unlike association) succeeds even for nonexistent resources.
    client = boto3.client("sagemaker", region_name=TEST_REGION_NAME)
    experiment_name = "some-experiment-name"
    resp = client.create_experiment(ExperimentName=experiment_name)
    trial_name = "some-trial-name"
    resp = client.create_trial(ExperimentName=experiment_name, TrialName=trial_name)
    trial_component_name = "some-trial-component-name"
    resp = client.create_trial_component(TrialComponentName=trial_component_name)
    client.associate_trial_component(
        TrialComponentName=trial_component_name, TrialName=trial_name
    )
    resp = client.disassociate_trial_component(
        TrialComponentName=trial_component_name, TrialName=trial_name
    )
    assert resp["ResponseMetadata"]["HTTPStatusCode"] == 200
    assert (
        resp["TrialComponentArn"]
        == f"arn:aws:sagemaker:{TEST_REGION_NAME}:{ACCOUNT_ID}:experiment-trial-component/{trial_component_name}"
    )
    assert (
        resp["TrialArn"]
        == f"arn:aws:sagemaker:{TEST_REGION_NAME}:{ACCOUNT_ID}:experiment-trial/{trial_name}"
    )
    resp = client.list_trial_components(TrialName=trial_name)
    assert resp["ResponseMetadata"]["HTTPStatusCode"] == 200
    assert len(resp["TrialComponentSummaries"]) == 0
    resp = client.list_trials(TrialComponentName=trial_component_name)
    assert resp["ResponseMetadata"]["HTTPStatusCode"] == 200
    assert len(resp["TrialSummaries"]) == 0
    # Disassociating resources that were never created is not an error.
    resp = client.disassociate_trial_component(
        TrialComponentName="does-not-exist", TrialName="does-not-exist"
    )
    assert resp["ResponseMetadata"]["HTTPStatusCode"] == 200
    assert (
        resp["TrialComponentArn"]
        == f"arn:aws:sagemaker:{TEST_REGION_NAME}:{ACCOUNT_ID}:experiment-trial-component/does-not-exist"
    )
    assert (
        resp["TrialArn"]
        == f"arn:aws:sagemaker:{TEST_REGION_NAME}:{ACCOUNT_ID}:experiment-trial/does-not-exist"
    )
| {
"content_hash": "108ac662141dada5911fd030aeef9c96",
"timestamp": "",
"source": "github",
"line_count": 265,
"max_line_length": 116,
"avg_line_length": 31.754716981132077,
"alnum_prop": 0.7011289364230541,
"repo_name": "spulec/moto",
"id": "0affddc3cb7c1c2315f6a4616c3ae97112d7b73b",
"size": "8415",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_sagemaker/test_sagemaker_trial_component.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "255"
},
{
"name": "HTML",
"bytes": "5983"
},
{
"name": "Java",
"bytes": "1688"
},
{
"name": "JavaScript",
"bytes": "1424"
},
{
"name": "Jinja",
"bytes": "2502"
},
{
"name": "Makefile",
"bytes": "2284"
},
{
"name": "Python",
"bytes": "14737868"
},
{
"name": "Ruby",
"bytes": "188"
},
{
"name": "Scala",
"bytes": "782"
},
{
"name": "Shell",
"bytes": "5515"
}
],
"symlink_target": ""
} |
import unittest
from vodem.api import dns_mode
class TestDnsMode(unittest.TestCase):
    """Exercise the vodem.api.dns_mode wrapper."""
    @classmethod
    def setUpClass(cls):
        # Payload the modem API is expected to return.
        cls.valid_response = {
            'dns_mode': 'auto',
        }
    def test_call(self):
        # NOTE(review): calls the wrapper directly; presumably mocked at a
        # lower transport layer or run against a real device -- confirm.
        resp = dns_mode()
        self.assertEqual(self.valid_response, resp)
| {
"content_hash": "0e164edca903c67751a4869c006bf012",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 51,
"avg_line_length": 19.25,
"alnum_prop": 0.6071428571428571,
"repo_name": "alzeih/python-vodem-vodafone-K4607-Z",
"id": "1fff65ffa07acb9d3f1247c34c5f18b08d4ce529",
"size": "308",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/unit/api/test_dns_mode.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "19346"
},
{
"name": "JavaScript",
"bytes": "444689"
},
{
"name": "Python",
"bytes": "84811"
},
{
"name": "Shell",
"bytes": "46"
}
],
"symlink_target": ""
} |
"""The tests for the wake on lan switch platform."""
import unittest
from unittest.mock import patch
from homeassistant.setup import setup_component
from homeassistant.const import STATE_ON, STATE_OFF
import homeassistant.components.switch as switch
from tests.common import get_test_home_assistant
TEST_STATE = None
def send_magic_packet(*macs, **kwargs):
    """Stand-in for wakeonlan's send_magic_packet; swallows all arguments."""
    return None
def call(cmd, stdout, stderr):
    """Fake subprocess.call: exit 0 only when pinging the valid host while
    the simulated host state (TEST_STATE) is up; otherwise exit 2."""
    host_is_up = cmd[5] == 'validhostname' and TEST_STATE
    return 0 if host_is_up else 2
def system():
    """Fake platform.system that always reports a Windows host."""
    return 'Windows'
class TestWOLSwitch(unittest.TestCase):
    """Test the wol switch."""
    def setUp(self):
        """Setup things to be run when tests are started."""
        self.hass = get_test_home_assistant()
    def tearDown(self):
        """Stop everything that was started."""
        self.hass.stop()
    @patch('wakeonlan.wol.send_magic_packet', new=send_magic_packet)
    @patch('subprocess.call', new=call)
    def test_valid_hostname(self):
        """Test with valid hostname."""
        global TEST_STATE
        # Host starts down, so the switch initializes as off.
        TEST_STATE = False
        self.assertTrue(setup_component(self.hass, switch.DOMAIN, {
            'switch': {
                'platform': 'wake_on_lan',
                'mac_address': '00-01-02-03-04-05',
                'host': 'validhostname',
            }
        }))
        state = self.hass.states.get('switch.wake_on_lan')
        self.assertEqual(STATE_OFF, state.state)
        TEST_STATE = True
        switch.turn_on(self.hass, 'switch.wake_on_lan')
        self.hass.block_till_done()
        state = self.hass.states.get('switch.wake_on_lan')
        self.assertEqual(STATE_ON, state.state)
        # No turn_off command is configured and the host still responds to
        # ping (TEST_STATE is True), so the switch stays on after turn_off.
        switch.turn_off(self.hass, 'switch.wake_on_lan')
        self.hass.block_till_done()
        state = self.hass.states.get('switch.wake_on_lan')
        self.assertEqual(STATE_ON, state.state)
    @patch('wakeonlan.wol.send_magic_packet', new=send_magic_packet)
    @patch('subprocess.call', new=call)
    @patch('platform.system', new=system)
    def test_valid_hostname_windows(self):
        """Test with valid hostname on windows."""
        # Same as test_valid_hostname but with the Windows ping code path.
        global TEST_STATE
        TEST_STATE = False
        self.assertTrue(setup_component(self.hass, switch.DOMAIN, {
            'switch': {
                'platform': 'wake_on_lan',
                'mac_address': '00-01-02-03-04-05',
                'host': 'validhostname',
            }
        }))
        state = self.hass.states.get('switch.wake_on_lan')
        self.assertEqual(STATE_OFF, state.state)
        TEST_STATE = True
        switch.turn_on(self.hass, 'switch.wake_on_lan')
        self.hass.block_till_done()
        state = self.hass.states.get('switch.wake_on_lan')
        self.assertEqual(STATE_ON, state.state)
    @patch('wakeonlan.wol.send_magic_packet', new=send_magic_packet)
    def test_minimal_config(self):
        """Test with minimal config."""
        # Only the MAC address is required for the platform to set up.
        self.assertTrue(setup_component(self.hass, switch.DOMAIN, {
            'switch': {
                'platform': 'wake_on_lan',
                'mac_address': '00-01-02-03-04-05',
            }
        }))
    @patch('wakeonlan.wol.send_magic_packet', new=send_magic_packet)
    @patch('subprocess.call', new=call)
    def test_broadcast_config(self):
        """Test with broadcast address config."""
        # Without a host the switch has no ping check and reports off.
        self.assertTrue(setup_component(self.hass, switch.DOMAIN, {
            'switch': {
                'platform': 'wake_on_lan',
                'mac_address': '00-01-02-03-04-05',
                'broadcast_address': '255.255.255.255',
            }
        }))
        state = self.hass.states.get('switch.wake_on_lan')
        self.assertEqual(STATE_OFF, state.state)
        switch.turn_on(self.hass, 'switch.wake_on_lan')
        self.hass.block_till_done()
    @patch('wakeonlan.wol.send_magic_packet', new=send_magic_packet)
    @patch('subprocess.call', new=call)
    def test_off_script(self):
        """Test with turn off script."""
        # With a turn_off service configured, the switch can actually report
        # off again once the simulated host stops responding.
        global TEST_STATE
        TEST_STATE = False
        self.assertTrue(setup_component(self.hass, switch.DOMAIN, {
            'switch': {
                'platform': 'wake_on_lan',
                'mac_address': '00-01-02-03-04-05',
                'host': 'validhostname',
                'turn_off': {
                    'service': 'shell_command.turn_off_TARGET',
                },
            }
        }))
        state = self.hass.states.get('switch.wake_on_lan')
        self.assertEqual(STATE_OFF, state.state)
        TEST_STATE = True
        switch.turn_on(self.hass, 'switch.wake_on_lan')
        self.hass.block_till_done()
        state = self.hass.states.get('switch.wake_on_lan')
        self.assertEqual(STATE_ON, state.state)
        TEST_STATE = False
        switch.turn_off(self.hass, 'switch.wake_on_lan')
        self.hass.block_till_done()
        state = self.hass.states.get('switch.wake_on_lan')
        self.assertEqual(STATE_OFF, state.state)
    @patch('wakeonlan.wol.send_magic_packet', new=send_magic_packet)
    @patch('subprocess.call', new=call)
    @patch('platform.system', new=system)
    def test_invalid_hostname_windows(self):
        """Test with invalid hostname on windows."""
        # An unreachable host never pings, so turn_on cannot flip the state.
        global TEST_STATE
        TEST_STATE = False
        self.assertTrue(setup_component(self.hass, switch.DOMAIN, {
            'switch': {
                'platform': 'wake_on_lan',
                'mac_address': '00-01-02-03-04-05',
                'host': 'invalidhostname',
            }
        }))
        state = self.hass.states.get('switch.wake_on_lan')
        self.assertEqual(STATE_OFF, state.state)
        TEST_STATE = True
        switch.turn_on(self.hass, 'switch.wake_on_lan')
        self.hass.block_till_done()
        state = self.hass.states.get('switch.wake_on_lan')
        self.assertEqual(STATE_OFF, state.state)
| {
"content_hash": "7593349acdd43629f19a8ed31c020167",
"timestamp": "",
"source": "github",
"line_count": 188,
"max_line_length": 68,
"avg_line_length": 32.851063829787236,
"alnum_prop": 0.5667098445595855,
"repo_name": "stefan-jonasson/home-assistant",
"id": "063cf93d87118ca7bc59faba2623b9de3782c0fa",
"size": "6176",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/components/switch/test_wake_on_lan.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4056"
},
{
"name": "Python",
"bytes": "8360711"
},
{
"name": "Ruby",
"bytes": "517"
},
{
"name": "Shell",
"bytes": "12658"
}
],
"symlink_target": ""
} |
"""booksearch URL Configuration.
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
# URL routes: the Django admin, plus the search app mounted at the site root.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    # Catch-all prefix: every other path is resolved by search's URLconf.
    url(r'^', include("search.urls"))
]
| {
"content_hash": "3270399a5dc7af41bc6ba672e4d1a578",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 79,
"avg_line_length": 37.04545454545455,
"alnum_prop": 0.694478527607362,
"repo_name": "gitgik/booksearch",
"id": "2f29cd66d8d9fad83f4b9a4ecf51c5a2ab42b426",
"size": "815",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "booksearch/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1736"
},
{
"name": "Python",
"bytes": "13985"
}
],
"symlink_target": ""
} |
import abc
import copy
import numpy as np
from menpo.base import Vectorizable
from menpo.transform.base import (Alignment, ComposableTransform,
VComposable, VInvertible)
class HomogFamilyAlignment(Alignment):
    r"""
    Simple subclass of Alignment that adds the ability to create a copy of an
    alignment class without the alignment behavior.
    """
    @abc.abstractmethod
    def copy_without_alignment(self):
        r"""
        Returns a copy of this transform with the alignment behaviour
        dropped. Concrete subclasses must implement this.
        """
        pass
class Homogeneous(ComposableTransform, Vectorizable, VComposable, VInvertible):
    r"""
    A simple n-dimensional homogeneous transformation.
    Adds a unit homogeneous coordinate to points, performs the dot
    product, re-normalizes by division by the homogeneous coordinate,
    and returns the result.
    Can be composed with another Homogeneous, so long as the dimensionality
    matches.
    Parameters
    ----------
    h_matrix : (n_dims + 1, n_dims + 1) ndarray
        The homogeneous matrix to be applied.
    """
    def __init__(self, h_matrix):
        # Store a copy so later mutation of the caller's array cannot
        # silently change this transform.
        self._h_matrix = h_matrix.copy()
    @classmethod
    def identity(cls, n_dims):
        r"""
        An identity transform for ``n_dims`` dimensional data. Note that a
        plain :class:`Homogeneous` is returned regardless of the subclass
        this is invoked on.
        """
        return Homogeneous(np.eye(n_dims + 1))
    @property
    def h_matrix(self):
        r"""
        The homogeneous matrix defining this transform (replace it via
        :meth:`set_h_matrix`).
        """
        return self._h_matrix
    def set_h_matrix(self, value):
        r"""
        Replaces the homogeneous matrix with a copy of ``value``.
        """
        # TODO add verification logic for homogeneous here
        self._h_matrix = value.copy()
    @property
    def n_dims(self):
        # NOTE(review): _apply computes h_x.dot(h_matrix.T), so the input
        # dimensionality corresponds to h_matrix.shape[1] and the output to
        # h_matrix.shape[0]; n_dims/n_dims_output look transposed for
        # non-square matrices - confirm the intended semantics.
        return self.h_matrix.shape[0] - 1
    @property
    def n_dims_output(self):
        # doesn't have to be a square homogeneous matrix...
        return self.h_matrix.shape[1] - 1
    def _apply(self, x, **kwargs):
        # convert to homogeneous
        h_x = np.hstack([x, np.ones([x.shape[0], 1])])
        # apply the transform
        h_y = h_x.dot(self.h_matrix.T)
        # normalize and return
        return (h_y / h_y[:, -1][:, None])[:, :-1]
    def as_vector(self):
        r"""
        The flattened homogeneous matrix - the vector representation of
        this transform.
        """
        return self.h_matrix.flatten()
    def from_vector_inplace(self, vector):
        r"""
        Updates this transform in-place from a flattened homogeneous
        matrix (the inverse operation of :meth:`as_vector`).
        """
        self.set_h_matrix(vector.reshape(self.h_matrix.shape))
    @property
    def composes_inplace_with(self):
        r"""
        Homogeneous can swallow composition with any other Homogeneous,
        subclasses will have to override and be more specific.
        """
        return Homogeneous
    def compose_after_from_vector_inplace(self, vector):
        # Rebuild a transform from the vector form and compose it after self.
        self.compose_after_inplace(self.from_vector(vector))
    @property
    def composes_with(self):
        r"""
        Any Homogeneous can compose with any other Homogeneous.
        """
        return Homogeneous
    # noinspection PyProtectedMember
    def _compose_before(self, t):
        r"""
        Chains an Homogeneous family transform with another transform of the
        same family, producing a new transform that is the composition of
        the two.
        .. note::
            The type of the returned transform is always the first common
            ancestor between self and ``t``.
            Any Alignment will be lost.
        Parameters
        ----------
        t : :class:`Homogeneous`
            Transform to be applied **after** self
        Returns
        --------
        transform : :class:`Homogeneous`
            The resulting homogeneous transform.
        """
        # note that this overload of the basic _compose_before is just to
        # deal with the complexities of maintaining the correct class of
        # transform upon composition
        from .affine import Affine
        from .similarity import Similarity
        if isinstance(t, type(self)):
            # He is a subclass of me - I can swallow him.
            # What if I'm an Alignment though? Rules of composition state we
            # have to produce a non-Alignment result. Nasty, but we check
            # here to save a lot of repetition.
            if isinstance(self, HomogFamilyAlignment):
                new_self = self.copy_without_alignment()
            else:
                new_self = copy.deepcopy(self)
            new_self._compose_before_inplace(t)
        elif isinstance(self, type(t)):
            # I am a subclass of him - he can swallow me
            new_self = t._compose_after(self)
        elif isinstance(self, Similarity) and isinstance(t, Similarity):
            # we're both in the Similarity family
            new_self = Similarity(self.h_matrix)
            new_self._compose_before_inplace(t)
        elif isinstance(self, Affine) and isinstance(t, Affine):
            # we're both in the Affine family
            new_self = Affine(self.h_matrix)
            new_self._compose_before_inplace(t)
        else:
            # at least one of us is Homogeneous
            new_self = Homogeneous(self.h_matrix)
            new_self._compose_before_inplace(t)
        return new_self
    # noinspection PyProtectedMember
    def _compose_after(self, t):
        r"""
        Chains an Homogeneous family transform with another transform of the
        same family, producing a new transform that is the composition of
        the two.
        .. note::
            The type of the returned transform is always the first common
            ancestor between self and ``t``.
            Any Alignment will be lost.
        Parameters
        ----------
        t : :class:`Homogeneous`
            Transform to be applied **before** self
        Returns
        --------
        transform : :class:`Homogeneous`
            The resulting homogeneous transform.
        """
        # note that this overload of the basic _compose_after is just to
        # deal with the complexities of maintaining the correct class of
        # transform upon composition
        from .affine import Affine
        from .similarity import Similarity
        if isinstance(t, type(self)):
            # He is a subclass of me - I can swallow him.
            # What if I'm an Alignment though? Rules of composition state we
            # have to produce a non-Alignment result. Nasty, but we check
            # here to save a lot of repetition.
            if isinstance(self, HomogFamilyAlignment):
                new_self = self.copy_without_alignment()
            else:
                new_self = copy.deepcopy(self)
            new_self._compose_after_inplace(t)
        elif isinstance(self, type(t)):
            # I am a subclass of him - he can swallow me
            new_self = t._compose_before(self)
        elif isinstance(self, Similarity) and isinstance(t, Similarity):
            # we're both in the Similarity family
            new_self = Similarity(self.h_matrix)
            new_self._compose_after_inplace(t)
        elif isinstance(self, Affine) and isinstance(t, Affine):
            # we're both in the Affine family
            new_self = Affine(self.h_matrix)
            new_self._compose_after_inplace(t)
        else:
            # at least one of us is Homogeneous
            new_self = Homogeneous(self.h_matrix)
            new_self._compose_after_inplace(t)
        return new_self
    def _compose_before_inplace(self, transform):
        # Force the Homogeneous variant. compose machinery will guarantee
        # this is only invoked in the right circumstances (e.g. the types
        # will match so we don't need to block the setting of the matrix)
        Homogeneous.set_h_matrix(self, np.dot(transform.h_matrix,
                                              self.h_matrix))
    def _compose_after_inplace(self, transform):
        # Force the Homogeneous variant. compose machinery will guarantee
        # this is only invoked in the right circumstances (e.g. the types
        # will match so we don't need to block the setting of the matrix)
        Homogeneous.set_h_matrix(self, np.dot(self.h_matrix,
                                              transform.h_matrix))
    def has_true_inverse(self):
        r"""
        Always ``True``: the pseudoinverse built below is an exact inverse.
        """
        return True
    def _build_pseudoinverse(self):
        # The exact inverse - invert the homogeneous matrix.
        return Homogeneous(np.linalg.inv(self.h_matrix))
| {
"content_hash": "6ba38585826592194dab40d185f8b317",
"timestamp": "",
"source": "github",
"line_count": 224,
"max_line_length": 79,
"avg_line_length": 35.651785714285715,
"alnum_prop": 0.6076884547958928,
"repo_name": "jabooth/menpo-archive",
"id": "bc59ae7547783a0406bb6a37467d8b6f3a8e18d5",
"size": "7986",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "menpo/transform/homogeneous/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "101730"
},
{
"name": "C++",
"bytes": "169304"
},
{
"name": "Python",
"bytes": "818217"
},
{
"name": "Shell",
"bytes": "776"
}
],
"symlink_target": ""
} |
from flask import session, make_response, request, redirect, jsonify, render_template, current_app
from flask.ext.login import logout_user
from flask.views import View
from onelogin.saml2.auth import OneLogin_Saml2_Auth
from urlparse import urlparse
####
# Extension Manager
####
class SamlManager(object):
    """Flask extension that wires the SAML SSO endpoints into an app.

    Keyword arguments passed to the constructor are set directly as
    attributes on the manager instance.
    """

    def __init__(self, app=None, **kwargs):
        for name, value in kwargs.items():
            setattr(self, name, value)
        self.login_callback = None
        if app:
            self.init_app(app)

    def init_app(self, app):
        """Register the SAML routes and expose the manager on the app."""
        # Exposed so the ACS view can reach the registered login callback.
        app.saml_manager = self
        routes = (
            ('/saml/login', SamlLogin, 'login', 'login'),
            ('/saml/logout', SamlLogout, 'logout', None),
            ('/saml/acs', SamlACS, 'acs', None),
            ('/saml/sls', SamlSLS, 'sls', None),
        )
        for rule, view, view_name, endpoint in routes:
            if endpoint is None:
                app.add_url_rule(rule, view_func=view.as_view(view_name))
            else:
                app.add_url_rule(rule, view_func=view.as_view(view_name),
                                 endpoint=endpoint)

    def login_from_acs(self, callback):
        """Decorator registering the function invoked after ACS processing."""
        self.login_callback = callback
        return callback
####
# Views
####
class SamlLogin(View):
    """Start SP-initiated SSO by redirecting the browser to the IdP."""

    methods = ['GET']

    def dispatch_request(self):
        return redirect(SamlRequest(request).sso())
class SamlLogout(View):
    """Start single logout by redirecting the browser to the IdP."""

    methods = ['GET']

    def dispatch_request(self):
        return redirect(SamlRequest(request).slo())
class SamlACS(View):
    """Assertion Consumer Service - the IdP POSTs the SAML response here."""

    methods = ['POST']

    def dispatch_request(self):
        return SamlRequest(request).acs()
class SamlSLS(View):
    """Single Logout Service endpoint."""

    methods = ['GET', 'POST']

    def dispatch_request(self):
        result = SamlRequest(request).sls()
        logout_user()
        if not result.get('success_slo'):
            # Logout did not complete - report the processing state.
            return jsonify(result)
        logout_path = current_app.config.get('SAML_LOGOUT_PATH')
        if logout_path:
            return redirect(logout_path)
        return render_template('saml_logout_successful.html')
####
# SAML logic
####
class SamlRequest(object):
    """Wraps a Flask request with the OneLogin SAML2 auth machinery.

    Instances are short-lived: one per incoming request. Processing state
    (errors, attributes, flags) accumulates on the instance and can be
    exported with serialize().
    """

    def __init__(self, request_data):
        self.request = self.prepare_flask_request(request_data)
        settings_path = current_app.config.get('SAML_SETTINGS_PATH')
        self.auth = OneLogin_Saml2_Auth(self.request, custom_base_path=settings_path)
        self.errors = []
        self.not_auth_warn = False
        self.success_slo = False
        self.attributes = False
        self.logged_in = False

    def serialize(self):
        """Return the current SAML processing state as a plain dict."""
        return dict(
            errors=self.errors,
            not_auth_warn=self.not_auth_warn,
            success_slo=self.success_slo,
            attributes=self.attributes,
            logged_in=self.logged_in
        )

    def prepare_flask_request(self, request_data):
        """Translate a Flask request into the dict python-saml expects."""
        url_data = urlparse(request_data.url)
        return {
            'http_host': request_data.host,
            'server_port': url_data.port,
            'script_name': request_data.path,
            'get_data': request_data.args.copy(),
            'post_data': request_data.form.copy()
        }

    def sso(self):
        """Return the IdP URL that starts a login (SP-initiated SSO)."""
        return self.auth.login()

    def slo(self):
        """Return the IdP URL that starts a logout for the current session."""
        # Both values default to None when no SAML session data is stored.
        name_id = session.get('samlNameId')
        session_index = session.get('samlSessionIndex')
        return self.auth.logout(name_id=name_id, session_index=session_index)

    def acs(self):
        """Process the IdP response, store session data and invoke the
        application's registered login callback with the serialized state."""
        self.auth.process_response()
        self.errors = self.auth.get_errors()
        self.not_auth_warn = not self.auth.is_authenticated()
        if len(self.errors) == 0:
            session['samlUserdata'] = self.auth.get_attributes()
            session['samlNameId'] = self.auth.get_nameid()
            session['samlSessionIndex'] = self.auth.get_session_index()
        if 'samlUserdata' in session:
            self.logged_in = True
            if len(session['samlUserdata']) > 0:
                self.attributes = session['samlUserdata'].items()
        attrs = self.serialize()
        return current_app.saml_manager.login_callback(attrs)

    def sls(self):
        """Process a logout request/response.

        Returns the redirect URL provided by the IdP when one is present,
        otherwise the serialized processing state (note the mixed return
        types - callers must handle both).
        """
        # session is a proxy, so session.clear resolves to the current
        # request's session when python-saml invokes the callback.
        url = self.auth.process_slo(delete_session_cb=session.clear)
        self.errors = self.auth.get_errors()
        if len(self.errors) == 0:
            if url is not None:
                return url
            else:
                self.success_slo = True
        return self.serialize()

    def generate_metadata(self):
        """Render the SP metadata XML, or a 500 listing validation errors."""
        settings = self.auth.get_settings()
        metadata = settings.get_sp_metadata()
        errors = settings.validate_metadata(metadata)
        if len(errors) == 0:
            resp = make_response(metadata, 200)
            resp.headers['Content-Type'] = 'text/xml'
        else:
            # BUG FIX: ``errors`` is a list; the original ``errors.join(', ')``
            # raised AttributeError - join must be called on the separator.
            resp = make_response(', '.join(errors), 500)
        return resp
| {
"content_hash": "3b79701c28f549a6d41884e327ba2d55",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 98,
"avg_line_length": 30.980891719745223,
"alnum_prop": 0.5933388157894737,
"repo_name": "cbron/python-saml-flask",
"id": "615dbc379d6eda0482779008b51a0d83f1379693",
"size": "4864",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "saml.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "4864"
}
],
"symlink_target": ""
} |
from django.conf.urls.defaults import patterns, include, url
from django.conf import settings
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
# URL routes: the Django admin plus the nss/samba/dhcp admin apps.
urlpatterns = patterns('',
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^nss/', include('nss_admin.urls')),
    url(r'^samba_admin/', include('samba_admin.urls')),
    url(r'^dhcp_admin/', include('dhcp_admin.urls')),
    # url(r'^schedule/', include('school_sched.urls')),
    # url(r'^agenda/', include('schoolagenda.urls')),
)
# Use Django's stock error views for 404/500 responses.
handler404 = 'django.views.defaults.page_not_found'
handler500 = 'django.views.defaults.server_error'
# enable serving static files on debugging server
if settings.SERVE_STATICS:
    urlpatterns += patterns('', (
        # Leading '/' stripped from MEDIA_URL to form the regex prefix.
        r'^%s(?P<path>.*)$' % settings.MEDIA_URL[1:],
        'django.views.static.serve', {
            'document_root': settings.MEDIA_ROOT,
        }
    ), )
    urlpatterns += staticfiles_urlpatterns()
| {
"content_hash": "8d1c94cc3672bb44ed8ced414e040635",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 71,
"avg_line_length": 33.6,
"alnum_prop": 0.6828231292517006,
"repo_name": "vencax/py-s21-admin",
"id": "78941d18cdab6a3e3d49271fa9d33b18c6afc038",
"size": "1176",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mysite/urls.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7717"
}
],
"symlink_target": ""
} |
from kaira.loaders.abstract_loader import AbstractLoader
class LosotoLoader(AbstractLoader):
    # Loader stub (presumably for LoSoTo data, judging by the name - confirm).
    # Construction defers entirely to AbstractLoader; no container format is
    # implemented yet.
    def __init__(self,data_container=None,**kwargs):
        # Pass everything straight through to the AbstractLoader base.
        super(LosotoLoader,self).__init__(data_container=data_container,**kwargs)
    def init_default_data_container(self):
        '''Return the default empty data container for this loader'''
        # Not implemented yet - returns None implicitly.
        pass
| {
"content_hash": "f077d3e4a49e097c4c26cd3f39d603d2",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 81,
"avg_line_length": 32.54545454545455,
"alnum_prop": 0.7094972067039106,
"repo_name": "Joshuaalbert/KAIRApy",
"id": "d44734529bf1eb5119b32247d8e3d0b9e513e346",
"size": "358",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/kaira/loaders/losoto.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "81109"
}
],
"symlink_target": ""
} |
import pygame
class GameInstructions:
    """
    Game state object for the instructions screen.

    Holds the lines of help text that the main screen renders before the
    game starts; getCurrentState() returns them in display order.
    """

    def __init__(self):
        """
        Build the instruction messages shown on screen.
        input: none
        output: none
        """
        self.upMessage = '[W] OR [UP] to move up'
        self.downMessage = '[S] OR [DOWN] to move down'
        # BUG FIX: the key labels were swapped in the original text
        # ('[A] OR [RIGHT] to move right' / '[D] OR [RIGHT] to move left');
        # [D]/[RIGHT] moves right and [A]/[LEFT] moves left.
        self.rightMessage = '[D] OR [RIGHT] to move right'
        self.leftMessage = '[A] OR [LEFT] to move left'
        self.featureMessage = 'The red tip of the snake means you'
        self.featureMessage2 = 'can eat yourself once without losing.'
        self.continueMessage = 'Press Enter to Continue'

    def updateState(self):
        """
        No-op: the instructions screen has no mutable state. Kept
        (presumably to match the other game-state classes' interface -
        confirm against callers).
        input: none
        output: none
        """
        pass

    def getCurrentState(self):
        """
        Return the instruction messages to display on the main screen.
        input: none
        output: list of message strings, in display order
        """
        return [self.upMessage, self.downMessage, self.rightMessage,
                self.leftMessage, self.featureMessage, self.featureMessage2,
                self.continueMessage]
| {
"content_hash": "c276f367b949ea778656e8488c867283",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 145,
"avg_line_length": 33.63265306122449,
"alnum_prop": 0.5679611650485437,
"repo_name": "GuerreroGames/Snake-Game",
"id": "03ab1606e802053564b895ebb57bf1855590131c",
"size": "1648",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Re-Implemented Game/GameInstructions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "32286"
}
],
"symlink_target": ""
} |
from copy import deepcopy
from glob import iglob as GLOBiglob
import json
from os.path import join as OSpath__join
# ENSO_metrics functions
from EnsoPlots.EnsoMetricPlot import main_plotter
from EnsoPlots.EnsoPlotToolsLib import remove_metrics
# ---------------------------------------------------#
# Arguments
# ---------------------------------------------------#
metric_collection = "ENSO_tel"
project = "CMIP5" # "obs2obs" #
model = "CNRM-CM5" # "ERA-Interim_SODA3.4.2" # "ERA-Interim_ERA-Interim" # "ERA-Interim" #
experiment = "historical"
member = "r1i1p1"
# obs2obs datasets have no ensemble member suffix; models are keyed as
# "<model>_<member>".
dataname = deepcopy(model) if project == "obs2obs" else model + "_" + member
plot_ref = True if project == "obs2obs" else False
# True to use the set of metric in the BAMS paper
# More metric have been computed and tested but not kept
reduced_set = True # False #
# computation version, 'v20200427' for models and 'v20201231' for obs are provided with the package
version_mod = "v20200427"
version_obs = "v20201231"
# json files
# Pre-computed metric-collection JSON results shipped with the package,
# keyed by project then metric collection.
dict_json = {
    "CMIP5": {
        "ENSO_perf": "share/EnsoMetrics/cmip5_historical_ENSO_perf_" + version_mod + "_allModels_allRuns.json",
        "ENSO_proc": "share/EnsoMetrics/cmip5_historical_ENSO_proc_" + version_mod + "_allModels_allRuns.json",
        "ENSO_tel": "share/EnsoMetrics/cmip5_historical_ENSO_tel_" + version_mod + "_allModels_allRuns.json"},
    "CMIP6": {
        "ENSO_perf": "share/EnsoMetrics/cmip6_historical_ENSO_perf_" + version_mod + "_allModels_allRuns.json",
        "ENSO_proc": "share/EnsoMetrics/cmip6_historical_ENSO_proc_" + version_mod + "_allModels_allRuns.json",
        "ENSO_tel": "share/EnsoMetrics/cmip6_historical_ENSO_tel_" + version_mod + "_allModels_allRuns.json"},
    "obs2obs": {
        "ENSO_perf": "share/EnsoMetrics/obs2obs_historical_ENSO_perf_" + version_obs + "_allObservations.json",
        "ENSO_proc": "share/EnsoMetrics/obs2obs_historical_ENSO_proc_" + version_obs + "_allObservations.json",
        "ENSO_tel": "share/EnsoMetrics/obs2obs_historical_ENSO_tel_" + version_obs + "_allObservations.json"}}
# NOTE(review): hard-coded local path - must be adapted per machine.
path_main = "/Users/yannplanton/Documents/Yann/Fac/2016_2018_postdoc_LOCEAN/2018_06_ENSO_metrics/2020_05_report"
path_nc = OSpath__join(path_main, "Data/" + project.lower() + "/" + experiment + "/" + metric_collection)
# figure name
# Dots in observation names are stripped from the figure file name.
path_out = ""
dataname2 = dataname.replace("GPCPv2.3", "GPCPv23").replace("SODA3.4.2", "SODA342")
figure_name = project.lower() + "_" + experiment + "_" + metric_collection + "_" + dataname2
# ---------------------------------------------------#
# ---------------------------------------------------#
# Main
# ---------------------------------------------------#
# read json file
with open(dict_json[project][metric_collection]) as ff:
    data_json = json.load(ff)['RESULTS']['model'][model][member]
    ff.close()
del ff
# get metric names
list_metrics = sorted(list(data_json["value"].keys()), key=lambda v: v.upper())
if reduced_set is True:
    metrics = remove_metrics(list_metrics, metric_collection, reduced_set=reduced_set)
    # NOTE(review): 'metrics' is never used below (the loop iterates a
    # hard-coded list) - possibly list_metrics was meant to be reassigned.
# pattern of netCDF files
pattern = project.lower() + "_" + experiment + "_" + metric_collection + "_" + version_mod + "_" + dataname
#
# Loop on metrics
#
for met in ["EnsoPrMapDjfRmse"]:#list_metrics:
    print(met)
    # get NetCDF file name
    # Map metrics in ENSO_tel share one netCDF file without the Rmse suffix.
    met2 = met.replace("Rmse", "") if metric_collection in ["ENSO_tel"] and "Map" in met else deepcopy(met)
    filename_nc = pattern + "_" + met2 + ".nc"
    filename_nc = list(GLOBiglob(OSpath__join(path_nc, filename_nc)))[0]
    # get diagnostic values for the given model and observations
    dict_dia = data_json["value"][met]["diagnostic"]
    diagnostic_values = dict((key1, dict_dia[key1]["value"]) for key1 in list(dict_dia.keys()))
    diagnostic_units = data_json["metadata"]["metrics"][met]["diagnostic"]["units"]
    # get metric values computed with the given model and observations
    if metric_collection in ["ENSO_tel"] and "Map" in met:
        # Pair the Corr and Rmse variants; correlations are shown as 1 - corr.
        list1, list2 = [met.replace("Rmse", "Corr"), met], ["metric", "metric"]
        dict_met = data_json["value"]
        metric_values = dict((key1, {model: [1-dict_met[su][ty][key1]["value"] if "Corr" in su else
                                             dict_met[su][ty][key1]["value"]for su, ty in zip(list1, list2)]})
                             for key1 in list(dict_met[list1[0]]["metric"].keys()))
        metric_units = [data_json["metadata"]["metrics"][su]["metric"]["units"] for su in list1]
        del list1, list2
    else:
        dict_met = data_json["value"][met]["metric"]
        metric_values = dict((key1, {model: dict_met[key1]["value"]}) for key1 in list(dict_met.keys()))
        metric_units = data_json["metadata"]["metrics"][met]["metric"]["units"]
    # figure name
    # NOTE(review): name_png is computed here but the call below passes
    # name_png=figure_name - confirm which of the two was intended.
    name_png = figure_name + "_" + met
    # this function needs:
    # - the name of the metric collection: metric_collection
    # - the name of the metric: metric
    # - the name of the model: model
    # - name of the experiment: experiment
    # - name of the netCDF file name and path: filename_nc
    # - a dictionary containing the diagnostic values: diagnostic_values (e.g., {"ERA-Interim": 1, "Tropflux": 1.1,
    # model: 1.5})
    # - the diagnostic units: diagnostic_units
    # - a dictionary containing the metric values: metric_values (e.g., {"ERA-Interim": {model: 1.5},
    # "Tropflux": {model: 1.36}})
    # - the metric units: metric_units
    # - (optional) the member name, not needed if project aims to compare observational datasets (obs2obs): member
    # - (optional) the path where to save the plots: path_png
    # - (optional) the name of the plots: name_png
    # - (optional) if the project aims to compare observational datasets (obs2obs): plot_ref
    main_plotter(metric_collection, met2, model, experiment, filename_nc, diagnostic_values, diagnostic_units,
                 metric_values, metric_units, member=member, path_png=path_out, name_png=figure_name, plot_ref=plot_ref)
    del diagnostic_values, diagnostic_units, dict_dia, dict_met, filename_nc, met2, metric_values, metric_units, \
        name_png
| {
"content_hash": "50ebbe595f353a2d701b559bea17d60a",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 120,
"avg_line_length": 56.765765765765764,
"alnum_prop": 0.6135534042215521,
"repo_name": "eguil/ENSO_metrics",
"id": "024657175f6ef2cd779e32dbfc5b4069145baf7c",
"size": "6882",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/driver_plot_divedowns.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1727448"
},
{
"name": "Shell",
"bytes": "1000"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    # Auto-generated South schema migration: adds the nullable one-to-one
    # 'user' field linking Video to auth.User. The 'models' dict below is
    # the frozen ORM state South uses when running this migration - do not
    # edit it by hand.
    def forwards(self, orm):
        # Adding field 'Video.user'
        db.add_column('videoportal_video', 'user',
                      self.gf('django.db.models.fields.related.OneToOneField')(to=orm['auth.User'], unique=True, null=True, blank=True),
                      keep_default=False)
    def backwards(self, orm):
        # Deleting field 'Video.user'
        db.delete_column('videoportal_video', 'user_id')
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'taggit.tag': {
            'Meta': {'object_name': 'Tag'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
        },
        'taggit.taggeditem': {
            'Meta': {'object_name': 'TaggedItem'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
        },
        'videoportal.channel': {
            'Meta': {'object_name': 'Channel'},
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
            'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()'}),
            'videos': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'contained_videos'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['videoportal.Video']"})
        },
        'videoportal.comment': {
            'Meta': {'object_name': 'Comment'},
            'comment': ('django.db.models.fields.TextField', [], {'max_length': '1000'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
            'moderated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
            'timecode': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videoportal.Video']"})
        },
        'videoportal.video': {
            'Meta': {'object_name': 'Video'},
            'assemblyid': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'channel': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['videoportal.Channel']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date': ('django.db.models.fields.DateField', [], {}),
            'description': ('django.db.models.fields.TextField', [], {}),
            'duration': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'encodingDone': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'kind': ('django.db.models.fields.IntegerField', [], {'max_length': '1'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'mp3Size': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'mp3URL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'mp4Size': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'mp4URL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'oggSize': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'oggURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'originalFile': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
            'protocolURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'torrentURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
            'videoThumbURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'webmSize': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'webmURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
        }
    }
    complete_apps = ['videoportal']
"content_hash": "c7507b420e0c9ee20ff890045d4c8d51",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 208,
"avg_line_length": 76.89430894308943,
"alnum_prop": 0.5491647282723621,
"repo_name": "Piratenfraktion-Berlin/OwnTube",
"id": "8e003abce05bb8f766c65a99577866c1087fb97e",
"size": "9482",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "videoportal/migrations/0004_auto__add_field_video_user.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "58222"
},
{
"name": "HTML",
"bytes": "48870"
},
{
"name": "JavaScript",
"bytes": "116759"
},
{
"name": "Python",
"bytes": "726277"
}
],
"symlink_target": ""
} |
from django.db import migrations
class Migration(migrations.Migration):
    # No-op migration: carries no schema or data operations.  It exists only
    # to keep the app's migration graph linear after 0008_use_livequery
    # (presumably the actual change happened outside the ORM -- TODO confirm).

    dependencies = [
        ('domain', '0008_use_livequery'),
    ]

    operations = []
| {
"content_hash": "ea83caafe952b6caa324ee6889a58be2",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 41,
"avg_line_length": 16.5,
"alnum_prop": 0.6363636363636364,
"repo_name": "dimagi/commcare-hq",
"id": "2ec993bb05c45039ea718ec60e7e9058d2e9586f",
"size": "165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/domain/migrations/0009_restrict_mob_access_from_FF.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "82928"
},
{
"name": "Dockerfile",
"bytes": "2341"
},
{
"name": "HTML",
"bytes": "2589268"
},
{
"name": "JavaScript",
"bytes": "5889543"
},
{
"name": "Jinja",
"bytes": "3693"
},
{
"name": "Less",
"bytes": "176180"
},
{
"name": "Makefile",
"bytes": "1622"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "66704"
},
{
"name": "Python",
"bytes": "21779773"
},
{
"name": "Roff",
"bytes": "150"
},
{
"name": "Shell",
"bytes": "67473"
}
],
"symlink_target": ""
} |
from ccxt.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import BadSymbol
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class bitstamp1(Exchange):
    def describe(self):
        """Return the static exchange description (capability flags, endpoint
        lists, required credentials and the fixed v1 market table) merged over
        the base-class defaults via deep_extend."""
        return self.deep_extend(super(bitstamp1, self).describe(), {
            'id': 'bitstamp1',
            'name': 'Bitstamp',
            'countries': ['GB'],
            'rateLimit': 1000,
            'version': 'v1',
            # capability flags consumed by the unified ccxt API
            'has': {
                'CORS': True,
                'spot': True,
                'margin': False,
                'swap': False,
                'future': False,
                'option': False,
                'addMargin': False,
                'cancelOrder': True,
                'createOrder': True,
                'createReduceOnlyOrder': False,
                'createStopLimitOrder': False,
                'createStopMarketOrder': False,
                'createStopOrder': False,
                'fetchBalance': True,
                'fetchBorrowRate': False,
                'fetchBorrowRateHistories': False,
                'fetchBorrowRateHistory': False,
                'fetchBorrowRates': False,
                'fetchBorrowRatesPerSymbol': False,
                'fetchFundingHistory': False,
                'fetchFundingRate': False,
                'fetchFundingRateHistory': False,
                'fetchFundingRates': False,
                'fetchIndexOHLCV': False,
                'fetchLeverage': False,
                'fetchMarginMode': False,
                'fetchMarkOHLCV': False,
                'fetchMyTrades': True,
                'fetchOpenInterestHistory': False,
                'fetchOrder': None,
                'fetchOrderBook': True,
                'fetchPosition': False,
                'fetchPositionMode': False,
                'fetchPositions': False,
                'fetchPositionsRisk': False,
                'fetchPremiumIndexOHLCV': False,
                'fetchTicker': True,
                'fetchTrades': True,
                'reduceMargin': False,
                'setLeverage': False,
                'setMarginMode': False,
                'setPositionMode': False,
            },
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/27786377-8c8ab57e-5fe9-11e7-8ea4-2b05b6bcceec.jpg',
                'api': {
                    'rest': 'https://www.bitstamp.net/api',
                },
                'www': 'https://www.bitstamp.net',
                'doc': 'https://www.bitstamp.net/api',
            },
            # v1 private endpoints require the customer id (uid) in addition
            # to the usual apiKey/secret pair
            'requiredCredentials': {
                'apiKey': True,
                'secret': True,
                'uid': True,
            },
            'api': {
                'public': {
                    'get': [
                        'ticker',
                        'ticker_hour',
                        'order_book',
                        'transactions',
                        'eur_usd',
                    ],
                },
                'private': {
                    'post': [
                        'balance',
                        'user_transactions',
                        'open_orders',
                        'order_status',
                        'cancel_order',
                        'cancel_all_orders',
                        'buy',
                        'sell',
                        'bitcoin_deposit_address',
                        'unconfirmed_btc',
                        'ripple_withdrawal',
                        'ripple_address',
                        'withdrawal_requests',
                        'bitcoin_withdrawal',
                    ],
                },
            },
            'precisionMode': TICK_SIZE,
            # markets are hard-coded: API v1 predates a markets endpoint
            'markets': {
                'BTC/USD': {'id': 'btcusd', 'symbol': 'BTC/USD', 'base': 'BTC', 'quote': 'USD', 'baseId': 'btc', 'quoteId': 'usd', 'maker': 0.005, 'taker': 0.005, 'type': 'spot', 'spot': True},
                'BTC/EUR': {'id': 'btceur', 'symbol': 'BTC/EUR', 'base': 'BTC', 'quote': 'EUR', 'baseId': 'btc', 'quoteId': 'eur', 'maker': 0.005, 'taker': 0.005, 'type': 'spot', 'spot': True},
                'EUR/USD': {'id': 'eurusd', 'symbol': 'EUR/USD', 'base': 'EUR', 'quote': 'USD', 'baseId': 'eur', 'quoteId': 'usd', 'maker': 0.005, 'taker': 0.005, 'type': 'spot', 'spot': True},
                'XRP/USD': {'id': 'xrpusd', 'symbol': 'XRP/USD', 'base': 'XRP', 'quote': 'USD', 'baseId': 'xrp', 'quoteId': 'usd', 'maker': 0.005, 'taker': 0.005, 'type': 'spot', 'spot': True},
                'XRP/EUR': {'id': 'xrpeur', 'symbol': 'XRP/EUR', 'base': 'XRP', 'quote': 'EUR', 'baseId': 'xrp', 'quoteId': 'eur', 'maker': 0.005, 'taker': 0.005, 'type': 'spot', 'spot': True},
                'XRP/BTC': {'id': 'xrpbtc', 'symbol': 'XRP/BTC', 'base': 'XRP', 'quote': 'BTC', 'baseId': 'xrp', 'quoteId': 'btc', 'maker': 0.005, 'taker': 0.005, 'type': 'spot', 'spot': True},
                'LTC/USD': {'id': 'ltcusd', 'symbol': 'LTC/USD', 'base': 'LTC', 'quote': 'USD', 'baseId': 'ltc', 'quoteId': 'usd', 'maker': 0.005, 'taker': 0.005, 'type': 'spot', 'spot': True},
                'LTC/EUR': {'id': 'ltceur', 'symbol': 'LTC/EUR', 'base': 'LTC', 'quote': 'EUR', 'baseId': 'ltc', 'quoteId': 'eur', 'maker': 0.005, 'taker': 0.005, 'type': 'spot', 'spot': True},
                'LTC/BTC': {'id': 'ltcbtc', 'symbol': 'LTC/BTC', 'base': 'LTC', 'quote': 'BTC', 'baseId': 'ltc', 'quoteId': 'btc', 'maker': 0.005, 'taker': 0.005, 'type': 'spot', 'spot': True},
                'ETH/USD': {'id': 'ethusd', 'symbol': 'ETH/USD', 'base': 'ETH', 'quote': 'USD', 'baseId': 'eth', 'quoteId': 'usd', 'maker': 0.005, 'taker': 0.005, 'type': 'spot', 'spot': True},
                'ETH/EUR': {'id': 'etheur', 'symbol': 'ETH/EUR', 'base': 'ETH', 'quote': 'EUR', 'baseId': 'eth', 'quoteId': 'eur', 'maker': 0.005, 'taker': 0.005, 'type': 'spot', 'spot': True},
                'ETH/BTC': {'id': 'ethbtc', 'symbol': 'ETH/BTC', 'base': 'ETH', 'quote': 'BTC', 'baseId': 'eth', 'quoteId': 'btc', 'maker': 0.005, 'taker': 0.005, 'type': 'spot', 'spot': True},
            },
        })
def fetch_order_book(self, symbol, limit=None, params={}):
"""
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int|None limit: the maximum amount of order book entries to return
:param dict params: extra parameters specific to the bitstamp1 api endpoint
:returns dict: A dictionary of `order book structures <https://docs.ccxt.com/en/latest/manual.html#order-book-structure>` indexed by market symbols
"""
if symbol != 'BTC/USD':
raise ExchangeError(self.id + ' ' + self.version + " fetchOrderBook doesn't support " + symbol + ', use it for BTC/USD only')
self.load_markets()
orderbook = self.publicGetOrderBook(params)
timestamp = self.safe_timestamp(orderbook, 'timestamp')
return self.parse_order_book(orderbook, symbol, timestamp)
def parse_ticker(self, ticker, market=None):
#
# {
# "volume": "2836.47827985",
# "last": "36544.93",
# "timestamp": "1643372072",
# "bid": "36535.79",
# "vwap":"36594.20",
# "high": "37534.15",
# "low": "35511.32",
# "ask": "36548.47",
# "open": 37179.62
# }
#
symbol = self.safe_symbol(None, market)
timestamp = self.safe_timestamp(ticker, 'timestamp')
vwap = self.safe_string(ticker, 'vwap')
baseVolume = self.safe_string(ticker, 'volume')
quoteVolume = Precise.string_mul(baseVolume, vwap)
last = self.safe_string(ticker, 'last')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': self.safe_string(ticker, 'bid'),
'bidVolume': None,
'ask': self.safe_string(ticker, 'ask'),
'askVolume': None,
'vwap': vwap,
'open': self.safe_string(ticker, 'open'),
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market)
def fetch_ticker(self, symbol, params={}):
"""
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict params: extra parameters specific to the bitstamp1 api endpoint
:returns dict: a `ticker structure <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
if symbol != 'BTC/USD':
raise ExchangeError(self.id + ' ' + self.version + " fetchTicker doesn't support " + symbol + ', use it for BTC/USD only')
self.load_markets()
market = self.market(symbol)
ticker = self.publicGetTicker(params)
#
# {
# "volume": "2836.47827985",
# "last": "36544.93",
# "timestamp": "1643372072",
# "bid": "36535.79",
# "vwap":"36594.20",
# "high": "37534.15",
# "low": "35511.32",
# "ask": "36548.47",
# "open": 37179.62
# }
#
return self.parse_ticker(ticker, market)
def parse_trade(self, trade, market=None):
timestamp = self.safe_timestamp_2(trade, 'date', 'datetime')
side = 'buy' if (trade['type'] == 0) else 'sell'
orderId = self.safe_string(trade, 'order_id')
id = self.safe_string(trade, 'tid')
price = self.safe_string(trade, 'price')
amount = self.safe_string(trade, 'amount')
marketId = self.safe_string(trade, 'currency_pair')
market = self.safe_market(marketId, market)
return self.safe_trade({
'id': id,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': None,
'fee': None,
}, market)
def fetch_trades(self, symbol, since=None, limit=None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int|None since: timestamp in ms of the earliest trade to fetch
:param int|None limit: the maximum amount of trades to fetch
:param dict params: extra parameters specific to the bitstamp1 api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>`
"""
if symbol != 'BTC/USD':
raise BadSymbol(self.id + ' ' + self.version + " fetchTrades doesn't support " + symbol + ', use it for BTC/USD only')
self.load_markets()
market = self.market(symbol)
request = {
'time': 'minute',
}
response = self.publicGetTransactions(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def parse_balance(self, response):
result = {'info': response}
codes = list(self.currencies.keys())
for i in range(0, len(codes)):
code = codes[i]
currency = self.currency(code)
currencyId = currency['id']
account = self.account()
account['free'] = self.safe_string(response, currencyId + '_available')
account['used'] = self.safe_string(response, currencyId + '_reserved')
account['total'] = self.safe_string(response, currencyId + '_balance')
result[code] = account
return self.safe_balance(result)
def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict params: extra parameters specific to the bitstamp1 api endpoint
:returns dict: a `balance structure <https://docs.ccxt.com/en/latest/manual.html?#balance-structure>`
"""
response = self.privatePostBalance(params)
return self.parse_balance(response)
def create_order(self, symbol, type, side, amount, price=None, params={}):
"""
create a trade order
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float amount: how much of currency you want to trade in units of base currency
:param float|None price: the price at which the order is to be fullfilled, in units of the quote currency, ignored in market orders
:param dict params: extra parameters specific to the bitstamp1 api endpoint
:returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if type != 'limit':
raise ExchangeError(self.id + ' ' + self.version + ' accepts limit orders only')
if symbol != 'BTC/USD':
raise ExchangeError(self.id + ' v1 supports BTC/USD orders only')
self.load_markets()
method = 'privatePost' + self.capitalize(side)
request = {
'amount': amount,
'price': price,
}
response = getattr(self, method)(self.extend(request, params))
id = self.safe_string(response, 'id')
return {
'info': response,
'id': id,
}
def cancel_order(self, id, symbol=None, params={}):
"""
cancels an open order
:param str id: order id
:param str|None symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitstamp1 api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
return self.privatePostCancelOrder({'id': id})
def parse_order_status(self, status):
statuses = {
'In Queue': 'open',
'Open': 'open',
'Finished': 'closed',
'Canceled': 'canceled',
}
return self.safe_string(statuses, status, status)
def fetch_order_status(self, id, symbol=None, params={}):
self.load_markets()
request = {
'id': id,
}
response = self.privatePostOrderStatus(self.extend(request, params))
return self.parse_order_status(response)
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all trades made by the user
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch trades for
:param int|None limit: the maximum number of trades structures to retrieve
:param dict params: extra parameters specific to the bitstamp1 api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html#trade-structure>`
"""
self.load_markets()
market = None
if symbol is not None:
market = self.market(symbol)
pair = market['id'] if market else 'all'
request = {
'id': pair,
}
response = self.privatePostOpenOrdersId(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the url/method/body/headers for a public or private request.

        Private requests are authenticated with an upper-cased hex HMAC of
        nonce + uid + apiKey, keyed with the secret, sent form-encoded.
        """
        url = self.urls['api']['rest'] + '/' + self.implode_params(path, params)
        # drop the path placeholders from params; the rest become query/body
        query = self.omit(params, self.extract_params(path))
        if api == 'public':
            if query:
                url += '?' + self.urlencode(query)
        else:
            self.check_required_credentials()
            nonce = str(self.nonce())
            auth = nonce + self.uid + self.apiKey
            # NOTE(review): the digest algorithm defaults to whatever the base
            # Exchange.hmac implementation uses -- confirm it matches bitstamp
            signature = self.encode(self.hmac(self.encode(auth), self.encode(self.secret)))
            query = self.extend({
                'key': self.apiKey,
                'signature': signature.upper(),
                'nonce': nonce,
            }, query)
            body = self.urlencode(query)
            headers = {
                'Content-Type': 'application/x-www-form-urlencoded',
            }
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return
status = self.safe_string(response, 'status')
if status == 'error':
raise ExchangeError(self.id + ' ' + self.json(response))
| {
"content_hash": "2f16528e5efa8f9919599c900f356f4d",
"timestamp": "",
"source": "github",
"line_count": 374,
"max_line_length": 193,
"avg_line_length": 46.63101604278075,
"alnum_prop": 0.5276376146788991,
"repo_name": "ccxt/ccxt",
"id": "3696a7d131f246c078177d8d5c309a1aa7cd6a2c",
"size": "17621",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ccxt/bitstamp1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1724"
},
{
"name": "HTML",
"bytes": "246"
},
{
"name": "JavaScript",
"bytes": "11619228"
},
{
"name": "PHP",
"bytes": "10272973"
},
{
"name": "Python",
"bytes": "9037496"
},
{
"name": "Shell",
"bytes": "6887"
}
],
"symlink_target": ""
} |
"""
Finds all cliques in an undirected graph. A clique is a set of vertices in the
graph such that the subgraph is fully connected (ie. for any pair of nodes in
the subgraph there is an edge between them).
"""
def find_all_cliques(edges):
"""
takes dict of sets
each key is a vertex
value is set of all edges connected to vertex
returns list of lists (each sub list is a maximal clique)
implementation of the basic algorithm described in:
Bron, Coen; Kerbosch, Joep (1973), "Algorithm 457: finding all cliques of an undirected graph",
"""
def expand_clique(candidates, nays):
nonlocal compsub
if not candidates and not nays:
nonlocal solutions
solutions.append(compsub.copy())
else:
for selected in candidates.copy():
candidates.remove(selected)
candidates_temp = get_connected(selected, candidates)
nays_temp = get_connected(selected, nays)
compsub.append(selected)
expand_clique(candidates_temp, nays_temp)
nays.add(compsub.pop())
def get_connected(vertex, old_set):
new_set = set()
for neighbor in edges[str(vertex)]:
if neighbor in old_set:
new_set.add(neighbor)
return new_set
compsub = []
solutions = []
possibles = set(edges.keys())
expand_clique(possibles, set())
return solutions
| {
"content_hash": "5264c70ccdedc147e452d242c925ec7a",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 99,
"avg_line_length": 34.785714285714285,
"alnum_prop": 0.6228610540725531,
"repo_name": "keon/algorithms",
"id": "f1db16ed5af625ae6670ba3ce7a2334e08259877",
"size": "1461",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "algorithms/graph/find_all_cliques.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "697310"
}
],
"symlink_target": ""
} |
"""
Personal Python package of Daniel Haase.
"""
from os.path import abspath, dirname, join
from warnings import warn
# read version number from text file
# (done eagerly at import time so dh.__version__ is always defined; any
# failure degrades to the string "unknown" instead of breaking the import)
try:
    _versionFilename = join(dirname(abspath(__file__)), "VERSION.txt")
    with open(_versionFilename, "r") as _f:
        for _line in _f:
            _line = _line.strip()
            if (_line == "") or (_line[0] == "#"):
                # ignore empty lines and comments
                continue
            else:
                # the first valid line will be used as version number
                __version__ = _line
                break
        else:
            # end of file, version was not found
            warn("Found no valid version number in file '{}'".format(_versionFilename))
            __version__ = "unknown"
except Exception as e:
    # NOTE(review): if join()/dirname() themselves failed, _versionFilename
    # would be unbound here and this warn() would raise NameError -- confirm
    warn("Failed to get version number from file '{}' (error: '{}')".format(_versionFilename, e))
    __version__ = "unknown"
finally:
    # purge the helper names so they do not leak into the package namespace
    del abspath
    del dirname
    del join
    del warn
    del _versionFilename
    # _f and _line only exist if open() succeeded / the file had lines
    try:
        del _f
    except NameError:
        pass
    try:
        del _line
    except NameError:
        pass
| {
"content_hash": "837394b747c5782268558a93bfd27d07",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 97,
"avg_line_length": 26.613636363636363,
"alnum_prop": 0.5499573014517506,
"repo_name": "dhaase-de/dh-python-dh",
"id": "31ddb30f4e7fd5a7e059f9472fa5d95dd74cbbf0",
"size": "1171",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dh/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "183301"
},
{
"name": "Shell",
"bytes": "6444"
}
],
"symlink_target": ""
} |
import argparse
import csv
import os
##############################################################################
################################ Constants #################################
##############################################################################
LOGS_FOLDER_NAME = 'logs'  # default folder (relative to cwd) searched for logs
LOG_FILE_EXTENSION = '.log'  # only files with this extension are analyzed
##############################################################################
################################# Methods ##################################
##############################################################################
def retrieveLogsList(folder_name):
    '''
    Saves the logs filenames in a list. Uses the absolute path name to
    avoid storing the folder name as a separate parameter.

    :param folder_name: folder (relative to the cwd) containing the logs
    :return: list of absolute filenames ending in LOG_FILE_EXTENSION
    :raises TypeError: if folder_name is not a string
    '''
    # Validate input
    if not isinstance(folder_name, str):
        raise TypeError('Please enter a valid folder name')
    # Build the absolute folder path directly instead of chdir-ing into it:
    # the original chdir approach left the process cwd changed whenever
    # os.listdir raised (e.g. folder missing), since there was no try/finally.
    logs_folder = os.path.join(os.getcwd(), folder_name)
    files = os.listdir(logs_folder)
    logs_filenames = [os.path.join(logs_folder, f) for f in files
                      if f.endswith(LOG_FILE_EXTENSION)]
    return logs_filenames
def createLogObject(log_filename):
    '''
    Creates a log object with the desired properties as key value pairs.
    This method is needed since sometimes CURR measurements appear before
    the firmware version is shown and the multipliers cannot be computed.

    Keys of the returned dict:
      * firmware_version : e.g. 'V3.2' ('' if never found)
      * curr_index       : column of the current value in CURR rows (-1 if unknown)
      * timeus_entries   : timestamp (float) of every CURR row
      * current_entries  : current value (float) of every CURR row
    '''
    CURR_ROW_NAME = 'CURR'
    ROW_NAME_INDEX = 0
    TIMEUS_INDEX = 1
    # Validate input
    if not isinstance(log_filename,str) or not(log_filename.endswith(LOG_FILE_EXTENSION)):
        raise TypeError('Please enter a valid log filename.' +
                        ' Filename entered: "%s"' % (log_filename))
    # Initialize empty log object
    log_object = {'firmware_version' : '',
                  'curr_index' : -1,
                  'timeus_entries' : [],
                  'current_entries' : []}
    # Store desired properties in log_object
    # NOTE(review): file opened in 'rb' and fed to csv.reader -- this is the
    # Python 2 csv idiom; under Python 3 this would need text mode.
    with open(log_filename,'rb') as log:
        log_reader = csv.reader(log)
        for row in log_reader:
            # firmware version and CURR column index are captured from the
            # first row that provides them, then left untouched
            if log_object['firmware_version'] == '':
                log_object['firmware_version'] = findFirmwareVersionInRow(row)
            if log_object['curr_index'] == -1:
                log_object['curr_index'] = findCurrIndexInRow(row)
            if row[ROW_NAME_INDEX] == CURR_ROW_NAME:
                curr_index = log_object['curr_index']
                log_object['timeus_entries'].append(float(row[TIMEUS_INDEX].strip()))
                log_object['current_entries'].append(float(row[curr_index].strip()))
    return log_object
def computeFlightTime(log_object):
    '''
    Accumulates flight time for one parsed log: the drone is considered
    airborne from the first sample whose current rises above the per-firmware
    takeoff threshold until a sample drops below the landing threshold.
    Returns the total flight time in seconds.
    '''
    # Per-firmware calibration tables; the multipliers normalize current and
    # time units across firmware versions.
    CURRENT_THRESHOLDS_TAKEOFF = {'V3.2': 4, 'V3.3': 4, 'V3.4': 4, 'V3.7': 4,
                                  'V3.5': 4}
    CURRENT_THRESHOLDS_LAND = {'V3.2': 4, 'V3.3': 4, 'V3.4': 4, 'V3.7': 0.4,
                               'V3.5': 4}
    CURRENT_MULTIPLIERS = {'V3.2': 1, 'V3.3': 1.0/100, 'V3.4': 1, 'V3.7': 1,
                           'V3.5': 1}
    TIME_MULTIPLIERS = {'V3.2': 1000, 'V3.3': 1, 'V3.4': 1, 'V3.7': 1,
                        'V3.5': 1}
    # Validate input
    log_object_valid = 'firmware_version' in log_object and \
                       'curr_index' in log_object and \
                       'timeus_entries' in log_object and \
                       'current_entries' in log_object
    if not isinstance(log_object,dict) or not log_object_valid:
        raise TypeError('Please enter a valid log object.' +
                        ' Object entered: %s' % (log_object))
    current_threshold_takeoff = CURRENT_THRESHOLDS_TAKEOFF[log_object['firmware_version']]
    current_threshold_land = CURRENT_THRESHOLDS_LAND[log_object['firmware_version']]
    current_multiplier = CURRENT_MULTIPLIERS[log_object['firmware_version']]
    time_multiplier = TIME_MULTIPLIERS[log_object['firmware_version']]
    drone_flying = False
    last_takeoff = -1
    flight_time = 0
    for entry_ind in range(len(log_object['timeus_entries'])):
        timeus = time_multiplier*log_object['timeus_entries'][entry_ind]
        current = current_multiplier*log_object['current_entries'][entry_ind]
        if current > current_threshold_takeoff:
            # rising edge: record the takeoff time once
            if not drone_flying:
                last_takeoff = timeus
                drone_flying = True
        elif current < current_threshold_land and drone_flying:
            # falling edge: close the interval
            flight_time += timeus - last_takeoff
            drone_flying = False
    # NOTE(review): a flight still in progress at the end of the log is not
    # counted (the final interval is never closed) -- confirm intended.
    # Convert microseconds to seconds.
    return flight_time/1e6
def computeTotalFlightTime(logs_list):
    '''Sum the flight time (in seconds) over a list of log filenames.

    :raises TypeError: if logs_list is not a list
    '''
    if not isinstance(logs_list, list):
        raise TypeError('Please enter a list of log filenames')
    return sum(computeFlightTime(createLogObject(log_filename))
               for log_filename in logs_list)
##############################################################################
############################ Helper functions ##############################
##############################################################################
def getEntryIndexFromHeader(header,entry_name):
    '''Return the 1-based index of entry_name within header, or -1 if absent.

    Data rows carry the row name at index 0, so entries are shifted by one
    with respect to the header field list.
    '''
    entry_index = -1
    try:
        # Index starts at 1 (0 is header name)
        entry_index = header.index(entry_name) + 1
    except ValueError:
        print 'Entry "%s" not found in header: %s' % (entry_name,header)
    return entry_index
def findHeaderInRow(row, header_row_name):
    '''If this FMT row defines the header named header_row_name, return its
    field names stripped and upper-cased; otherwise return an empty list.'''
    FMT_ROW_NAME = 'FMT'
    HEADER_FIRST_INDEX = 5
    # guard clauses instead of a single combined condition
    if row[0].strip() != FMT_ROW_NAME:
        return []
    if row[3].strip() != header_row_name:
        return []
    return [field.strip().upper() for field in row[HEADER_FIRST_INDEX:]]
def findEntryIndexInRow(row, header_name, entry_name):
    '''Locate entry_name inside the header defined by this row (if any) and
    return its 1-based index, or -1 when the row defines no such header.'''
    header = findHeaderInRow(row, header_name)
    if not header:
        return -1
    return getEntryIndexFromHeader(header, entry_name)
def findCurrIndexInRow(row):
    '''Shorthand: 1-based index of the CURR field within the CURR header row.'''
    return findEntryIndexInRow(row,'CURR','CURR')
def findFirmwareVersionInRow(row):
    '''Extract a short firmware version string (e.g. 'V3.2') from a MSG row;
    return '' for any other row.'''
    ROW_NAME_INDEX = 0
    MSG_ROW_NAME = 'MSG'
    FIRMWARE_ENTRY_INDEX = -1
    FIRMWARE_LENGTH = 4  # To save only V3.2, V3.3, V3.4
    if row[ROW_NAME_INDEX] != MSG_ROW_NAME:
        return ''
    message = row[FIRMWARE_ENTRY_INDEX]
    start = message.find('V')  # Assuming always starts with V
    return message[start:start + FIRMWARE_LENGTH]
def formatSeconds(seconds):
    '''Format an amount of seconds as an 'HH:MM:SS' string.

    :param seconds: non-negative int or float (fractional part truncated)
    :raises TypeError: if seconds is not a number
    :raises ValueError: if seconds is negative
    '''
    SECONDS_TO_HOURS = 3600
    SECONDS_TO_MINUTES = 60
    if not isinstance(seconds,int) and not isinstance(seconds,float):
        raise TypeError('Please enter a valid amount of seconds')
    elif seconds < 0:
        raise ValueError('Please enter a positive amount of seconds')
    seconds = int(seconds)
    # Floor division keeps the intermediates as ints under both Python 2 and
    # Python 3 (plain '/' becomes true division and yields floats on Python 3).
    hours = seconds // SECONDS_TO_HOURS
    seconds = seconds % SECONDS_TO_HOURS
    minutes = seconds // SECONDS_TO_MINUTES
    seconds = seconds % SECONDS_TO_MINUTES
    return '%02d:%02d:%02d' % (hours,minutes,seconds)
if __name__ == '__main__':
    # CLI entry point: analyze a single log passed with -l/--log, or every
    # *.log file found in the default logs folder.
    parser = argparse.ArgumentParser()
    parser.add_argument('-l','--log',help='single log filename')
    args = parser.parse_args()
    if args.log:
        flight_time = computeTotalFlightTime([args.log])
        print 'Flight time: %s s' % (formatSeconds(flight_time))
    else:
        folder_name = LOGS_FOLDER_NAME
        logs_list = retrieveLogsList(folder_name)
        print 'Analyzing %s logs...' % (len(logs_list))
        total_flight_time = computeTotalFlightTime(logs_list)
        print 'Total flight time: %s s' % (formatSeconds(total_flight_time))
| {
"content_hash": "12d9301be1c89b0492278c1687e57f70",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 90,
"avg_line_length": 38.10243902439024,
"alnum_prop": 0.5589553194213289,
"repo_name": "Javiercerna/pixhawk-flight-time",
"id": "a594455b9433c97e65a2bffbe8c693d0e8223414",
"size": "7811",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flight_time.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14328"
}
],
"symlink_target": ""
} |
from ..geometry import create_plane
from ..gloo import set_state
from .mesh import MeshVisual
class PlaneVisual(MeshVisual):
    """Visual that displays a plane.

    Parameters
    ----------
    width : float
        Plane width.
    height : float
        Plane height.
    width_segments : int
        Plane segments count along the width.
    height_segments : float
        Plane segments count along the height.
    direction: unicode
        ``{'-x', '+x', '-y', '+y', '-z', '+z'}``
        Direction the plane will be facing.
    vertex_colors : ndarray
        Same as for `MeshVisual` class. See `create_plane` for vertex ordering.
    face_colors : ndarray
        Same as for `MeshVisual` class. See `create_plane` for vertex ordering.
    color : Color
        The `Color` to use when drawing the plane faces.
    edge_color : tuple or Color
        The `Color` to use when drawing the plane edges. If `None`, then no
        plane edges are drawn.
    """

    def __init__(self, width=1, height=1, width_segments=1, height_segments=1,
                 direction='+z', vertex_colors=None, face_colors=None,
                 color=(0.5, 0.5, 1, 1), edge_color=None):
        vertices, filled_indices, outline_indices = create_plane(
            width, height, width_segments, height_segments, direction)

        MeshVisual.__init__(self, vertices['position'], filled_indices,
                            vertex_colors, face_colors, color)
        if edge_color:
            # a separate line-mode mesh over the same vertices draws the edges
            self._outline = MeshVisual(vertices['position'], outline_indices,
                                       color=edge_color, mode='lines')
        else:
            self._outline = None

    def draw(self, transforms):
        """Draw the visual

        Parameters
        ----------
        transforms : instance of TransformSystem
            The transforms to use.
        """
        MeshVisual.draw(self, transforms)
        if self._outline:
            # polygon offset pushes the filled faces slightly back so the
            # outline does not z-fight with them
            set_state(polygon_offset=(1, 1), polygon_offset_fill=True)
            self._outline.draw(transforms)
| {
"content_hash": "bff3684573558e6ff9c628e42fcccddf",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 79,
"avg_line_length": 35.327586206896555,
"alnum_prop": 0.5851634943875061,
"repo_name": "hronoses/vispy",
"id": "a4cc965d51343acaf5675e1413cc75640e71c60e",
"size": "2375",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vispy/visuals/plane.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "143081"
},
{
"name": "GLSL",
"bytes": "171513"
},
{
"name": "JavaScript",
"bytes": "5007"
},
{
"name": "Makefile",
"bytes": "1593"
},
{
"name": "PowerShell",
"bytes": "4151"
},
{
"name": "Python",
"bytes": "2858273"
}
],
"symlink_target": ""
} |
""" Classes and routines to handle lattice issues for online modeling and
runtime calculations.
* ``LteParser``: parse ``ELEGANT`` lattice definition files for simulation:
1. convert lte file into dict/json format for further usage;
2. resolve rpn expressions within element definitions;
3. retain prefixed information of lte file as '_prefixstr' key in json/dict;
* ``Lattice``: handle lattice issues from json/dict definitions:
1. instantiate with json/dict lattice definition, e.g. from ``LteParser.file2json()``;
2. generate lte file for elegant simulation;
3. iteratively expand the beamline definition in lte file;
4. generate lte file after manipulations.
.. Author : Tong Zhang
.. Created : 2016-01-28
"""
import json
import os
import time
import ast
import sys
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
from pyrpn import rpn
from . import element
class LteParser(object):
"""
:param infile: lte filename or list of lines of lte file
:param mode: 'f': treat infile as file,
's': (else) treat as list of lines
"""
    def __init__(self, infile, mode='f'):
        """Read the lte source and pre-resolve '%'-prefixed 'sto' lines and
        '!!epics' control lines.

        :param infile: lte filename or list of lines of lte file
        :param mode: 'f': treat infile as file, 's': treat as string content
        """
        if mode == 'f':  # read lines from infile
            # NOTE(review): the file handle is never closed explicitly
            self.file_lines = open(infile, 'r').readlines()
        elif mode == 's':  # infile is the output of generateLatticeFile(bl,'sio')
            self.file_lines = infile.split('\n')  # string to list of lines
        self.confstr = ''  # configuration string line for given element excluding control part
        self.confstr_epics = ''  # configuration string line for given element, epics control part
        self.ctrlconf_dict = {}  # epics control config dict
        self.confdict = {}  # configuration string line to dict
        self.confjson = {}  # configuration string line to json
        self.prestrdict = {}  # prefix string line to dict, e.g. line starts with '%'
        self.stodict = {}  # sto key-value dict
        self.resolvePrefix()  # sto string information
        self.resolveEPICS()  # handle line starts with !!epics
    def resolvePrefix(self):
        """ extract prefix information into dict with the key of '_prefixstr'

        Lines starting with '%' are elegant 'sto' assignments of the form
        '% <rpn expression> sto <variable>'.  They are collected into a
        variable table, chained assignments are resolved via resolve_rpn(),
        and the normalized lines are stored in
        self.prestrdict['_prefixstr']; the resolved table is kept in
        self.stodict.
        """
        tmpstrlist = []
        tmpstodict = {}
        for line in self.file_lines:
            if line.startswith('%'):
                stolist = line.replace('%', '').split('sto')
                rpnexp = stolist[0].strip()  # rpn expression
                rpnvar = stolist[1].strip()  # rpn variable
                tmpstodict[rpnvar] = rpnexp
        # 'sto chain' assignments must be resolved transitively, e.g. for
        #   % 0.1 sto a
        #   % a sto b
        # b should end up as 0.1 (b -> a -> 0.1); a direct float conversion
        # of the raw expression would fail here.
        # (bug found 2016-06-08, fixed 2016-06-12)
        self.stodict = self.resolve_rpn(tmpstodict)
        for k, v in self.stodict.items():
            stostr = '% {val} sto {var}'.format(val=v, var=k)
            tmpstrlist.append(stostr)
        self.prestrdict['_prefixstr'] = tmpstrlist
def get_rpndict_flag(self, rpndict):
""" calculate flag set, the value is True or False,
if rpndict value is not None, flag is True, or False
if set with only one item, i.e. True returns,
means values of rpndict are all valid float numbers,
then finally return True, or False
"""
flag_set = set([rpn.Rpn.solve_rpn(str(v)) is not None for v in rpndict.values()])
if len(flag_set) == 1 and flag_set.pop():
return True
else:
return False
def rinse_rpnexp(self, rpnexp, rpndict):
""" replace valid keyword of rpnexp from rpndict
e.g. rpnexp = 'b a /', rpndict = {'b': 10}
then after rinsing, rpnexp = '10 a /'
return rinsed rpnexp
"""
for wd in rpnexp.split():
if wd in rpndict:
try:
val = float(rpndict[wd])
rpnexp = rpnexp.replace(wd, str(val))
except:
pass
return rpnexp
    def resolve_rpn(self, rpndict):
        """ solve dict of rpn expressions to pure var to val dict

        Repeatedly substitutes known numeric values into the remaining
        expressions until every value evaluates to a number.

        :param rpndict: dict of rpn expressions
        :return: pure var to val dict

        NOTE(review): if some expression can never be resolved (circular or
        undefined variables) this while-loop does not terminate -- confirm
        inputs are guaranteed resolvable.
        """
        if rpndict == {}:
            return {}
        retflag = self.get_rpndict_flag(rpndict)
        cnt = 0  # iteration counter; currently never read
        tmpdict = {k: v for k, v in rpndict.items()}
        while not retflag:
            # update rpndict
            cnt += 1
            tmpdict = self.update_rpndict(tmpdict)
            # and flag
            retflag = self.get_rpndict_flag(tmpdict)
        return tmpdict
    def update_rpndict(self, rpndict):
        """ update rpndict, try to solve rpn expressions as many as possible,
        leave unsolvable unchanged.

        :param rpndict: dict mapping variable name -> rpn expression or value
        :return: new dict with every directly solvable expression replaced by
                 its numeric value, and the remaining expressions rinsed of
                 any variables already known to be numeric
        """
        tmpdict = {k: v for k, v in rpndict.items()}
        for k, v in rpndict.items():
            v_str = str(v)
            if rpn.Rpn.solve_rpn(v_str) is None:
                # not yet solvable: substitute known numeric variables
                tmpdict[k] = self.rinse_rpnexp(v_str, tmpdict)
            else:
                tmpdict[k] = rpn.Rpn.solve_rpn(v_str)
        return tmpdict
def resolveEPICS(self):
""" extract epics control configs into
"""
kw_name_list = []
kw_ctrlconf_list = []
for line in self.file_lines:
if line.startswith('!!epics'):
el = line.replace('!!epics', '').replace(':', ';;', 1).split(';;')
kw_name_list.append(el[0].strip())
kw_ctrlconf_list.append(json.loads(el[1].strip()))
self.ctrlconf_dict = dict(zip(kw_name_list, kw_ctrlconf_list))
    def getKw(self, kw):
        """ Extract doc snippet for element configuration,
        :param kw: element name
        :return: instance itself
        1 call getKwAsDict() to return config as a dict
        2 call getKwAsJson() to return config as json string
        3 call getKwAsString() to return config as a raw string
        USAGE: getKw('Q10')
        """
        # matching is case-insensitive; '&' at end-of-line marks a continuation
        ikw = kw.lower()
        line_continue_flag = ''
        appendflag = False
        try:
            for line in self.file_lines:
                if line.strip() == '':
                    continue
                # normalize whitespace and strip trailing newline/semicolon
                line = ' '.join(line.strip().split()).strip('\n; ')
                if line.startswith('!'):
                    # comment line
                    continue
                if line.lower().startswith(ikw + ' :') or line.lower().startswith(ikw + ':'):
                    conflist = []  # list to put into element configuration
                    conflist.append(line)
                    appendflag = True
                elif appendflag and line_continue_flag == '&':
                    # previous line ended with '&': this line continues the config
                    conflist.append(line)
                line_continue_flag = line[-1]
                if line_continue_flag != '&':
                    appendflag = False
            # join continuations; '&' separators become ','
            # NOTE(review): if the keyword never matched, 'conflist' is unbound
            # here and the NameError is swallowed by the bare except below,
            # yielding conf_str = '' — presumably intentional fallback.
            conf_str = ''.join(conflist).replace('&', ',')
            if 'line' in conf_str.lower().split('=')[0]:  # if bl defines lattice
                # string-reversal trick: replace only the LAST 'line' token with
                # 'beamline,lattice' so an element name containing 'line' is safe
                conf_str = conf_str.lower().replace(',', ' ')[::-1].replace('enil', 'beamline,lattice'[::-1], 1)[
                           ::-1]  # avoid the case with bl keyword has 'line'
        except:
            conf_str = ''
        # print conf_str
        # split('!epics'): second part is epics control conf
        splitedparts = conf_str.split('!epics')
        self.confstr = splitedparts[0]
        try:
            self.confstr_epics = splitedparts[1].strip()
        except IndexError:
            # no '!epics' suffix on this element
            self.confstr_epics = ''
        return self
def toDict(self):
""" convert self.confstr to dict, could apply chain rule, write to self.confdict
USAGE: ins = LteParser(infile)
ins.getKw(kw).toDict()
"""
self.confdict = self.str2dict(self.confstr)
return self
def str2dict(self, rawstr):
""" convert str to dict format
USAGE: rdict = str2dict(rawstr)
:param rawstr: raw configuration string of element
"""
kw_list = []
sp1 = rawstr.split(':')
kw_name = sp1[0].strip().upper()
kw_desc = sp1[1:]
sp2 = kw_desc[0].replace(',', ';;', 1).split(';;')
kw_type = sp2[0].strip()
try:
kw_vals = sp2[1].replace(",", '=').split('=')
[(not (i.isspace() or i == '')) and kw_list.append(i) for i in kw_vals]
ks = [k.strip() for k in kw_list[0::2]]
vs = [v.strip().replace('"', '').replace("'", '') for v in kw_list[1::2]]
kw_vals_dict = dict(zip(ks, vs))
rdict = {kw_name: {kw_type: kw_vals_dict}}
except:
rdict = {kw_name: kw_type}
return rdict
def dict2json(self, idict):
""" convert dict into json
USAGE: rjson = dict2json(idict)
"""
return json.dumps(idict)
def getKwAsJson(self, kw):
""" return keyword configuration as a json
Usage: rjson = getKwAsJson(kw)
:param kw: element keyword
"""
return self.dict2json(self.getKwAsDict(kw))
def getKwAsDict(self, kw):
""" return keyword configuration as a dict
Usage: rdict = getKwAsDict(kw)
"""
self.getKw(kw)
return self.str2dict(self.confstr)
def getKwCtrlConf(self, kw, fmt='dict'):
""" return keyword's control configuration, followed after '!epics' notation
:param kw: keyword name
:param fmt: return format, 'raw', 'dict', 'json', default is 'dict'
"""
try:
confd = self.ctrlconf_dict[kw]
if fmt == 'dict':
retval = confd
else: # 'json' string for other options
retval = json.dumps(confd)
except KeyError:
# try to get from raw line string
self.getKw(kw)
if self.confstr_epics != '':
if fmt == 'dict':
retval = ast.literal_eval(self.confstr_epics)
elif fmt == 'json':
retval = json.dumps(ast.literal_eval(self.confstr_epics))
else: # raw string
retval = self.confstr_epics
else:
retval = None
return retval
def getKwAsString(self, kw):
""" return keyword configuration as a string
Usage: rstr = getKwAsString(kw)
"""
return self.getKw(kw).confstr
def detectAllKws(self):
""" Detect all keyword from infile, return as a list
USAGE: kwslist = detectAllKws()
"""
kwslist = []
for line in self.file_lines:
# if line.strip() == '': continue
line = ''.join(line.strip().split())
if line.startswith("!"):
continue
# if ':' in line and not "line" in line:
if ':' in line:
kw_name = line.split(':')[0]
if set(kw_name).difference({'=', '-', '*', '/', '+'}) == set(kw_name):
kwslist.append(kw_name)
return kwslist
def file2json(self, jsonfile=None):
""" Convert entire lte file into json like format
USAGE: 1: kwsdictstr = file2json()
2: kwsdictstr = file2json(jsonfile = 'somefile')
show pretty format with pipeline: | jshon, or | pjson
if jsonfile is defined, dump to defined file before returning json string
:param jsonfile: filename to dump json strings
"""
kwslist = self.detectAllKws()
kwsdict = {}
idx = 0
for kw in sorted(kwslist, key=str.lower):
# print kw
idx += 1
tdict = self.getKwAsDict(kw)
self.rpn2val(tdict)
kwsdict.update(tdict)
if kw not in self.ctrlconf_dict:
ctrlconf = self.getKwCtrlConf(kw, fmt='dict')
if ctrlconf is not None:
self.ctrlconf_dict.update({kw: ctrlconf})
kwsdict.update(self.prestrdict)
ctrlconfdict = {'_epics': self.ctrlconf_dict} # all epics contrl config in self.ctrlconfdict
kwsdict.update(ctrlconfdict)
try:
with open(os.path.expanduser(jsonfile), 'w') as outfile:
json.dump(kwsdict, outfile)
except:
pass
return json.dumps(kwsdict)
def getKwType(self, kw):
""" return the type of kw, upper cased string
USAGE: rtype = getKwType(kw)
"""
return self.getKwAsDict(kw).values()[0].keys()[0].upper()
def getKwConfig(self, kw):
""" return the configuration of kw, dict
USAGE: rdict = getKwConfig(kw)
"""
confd = self.getKwAsDict(kw).values()[0].values()[0]
return {k.lower(): v for k, v in confd.items()}
def makeElement(self, kw):
""" return element object regarding the keyword configuration
"""
kw_name = kw
kw_type = self.getKwType(kw_name)
kw_config = {k.lower(): v for k, v in self.getKwConfig(kw_name).items()}
objtype = 'Element' + kw_type.capitalize()
retobj = getattr(element, objtype)(name=kw_name, config=kw_config)
# set up EPICS control configs
ctrlconf = self.getKwCtrlConf(kw_name)
if ctrlconf != {}:
retobj.setConf(ctrlconf, type='ctrl')
return retobj
def scanStoVars(self, strline):
""" scan input string line, replace sto parameters with calculated results.
"""
for wd in strline.split():
if wd in self.stodict:
strline = strline.replace(wd, str(self.stodict[wd]))
return strline
def rpn2val(self, rdict):
""" Resolve the rpn string into calulated float number
USAGE: rpn2val(rdict)
:param rdict: json like dict
"""
kw_name = list(rdict.keys())[0] # b11
kw_val = rdict[kw_name]
try:
kw_type = list(kw_val.keys())[0] # csrcsben
kw_param = list(kw_val.values())[0]
if kw_type != 'beamline':
for k, v in kw_param.items():
v = self.scanStoVars(v)
rpnval = rpn.Rpn.solve_rpn(v)
if rpnval is not None:
kw_param[k] = rpnval # update rpn string to float if not None
except:
pass # element that only has type name, e.g. {'bpm01': 'moni'}
def solve_rpn(self):
""" solve rpn string in self.confdict, and update self.confdict
USAGE: ins = LteParser(infile)
ins.getKw(kw).toDict().solve_rpn()
"""
self.rpn2val(self.confdict)
return self
# ===========================================================================
class Lattice(object):
    """ class for handling lattice configurations and operations

    Python-3 fixes applied throughout: ``xrange`` -> ``range``, and dict
    view objects are materialized with ``list()`` before indexing.
    """

    def __init__(self, elements):
        """ initialize the class with input elements

        elements should be dict converted from json,
        if not convert first by json.loads(elements)
        """
        if isinstance(elements, str):
            self.all_elements = json.loads(elements)
        else:  # elements is dict already
            self.all_elements = elements
        self.kws_ele, self.kws_bl = self.getAllKws()

    def dumpAllElements(self):
        """ dump all element configuration lines as json format.
        """
        return json.dumps(self.all_elements)

    def getAllEle(self):
        """ return all element keywords
        """
        return self.kws_ele

    def getAllBl(self):
        """ return all beamline keywords
        """
        return self.kws_bl

    def getBeamline(self, beamlineKw):
        """ get beamline definition from all_elements, return as a list

        :param beamlineKw: keyword of beamline
        """
        lattice_string = list(self.all_elements.get(beamlineKw.upper()).values())[0].get('lattice')
        return lattice_string[1:-1].split()  # drop leading '(' and trailing ')' and split into list

    def getFullBeamline(self, beamlineKw, extend=False):
        """ get beamline definition from all_elements,
        expand nested beamlines iteratively with the elements from all_elements

        e.g. an element 'doub1' appearing inside beamline 'chi' is replaced
        by the expansion of the 'doub1' beamline definition itself.

        :param beamlineKw: keyword of beamline
        :param extend: if extend mode should be envoked, by default False
            if extend = True, element like '2*D01' would be expanded to D01, D01
        :return: flat list of element names, or None for an undefined beamline
        """
        try:
            assert beamlineKw.upper() in self.kws_bl
            rawbl = self.getBeamline(beamlineKw)
            fullbl = []
            if not extend:
                for ele in rawbl:
                    if self.isBeamline(ele):
                        fullbl.extend(self.getFullBeamline(ele))
                    else:  # if not beamline, do not expand
                        fullbl.append(ele)
            else:  # extend '<n>*<name>' repetitions as well
                for ele in rawbl:
                    ele_num_name_dict = self.rinseElement(ele)
                    elename = ele_num_name_dict['name']
                    elenum = ele_num_name_dict['num']
                    if self.isBeamline(elename):
                        fullbl.extend(self.getFullBeamline(elename, extend=True) * elenum)
                    else:
                        fullbl.extend([elename] * elenum)
            return fullbl
        except AssertionError:
            print('ERROR: %s is not a right defined beamline.' % beamlineKw)

    def isBeamline(self, kw):
        """ test if kw is a beamline

        :param kw: keyword
        """
        return kw.upper() in self.kws_bl

    def getAllKws(self):
        """ extract all keywords into two categories

        kws_ele: magnetic elements
        kws_bl: beamline elements
        return (kws_ele, kws_bl)
        """
        kws_ele = []
        kws_bl = []
        for ele in self.all_elements:
            if ele == '_prefixstr' or ele == '_epics':
                # bookkeeping keys, not lattice elements
                continue
            elif self.getElementType(ele).lower() == u'beamline':
                kws_bl.append(ele)
            else:
                kws_ele.append(ele)
        return tuple((kws_ele, kws_bl))

    def showBeamlines(self):
        """ show all defined beamlines as a summary string
        """
        cnt = 0
        blidlist = []
        for k in self.all_elements:
            try:
                if 'beamline' in self.all_elements.get(k):
                    cnt += 1
                    blidlist.append(k)
            except Exception:
                pass
        retstr = '{total:<3d}beamlines: {allbl}'.format(total=cnt,
                                                        allbl=';'.join(blidlist))
        return retstr

    def getElementType(self, elementKw):
        """ return type name for given element keyword,
        e.g. getElementType('Q01') should return string: 'QUAD'

        :param elementKw: element keyword
        """
        try:
            etype = list(self.all_elements.get(elementKw.upper()).keys())[0]
        except Exception:
            # element stored as a bare type string, e.g. {'BPM01': 'moni'}
            etype = self.all_elements.get(elementKw.upper())
        return etype.upper()

    def getElementConf(self, elementKw, raw=False):
        """ return configuration for given element keyword,
        e.g. getElementConf('Q01') should return dict: {u'k1': 0.0, u'l': 0.05}

        :param elementKw: element keyword
        :param raw: when True, return the {type: params} wrapper unchanged
        """
        if raw is True:
            try:
                econf = self.all_elements.get(elementKw.upper())
            except Exception:
                return {}
        else:
            try:
                econf = list(self.all_elements.get(elementKw.upper()).values())[0]
            except Exception:
                return {}
        return econf

    def getElementCtrlConf(self, elementKw):
        """ return keyword's EPICS control configs,
        if not setup, return {}
        """
        try:
            retval = self.all_elements['_epics'][elementKw.upper()]
        except KeyError:
            retval = {}
        return retval

    def formatElement(self, kw, format='elegant'):
        """ convert json/dict of element configuration into elegant/mad format

        :param kw: keyword
        :param format: 'elegant' (default) or 'mad'
        :raises NotImplementedError: for 'mad'
        :raises ValueError: for any other format name
        """
        etype = self.getElementType(kw)
        econf_dict = self.getElementConf(kw)
        econf_str = ''
        for k, v in econf_dict.items():
            econf_str += (k + ' = ' + '"' + str(v) + '"' + ', ')
        if format == 'elegant':
            fmtstring = '{eid:<10s}:{etype:>10s}, {econf}'.format(eid=kw.upper(),
                                                                  etype=etype.upper(),
                                                                  econf=econf_str[
                                                                        :-2])
            # [:-2] slicing to remove trailing space and ','
        elif format == 'mad':
            raise NotImplementedError("Not implemented, yet")
        else:
            # previously an unknown format fell through to UnboundLocalError
            raise ValueError("unknown lattice format: %s" % format)
        return fmtstring

    def generateLatticeLine(self, latname='newline', line=None):
        """ construct a new lattice line and register it on this instance

        :param latname: name for generated new lattice
        :param line: list of element names (nested lists are flattened)
        """
        latticeline = []
        for e in line:
            if isinstance(e, list):
                latticeline.extend(e)
            else:
                latticeline.append(e)
        newblele = {latname.upper(): {'beamline': {'lattice': '(' + ' '.join(latticeline) + ')'}}}
        self.all_elements.update(newblele)
        self.kws_bl.append(latname.upper())
        return newblele

    def generateLatticeFile(self, beamline, filename=None, format='elegant'):
        """ generate simulation files for lattice analysis,
        e.g. ".lte" for elegant, ".madx" for madx

        :param beamline: keyword for beamline
        :param filename: name of lte/mad file,
                         if None, output to stdout;
                         if 'sio', output to a string as return value;
                         other cases, output to filename;
        :param format: madx, elegant,
            'elegant' by default, generated lattice is for elegant tracking
        :return: the generated text for 'sio', otherwise True
        """
        if filename is None:
            f = sys.stdout
        elif filename == 'sio':
            f = StringIO()
        else:
            f = open(os.path.expanduser(filename), 'w')

        # write filehead, mainly resolving prefix string lines
        cl1 = "This file is automatically generated by 'generateLatticeFile()' method,"
        cl2 = 'could be used as ' + format + ' lattice file.'
        cl3 = 'Author: Tong Zhang (zhangtong@sinap.ac.cn)'
        cl4 = 'Generated Date: ' + time.strftime('%Y-%m-%d %H:%M:%S %Z', time.localtime())
        f.write('!{str1:<73s}!\n'.format(str1='-' * 73))
        f.write('!{str1:^73s}!\n'.format(str1=cl1))
        f.write('!{str1:^73s}!\n'.format(str1=cl2))
        f.write('!{str1:^73s}!\n'.format(str1='-' * 24))
        f.write('!{str1:^73s}!\n'.format(str1=cl3))
        f.write('!{str1:^73s}!\n'.format(str1=cl4))
        f.write('!{str1:<73s}!\n'.format(str1='-' * 73))
        f.write('\n')

        # write EPICS control configuration part if contains '_epics' key
        if '_epics' in self.all_elements:
            f.write('! {str1:<73s}\n'.format(str1='EPICS control definitions:'))
            for k, v in self.all_elements['_epics'].items():
                f.write('!!epics {k:<10s}:{v:>50s}\n'.format(k=k, v=json.dumps(v)))
            f.write('\n')

        # write element definitions and lattice
        f.write('! {str1:<72s}\n'.format(str1='Element definitions:'))
        elelist = self.getFullBeamline(beamline, extend=True)
        if self.getElementType(elelist[0]) != 'CHARGE':
            elelist.insert(0, self.getChargeElement())
        for ele in sorted(set(elelist)):
            elestring = self.rinseElement(ele)['name']
            f.write(self.formatElement(elestring, format='elegant') + '\n')

        # write beamline lattice definition
        f.write('\n')
        f.write('! {str1:<72s}\n'.format(str1='Beamline definitions:'))
        f.write('{bl:<10s}: line = ({lattice})'.format(bl=beamline.upper(),
                                                       lattice=', '.join(elelist)))

        if filename == 'sio':
            retval = f.getvalue()
        else:
            retval = True
        # never close sys.stdout — the previous unconditional close() broke
        # any printing done after this call when filename was None
        if f is not sys.stdout:
            f.close()
        # if everything's OK, return True or string ('sio') mode
        return retval

    def getElementList(self, bl):
        """ return the elements list according to the appearance order
        in beamline named 'bl'

        :param bl: beamline name
        """
        return self.getFullBeamline(bl, extend=True)

    def rinseElement(self, ele):
        """ resolve element case with multiply format,
        e.g. rinseElement('10*D01') should return dict {'num': 10, 'name': 'D01'}

        :param ele: element string
        """
        if '*' in ele:
            tmplist = ''.join(ele.split()).split('*')
            tmplist_num = tmplist[[x.isdigit() for x in tmplist].index(True)]
            tmplist_ele = tmplist[[x.isdigit() for x in tmplist].index(False)]
            return dict(zip(('num', 'name'), (int(tmplist_num), tmplist_ele)))
        else:
            return dict(zip(('num', 'name'), (1, ele)))

    def orderLattice(self, beamline):
        """ ordering element type appearance sequence for each element of beamline

        e.g. after getFullBeamline,
        lattice list ['q','Q01', 'B11', 'Q02', 'B22'] will return:
            [(u'q',   u'CHARGE',   1),
             (u'q01', u'QUAD',     1),
             (u'b11', u'CSRCSBEN', 1),
             (u'q02', u'QUAD',     2),
             (u'b12', u'CSRCSBEN', 2)]
        :return: list of (name, type, order) tuples
        """
        ele_name_list = self.getFullBeamline(beamline, extend=True)
        ele_type_list = [self.getElementType(ele) for ele in ele_name_list]
        order_list = [0] * len(ele_name_list)
        ele_type_dict_uniq = dict(zip(ele_type_list, order_list))
        # 'range' (py3) instead of py2-only 'xrange'
        for idx in range(len(ele_name_list)):
            etype = ele_type_list[idx]
            ele_type_dict_uniq[etype] += 1
            order_list[idx] = ele_type_dict_uniq[etype]
        # return a list (not a one-shot zip iterator) so callers may reuse it
        return list(zip(ele_name_list, ele_type_list, order_list))

    def getChargeElement(self):
        """ return charge element name, or '' when no CHARGE element exists
        """
        for k in self.getAllEle():
            if self.getElementType(k) == 'CHARGE':
                return k
        return ''

    def getElementByOrder(self, beamline, type, irange):
        """ return element list by appearance order in beamline,
        which could be returned by orderLattice(beamline)

        :param beamline: beamline name
        :param type: element type name
        :param irange: selected element range
            possible irange definitions:
                irange = 0, first one 'type' element;
                irange = -1, last one
                irange = 0,2,3, the first, third and fourth 'type' element
                irange = 2:10:1, start:end:step range
                irange = 'all', all
        """
        try:
            assert beamline.upper() in self.kws_bl
        except AssertionError:
            print('%s is not a defined beamline.' % beamline)
            return ''
        try:
            orderedLattice_list = self.orderLattice(beamline)
            allmatchedlist = [val for idx, val in enumerate(orderedLattice_list) if val[1] == type.upper()]
            if ',' in str(irange):
                retlist = [allmatchedlist[int(num)] for num in str(irange).split(',')]
            elif ':' in str(irange):
                # list() is required: py3 'map' objects have no append()
                idxlist = list(map(int, irange.split(':')))
                if len(idxlist) == 2:
                    idxlist.append(1)
                idx_start, idx_stop, idx_step = idxlist[0], idxlist[1], idxlist[2]
                retlist = allmatchedlist[slice(idx_start, idx_stop, idx_step)]
            elif str(irange) == 'all':
                retlist = allmatchedlist[:]
            else:
                retlist = [allmatchedlist[int(irange)]]
            return retlist
        except Exception:
            # e.g. no element of 'type' in 'beamline', or malformed irange
            return ''

    def getElementByName(self, beamline, name):
        """ return element list by literal name in beamline,
        each element is tuple like (name, type, order)

        :param beamline: beamline name
        :param name: element literal name
        """
        try:
            assert beamline.upper() in self.kws_bl
        except AssertionError:
            print('%s is not a defined beamline.' % beamline)
            return ''
        try:
            assert name.lower() in self.getFullBeamline(beamline, extend=True)
            orderedLattice_list = self.orderLattice(beamline)
            retlist = [val for idx, val in enumerate(orderedLattice_list) if val[0] == name.lower()]
            return retlist
        except AssertionError:
            print('%s is not in %s.' % (name, beamline))
            return ''

    def manipulateLattice(self, beamline, type='quad',
                          irange='all', property='k1',
                          opstr='+0%'):
        """ manipulate element with type, e.g. quad

        :param beamline: beamline definition keyword
        :param type: element type, case insensitive
        :param irange: slice index, see getElementByOrder()
        :param property: element property, e.g. 'k1' for 'quad' strength
        :param opstr: operation, '+[-]n%' or '+[-*/]n'
        """
        opele_list = self.getElementByOrder(beamline, type, irange)
        opr = opstr[0]
        opn = float(opstr[1:].strip('%'))
        if opstr[-1] == '%':
            opn /= 100.0
            opsdict = {'+': lambda a, p: a * (1 + p),
                       '-': lambda a, p: a * (1 - p)}
        else:
            opsdict = {'+': lambda a, p: a + p,
                       '-': lambda a, p: a - p,
                       '*': lambda a, p: a * p,
                       '/': lambda a, p: a / float(p)}
        for ename, etype, eid in opele_list:
            # list() materializes the py3 dict view; the inner dict object is
            # shared, so mutating it updates self.all_elements in place
            params = list(self.all_elements[ename.upper()].values())[0]
            val0_old = params.get(property.lower())
            params[property.lower()] = opsdict[opr](val0_old, opn)
        return True

    def getElementProperties(self, name):
        """ return element properties as {'type': ..., 'properties': ...}

        :param name: element name
        """
        try:
            allp = self.all_elements[name.upper()]
            if isinstance(allp, dict):
                # list() materializes py3 dict views before indexing
                etype = list(allp.keys())[0]
                properties = list(allp.values())[0]
                return {'type': etype, 'properties': properties}
            else:
                return {'type': allp, 'properties': None}
        except Exception:
            pass

    def makeElement(self, kw):
        """ return element object regarding the keyword configuration

        :param kw: element keyword
        """
        kw_name = kw
        kw_type = self.getElementType(kw_name)
        kw_config = {k.lower(): v for k, v in self.getElementConf(kw_name).items()}
        objtype = 'Element' + kw_type.capitalize()
        retobj = getattr(element, objtype)(name=kw_name, config=kw_config)
        # set up EPICS control configs
        ctrlconf = self.getElementCtrlConf(kw)
        if ctrlconf != {}:
            retobj.setConf(ctrlconf, type='ctrl')
        return retobj
# ===========================================================================
def test2():
    """Ad-hoc demo: parse an elegant .lte file and regenerate a lattice file."""
    latticepath = os.path.join(os.getcwd(), '../lattice')
    infile = os.path.join(latticepath, 'linac.lte')

    # dump the whole lte file into a json string
    lpins = LteParser(infile)
    allLatticeElements_str = lpins.file2json()

    # rebuild a Lattice from the json and write an elegant file for beamline 'bl'
    latins = Lattice(allLatticeElements_str)
    testingpath = os.path.join(os.getcwd(), '../tests/tracking')
    outlatfile = os.path.join(testingpath, 'tmp.lte')
    latins.generateLatticeFile('bl', outlatfile)
def main():
    """Script entry point: run the demo routine."""
    test2()


if __name__ == '__main__':
    main()
| {
"content_hash": "5e5515984bb0ce737390c2fcba0e12f1",
"timestamp": "",
"source": "github",
"line_count": 946,
"max_line_length": 116,
"avg_line_length": 37.21141649048626,
"alnum_prop": 0.5326117834213965,
"repo_name": "Archman/beamline",
"id": "4ec8bd7a9f596d1e18bb825eff4e96d4ce2f1242",
"size": "35249",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "beamline/lattice.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20449"
}
],
"symlink_target": ""
} |
import sys
from helpers.run import *
# CLI: `<script> test [--filter <TestName>]` runs the phpunit suite.
args = sys.argv[1:]

if args and args[0] == "test":
    options = "--configuration test/phpunit.xml"
    tests = "test/Cases/."
    filters = ""
    # optional '--filter <name>' narrows the run to a single test
    if len(args) > 2 and args[1] == "--filter":
        filters = "--filter {test}".format(test=args[2])
    Run.call("php vendor/bin/phpunit {options} {filters} {tests}".format(options=options,
                                                                         filters=filters,
                                                                         tests=tests))
| {
"content_hash": "2147c75dcf2ea98524334db81e89aa08",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 93,
"avg_line_length": 34.333333333333336,
"alnum_prop": 0.4223300970873786,
"repo_name": "richrdkng/ffmpeg-php-wrapper",
"id": "0c67be8abbf119e515eb2126a6ae1eb5015bad13",
"size": "665",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "script/tasks/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "135"
},
{
"name": "Nginx",
"bytes": "550"
},
{
"name": "PHP",
"bytes": "91536"
},
{
"name": "Python",
"bytes": "2959"
},
{
"name": "Ruby",
"bytes": "691"
},
{
"name": "Shell",
"bytes": "6887"
}
],
"symlink_target": ""
} |
from csv import DictWriter
from io import StringIO
from typing import Any, Dict, Iterable, List, Optional, Union
from flask import Response, jsonify, request, stream_with_context
from flask.json import dumps
import orjson
from ._config import MAX_RESULTS, MAX_COMPATIBILITY_RESULTS
from ._common import app, is_compatibility_mode
def print_non_standard(data):
    """
    Send a response in the legacy (non-standard) epidata envelope.

    With ?format=json the payload is returned as-is; otherwise it is wrapped
    in the classic {result, message, epidata} structure, where result is 1 on
    success and -2 when there is no data.
    """
    if request.values.get("format", "classic") == "json":
        return jsonify(data)

    if data:
        result, message = 1, "success"
    else:
        result, message = -2, "no results"
    # error short form (result == -1) is unreachable here, but kept for parity
    # with the streaming printers' compatibility behavior
    if result == -1 and is_compatibility_mode():
        return jsonify(dict(result=result, message=message))
    return jsonify(dict(result=result, message=message, epidata=data))
class APrinter:
    """Abstract base for streaming result printers.

    Subclasses override the `_begin`/`_format_row`/`_end` hooks to produce a
    specific output format. `result` follows the epidata convention:
    -2 no rows, -1 error, 1 success, 2 truncated at the row limit.
    """

    def __init__(self):
        # rows emitted so far
        self.count: int = 0
        # epidata result code; -1 until the stream proves otherwise
        self.result: int = -1
        # row cap depends on whether the legacy-compatible API is in use
        self._max_results: int = MAX_COMPATIBILITY_RESULTS if is_compatibility_mode() else MAX_RESULTS

    def make_response(self, gen):
        """Wrap the row generator in a Flask response (JSON by default)."""
        return Response(
            gen,
            mimetype="application/json",
        )

    def __call__(self, generator: Iterable[Dict[str, Any]]) -> Response:
        """Stream all rows of *generator* through the format hooks."""
        def gen():
            self.result = -2  # no result, default response
            began = False
            try:
                for row in generator:
                    if not began:
                        # do it here to catch an error before we send the begin
                        r = self._begin()
                        began = True
                        if r is not None:
                            yield r
                    r = self._print_row(row)
                    if r is not None:
                        yield r
            except Exception as e:
                app.logger.exception(f"error executing: {str(e)}")
                self.result = -1
                yield self._error(e)

            if not began:
                # do it manually to catch an error before we send the begin
                r = self._begin()
                began = True
                if r is not None:
                    yield r

            r = self._end()
            if r is not None:
                yield r

        return self.make_response(stream_with_context(gen()))

    @property
    def remaining_rows(self) -> int:
        # how many more rows may be emitted before truncation
        return self._max_results - self.count

    def _begin(self) -> Optional[Union[str, bytes]]:
        # hook: emitted once before the first row
        return None

    def _error(self, error: Exception) -> str:
        # send a generic error payload
        return dumps(dict(result=self.result, message=f"unknown error occurred: {error}", error=str(error), epidata=[]))

    def _print_row(self, row: Dict) -> Optional[Union[str, bytes]]:
        """Count the row, enforce the row limit, then delegate to _format_row."""
        first = self.count == 0
        if self.count >= self._max_results:
            # hit the limit
            self.result = 2
            return None
        if first:
            self.result = 1  # at least one row
        self.count += 1
        return self._format_row(first, row)

    def _format_row(self, first: bool, row: Dict) -> Optional[Union[str, bytes]]:
        # hook: serialize one row; `first` marks the first emitted row
        return None

    def _end(self) -> Optional[Union[str, bytes]]:
        # hook: emitted once after the last row
        return None
class ClassicPrinter(APrinter):
    """
    a printer class writing in the classic epidata format:
    { "epidata": [...], "result": ..., "message": ... }
    """

    def _begin(self):
        # compatibility mode delays the "epidata" key until the first row
        return "{ " if is_compatibility_mode() else '{ "epidata": ['

    def _format_row(self, first: bool, row: Dict):
        if first and is_compatibility_mode():
            prefix = b'"epidata": ['
        elif first:
            prefix = b""
        else:
            prefix = b","
        return prefix + orjson.dumps(row)

    def _end(self):
        if self.count == 0:
            message = "no results"
        elif self.result == 2:
            message = "too many results, data truncated"
        else:
            message = "success"
        # with zero rows in compatibility mode the array was never opened,
        # so there is nothing to close
        closer = "" if (self.count == 0 and is_compatibility_mode()) else "], "
        return f'{closer}"result": {self.result}, "message": {dumps(message)} }}'.encode("utf-8")
class ClassicTreePrinter(ClassicPrinter):
    """
    a printer class writing a tree by the given grouping criteria as the first
    element in the epidata array
    """

    group: str
    _tree: Dict[str, List[Dict]]

    def __init__(self, group: str):
        super(ClassicTreePrinter, self).__init__()
        self.group = group
        # per-instance tree: the previous class-level `_tree = dict()` default
        # was a mutable object shared by every instance
        self._tree = dict()

    def _begin(self):
        self._tree = dict()
        return super(ClassicTreePrinter, self)._begin()

    def _format_row(self, first: bool, row: Dict):
        """Accumulate the row under its group key; output is deferred to _end."""
        group = row.get(self.group, "")
        # NOTE(review): `del` raises KeyError if the group key is absent even
        # though .get() above tolerates it — presumably the key is always
        # present in practice; confirm against callers before changing.
        del row[self.group]
        if group in self._tree:
            self._tree[group].append(row)
        else:
            self._tree[group] = [row]
        if first and is_compatibility_mode():
            return b'"epidata": ['
        return None

    def _end(self):
        if self.count == 0:
            return super(ClassicTreePrinter, self)._end()
        tree = orjson.dumps(self._tree)
        self._tree = dict()
        r = super(ClassicTreePrinter, self)._end()
        return tree + r
class CSVPrinter(APrinter):
    """
    a printer class writing rows as a CSV file download
    """

    _stream: StringIO
    _writer: DictWriter
    _filename: Optional[str]

    def __init__(self, filename: Optional[str] = "epidata"):
        super(CSVPrinter, self).__init__()
        self._filename = filename
        # per-instance buffer: the previous class-level `_stream = StringIO()`
        # was shared by all CSVPrinter instances (and thus by concurrent
        # requests), letting their output interleave
        self._stream = StringIO()

    def make_response(self, gen):
        """CSV responses download as `<filename>.csv` when a filename is set."""
        headers = {"Content-Disposition": f"attachment; filename={self._filename}.csv"} if self._filename else {}
        return Response(gen, mimetype="text/csv; charset=utf8", headers=headers)

    def _begin(self):
        return None

    def _error(self, error: Exception) -> str:
        # plain-text error, since the payload is CSV rather than JSON
        return f"unknown error occurred:\n{error}"

    def _format_row(self, first: bool, row: Dict):
        """Write one row; on the first row also emit the CSV header."""
        if first:
            self._writer = DictWriter(self._stream, list(row.keys()), lineterminator="\n")
            self._writer.writeheader()
        self._writer.writerow(row)

        # remove the stream content to print just one line at a time
        self._stream.flush()
        v = self._stream.getvalue()
        self._stream.seek(0)
        self._stream.truncate(0)
        return v

    def _end(self):
        self._writer = None
        return ""
class JSONPrinter(APrinter):
    """
    a printer class emitting all rows as a single JSON array
    """

    def _begin(self):
        # open the array
        return b"["

    def _format_row(self, first: bool, row: Dict):
        separator = b"" if first else b","
        return separator + orjson.dumps(row)

    def _end(self):
        # close the array
        return b"]"
class JSONLPrinter(APrinter):
    """
    a printer class writing one JSON object per line (JSON Lines format)
    """

    def make_response(self, gen):
        return Response(gen, mimetype=" text/plain; charset=utf8")

    def _format_row(self, first: bool, row: Dict):
        # each record is a complete JSON document terminated by a newline
        return orjson.dumps(row, option=orjson.OPT_APPEND_NEWLINE)

    def _end(self):
        return b""
def create_printer() -> APrinter:
    """Pick a printer implementation based on the request's `format` parameter."""
    fmt: str = request.values.get("format", "classic")
    if fmt == "tree":
        return ClassicTreePrinter("signal")
    if fmt.startswith("tree-"):
        # support tree format grouped by any property following the dash
        return ClassicTreePrinter(fmt[len("tree-"):])
    simple = {"json": JSONPrinter, "csv": CSVPrinter, "jsonl": JSONLPrinter}
    printer_cls = simple.get(fmt)
    if printer_cls is not None:
        return printer_cls()
    return ClassicPrinter()
| {
"content_hash": "088c12a10fac9bef1119f41066013c64",
"timestamp": "",
"source": "github",
"line_count": 265,
"max_line_length": 120,
"avg_line_length": 28.99245283018868,
"alnum_prop": 0.5603279968762203,
"repo_name": "cmu-delphi/delphi-epidata",
"id": "715214e198646468367ff3f10c8f539a6940e4be",
"size": "7683",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "src/server/_printer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2852"
},
{
"name": "HTML",
"bytes": "727"
},
{
"name": "JavaScript",
"bytes": "18856"
},
{
"name": "Makefile",
"bytes": "5648"
},
{
"name": "PHP",
"bytes": "131735"
},
{
"name": "Python",
"bytes": "881368"
},
{
"name": "R",
"bytes": "17445"
},
{
"name": "Shell",
"bytes": "2024"
}
],
"symlink_target": ""
} |
from eventlet import tpool
from nova import context
from nova import db
from nova import flags
from nova import log as logging
from nova import utils
from nova.virt.libvirt import netutils
LOG = logging.getLogger("nova.virt.libvirt.firewall")
FLAGS = flags.FLAGS
# libvirt bindings are optional at import time: hosts without them (e.g. test
# or tooling environments) can still load this module, but NWFilterFirewall
# will not function.
try:
    import libvirt
except ImportError:
    LOG.warn(_("Libvirt module could not be loaded. NWFilterFirewall will "
               "not work correctly."))
class FirewallDriver(object):
    """Abstract interface for instance firewalling backends.

    Concrete drivers implement per-instance packet filtering and react to
    security-group changes. Every method here raises NotImplementedError.
    """

    def prepare_instance_filter(self, instance, network_info):
        """Set up the filters for an instance before it starts running."""
        raise NotImplementedError()

    def unfilter_instance(self, instance, network_info):
        """Tear down filtering for an instance."""
        raise NotImplementedError()

    def apply_instance_filter(self, instance, network_info):
        """Activate the instance's filter.

        When this returns the instance must be firewalled appropriately;
        ideally this is a no-op with all work done in
        prepare_instance_filter.
        """
        raise NotImplementedError()

    def refresh_security_group_rules(self, security_group_id):
        """Reload security group rules from the data store.

        Called when a rule is added to or removed from the group.
        """
        raise NotImplementedError()

    def refresh_security_group_members(self, security_group_id):
        """Reload security group membership from the data store.

        Called when an instance joins or leaves the group.
        """
        raise NotImplementedError()

    def refresh_provider_fw_rules(self):
        """Reload the provider-wide rules shared by all hosts/instances.

        Called when the rule list changes via the admin api.
        """
        raise NotImplementedError()

    def setup_basic_filtering(self, instance, network_info):
        """Install anti-spoofing and dhcp rules.

        Invoked at spawn time, before :method:`prepare_instance_filter`.
        """
        raise NotImplementedError()

    def instance_filter_exists(self, instance, network_info):
        """Check whether the nova-instance-instance-xxx filter exists."""
        raise NotImplementedError()
class NWFilterFirewall(FirewallDriver):
    """
    This class implements a network filtering mechanism versatile
    enough for EC2 style Security Group filtering by leveraging
    libvirt's nwfilter.
    First, all instances get a filter ("nova-base-filter") applied.
    This filter provides some basic security such as protection against
    MAC spoofing, IP spoofing, and ARP spoofing.
    This filter drops all incoming ipv4 and ipv6 connections.
    Outgoing connections are never blocked.
    Second, every security group maps to a nwfilter filter(*).
    NWFilters can be updated at runtime and changes are applied
    immediately, so changes to security groups can be applied at
    runtime (as mandated by the spec).
    Security group rules are named "nova-secgroup-<id>" where <id>
    is the internal id of the security group. They're applied only on
    hosts that have instances in the security group in question.
    Updates to security groups are done by updating the data model
    (in response to API calls) followed by a request sent to all
    the nodes with instances in the security group to refresh the
    security group.
    Each instance has its own NWFilter, which references the above
    mentioned security group NWFilters. This was done because
    interfaces can only reference one filter while filters can
    reference multiple other filters. This has the added benefit of
    actually being able to add and remove security groups from an
    instance at run time. This functionality is not exposed anywhere,
    though.
    Outstanding questions:
    The name is unique, so would there be any good reason to sync
    the uuid across the nodes (by assigning it from the datamodel)?
    (*) This sentence brought to you by the redundancy department of
    redundancy.
    """
    def __init__(self, get_connection, **kwargs):
        # Store the connection *getter* rather than a live connection so a
        # fresh libvirt handle is obtained on each use via the _conn property.
        self._libvirt_get_connection = get_connection
        self.static_filters_configured = False
        self.handle_security_groups = False
    def apply_instance_filter(self, instance, network_info):
        """No-op. Everything is done in prepare_instance_filter"""
        pass
    def _get_connection(self):
        # Delegates to the getter supplied at construction time.
        return self._libvirt_get_connection()
    _conn = property(_get_connection)
    def nova_dhcp_filter(self):
        """The standard allow-dhcp-server filter is an <ip> one, so it uses
        ebtables to allow traffic through. Without a corresponding rule in
        iptables, it'll get blocked anyway."""
        # $DHCPSERVER is substituted by libvirt from the filterref parameters.
        return '''<filter name='nova-allow-dhcp-server' chain='ipv4'>
                    <uuid>891e4787-e5c0-d59b-cbd6-41bc3c6b36fc</uuid>
                    <rule action='accept' direction='out'
                          priority='100'>
                      <udp srcipaddr='0.0.0.0'
                           dstipaddr='255.255.255.255'
                           srcportstart='68'
                           dstportstart='67'/>
                    </rule>
                    <rule action='accept' direction='in'
                          priority='100'>
                      <udp srcipaddr='$DHCPSERVER'
                           srcportstart='67'
                           dstportstart='68'/>
                    </rule>
                  </filter>'''
    def nova_ra_filter(self):
        # Permit IPv6 router advertisements from the designated $RASERVER.
        return '''<filter name='nova-allow-ra-server' chain='root'>
                            <uuid>d707fa71-4fb5-4b27-9ab7-ba5ca19c8804</uuid>
                              <rule action='accept' direction='inout'
                                    priority='100'>
                                <icmpv6 srcipaddr='$RASERVER'/>
                              </rule>
                            </filter>'''
    def setup_basic_filtering(self, instance, network_info):
        """Set up basic filtering (MAC, IP, and ARP spoofing protection)"""
        logging.info('called setup_basic_filtering in nwfilter')
        if self.handle_security_groups:
            # No point in setting up a filter set that we'll be overriding
            # anyway.
            return
        logging.info('ensuring static filters')
        self._ensure_static_filters()
        # VPN instances need a laxer base filter (no anti-spoofing).
        if instance['image_ref'] == str(FLAGS.vpn_image_id):
            base_filter = 'nova-vpn'
        else:
            base_filter = 'nova-base'
        # One per-NIC filter, named after the instance plus the MAC-derived
        # nic id, each simply referencing the chosen base filter.
        for (network, mapping) in network_info:
            nic_id = mapping['mac'].replace(':', '')
            instance_filter_name = self._instance_filter_name(instance, nic_id)
            self._define_filter(self._filter_container(instance_filter_name,
                                                       [base_filter]))
    def _ensure_static_filters(self):
        """Static filters are filters that have no need to be IP aware.
        There is no configuration or tuneability of these filters, so they
        can be set up once and forgotten about.
        """
        if self.static_filters_configured:
            return
        self._define_filter(self._filter_container('nova-base',
                                                   ['no-mac-spoofing',
                                                    'no-ip-spoofing',
                                                    'no-arp-spoofing',
                                                    'allow-dhcp-server']))
        self._define_filter(self._filter_container('nova-vpn',
                                                   ['allow-dhcp-server']))
        self._define_filter(self.nova_base_ipv4_filter)
        self._define_filter(self.nova_base_ipv6_filter)
        self._define_filter(self.nova_dhcp_filter)
        self._define_filter(self.nova_ra_filter)
        if FLAGS.allow_same_net_traffic:
            self._define_filter(self.nova_project_filter)
            if FLAGS.use_ipv6:
                self._define_filter(self.nova_project_filter_v6)
        self.static_filters_configured = True
    def _filter_container(self, name, filters):
        # Build a root-chain filter that only references other filters.
        xml = '''<filter name='%s' chain='root'>%s</filter>''' % (
                 name,
                 ''.join(["<filterref filter='%s'/>" % (f,) for f in filters]))
        return xml
    def nova_base_ipv4_filter(self):
        # Accept all outbound, drop all inbound tcp/udp/icmp over IPv4.
        retval = "<filter name='nova-base-ipv4' chain='ipv4'>"
        for protocol in ['tcp', 'udp', 'icmp']:
            for direction, action, priority in [('out', 'accept', 399),
                                                ('in', 'drop', 400)]:
                retval += """<rule action='%s' direction='%s' priority='%d'>
                                <%s />
                              </rule>""" % (action, direction,
                                            priority, protocol)
        retval += '</filter>'
        return retval
    def nova_base_ipv6_filter(self):
        # IPv6 twin of nova_base_ipv4_filter.
        retval = "<filter name='nova-base-ipv6' chain='ipv6'>"
        for protocol in ['tcp-ipv6', 'udp-ipv6', 'icmpv6']:
            for direction, action, priority in [('out', 'accept', 399),
                                                ('in', 'drop', 400)]:
                retval += """<rule action='%s' direction='%s' priority='%d'>
                                <%s />
                              </rule>""" % (action, direction,
                                            priority, protocol)
        retval += '</filter>'
        return retval
    def nova_project_filter(self):
        # Accept inbound IPv4 traffic originating from the project network
        # ($PROJNET/$PROJMASK are libvirt filterref parameters).
        retval = "<filter name='nova-project' chain='ipv4'>"
        for protocol in ['tcp', 'udp', 'icmp']:
            retval += """<rule action='accept' direction='in' priority='200'>
                           <%s srcipaddr='$PROJNET' srcipmask='$PROJMASK' />
                         </rule>""" % protocol
        retval += '</filter>'
        return retval
    def nova_project_filter_v6(self):
        # IPv6 twin of nova_project_filter (note direction is 'inout' here).
        retval = "<filter name='nova-project-v6' chain='ipv6'>"
        for protocol in ['tcp-ipv6', 'udp-ipv6', 'icmpv6']:
            retval += """<rule action='accept' direction='inout'
                               priority='200'>
                           <%s srcipaddr='$PROJNETV6'
                               srcipmask='$PROJMASKV6' />
                         </rule>""" % (protocol)
        retval += '</filter>'
        return retval
    def _define_filter(self, xml):
        # Accepts either the XML string itself or a zero-argument callable
        # producing it (the nova_*_filter methods above are passed uncalled).
        if callable(xml):
            xml = xml()
        # execute in a native thread and block current greenthread until done
        tpool.execute(self._conn.nwfilterDefineXML, xml)
    def unfilter_instance(self, instance, network_info):
        """Clear out the nwfilter rules."""
        instance_name = instance.name
        # Undefine each per-NIC filter; a missing filter is logged, not fatal.
        for (network, mapping) in network_info:
            nic_id = mapping['mac'].replace(':', '')
            instance_filter_name = self._instance_filter_name(instance, nic_id)
            try:
                self._conn.nwfilterLookupByName(instance_filter_name).\
                                                undefine()
            except libvirt.libvirtError:
                LOG.debug(_('The nwfilter(%(instance_filter_name)s) '
                            'for %(instance_name)s is not found.') % locals())
        # Also drop the per-instance security-group aggregation filter.
        instance_secgroup_filter_name =\
            '%s-secgroup' % (self._instance_filter_name(instance))
        try:
            self._conn.nwfilterLookupByName(instance_secgroup_filter_name)\
                                            .undefine()
        except libvirt.libvirtError:
            LOG.debug(_('The nwfilter(%(instance_secgroup_filter_name)s) '
                        'for %(instance_name)s is not found.') % locals())
    def prepare_instance_filter(self, instance, network_info):
        """Creates an NWFilter for the given instance.
        In the process, it makes sure the filters for the provider blocks,
        security groups, and base filter are all in place.
        """
        self.refresh_provider_fw_rules()
        ctxt = context.get_admin_context()
        instance_secgroup_filter_name = \
            '%s-secgroup' % (self._instance_filter_name(instance))
        instance_secgroup_filter_children = ['nova-base-ipv4',
                                             'nova-base-ipv6',
                                             'nova-allow-dhcp-server']
        if FLAGS.use_ipv6:
            # Only allow RA responses when at least one network has an
            # IPv6 gateway.
            networks = [network for (network, info) in network_info if
                        info['gateway6']]
            if networks:
                instance_secgroup_filter_children.\
                    append('nova-allow-ra-server')
        # Refresh and reference each security group the instance belongs to.
        for security_group in \
                db.security_group_get_by_instance(ctxt, instance['id']):
            self.refresh_security_group_rules(security_group['id'])
            instance_secgroup_filter_children.append('nova-secgroup-%s' %
                                                    security_group['id'])
        self._define_filter(
            self._filter_container(instance_secgroup_filter_name,
                                   instance_secgroup_filter_children))
        network_filters = self.\
            _create_network_filters(instance, network_info,
                                    instance_secgroup_filter_name)
        for (name, children) in network_filters:
            self._define_filters(name, children)
    def _create_network_filters(self, instance, network_info,
                                instance_secgroup_filter_name):
        # Returns [(filter_name, [child filter names]), ...] — one entry per
        # NIC, combining the base filter, provider rules and secgroup filter.
        if instance['image_ref'] == str(FLAGS.vpn_image_id):
            base_filter = 'nova-vpn'
        else:
            base_filter = 'nova-base'
        result = []
        for (_n, mapping) in network_info:
            nic_id = mapping['mac'].replace(':', '')
            instance_filter_name = self._instance_filter_name(instance, nic_id)
            instance_filter_children = [base_filter, 'nova-provider-rules',
                                        instance_secgroup_filter_name]
            if FLAGS.allow_same_net_traffic:
                instance_filter_children.append('nova-project')
                if FLAGS.use_ipv6:
                    instance_filter_children.append('nova-project-v6')
            result.append((instance_filter_name, instance_filter_children))
        return result
    def _define_filters(self, filter_name, filter_children):
        # Convenience wrapper: build the container XML and define it.
        self._define_filter(self._filter_container(filter_name,
                                                   filter_children))
    def refresh_security_group_rules(self, security_group_id):
        # Re-generate and re-define the nwfilter for one security group;
        # libvirt applies the updated filter to running domains immediately.
        return self._define_filter(
                   self.security_group_to_nwfilter_xml(security_group_id))
    def refresh_provider_fw_rules(self):
        """Update rules for all instances.
        This is part of the FirewallDriver API and is called when the
        provider firewall rules change in the database. In the
        `prepare_instance_filter` we add a reference to the
        'nova-provider-rules' filter for each instance's firewall, and
        by changing that filter we update them all.
        """
        xml = self.provider_fw_to_nwfilter_xml()
        return self._define_filter(xml)
    def security_group_to_nwfilter_xml(self, security_group_id):
        """Render one security group's rules as nwfilter accept-rule XML."""
        security_group = db.security_group_get(context.get_admin_context(),
                                               security_group_id)
        rule_xml = ""
        v6protocol = {'tcp': 'tcp-ipv6', 'udp': 'udp-ipv6', 'icmp': 'icmpv6'}
        for rule in security_group.rules:
            rule_xml += "<rule action='accept' direction='in' priority='300'>"
            if rule.cidr:
                version = netutils.get_ip_version(rule.cidr)
                if(FLAGS.use_ipv6 and version == 6):
                    net, prefixlen = netutils.get_net_and_prefixlen(rule.cidr)
                    rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \
                                (v6protocol[rule.protocol], net, prefixlen)
                else:
                    net, mask = netutils.get_net_and_mask(rule.cidr)
                    rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \
                                (rule.protocol, net, mask)
                if rule.protocol in ['tcp', 'udp']:
                    rule_xml += "dstportstart='%s' dstportend='%s' " % \
                                (rule.from_port, rule.to_port)
                elif rule.protocol == 'icmp':
                    LOG.info('rule.protocol: %r, rule.from_port: %r, '
                             'rule.to_port: %r', rule.protocol,
                             rule.from_port, rule.to_port)
                    # For ICMP, from_port/to_port encode type/code; -1 means
                    # "any" and is simply omitted.
                    if rule.from_port != -1:
                        rule_xml += "type='%s' " % rule.from_port
                    if rule.to_port != -1:
                        rule_xml += "code='%s' " % rule.to_port
                rule_xml += '/>\n'
            rule_xml += "</rule>\n"
        # NOTE(review): rules without a cidr (group-to-group grants) produce
        # an empty <rule> element here — apparently unsupported by this driver.
        xml = "<filter name='nova-secgroup-%s' " % security_group_id
        if(FLAGS.use_ipv6):
            xml += "chain='root'>%s</filter>" % rule_xml
        else:
            xml += "chain='ipv4'>%s</filter>" % rule_xml
        return xml
    def provider_fw_to_nwfilter_xml(self):
        """Compose a filter of drop rules from specified cidrs."""
        rule_xml = ""
        v6protocol = {'tcp': 'tcp-ipv6', 'udp': 'udp-ipv6', 'icmp': 'icmpv6'}
        rules = db.provider_fw_rule_get_all(context.get_admin_context())
        for rule in rules:
            # Same rendering as security_group_to_nwfilter_xml, but with
            # action='block' and a higher priority (lower number = earlier).
            rule_xml += "<rule action='block' direction='in' priority='150'>"
            version = netutils.get_ip_version(rule.cidr)
            if(FLAGS.use_ipv6 and version == 6):
                net, prefixlen = netutils.get_net_and_prefixlen(rule.cidr)
                rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \
                            (v6protocol[rule.protocol], net, prefixlen)
            else:
                net, mask = netutils.get_net_and_mask(rule.cidr)
                rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \
                            (rule.protocol, net, mask)
            if rule.protocol in ['tcp', 'udp']:
                rule_xml += "dstportstart='%s' dstportend='%s' " % \
                            (rule.from_port, rule.to_port)
            elif rule.protocol == 'icmp':
                LOG.info('rule.protocol: %r, rule.from_port: %r, '
                         'rule.to_port: %r', rule.protocol,
                         rule.from_port, rule.to_port)
                if rule.from_port != -1:
                    rule_xml += "type='%s' " % rule.from_port
                if rule.to_port != -1:
                    rule_xml += "code='%s' " % rule.to_port
            rule_xml += '/>\n'
            rule_xml += "</rule>\n"
        xml = "<filter name='nova-provider-rules' "
        if(FLAGS.use_ipv6):
            xml += "chain='root'>%s</filter>" % rule_xml
        else:
            xml += "chain='ipv4'>%s</filter>" % rule_xml
        return xml
    def _instance_filter_name(self, instance, nic_id=None):
        # Per-instance filter name, optionally qualified by NIC id.
        if not nic_id:
            return 'nova-instance-%s' % (instance['name'])
        return 'nova-instance-%s-%s' % (instance['name'], nic_id)
    def instance_filter_exists(self, instance, network_info):
        """Check nova-instance-instance-xxx exists"""
        for (network, mapping) in network_info:
            nic_id = mapping['mac'].replace(':', '')
            instance_filter_name = self._instance_filter_name(instance, nic_id)
            try:
                self._conn.nwfilterLookupByName(instance_filter_name)
            except libvirt.libvirtError:
                name = instance.name
                LOG.debug(_('The nwfilter(%(instance_filter_name)s) for'
                            '%(name)s is not found.') % locals())
                return False
        return True
class IptablesFirewallDriver(FirewallDriver):
    """Firewall driver that builds per-instance iptables chains.

    Still delegates basic anti-spoofing/DHCP filtering to an embedded
    NWFilterFirewall; everything else (security groups, provider rules)
    is expressed as iptables/ip6tables rules via linux_net's
    iptables_manager.
    """
    def __init__(self, execute=None, **kwargs):
        from nova.network import linux_net
        self.iptables = linux_net.iptables_manager
        # Caches of instances currently filtered, keyed by instance id.
        self.instances = {}
        self.network_infos = {}
        self.nwfilter = NWFilterFirewall(kwargs['get_connection'])
        # NOTE(review): attribute name is a typo for "basically_filtered";
        # it is internal to this class only.
        self.basicly_filtered = False
        # Any traffic not accepted by a security group falls through to
        # these DROP chains.
        self.iptables.ipv4['filter'].add_chain('sg-fallback')
        self.iptables.ipv4['filter'].add_rule('sg-fallback', '-j DROP')
        self.iptables.ipv6['filter'].add_chain('sg-fallback')
        self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP')
    def setup_basic_filtering(self, instance, network_info):
        """Set up provider rules and basic NWFilter."""
        self.nwfilter.setup_basic_filtering(instance, network_info)
        if not self.basicly_filtered:
            LOG.debug(_('iptables firewall: Setup Basic Filtering'))
            self.refresh_provider_fw_rules()
            self.basicly_filtered = True
    def apply_instance_filter(self, instance, network_info):
        """No-op. Everything is done in prepare_instance_filter"""
        pass
    def unfilter_instance(self, instance, network_info):
        # pop() both caches; a missing entry means we never filtered it.
        if self.instances.pop(instance['id'], None):
            # NOTE(vish): use the passed info instead of the stored info
            self.network_infos.pop(instance['id'])
            self.remove_filters_for_instance(instance)
            self.iptables.apply()
            self.nwfilter.unfilter_instance(instance, network_info)
        else:
            LOG.info(_('Attempted to unfilter instance %s which is not '
                     'filtered'), instance['id'])
    def prepare_instance_filter(self, instance, network_info):
        # Remember the instance so do_refresh_security_group_rules can
        # rebuild its chains later.
        self.instances[instance['id']] = instance
        self.network_infos[instance['id']] = network_info
        self.add_filters_for_instance(instance)
        self.iptables.apply()
    def _create_filter(self, ips, chain_name):
        # Jump rules routing traffic destined for each ip into chain_name.
        return ['-d %s -j $%s' % (ip, chain_name) for ip in ips]
    def _filters_for_instance(self, chain_name, network_info):
        """Jump rules (v4 and, if enabled, v6) for all of the instance's
        fixed IPs into its per-instance chain."""
        ips_v4 = [ip['ip'] for (_n, mapping) in network_info
                 for ip in mapping['ips']]
        ipv4_rules = self._create_filter(ips_v4, chain_name)
        ipv6_rules = []
        if FLAGS.use_ipv6:
            ips_v6 = [ip['ip'] for (_n, mapping) in network_info
                     for ip in mapping['ip6s']]
            ipv6_rules = self._create_filter(ips_v6, chain_name)
        return ipv4_rules, ipv6_rules
    def _add_filters(self, chain_name, ipv4_rules, ipv6_rules):
        for rule in ipv4_rules:
            self.iptables.ipv4['filter'].add_rule(chain_name, rule)
        if FLAGS.use_ipv6:
            for rule in ipv6_rules:
                self.iptables.ipv6['filter'].add_rule(chain_name, rule)
    def add_filters_for_instance(self, instance):
        """Create the instance's chain and populate it with its rules."""
        network_info = self.network_infos[instance['id']]
        chain_name = self._instance_chain_name(instance)
        if FLAGS.use_ipv6:
            self.iptables.ipv6['filter'].add_chain(chain_name)
        self.iptables.ipv4['filter'].add_chain(chain_name)
        ipv4_rules, ipv6_rules = self._filters_for_instance(chain_name,
                                                            network_info)
        # 'local' chain gets the jump rules; the instance chain gets the
        # actual accept/drop policy.
        self._add_filters('local', ipv4_rules, ipv6_rules)
        ipv4_rules, ipv6_rules = self.instance_rules(instance, network_info)
        self._add_filters(chain_name, ipv4_rules, ipv6_rules)
    def remove_filters_for_instance(self, instance):
        chain_name = self._instance_chain_name(instance)
        self.iptables.ipv4['filter'].remove_chain(chain_name)
        if FLAGS.use_ipv6:
            self.iptables.ipv6['filter'].remove_chain(chain_name)
    def instance_rules(self, instance, network_info):
        """Build the (ipv4_rules, ipv6_rules) lists for one instance.

        Rule order matters: invalid-drop, established-accept, provider
        drops, DHCP, same-net traffic, security-group accepts, and finally
        the sg-fallback DROP.
        """
        ctxt = context.get_admin_context()
        ipv4_rules = []
        ipv6_rules = []
        # Always drop invalid packets
        ipv4_rules += ['-m state --state ' 'INVALID -j DROP']
        ipv6_rules += ['-m state --state ' 'INVALID -j DROP']
        # Allow established connections
        ipv4_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT']
        ipv6_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT']
        # Pass through provider-wide drops
        ipv4_rules += ['-j $provider']
        ipv6_rules += ['-j $provider']
        dhcp_servers = [info['dhcp_server'] for (_n, info) in network_info]
        for dhcp_server in dhcp_servers:
            ipv4_rules.append('-s %s -p udp --sport 67 --dport 68 '
                              '-j ACCEPT' % (dhcp_server,))
        #Allow project network traffic
        if FLAGS.allow_same_net_traffic:
            cidrs = [network['cidr'] for (network, _m) in network_info]
            for cidr in cidrs:
                ipv4_rules.append('-s %s -j ACCEPT' % (cidr,))
        # We wrap these in FLAGS.use_ipv6 because they might cause
        # a DB lookup. The other ones are just list operations, so
        # they're not worth the clutter.
        if FLAGS.use_ipv6:
            # Allow RA responses
            gateways_v6 = [mapping['gateway6'] for (_n, mapping) in
                           network_info]
            for gateway_v6 in gateways_v6:
                ipv6_rules.append(
                        '-s %s/128 -p icmpv6 -j ACCEPT' % (gateway_v6,))
            #Allow project network traffic
            if FLAGS.allow_same_net_traffic:
                cidrv6s = [network['cidr_v6'] for (network, _m) in
                           network_info]
                for cidrv6 in cidrv6s:
                    ipv6_rules.append('-s %s -j ACCEPT' % (cidrv6,))
        security_groups = db.security_group_get_by_instance(ctxt,
                                                            instance['id'])
        # then, security group chains and rules
        for security_group in security_groups:
            rules = db.security_group_rule_get_by_security_group(ctxt,
                                                          security_group['id'])
            for rule in rules:
                LOG.debug(_('Adding security group rule: %r'), rule)
                if not rule.cidr:
                    # Group-to-group grant: version decided below per-IP.
                    version = 4
                else:
                    version = netutils.get_ip_version(rule.cidr)
                if version == 4:
                    fw_rules = ipv4_rules
                else:
                    fw_rules = ipv6_rules
                protocol = rule.protocol
                if version == 6 and rule.protocol == 'icmp':
                    protocol = 'icmpv6'
                args = ['-j ACCEPT']
                if protocol:
                    args += ['-p', protocol]
                if protocol in ['udp', 'tcp']:
                    if rule.from_port == rule.to_port:
                        args += ['--dport', '%s' % (rule.from_port,)]
                    else:
                        args += ['-m', 'multiport',
                                 '--dports', '%s:%s' % (rule.from_port,
                                                        rule.to_port)]
                elif protocol == 'icmp':
                    # from_port/to_port encode the ICMP type/code; -1 = any.
                    icmp_type = rule.from_port
                    icmp_code = rule.to_port
                    if icmp_type == -1:
                        icmp_type_arg = None
                    else:
                        icmp_type_arg = '%s' % icmp_type
                        if not icmp_code == -1:
                            icmp_type_arg += '/%s' % icmp_code
                    if icmp_type_arg:
                        if version == 4:
                            args += ['-m', 'icmp', '--icmp-type',
                                     icmp_type_arg]
                        elif version == 6:
                            args += ['-m', 'icmp6', '--icmpv6-type',
                                     icmp_type_arg]
                if rule.cidr:
                    LOG.info('Using cidr %r', rule.cidr)
                    args += ['-s', rule.cidr]
                    fw_rules += [' '.join(args)]
                else:
                    if rule['grantee_group']:
                        # NOTE(review): this loop variable shadows the
                        # method's `instance` parameter; harmless here only
                        # because the parameter is not used after this point.
                        for instance in rule['grantee_group']['instances']:
                            LOG.info('instance: %r', instance)
                            ips = db.instance_get_fixed_addresses(ctxt,
                                                                instance['id'])
                            LOG.info('ips: %r', ips)
                            for ip in ips:
                                subrule = args + ['-s %s' % ip]
                                fw_rules += [' '.join(subrule)]
                LOG.info('Using fw_rules: %r', fw_rules)
        ipv4_rules += ['-j $sg-fallback']
        ipv6_rules += ['-j $sg-fallback']
        return ipv4_rules, ipv6_rules
    def instance_filter_exists(self, instance, network_info):
        """Check nova-instance-instance-xxx exists"""
        return self.nwfilter.instance_filter_exists(instance, network_info)
    def refresh_security_group_members(self, security_group):
        self.do_refresh_security_group_rules(security_group)
        self.iptables.apply()
    def refresh_security_group_rules(self, security_group):
        self.do_refresh_security_group_rules(security_group)
        self.iptables.apply()
    @utils.synchronized('iptables', external=True)
    def do_refresh_security_group_rules(self, security_group):
        # Rebuild every known instance's chains; the security_group argument
        # is unused — refresh is all-or-nothing.
        for instance in self.instances.values():
            self.remove_filters_for_instance(instance)
            self.add_filters_for_instance(instance)
    def refresh_provider_fw_rules(self):
        """See class:FirewallDriver: docs."""
        self._do_refresh_provider_fw_rules()
        self.iptables.apply()
    @utils.synchronized('iptables', external=True)
    def _do_refresh_provider_fw_rules(self):
        """Internal, synchronized version of refresh_provider_fw_rules."""
        self._purge_provider_fw_rules()
        self._build_provider_fw_rules()
    def _purge_provider_fw_rules(self):
        """Remove all rules from the provider chains."""
        self.iptables.ipv4['filter'].empty_chain('provider')
        if FLAGS.use_ipv6:
            self.iptables.ipv6['filter'].empty_chain('provider')
    def _build_provider_fw_rules(self):
        """Create all rules for the provider IP DROPs."""
        self.iptables.ipv4['filter'].add_chain('provider')
        if FLAGS.use_ipv6:
            self.iptables.ipv6['filter'].add_chain('provider')
        ipv4_rules, ipv6_rules = self._provider_rules()
        for rule in ipv4_rules:
            self.iptables.ipv4['filter'].add_rule('provider', rule)
        if FLAGS.use_ipv6:
            for rule in ipv6_rules:
                self.iptables.ipv6['filter'].add_rule('provider', rule)
    def _provider_rules(self):
        """Generate a list of rules from provider for IP4 & IP6."""
        ctxt = context.get_admin_context()
        ipv4_rules = []
        ipv6_rules = []
        rules = db.provider_fw_rule_get_all(ctxt)
        for rule in rules:
            LOG.debug(_('Adding provider rule: %s'), rule['cidr'])
            version = netutils.get_ip_version(rule['cidr'])
            if version == 4:
                fw_rules = ipv4_rules
            else:
                fw_rules = ipv6_rules
            protocol = rule['protocol']
            if version == 6 and protocol == 'icmp':
                protocol = 'icmpv6'
            args = ['-p', protocol, '-s', rule['cidr']]
            if protocol in ['udp', 'tcp']:
                if rule['from_port'] == rule['to_port']:
                    args += ['--dport', '%s' % (rule['from_port'],)]
                else:
                    args += ['-m', 'multiport',
                             '--dports', '%s:%s' % (rule['from_port'],
                                                    rule['to_port'])]
            elif protocol == 'icmp':
                icmp_type = rule['from_port']
                icmp_code = rule['to_port']
                if icmp_type == -1:
                    icmp_type_arg = None
                else:
                    icmp_type_arg = '%s' % icmp_type
                    if not icmp_code == -1:
                        icmp_type_arg += '/%s' % icmp_code
                if icmp_type_arg:
                    if version == 4:
                        args += ['-m', 'icmp', '--icmp-type',
                                 icmp_type_arg]
                    elif version == 6:
                        args += ['-m', 'icmp6', '--icmpv6-type',
                                 icmp_type_arg]
            args += ['-j DROP']
            fw_rules += [' '.join(args)]
        return ipv4_rules, ipv6_rules
    def _security_group_chain_name(self, security_group_id):
        return 'nova-sg-%s' % (security_group_id,)
    def _instance_chain_name(self, instance):
        return 'inst-%s' % (instance['id'],)
| {
"content_hash": "2a5f249cd00143c00bcea0cdda1eae21",
"timestamp": "",
"source": "github",
"line_count": 798,
"max_line_length": 79,
"avg_line_length": 41.41102756892231,
"alnum_prop": 0.5300792834231072,
"repo_name": "30loops/nova",
"id": "c6253511e68f90c9f624dae2fa81815727b5f66c",
"size": "33866",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/virt/libvirt/firewall.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
class Solution(object):
    def canJump(self, nums):
        """Greedy check for LeetCode 55 (Jump Game).

        Each nums[i] is the maximum jump length from index i. Track the
        furthest reachable index while scanning left to right; if the scan
        ever passes it, the end is unreachable.

        :type nums: List[int]
        :rtype: bool
        """
        if not nums:
            # Original fell through and returned None here, breaking the
            # documented bool contract; an empty board is trivially "done".
            return True
        max_reach, end = 0, len(nums) - 1
        for idx, num in enumerate(nums):
            if max_reach < idx:
                # Current index cannot be reached at all.
                return False
            if max_reach >= end:
                return True
            max_reach = max(max_reach, idx + num)
        # Unreachable for non-empty input (one of the branches above always
        # fires at idx == end); kept so every path returns a bool.
        return False
| {
"content_hash": "c67bc16ec7217ff7ef4cb1115ae9d689",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 55,
"avg_line_length": 29.923076923076923,
"alnum_prop": 0.4652956298200514,
"repo_name": "ChuanleiGuo/AlgorithmsPlayground",
"id": "af01a3d15d729c7626d8c23e80fd1b44d933ff19",
"size": "389",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "LeetCodeSolutions/python/55_Jump_Game.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "8884"
},
{
"name": "C++",
"bytes": "58994"
},
{
"name": "Java",
"bytes": "441895"
},
{
"name": "Python",
"bytes": "335460"
}
],
"symlink_target": ""
} |
"""
Forward Simulation of Gradiometry Data for Magnetic Vector Models
=================================================================
Here we use the module *SimPEG.potential_fields.magnetics* to predict magnetic
gradiometry data for magnetic vector models. The simulation is performed on a
Tree mesh. For this tutorial, we focus on the following:
- How to define the survey when we want to measured multiple field components
- How to predict magnetic data in the case of remanence
- How to include surface topography
- How to construct tree meshes based on topography and survey geometry
- The units of the physical property model and resulting data
"""
#########################################################################
# Import Modules
# --------------
#
import numpy as np
from scipy.interpolate import LinearNDInterpolator
import matplotlib as mpl
import matplotlib.pyplot as plt
from discretize import TreeMesh
from discretize.utils import mkvc, refine_tree_xyz
from SimPEG.utils import plot2Ddata, model_builder, surface2ind_topo, mat_utils
from SimPEG import maps
from SimPEG.potential_fields import magnetics
# sphinx_gallery_thumbnail_number = 2
#############################################
# Topography
# ----------
#
# Here we define surface topography as an (N, 3) numpy array. Topography could
# also be loaded from a file.
#
[x_topo, y_topo] = np.meshgrid(np.linspace(-200, 200, 41), np.linspace(-200, 200, 41))
z_topo = -15 * np.exp(-(x_topo ** 2 + y_topo ** 2) / 80 ** 2)
x_topo, y_topo, z_topo = mkvc(x_topo), mkvc(y_topo), mkvc(z_topo)
xyz_topo = np.c_[x_topo, y_topo, z_topo]
#############################################
# Defining the Survey
# -------------------
#
# Here, we define survey that will be used for the simulation. Magnetic
# surveys are simple to create. The user only needs an (N, 3) array to define
# the xyz locations of the observation locations, the list of field components
# which are to be modeled and the properties of the Earth's field.
#
# Define the observation locations as an (N, 3) numpy array or load them.
x = np.linspace(-80.0, 80.0, 17)
y = np.linspace(-80.0, 80.0, 17)
x, y = np.meshgrid(x, y)
x, y = mkvc(x.T), mkvc(y.T)
fun_interp = LinearNDInterpolator(np.c_[x_topo, y_topo], z_topo)
z = fun_interp(np.c_[x, y]) + 10 # Flight height 10 m above surface.
receiver_locations = np.c_[x, y, z]
# Define the component(s) of the field we want to simulate as strings within
# a list. Here we measure the x, y and z derivatives of the Bz anomaly at
# each observation location.
components = ["bxz", "byz", "bzz"]
# Use the observation locations and components to define the receivers. To
# simulate data, the receivers must be defined as a list.
receiver_list = magnetics.receivers.Point(receiver_locations, components=components)
receiver_list = [receiver_list]
# Define the inducing field H0 = (intensity [nT], inclination [deg], declination [deg])
field_inclination = 60
field_declination = 30
field_strength = 50000
inducing_field = (field_strength, field_inclination, field_declination)
source_field = magnetics.sources.SourceField(
receiver_list=receiver_list, parameters=inducing_field
)
# Define the survey
survey = magnetics.survey.Survey(source_field)
##########################################################
# Defining an OcTree Mesh
# -----------------------
#
# Here, we create the OcTree mesh that will be used to predict magnetic
# gradiometry data for the forward simuulation.
#
dx = 5 # minimum cell width (base mesh cell width) in x
dy = 5 # minimum cell width (base mesh cell width) in y
dz = 5 # minimum cell width (base mesh cell width) in z
x_length = 240.0 # domain width in x
y_length = 240.0 # domain width in y
z_length = 120.0 # domain width in y
# Compute number of base mesh cells required in x and y
nbcx = 2 ** int(np.round(np.log(x_length / dx) / np.log(2.0)))
nbcy = 2 ** int(np.round(np.log(y_length / dy) / np.log(2.0)))
nbcz = 2 ** int(np.round(np.log(z_length / dz) / np.log(2.0)))
# Define the base mesh
hx = [(dx, nbcx)]
hy = [(dy, nbcy)]
hz = [(dz, nbcz)]
mesh = TreeMesh([hx, hy, hz], x0="CCN")
# Refine based on surface topography
mesh = refine_tree_xyz(
mesh, xyz_topo, octree_levels=[2, 2], method="surface", finalize=False
)
# Refine box base on region of interest
xp, yp, zp = np.meshgrid([-100.0, 100.0], [-100.0, 100.0], [-80.0, 0.0])
xyz = np.c_[mkvc(xp), mkvc(yp), mkvc(zp)]
mesh = refine_tree_xyz(mesh, xyz, octree_levels=[2, 2], method="box", finalize=False)
mesh.finalize()
##########################################################
# Create Magnetic Vector Intensity Model (MVI)
# --------------------------------------------
#
# Magnetic vector models are defined by three-component effective
# susceptibilities. To create a magnetic vector
# model, we must
#
# 1) Define the magnetic susceptibility for each cell. Then multiply by the
# unit vector direction of the inducing field. (induced contribution)
# 2) Define the remanent magnetization vector for each cell and normalize
# by the magnitude of the Earth's field (remanent contribution)
# 3) Sum the induced and remanent contributions
# 4) Define as a vector np.r_[chi_1, chi_2, chi_3]
#
#
# Define susceptibility values for each unit in SI
background_susceptibility = 0.0001
sphere_susceptibility = 0.01
# Find cells active in the forward modeling (cells below surface)
ind_active = surface2ind_topo(mesh, xyz_topo)
# Define mapping from model to active cells
nC = int(ind_active.sum())
model_map = maps.IdentityMap(nP=3 * nC) # model has 3 parameters for each cell
# Define susceptibility for each cell
susceptibility_model = background_susceptibility * np.ones(ind_active.sum())
ind_sphere = model_builder.getIndicesSphere(np.r_[0.0, 0.0, -45.0], 15.0, mesh.gridCC)
ind_sphere = ind_sphere[ind_active]
susceptibility_model[ind_sphere] = sphere_susceptibility
# Compute the unit direction of the inducing field in Cartesian coordinates
field_direction = mat_utils.dip_azimuth2cartesian(field_inclination, field_declination)
# Multiply susceptibility model to obtain the x, y, z components of the
# effective susceptibility contribution from induced magnetization.
susceptibility_model = np.outer(susceptibility_model, field_direction)
# Define the effective susceptibility contribution for remanent magnetization to have a
# magnitude of 0.006 SI, with inclination -45 and declination 90
remanence_inclination = -45.0
remanence_declination = 90.0
remanence_susceptibility = 0.01
remanence_model = np.zeros(np.shape(susceptibility_model))
effective_susceptibility_sphere = (
remanence_susceptibility
* mat_utils.dip_azimuth2cartesian(remanence_inclination, remanence_declination)
)
remanence_model[ind_sphere, :] = effective_susceptibility_sphere
# Define effective susceptibility model as a vector np.r_[chi_x, chi_y, chi_z]
plotting_model = susceptibility_model + remanence_model
model = mkvc(plotting_model)
# Plot Effective Susceptibility Model
fig = plt.figure(figsize=(9, 4))
plotting_map = maps.InjectActiveCells(mesh, ind_active, np.nan)
plotting_model = np.sqrt(np.sum(plotting_model, axis=1) ** 2)
ax1 = fig.add_axes([0.1, 0.12, 0.73, 0.78])
mesh.plot_slice(
plotting_map * plotting_model,
normal="Y",
ax=ax1,
ind=int(mesh.h[1].size / 2),
grid=True,
clim=(np.min(plotting_model), np.max(plotting_model)),
)
ax1.set_title("MVI Model at y = 0 m")
ax1.set_xlabel("x (m)")
ax1.set_ylabel("z (m)")
ax2 = fig.add_axes([0.85, 0.12, 0.05, 0.78])
norm = mpl.colors.Normalize(vmin=np.min(plotting_model), vmax=np.max(plotting_model))
cbar = mpl.colorbar.ColorbarBase(ax2, norm=norm, orientation="vertical")
cbar.set_label(
"Effective Susceptibility Amplitude (SI)", rotation=270, labelpad=15, size=12
)
###################################################################
# Simulation: Gradiometry Data for an MVI Model
# ---------------------------------------------
#
# Here we predict magnetic gradiometry data for an effective susceptibility model
# in the case of remanent magnetization.
#
# Define the forward simulation. By setting the 'store_sensitivities' keyword
# argument to "forward_only", we simulate the data without storing the sensitivities
simulation = magnetics.simulation.Simulation3DIntegral(
survey=survey,
mesh=mesh,
chiMap=model_map,
ind_active=ind_active,
model_type="vector",
store_sensitivities="forward_only",
)
# Compute predicted data for some model
dpred = simulation.dpred(model)
n_data = len(dpred)
# Plot
fig = plt.figure(figsize=(13, 4))
v_max = np.max(np.abs(dpred))
ax1 = fig.add_axes([0.1, 0.15, 0.25, 0.78])
plot2Ddata(
receiver_list[0].locations,
dpred[0:n_data:3],
ax=ax1,
ncontour=30,
clim=(-v_max, v_max),
contourOpts={"cmap": "bwr"},
)
ax1.set_title("$dBz/dx$")
ax1.set_xlabel("x (m)")
ax1.set_ylabel("y (m)")
ax2 = fig.add_axes([0.36, 0.15, 0.25, 0.78])
cplot2 = plot2Ddata(
receiver_list[0].locations,
dpred[1:n_data:3],
ax=ax2,
ncontour=30,
clim=(-v_max, v_max),
contourOpts={"cmap": "bwr"},
)
cplot2[0].set_clim((-v_max, v_max))
ax2.set_title("$dBz/dy$")
ax2.set_xlabel("x (m)")
ax2.set_yticks([])
ax3 = fig.add_axes([0.62, 0.15, 0.25, 0.78])
cplot3 = plot2Ddata(
receiver_list[0].locations,
dpred[2:n_data:3],
ax=ax3,
ncontour=30,
clim=(-v_max, v_max),
contourOpts={"cmap": "bwr"},
)
cplot3[0].set_clim((-v_max, v_max))
ax3.set_title("$dBz/dz$")
ax3.set_xlabel("x (m)")
ax3.set_yticks([])
ax4 = fig.add_axes([0.88, 0.15, 0.02, 0.79])
norm = mpl.colors.Normalize(vmin=-v_max, vmax=v_max)
cbar = mpl.colorbar.ColorbarBase(
ax4, norm=norm, orientation="vertical", cmap=mpl.cm.bwr
)
cbar.set_label("$nT/m$", rotation=270, labelpad=15, size=12)
plt.show()
| {
"content_hash": "39ebab1ff7c8fa00893fc91e6a526fd5",
"timestamp": "",
"source": "github",
"line_count": 293,
"max_line_length": 87,
"avg_line_length": 33.525597269624576,
"alnum_prop": 0.6715870915199023,
"repo_name": "simpeg/simpeg",
"id": "fff7fe4e236a76bd897967541112d161f9990141",
"size": "9823",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tutorials/04-magnetics/plot_2b_magnetics_mvi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "685"
},
{
"name": "Python",
"bytes": "3476002"
}
],
"symlink_target": ""
} |
from pyparsing import ZeroOrMore, Regex
from PyProjManCore.proj_man import ProjMan
import json
import os
# This parser object should handle the following entities
# action
# object(s)
# parameters
# each verb, its objects, and its parameters are linked to an API call defined in the api.py module
class OpCode:
    """Structured message exchanged between the parser and ProjMan.

    Carries a numeric verb, a list of resolved parameters (primitive ints
    or literal strings), the feedback text shown to the user, and a numeric
    error/status code.
    """
    def __init__(self, verb=None, parameters=None, feedback=None, error=None):
        """Create an op code.

        :param verb: numeric primitive identifying the action
        :param parameters: list of parameter primitives / literal strings
        :param feedback: human-readable feedback text
        :param error: numeric error/status code
        """
        self._verb = verb
        self._inp_params = parameters
        self._feedback = feedback
        self._error = error
        # When True, feed_back() must not overwrite self._feedback.
        self._override_feedback = False
    @property
    def error(self):
        """Numeric error/status code of this op code."""
        return self._error
    def __str__(self):
        ret = "Opcode Verb : [{}] Parameters:".format(self._verb)
        # BUGFIX: _inp_params defaults to None; iterate an empty list in that
        # case instead of raising TypeError.
        for par in self._inp_params or []:
            ret = ret + "{}, ".format(par)
        ret = ret + " Feedback : {} Error : {}".format(self._feedback, self._error)
        return ret
def tokenizer(inp: str):
    """Split *inp* into tokens.

    Bracketed (``[...]``) and double-quoted (``"..."``) groups are kept as
    single tokens; everything else splits on spaces.  Quote and bracket
    characters are stripped from every resulting token.
    """
    grammar = ZeroOrMore(Regex(r'\[[^]]*\]') | Regex(r'"[^"]*"') | Regex(r'[^ ]+'))
    strip_table = str.maketrans('', '', '"\'[]')
    return [raw.translate(strip_table) for raw in grammar.parseString(inp)]
class PyProjManParser:
    """PyProjMan parser used by the text based UI to interact with PyProjMan API.

    Verbs, parameters, primitive codes, reply/error tables and help strings
    are loaded from a JSON config file; user input is tokenized, mapped to
    OpCode objects and dispatched to ProjMan via hook().
    """
    def __init__(self, project: ProjMan = None, config_file=None):
        """This should contact API object first, but for Alpha release, it will directly contact ProjMan.

        :param project: ProjMan instance to operate on (a fresh one is created when None)
        :param config_file: path to the parser JSON config; defaults to ./data/parser.json
        """
        # Load Parser Data
        try:
            if config_file is None:
                config_file = os.path.join(os.getcwd(), 'data', 'parser.json')
            with open(config_file) as parser_data_file:
                parser_data = json.load(parser_data_file)
            self._version = parser_data['Version']
            self._release = parser_data['Release']
            self._ignore_case = parser_data['IgnoreCase']
            raw_primitives = parser_data['Primitives']
            self._primitives = {}
            for k, v in raw_primitives.items():
                self._primitives[k] = int(v, 16)  # Evaluate hex string -> int code
            del raw_primitives
            self._verbs = parser_data['Verbs']
            self._parameters = parser_data['Parameters']
            self._decorators = parser_data['Decorators']
            raw_reply = parser_data['Reply']
            self._reply = {}
            # JSON object keys are strings; convert back to int so the tables
            # can be looked up by numeric code.
            for k, v in raw_reply.items():
                self._reply[int(k)] = v
            del raw_reply
            raw_err_codes = parser_data['ErrorCodes']
            self._error_codes = {}
            for k, v in raw_err_codes.items():
                self._error_codes[int(k)] = v
            del raw_err_codes
            self._help_strings = parser_data['HelpString']
            self._valid = True
        except FileNotFoundError:
            print("""
            Fatal Error:
            Configuration file not found
            Exiting
            """)
            # NOTE(review): when the config is missing, the table attributes
            # above stay unset; callers should check .valid before use.
            self._valid = False
        # Load initial project
        if project is None:
            project = ProjMan()
        self._project = project
    def listen(self):
        """Interactive read-parse-dispatch-print loop; returns on EXIT verb."""
        _active = True
        op_code = OpCode()
        while _active:
            inp = input("PyProjMan > ")  # 1: read input from user : in a form of a string
            op_code._override_feedback = False
            op_code = self.parse(inp)  # 2: pass input string to parse() function : get op code
            op_code = self.hook(op_code)  # 3: pass op code to hook() : get feedback as op code
            self.feed_back(op_code)  # 4: display feedback op code to user via feed_back()
            if op_code._verb == self._primitives['EXIT']:
                _active = False
    def parse(self, inp: str):
        """Parse an input string into an OpCode.

        :returns: op code mapped to function calls from ProjMan
        """
        op_code = OpCode()
        # TODO:
        # 1: split string into a verb and rest of string
        # NOTE(review): an empty input line makes tokens[0] below raise
        # IndexError — confirm listen() can never pass an empty line.
        tokens = tokenizer(inp)
        # 2: lookup verb in the _verbs dictionary, and place its numeric value
        if tokens[0].lower() in self._verbs:
            op_code = OpCode(verb=self._primitives[self._verbs[tokens[0].lower()]])
            op_code._inp_params = []
            # Known keywords become primitive ints; everything else is kept
            # as a literal string parameter.
            for token in tokens[1:]:
                if token.lower() in self._parameters:
                    op_code._inp_params.append(self._primitives[self._parameters[token.lower()]])
                else:
                    op_code._inp_params.append(token)
        else:
            op_code = OpCode(error=903)  # Invalid Verb
        # 3: slice input string into tokens; keywords and literals
        # literals identified by double quotes, single quotes or square brackets surrounding them
        # 4: lookup non literal tokens and replace them with values from _objects and _decoration dictionary
        # 5: check for syntax maps
        # if all is ok, return op code object
        # otherwise, return a syntax error op code object
        return op_code
    def feed_back(self, op_code: OpCode):
        """Print feedback for *op_code* to the end user.

        :returns: the (possibly updated) op code
        """
        # Reverse lookup Op Code into text using the _reply dictionary, and construct feedback
        # this should return a string
        if not op_code._override_feedback:
            op_code._feedback = self.lookup_primative(op_code._verb)
            for param in op_code._inp_params:
                if isinstance(param, int):
                    op_code._feedback = op_code._feedback + " {}".format(self.lookup_primative(param))
                else:
                    op_code._feedback = op_code._feedback + " {}".format(param)
        print("{} : {}".format(self._error_codes[op_code._error], op_code._feedback))
        return op_code
    def hook(self, op_code):
        """Dispatch an op code to ProjMan — what really interacts with ProjMan.

        :argument op_code
        :returns op code
        """
        # TODO:
        # Lookup verb in a large switch statement, and make a call based on the numeric value,
        # passing objects, and literals as arguments
        # collect response, and convert it into op code, and return it to caller function
        # Create a Project
        # NOTE(review): assumes CREATE arrives with at least two parameters;
        # a bare "create" would raise IndexError here — confirm.
        if op_code._verb == self._primitives['CREATE'] and op_code._inp_params[0] == self._primitives['PROJECT']:
            self._project = ProjMan(name=op_code._inp_params[1])
            op_code._error=100
        elif op_code._verb == self._primitives['EXIT']:
            op_code._error=200
        elif op_code._verb == self._primitives['MANUAL']:
            op_code = self.help(op_code)
        else:
            op_code._error=901
        return op_code
    def lookup_primative(self, primative_value):
        """Reverse-lookup a primitive int code into its key string, or None."""
        if primative_value in self._primitives.values():
            for k,v in self._primitives.items():
                if v == primative_value:
                    return k
        return None
    @property
    def valid(self):
        """True when the parser config file was loaded successfully."""
        return self._valid
    @property
    def release(self):
        """Release label from the config file."""
        return self._release
    @property
    def version(self):
        """Version string from the config file."""
        return self._version
    def help(self, op_code):
        """Provides help on how to use the CLI.

        With a parameter: help text for that keyword; without: version banner
        plus the list of known verbs.
        """
        if len(op_code._inp_params) > 0:
            inquiry = op_code._inp_params[0]
            # Numeric parameters are mapped back to their primitive key first.
            if isinstance(inquiry, int):
                if inquiry in self._primitives.values():
                    inquiry = self.lookup_primative(inquiry)
            # Lookup keywords
            # In verbs
            if inquiry in self._verbs.keys():
                inquiry = self._verbs[inquiry]
            # In Parameters
            if inquiry in self._parameters.keys():
                inquiry = self._parameters[inquiry]
            if inquiry in self._primitives.keys():
                op_code._feedback = "{} ({}) \n{}".format(op_code._inp_params[0],inquiry, self.help_primitive(inquiry))
            else:
                op_code._feedback = 'Unable to find keyword {}'.format(op_code._inp_params[0])
                op_code._error = 902
            op_code._override_feedback = True
            return op_code
        else:
            op_code._feedback = "PyProjMan version {} - {} release!".format(self.version, self.release)
            op_code._feedback = op_code._feedback + "\nList of keywords :"
            for verb_key, primitive_key in self._verbs.items():
                op_code._feedback = op_code._feedback + "\n  {} \t- \t{}".format(verb_key,self.help_primitive(primitive_key))
            op_code._error = 100
            op_code._override_feedback = True
            return op_code
    def help_primitive(self, prim):
        """Given a primitive key, get its help text."""
        # NOTE(review): the trailing "| {" on this line looks like corruption
        # from a file export; the statement presumably ends at [prim].
        return self._help_strings[prim] | {
"content_hash": "2a4c5346b5d7cdbf2df9c3028ba79a33",
"timestamp": "",
"source": "github",
"line_count": 227,
"max_line_length": 125,
"avg_line_length": 39.140969162995596,
"alnum_prop": 0.5698368036015757,
"repo_name": "aawadall/PyProjMan",
"id": "b01cb3db400cba65d148ceefd4e33bf5537b34d9",
"size": "8885",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PyProjManUI/parser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33470"
}
],
"symlink_target": ""
} |
import time
import json
import threading
import delegator
from tpl import errors
from pykit.path.temp import TempPipe
# Shell variables injected into every ShellExec command; shell_execute()
# fills in 'pipe' with the temporary named-pipe path before each run.
DEFAULT_SHELL_VARS = {
    'pipe': None
}
class ShellExec(threading.Thread):
    """Background thread that runs a shell command via ``delegator``.

    Each entry of *injected_vars* is prepended to the command as a
    ``KEY=VALUE`` shell assignment joined with ``&&``.
    """
    def __init__(self, command, injected_vars=None):
        """
        :param command: shell command line to execute
        :param injected_vars: optional mapping of shell variables to inject
        """
        self.command = command
        # BUGFIX: default to an empty dict so run() no longer crashes with
        # AttributeError when no variables are injected.
        self.injected_vars = injected_vars or {}
        super(ShellExec, self).__init__(name='shell_exec_tpl_constructor')
    def run(self):
        """Execute the command; raise ShellExecError on a non-zero exit."""
        command = self.command
        for key, value in self.injected_vars.items():
            # Prepend "key=value && " so the variable is visible to the command.
            command = ' && '.join([
                '{}={}'.format(key, value),
                command
            ])
        command_res = delegator.run(command, block=True)
        if command_res.return_code != 0:
            # NOTE: raised on the worker thread; the spawner will not see it
            # unless it joins and inspects the thread.
            raise errors.ShellExecError(command_res.return_code,
                                        command_res.out,
                                        command_res.err)
    # FIXME: timeout support still needs to be added
def shell_execute(command):
    """Run *command* on a worker thread and return the JSON context it
    writes to a temporary named pipe.

    The pipe path is exposed to the command through the shared
    DEFAULT_SHELL_VARS mapping (key ``pipe``).

    :returns: the parsed JSON object read from the pipe
    """
    with TempPipe() as tp:
        DEFAULT_SHELL_VARS['pipe'] = tp.pipe_path
        ShellExec(command, DEFAULT_SHELL_VARS).start()
        # HACK: give the command a moment to open and write the pipe; real
        # synchronisation (plus the FIXME'd timeout) would be better.
        time.sleep(0.5)
        c = tp.pipe.read()
        # BUGFIX: str.strip() returns a new string — the original discarded
        # its result, making the call a no-op.
        context = json.loads(c.strip())
        return context
| {
"content_hash": "27227c60890edd77d70659f8f821fa76",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 74,
"avg_line_length": 28.571428571428573,
"alnum_prop": 0.57,
"repo_name": "faycheng/tpl",
"id": "90e74af6ac970d5f2da36a9a1f6d8a56c5b8d708",
"size": "1240",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tpl/sandbox.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22720"
},
{
"name": "Shell",
"bytes": "1714"
}
],
"symlink_target": ""
} |
'''A convenient class for parsing HTML pages.'''
from __future__ import unicode_literals
from HTMLParser import HTMLParser
import logging
import re
from RSSvk.core import Error
LOG = logging.getLogger(__name__)  # module-level logger
LOG.setLevel(logging.INFO)  # default level; parsing internals log at DEBUG
class HTMLPageParser(HTMLParser):
    '''A convenient class for parsing HTML pages.

    Subclasses receive a cleaned, balanced tag stream by overriding the
    handle_root* hooks, or by installing 'new_tag_handler' /
    'data_handler' / 'end_tag_handler' callbacks on tag dictionaries.
    '''
    # NOTE: Python 2 code (HTMLParser module, xrange, func_name).
    tag_name_regex = '[a-zA-Z][-.a-zA-Z0-9:_]*'
    '''A regular expression for tag name.'''
    attribute_name_regex = tag_name_regex
    '''A regular expression for attribute name.'''
    # All whitespace is stripped from the pattern below by re.sub, so the
    # layout here is purely for readability.
    tag_attrs_regex = re.sub(r'\s*', '', r'''
        (?:\s+
            ''' + attribute_name_regex + r'''
            (?:\s*=\s*
                (?:
                    '[^']*'
                    |"[^"]*"
                    |[^'"/>\s]+
                )
            )?
        )*
    ''')
    '''A regular expression for tag attributes.'''
    script_regex = re.compile('<script' + tag_attrs_regex + '>.*?</script>', re.DOTALL | re.IGNORECASE)
    '''A regular expression for matching scripts.'''
    __invalid_tag_attr_spacing_regex = re.compile(r'''
        (
            # Tag name
            <''' + tag_name_regex + r'''
            # Zero or several attributes
            ''' + tag_attrs_regex + r'''
            # Two attributes without a space between them
            \s+ # whitespace before attribute name
            ''' + attribute_name_regex + r''' # attribute name
            \s*=\s* # value indicator
            (?:
                '[^']*' # LITA-enclosed value
                |"[^"]*" # LIT-enclosed value
            )
        )
        ([^\s>]) # Do not include / to make the preparation replacement for __invalid_tag_attr_regex
    ''', re.VERBOSE)
    '''
    A regular expression for matching a common error in specifying tag
    attributes.
    '''
    __invalid_tag_attr_regex = re.compile(r'''
        (
            # Tag name
            <''' + tag_name_regex + r'''
            # Zero or several attributes
            ''' + tag_attrs_regex + r'''
        )
        \s+(?:
            # Invalid characters instead of an attribute
            [^\sa-zA-Z/>]\S*
            |
            # Sole slash
            /\s
            |
            # Invalid characters starting from slash instead of an attribute
            /[^>\s]+
        )
    ''', re.VERBOSE)
    '''
    A regular expression for matching HTML errors like:
    <a class="app photo"/app2322149_58238998?from_id=2381857&loc=addneighbour onclick="return cur.needLoginBox()">
    '''
    __empty_tags = 'area|base|basefont|br|col|frame|hr|img|input|link|meta|param'
    '''A list of all HTML empty tags.'''
    __misopened_tag_regex = re.compile(r'<(' + __empty_tags + tag_attrs_regex + r')\s*>', re.IGNORECASE)
    '''A regular expression for matching opened tags that should be closed.'''
    __tag_stack = None
    '''A stack of currently opened HTML tags.'''
    __cur_data = None
    '''
    Accumulates data between handle_charref(), handle_entityref() and
    handle_data() calls.
    '''
    def __init__(self):
        # Old-style base-class init (HTMLParser is an old-style class in py2).
        HTMLParser.__init__(self)
    def handle_charref(self, name):
        '''Handles a character reference of the form &#ref;.'''
        # Re-encoded verbatim and folded into the pending data buffer.
        self.__accumulate_data('&#' + name + ';')
    def handle_data(self, data):
        '''Handles data.'''
        self.__accumulate_data(data)
    def handle_endtag(self, tag_name):
        '''Handles end of a tag.'''
        self.__handle_data_if_exists()
        if self.__get_cur_tag()['name'] == tag_name:
            self.__close_tag(self.__tag_stack.pop())
        else:
            # Mismatched end tag: walk down the stack looking for it and
            # force-close everything opened above it.
            for tag_id in xrange(len(self.__tag_stack) - 1, -1, -1):
                if self.__tag_stack[tag_id]['name'] == tag_name:
                    for tag in reversed(self.__tag_stack[tag_id + 1:]):
                        self.__close_tag(tag, forced = True)
                        self.__tag_stack.pop()
                    self.__close_tag(self.__tag_stack.pop())
                    break
            else:
                # End tag with no matching open tag: ignore it.
                LOG.debug('Dropping excess end tag "%s"...', tag_name)
    def handle_entityref(self, name):
        '''Handles a general entity reference of the form &name;.'''
        self.__accumulate_data('&' + name + ';')
    def handle_root_data(self, tag, data):
        '''Handles data inside of the root of the document.'''
        LOG.debug('%s', data)
    def handle_root(self, tag, attrs, empty):
        '''Handles a tag inside of the root of the document.'''
        LOG.debug('<%s %s%s>', tag['name'], attrs, '/' if empty else '')
        # By default, child tags of the root inherit the root handlers.
        tag['new_tag_handler'] = self.handle_root
        tag['data_handler'] = self.handle_root_data
        tag['end_tag_handler'] = self.handle_root_end
    def handle_root_end(self, tag):
        '''Handles end of the root of the document.'''
        LOG.debug('</%s>', tag['name'])
    def handle_startendtag(self, tag, attrs):
        '''Handles start of an XHTML-style empty tag.'''
        self.__handle_data_if_exists()
        self.__handle_start_tag(tag, attrs, True)
    def handle_starttag(self, tag, attrs):
        '''Handles start of a tag.'''
        self.__handle_data_if_exists()
        self.__handle_start_tag(tag, attrs, False)
    def reset(self):
        '''Resets the parser.'''
        HTMLParser.reset(self)
        self.__tag_stack = [{
            # Add fake root tag
            'name': None,
            'new_tag_handler': self.handle_root,
            'data_handler': self.handle_root_data,
            'end_tag_handler': self.handle_root_end,
        }]
    def parse(self, html):
        '''Parses the specified HTML page.'''
        html = self.__fix_html(html)
        self.reset()
        try:
            # Run the parser
            self.feed(html)
            self.close()
        finally:
            # Close all unclosed tags
            for tag in self.__tag_stack[1:]:
                self.__close_tag(tag, True)
    def __accumulate_data(self, data):
        '''
        Accumulates data between handle_charref(), handle_entityref() and
        handle_data() calls.
        '''
        if self.__cur_data is None:
            self.__cur_data = data
        else:
            self.__cur_data += data
    def __close_tag(self, tag, forced = False):
        '''Forces closing of an unclosed tag.'''
        if forced:
            LOG.debug('Force closing of unclosed tag "%s".', tag['name'])
        else:
            LOG.debug('Tag %s closed.', tag)
        if 'end_tag_handler' in tag:
            tag['end_tag_handler'](tag)
        LOG.debug('Current tag: %s.', self.__get_cur_tag())
    def __fix_html(self, html):
        '''Fixes various things that may confuse the Python's HTML parser.'''
        # Scripts are dropped entirely before parsing.
        html = self.script_regex.sub('', html)
        loop_replacements = (
            lambda html: self.__invalid_tag_attr_spacing_regex.subn(r'\1 \2', html),
            lambda html: self.__invalid_tag_attr_regex.subn(r'\1 ', html),
        )
        # Each fixer is applied repeatedly until it makes no more changes,
        # capped at 1000 passes to guard against pathological input.
        for loop_replacement in loop_replacements:
            for i in xrange(0, 1000):
                html, changed = loop_replacement(html)
                if not changed:
                    break
            else:
                raise Error('Too many errors in the HTML or infinite loop.')
        # Rewrite open-only empty tags (<br>) as self-closing (<br />).
        html = self.__misopened_tag_regex.sub(r'<\1 />', html)
        return html
    def __get_cur_tag(self):
        '''Returns currently opened tag.'''
        return self.__tag_stack[-1]
    def __handle_data_if_exists(self):
        '''Handles accumulated data (if exists).'''
        data = self.__cur_data
        if data is None:
            return
        self.__cur_data = None
        tag = self.__get_cur_tag()
        handler = tag.get('data_handler')
        if handler is not None:
            LOG.debug('Data "%s" in "%s" with handler %s.',
                data, tag['name'], handler.func_name)
            handler(tag, data)
    def __handle_start_tag(self, tag_name, attrs, empty):
        '''Handles start of any tag.'''
        tag = { 'name': tag_name }
        handler = self.__get_cur_tag().get('new_tag_handler')
        if handler is not None:
            attrs = self.__parse_attrs(attrs)
            LOG.debug('Start tag: %s %s with handler %s.',
                tag, attrs, handler.func_name)
            handler(tag, attrs, empty)
        # Empty (self-closing) tags are never pushed on the stack.
        if not empty:
            self.__tag_stack.append(tag)
    def __parse_attrs(self, attrs_tuple):
        '''Converts tag attributes from a tuple to a dictionary.'''
        attrs = {}
        for attr, value in attrs_tuple:
            attrs[attr.lower()] = value
        return attrs
| {
"content_hash": "a13a95e167ee884a54f83f3363658631",
"timestamp": "",
"source": "github",
"line_count": 317,
"max_line_length": 128,
"avg_line_length": 27.548895899053626,
"alnum_prop": 0.5151723348219398,
"repo_name": "Densvin/RSSVK",
"id": "c61b449a86af6e59ef2678f448173dfb6fbe8a8e",
"size": "8733",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vkfeed/tools/html_parser.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "1448"
},
{
"name": "Python",
"bytes": "93586"
},
{
"name": "Shell",
"bytes": "255"
}
],
"symlink_target": ""
} |
""" Cisco_IOS_XE_process_cpu_oper
This module contains a collection of YANG definitions for
monitoring CPU usage of processes in a Network Element.Copyright (c) 2016\-2017 by Cisco Systems, Inc.All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
# NOTE(review): this class looks like ydk-gen auto-generated YANG binding
# code; prefer regenerating from the YANG model over hand-editing.
class CpuUsage(object):
    """
    .. attribute:: cpu_utilization
    	Data nodes for Total CPU Utilizations Statistics
    	**type**\: :py:class:`CpuUtilization <ydk.models.cisco_ios_xe.Cisco_IOS_XE_process_cpu_oper.CpuUsage.CpuUtilization>`
    """
    _prefix = 'process-cpu-ios-xe-oper'
    _revision = '2017-02-07'
    def __init__(self):
        self.cpu_utilization = CpuUsage.CpuUtilization()
        self.cpu_utilization.parent = self
    class CpuUtilization(object):
        """
        Data nodes for Total CPU Utilizations Statistics.
        .. attribute:: cpu_usage_processes
        	Data nodes for System wide Process CPU usage Statistics
        	**type**\: :py:class:`CpuUsageProcesses <ydk.models.cisco_ios_xe.Cisco_IOS_XE_process_cpu_oper.CpuUsage.CpuUtilization.CpuUsageProcesses>`
        .. attribute:: five_minutes
        	Busy percentage in last five minutes
        	**type**\: int
        	**range:** 0..255
        	**units**\: percent
        .. attribute:: five_seconds
        	Busy percentage in last 5\-seconds
        	**type**\: int
        	**range:** 0..255
        	**units**\: percent
        .. attribute:: five_seconds_intr
        	Interrupt busy percentage in last 5\-seconds
        	**type**\: int
        	**range:** 0..255
        	**units**\: percent
        .. attribute:: one_minute
        	Busy percentage in last one minute
        	**type**\: int
        	**range:** 0..255
        	**units**\: percent
        """
        _prefix = 'process-cpu-ios-xe-oper'
        _revision = '2017-02-07'
        def __init__(self):
            self.parent = None
            self.cpu_usage_processes = CpuUsage.CpuUtilization.CpuUsageProcesses()
            self.cpu_usage_processes.parent = self
            self.five_minutes = None
            self.five_seconds = None
            self.five_seconds_intr = None
            self.one_minute = None
        class CpuUsageProcesses(object):
            """
            Data nodes for System wide Process CPU usage Statistics.
            .. attribute:: cpu_usage_process
            	The list of software processes on the device
            	**type**\: list of :py:class:`CpuUsageProcess <ydk.models.cisco_ios_xe.Cisco_IOS_XE_process_cpu_oper.CpuUsage.CpuUtilization.CpuUsageProcesses.CpuUsageProcess>`
            """
            _prefix = 'process-cpu-ios-xe-oper'
            _revision = '2017-02-07'
            def __init__(self):
                self.parent = None
                self.cpu_usage_process = YList()
                self.cpu_usage_process.parent = self
                self.cpu_usage_process.name = 'cpu_usage_process'
            class CpuUsageProcess(object):
                """
                The list of software processes on the device.
                .. attribute:: pid  <key>
                	Process\-ID of the process
                	**type**\: int
                	**range:** 0..4294967295
                .. attribute:: name  <key>
                	The name of the process
                	**type**\: str
                .. attribute:: avg_run_time
                	Average Run\-time of this process (uSec)
                	**type**\: int
                	**range:** 0..18446744073709551615
                	**units**\: micro-seconds
                .. attribute:: five_minutes
                	Busy percentage in last five minutes
                	**type**\: :py:class:`Decimal64<ydk.types.Decimal64>`
                	**range:** \-92233720368547758.08..92233720368547758.07
                	**units**\: percent
                .. attribute:: five_seconds
                	Busy percentage in last 5\-seconds
                	**type**\: :py:class:`Decimal64<ydk.types.Decimal64>`
                	**range:** \-92233720368547758.08..92233720368547758.07
                	**units**\: percent
                .. attribute:: invocation_count
                	Total number of invocations
                	**type**\: int
                	**range:** 0..4294967295
                .. attribute:: one_minute
                	Busy percentage in last one minute
                	**type**\: :py:class:`Decimal64<ydk.types.Decimal64>`
                	**range:** \-92233720368547758.08..92233720368547758.07
                	**units**\: percent
                .. attribute:: total_run_time
                	Total Run\-time of this process (mSec)
                	**type**\: int
                	**range:** 0..18446744073709551615
                	**units**\: milli-seconds
                .. attribute:: tty
                	TTY bound to by the process
                	**type**\: int
                	**range:** 0..65535
                """
                _prefix = 'process-cpu-ios-xe-oper'
                _revision = '2017-02-07'
                def __init__(self):
                    self.parent = None
                    self.pid = None
                    self.name = None
                    self.avg_run_time = None
                    self.five_minutes = None
                    self.five_seconds = None
                    self.invocation_count = None
                    self.one_minute = None
                    self.total_run_time = None
                    self.tty = None
                @property
                def _common_path(self):
                    # Both list keys must be set before an XPath can be built.
                    if self.pid is None:
                        raise YPYModelError('Key property pid is None')
                    if self.name is None:
                        raise YPYModelError('Key property name is None')
                    return '/Cisco-IOS-XE-process-cpu-oper:cpu-usage/Cisco-IOS-XE-process-cpu-oper:cpu-utilization/Cisco-IOS-XE-process-cpu-oper:cpu-usage-processes/Cisco-IOS-XE-process-cpu-oper:cpu-usage-process[Cisco-IOS-XE-process-cpu-oper:pid = ' + str(self.pid) + '][Cisco-IOS-XE-process-cpu-oper:name = ' + str(self.name) + ']'
                def is_config(self):
                    ''' Returns True if this instance represents config data else returns False '''
                    return False
                def _has_data(self):
                    # True when any leaf of this list entry is populated.
                    if self.pid is not None:
                        return True
                    if self.name is not None:
                        return True
                    if self.avg_run_time is not None:
                        return True
                    if self.five_minutes is not None:
                        return True
                    if self.five_seconds is not None:
                        return True
                    if self.invocation_count is not None:
                        return True
                    if self.one_minute is not None:
                        return True
                    if self.total_run_time is not None:
                        return True
                    if self.tty is not None:
                        return True
                    return False
                @staticmethod
                def _meta_info():
                    from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_process_cpu_oper as meta
                    return meta._meta_table['CpuUsage.CpuUtilization.CpuUsageProcesses.CpuUsageProcess']['meta_info']
            @property
            def _common_path(self):
                return '/Cisco-IOS-XE-process-cpu-oper:cpu-usage/Cisco-IOS-XE-process-cpu-oper:cpu-utilization/Cisco-IOS-XE-process-cpu-oper:cpu-usage-processes'
            def is_config(self):
                ''' Returns True if this instance represents config data else returns False '''
                return False
            def _has_data(self):
                # True when any contained list entry carries data.
                if self.cpu_usage_process is not None:
                    for child_ref in self.cpu_usage_process:
                        if child_ref._has_data():
                            return True
                return False
            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_process_cpu_oper as meta
                return meta._meta_table['CpuUsage.CpuUtilization.CpuUsageProcesses']['meta_info']
        @property
        def _common_path(self):
            return '/Cisco-IOS-XE-process-cpu-oper:cpu-usage/Cisco-IOS-XE-process-cpu-oper:cpu-utilization'
        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return False
        def _has_data(self):
            if self.cpu_usage_processes is not None and self.cpu_usage_processes._has_data():
                return True
            if self.five_minutes is not None:
                return True
            if self.five_seconds is not None:
                return True
            if self.five_seconds_intr is not None:
                return True
            if self.one_minute is not None:
                return True
            return False
        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_process_cpu_oper as meta
            return meta._meta_table['CpuUsage.CpuUtilization']['meta_info']
    @property
    def _common_path(self):
        return '/Cisco-IOS-XE-process-cpu-oper:cpu-usage'
    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return False
    def _has_data(self):
        if self.cpu_utilization is not None and self.cpu_utilization._has_data():
            return True
        return False
    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_process_cpu_oper as meta
        return meta._meta_table['CpuUsage']['meta_info']
| {
"content_hash": "d627b182cb49778981ec3a1a83a2e9bb",
"timestamp": "",
"source": "github",
"line_count": 343,
"max_line_length": 333,
"avg_line_length": 31.825072886297377,
"alnum_prop": 0.48882374496152436,
"repo_name": "111pontes/ydk-py",
"id": "5f4fcf5cc8bdd0208f09e747730dc372129cae02",
"size": "10916",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cisco-ios-xe/ydk/models/cisco_ios_xe/Cisco_IOS_XE_process_cpu_oper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "7226"
},
{
"name": "Python",
"bytes": "446117948"
}
],
"symlink_target": ""
} |
from django.test import TestCase
class mediacracyTest(TestCase):
    """
    Tests for mediacracy
    """
    # NOTE(review): class name should be PascalCase (MediacracyTest) per
    # PEP 8, but renaming would change the public test identifier.
    def test_mediacracy(self):
        # TODO: placeholder — exercises nothing yet.
        pass
| {
"content_hash": "24e6b60556853fcafd584e32a145cdcb",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 32,
"avg_line_length": 17.88888888888889,
"alnum_prop": 0.6086956521739131,
"repo_name": "sixpearls/django-mediacracy",
"id": "7a83bf1dec9ef0768ed6538fb89b605766ae4db1",
"size": "209",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mediacracy/tests.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "14696"
},
{
"name": "JavaScript",
"bytes": "9236"
},
{
"name": "Prolog",
"bytes": "3227"
},
{
"name": "Python",
"bytes": "50504"
},
{
"name": "Shell",
"bytes": "3178"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add the nullable ``usuario`` FK (to AUTH_USER_MODEL) on ``Config``."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('geral', '0031_fo2_parametro__juda__nulo'),
    ]
    operations = [
        migrations.AddField(
            model_name='config',
            name='usuario',
            # Nullable/blank so existing Config rows need no default value.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='usuário'),
        ),
    ]
| {
"content_hash": "c695b8d097f333a7cadd2d37ef7fb1e5",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 157,
"avg_line_length": 29.61904761904762,
"alnum_prop": 0.6672025723472669,
"repo_name": "anselmobd/fo2",
"id": "6de57da5f3d1f9a591f220109713c8ed20258550",
"size": "696",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/geral/migrations/0032_config_usuario.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "160899"
},
{
"name": "HTML",
"bytes": "855985"
},
{
"name": "JavaScript",
"bytes": "203109"
},
{
"name": "PLSQL",
"bytes": "2762"
},
{
"name": "Python",
"bytes": "3228268"
},
{
"name": "Shell",
"bytes": "2161"
}
],
"symlink_target": ""
} |
import json
import base64
import urllib
import urllib2
import ConfigParser
import subprocess
from baidu_nlu.srv import *
import rospy
'''
Baidu ASR Service need to register in http://yuyin.baidu.com
create your application, after that you will have your cuid,apikey,secretkey
all the information write in the nlu.cfg file like this:
[baidu]
CUID = your cuid
API_KEY = your application key
SECRET_KEY = your secret key
'''
def get_config():
    """Read the Baidu voice credentials from the local nlu.cfg file.

    Returns a ``(CUID, API_KEY, SECRET_KEY)`` tuple taken from the
    ``[baidu]`` section.
    """
    cfg = ConfigParser.ConfigParser()
    cfg.read('nlu.cfg')
    section = 'baidu'
    return tuple(cfg.get(section, option) for option in ('CUID', 'API_KEY', 'SECRET_KEY'))
def get_baidu_auth():
    """Exchange the API key/secret for a Baidu OAuth access token.

    Performs a network request against Baidu's OAuth endpoint and returns
    the ``access_token`` string.  If the request fails, download() returns
    None and json.loads() raises here.
    """
    CUID,API_KEY,SECRET_KEY = get_config()
    auth_url = 'https://openapi.baidu.com/oauth/2.0/token?grant_type=client_credentials&client_id='+API_KEY+'&client_secret='+SECRET_KEY
    res = json.loads(download(auth_url))
    return res['access_token']
def download(link, data=None, headers=None):
    """Fetch *link* with urllib2 and return the raw response body.

    :param link: URL to request
    :param data: optional POST body (a GET is issued when None)
    :param headers: optional dict of extra request headers
    :returns: the response body, or None on any request error
    """
    try:
        req = urllib2.Request(link, data, headers or {})
        response = urllib2.urlopen(req, None, 15)  # 15 second timeout
        return response.read()
    except Exception:
        # BUGFIX: was a bare `except:` which also swallowed SystemExit and
        # KeyboardInterrupt; failures are still reported by returning None.
        return None
def asr_test(msg):
    """Record 3 seconds from the microphone and return Baidu's transcript.

    ``msg`` is currently unused; the audio always comes from a fresh
    ``arecord`` capture written to in.wav.  Relies on the module-level
    ``access_token`` set in __main__ — TODO confirm.  Returns the first
    recognition hypothesis as a UTF-8 encoded string.
    """
    CUID,API_KEY,SECRET_KEY = get_config()
    # Capture 3s of 16 kHz mono S16_LE audio from ALSA device plughw:1,0.
    subprocess.call('''arecord -r 16000 -f S16_LE -D 'plughw:1,0' -d 3 > in.wav''',shell=True)
    with open('in.wav','rb') as fr:
        content = fr.read()
    # The Baidu REST API takes the audio base64-encoded in a JSON payload.
    base_data = base64.b64encode(content)
    params = {}
    params['format'] = 'wav'
    params['rate'] = 16000
    params['channel'] = 1
    params['token'] = access_token
    params['cuid'] = CUID
    params['len'] = len(content)
    params['speech'] = base_data
    data = json.dumps(params)
    headers = {
        "Content-Length":len(data),
        "Content-Type":"application/json; charset=utf-8",
    }
    url ='http://vop.baidu.com/server_api'
    res = json.loads(download(url,data,headers))
    return res['result'][0].encode('utf-8')
def handle_asr(req):
    # ROS service callback: log the incoming request, run one recognition
    # round and wrap the transcript in the service response type.
    print 'request is ',req.controller_json
    return ASRResponse(asr_test(req.controller_json))
def asr_server():
    """Start the 'asr' ROS service node and block until shutdown."""
    rospy.init_node('asr_server')
    s = rospy.Service('asr',ASR,handle_asr)
    print 'ready to listen'
    rospy.spin()
if __name__ == '__main__':
    # Obtain the OAuth token once at startup, then serve requests forever.
    access_token = get_baidu_auth()
    asr_server()
| {
"content_hash": "c9ad69fc9d39dde89c2c1ffbade6f3cb",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 136,
"avg_line_length": 25.568181818181817,
"alnum_prop": 0.648,
"repo_name": "giphub/ros_test_afr",
"id": "150e3b01adc4179f3ca8d733ec997906fe3b2869",
"size": "2287",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/asr_server.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "22191"
},
{
"name": "Shell",
"bytes": "631"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from appProcafe.models import Department, Unit, Section, Risk, Position, Location,\
Paysheet, Type, CourseRequest, CourseChangeRequest
from appProcafe.models import Telephone, Document, Takes, Course
from appProcafe.models import User, UserProfile, UserApplication, RemoveRequest
from django.utils.translation import ugettext_lazy as _
# Lookup-style models exposed with the default ModelAdmin.
admin.site.register(Department)
admin.site.register(Unit)
admin.site.register(Section)
admin.site.register(Type)
admin.site.register(Risk)
admin.site.register(Paysheet)
admin.site.register(Position)
admin.site.register(Location)
class TelephoneAdmin(admin.ModelAdmin):
    # Change-list columns: owner and phone number.
    list_display = ('user_ID', 'number')
admin.site.register(Telephone, TelephoneAdmin)
class TakesAdmin(admin.ModelAdmin):
    # Change-list columns for the user/course enrolment records.
    list_display = ('user_ID', 'course_ID', 'term', 'year', 'status')
admin.site.register(Takes, TakesAdmin)
class CourseAdmin(admin.ModelAdmin):
    # Show name and duration; allow searching courses by name.
    list_display = ('name', 'number_hours')
    search_fields = ['name']
admin.site.register(Course, CourseAdmin)
class UserProfileInLine(admin.StackedInline):
    # Inline editor so the profile is edited on the User admin page.
    # NOTE(review): name would conventionally be spelled "Inline".
    model = UserProfile
    can_delete = False
    verbose_name_plural = 'Perfil'
class MyUserAdmin(UserAdmin):
    # Restricted fieldsets shown to non-superuser staff: group membership
    # only; no permission or superuser flags are editable.
    staff_fieldsets = (
        (None, {'fields': ('username', 'password')}),
        (_('Personal info'), {'fields': ('first_name', 'last_name', 'email')}),
        # No permissions
        (_('Permissions'), {'fields': ('is_active', 'is_staff')}),
        (_('Important dates'), {'fields': ('last_login', 'date_joined')}),
        (_('Grupos'), {'fields': ('groups',)}),
    )
    inlines = (UserProfileInLine,)
    list_display = ('username', 'email', 'get_hours')
    def get_hours(self, obj):
        # Extra change-list column: completed hours from the user's profile.
        # NOTE(review): issues one profile query per listed row — confirm
        # this is acceptable for the expected list sizes.
        return UserProfile.objects.get(user_id=obj.id).finished_hours
    get_hours.short_description = "Horas completadas"
    def change_view(self, request, *args, **kwargs):
        # for non-superuser
        if not request.user.is_superuser:
            # NOTE(review): mutating self.fieldsets on a shared admin
            # instance is not safe under concurrent requests — confirm.
            try:
                self.fieldsets = self.staff_fieldsets
                response = UserAdmin.change_view(self, request, *args, **kwargs)
            finally:
                # Reset fieldsets to its original value
                self.fieldsets = UserAdmin.fieldsets
            return response
        else:
            return UserAdmin.change_view(self, request, *args, **kwargs)
# Swap the stock User admin for the customised one above.
admin.site.unregister(User)
admin.site.register(User, MyUserAdmin)
class UserApplicationAdmin(admin.ModelAdmin):
    # Group the form into applicant data and the request's state.
    fieldsets = (
        ('Datos del Solicitante', {'fields': ('ID_number', 'USB_ID', 'first_name', 'last_name', 'birthdate', 'paysheet', 'type', 'sex', 'location', 'position', 'email')}),
        ('Solicitud', {'fields': ('request_date', 'status')}),
    )
admin.site.register(UserApplication, UserApplicationAdmin)
class RemoveRequestAdmin(admin.ModelAdmin):
    # Group the form into applicant data and the removal request's state.
    fieldsets = (
        ('Datos del Solicitante', {'fields': ('ID_number', 'USB_ID', 'first_name', 'last_name', 'email')}),
        ('Solicitud', {'fields': ('course_ID', 'request_type', 'request_date', 'status')}),
    )
admin.site.register(RemoveRequest, RemoveRequestAdmin)
| {
"content_hash": "be78b1f00780fdc54a4fb20a7fab9037",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 171,
"avg_line_length": 37.252873563218394,
"alnum_prop": 0.6460968836778772,
"repo_name": "pierorex/PROCAFE-SSL",
"id": "7af4a614a4f09e8901c13163445f321f3096ad80",
"size": "3266",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "procafe/appProcafe/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "880054"
},
{
"name": "HTML",
"bytes": "132534"
},
{
"name": "JavaScript",
"bytes": "838229"
},
{
"name": "Python",
"bytes": "74348"
}
],
"symlink_target": ""
} |
import re
from .table import Table
from .column import Column
from .schema_manager import SchemaManager
from .platforms.postgres_platform import PostgresPlatform
class PostgresSchemaManager(SchemaManager):
    """Schema introspection for PostgreSQL.

    Converts the raw column rows returned by the catalog queries into
    portable ``Column`` objects.
    """

    def _get_portable_table_column_definition(self, table_column):
        """Build a portable Column from one raw PostgreSQL column row.

        :param table_column: dict describing one column (keys such as
            ``type``, ``complete_type``, ``default``, ``isnotnull``,
            ``pri``, ``field`` ...)
        :return: a Column instance
        """
        if table_column['type'].lower() == 'varchar' or table_column['type'] == 'bpchar':
            # Pull the declared length out of e.g. "character varying(255)".
            length = re.sub(r'.*\(([0-9]*)\).*', '\\1', table_column['complete_type'])
            table_column['length'] = length
        autoincrement = False
        # Serial columns default to "nextval('seq_name'::regclass)".
        # The name is captured non-greedily so the closing quote is not
        # swallowed into the sequence name (the greedy form captured
        # "seq'::regclass" instead of "seq").
        match = re.match(r"^nextval\('?(.*?)'?(::.*)?\)$", str(table_column['default']))
        if match:
            table_column['sequence'] = match.group(1)
            table_column['default'] = None
            autoincrement = True
        # Strip "'value'::type" cast syntax down to the bare value.
        match = re.match(r"^'?([^']*)'?::.*$", str(table_column['default']))
        if match:
            table_column['default'] = match.group(1)
        if str(table_column['default']).find('NULL') == 0:
            table_column['default'] = None
        if 'length' in table_column:
            length = table_column['length']
        else:
            length = None
        if length == '-1' and 'atttypmod' in table_column:
            length = table_column['atttypmod'] - 4
        if length is None or int(length) <= 0:
            length = None
        fixed = None
        if 'name' not in table_column:
            table_column['name'] = ''
        precision = None
        scale = None
        db_type = table_column['type'].lower()
        # Use a distinct local name; "type" would shadow the builtin.
        column_type = self._platform.get_type_mapping(db_type)
        if db_type in ['smallint', 'int2']:
            length = None
        elif db_type in ['int', 'int4', 'integer']:
            length = None
        elif db_type in ['int8', 'bigint']:
            length = None
        elif db_type in ['bool', 'boolean']:
            if table_column['default'] == 'true':
                table_column['default'] = True
            if table_column['default'] == 'false':
                table_column['default'] = False
            length = None
        elif db_type == 'text':
            fixed = False
        elif db_type in ['varchar', 'interval', '_varchar']:
            fixed = False
        elif db_type in ['char', 'bpchar']:
            fixed = True
        elif db_type in ['float', 'float4', 'float8',
                         'double', 'double precision',
                         'real', 'decimal', 'money', 'numeric']:
            match = re.match(r'([A-Za-z]+\(([0-9]+),([0-9]+)\))', table_column['complete_type'])
            if match:
                # group(1) is the full "numeric(p,s)" text; the numeric
                # precision and scale are groups 2 and 3.  (The PHP
                # original used regex delimiters where this port has a
                # capture group, which shifted the indices by one.)
                precision = match.group(2)
                scale = match.group(3)
                length = None
        elif db_type == 'year':
            length = None
        if table_column['default']:
            match = re.match(r"('?([^']+)'?::)", str(table_column['default']))
            if match:
                # group(1) includes the quotes and the trailing "::";
                # the bare default value is group 2.
                table_column['default'] = match.group(2)
        options = {
            'length': length,
            'notnull': table_column['isnotnull'],
            'default': table_column['default'],
            'primary': table_column['pri'] == 't',
            'precision': precision,
            'scale': scale,
            'fixed': fixed,
            'unsigned': False,
            'autoincrement': autoincrement
        }
        column = Column(table_column['field'], column_type, options)
        return column

    def get_database_platform(self):
        """Return the DBAL platform object for PostgreSQL."""
        return PostgresPlatform()
| {
"content_hash": "3428c78ad6e38c267c81ef47cba9268d",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 95,
"avg_line_length": 32.99047619047619,
"alnum_prop": 0.5072170900692841,
"repo_name": "sdispater/eloquent",
"id": "e024a411845bbcce0ec359ea75376ca37297e493",
"size": "3489",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "eloquent/dbal/postgres_schema_manager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "741617"
}
],
"symlink_target": ""
} |
from operator import attrgetter
from django.views.generic import DetailView
from ohashi.shortcuts import get_object_or_none
from apps.accounts.views import QuicklinksMixin
from .models import Group, Idol, Membership, Staff
class GroupDetailView(QuicklinksMixin, DetailView):
    """Detail page for a group, with membership breakdown and discography."""

    queryset = Group.objects.all()
    template_name = 'people/group_detail.html'

    def get_context_data(self, **kwargs):
        """Split memberships into leader / active / former / former-leader
        buckets and attach discography querysets to the context."""
        context = super(GroupDetailView, self).get_context_data(**kwargs)
        # We need to split memberships into four groups. The active
        # leader, the active members, the former members and the
        # former leaders.
        target = self.object.ended
        memberships = self.object.memberships.order_by('started', 'idol__birthdate').select_related('idol', 'group')
        # Evaluate each derived collection once and reuse it, instead of
        # re-running the comprehension (and, for lineup(), the query) for
        # both the list and its count.
        active = [m for m in memberships if m.ended is None]
        former = [m for m in memberships if m.ended]
        former_leaders = sorted(
            [m for m in memberships if m.ended and m.is_leader and m.leadership_started is not None],
            key=attrgetter('leadership_started'))
        lineup = [m for m in memberships.lineup(target=target)]
        context['fact'] = self.object.facts.order_by('?').first()
        context['memberships'] = {
            'active_count': len(active),
            'inactive': [m for m in memberships.inactive(target=target)],
            'inactive_count': len(former),
            'leader': get_object_or_none(Membership.objects.select_related('idol'), group=self.object.pk, ended__isnull=True, is_leader=True),
            'leaders': former_leaders,
            'leader_count': len(former_leaders),
            'lineup': lineup,
            'lineup_count': len(lineup),
        }
        context['albums'] = self.object.albums.prefetch_related('editions', 'participating_idols', 'participating_groups')
        context['events'] = self.object.events.all()
        context['singles'] = self.object.singles.prefetch_related('editions', 'participating_idols', 'participating_groups')
        return context
class IdolDetailView(QuicklinksMixin, DetailView):
    """Detail page for a single idol."""

    model = Idol
    template_name = 'people/idol_detail.html'

    def get_context_data(self, **kwargs):
        """Attach discography, a random fact and past memberships."""
        context = super(IdolDetailView, self).get_context_data(**kwargs)
        related = ('editions', 'participating_idols', 'participating_groups')
        context['albums'] = self.object.albums.prefetch_related(*related)
        context['events'] = self.object.events.all()
        context['fact'] = self.object.facts.order_by('?').first()
        context['memberships'] = self.object.memberships.select_related('group')[1:]
        context['singles'] = self.object.singles.prefetch_related(*related)
        return context
class StaffDetailView(DetailView):
    # NOTE(review): template_name is empty — rendering this view will fail
    # until a template path is supplied; confirm whether it is routed yet.
    queryset = Staff.objects.all()
    template_name = ''
| {
"content_hash": "69935bba5ccc6cb5abe7415570a01ac0",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 158,
"avg_line_length": 50.2,
"alnum_prop": 0.6726427622841965,
"repo_name": "hello-base/web",
"id": "dcd92d502e9a20e9e0c3d44449f83206d3b132db",
"size": "3012",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/people/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "155440"
},
{
"name": "CoffeeScript",
"bytes": "3290"
},
{
"name": "HTML",
"bytes": "187789"
},
{
"name": "Handlebars",
"bytes": "580"
},
{
"name": "JavaScript",
"bytes": "21286"
},
{
"name": "Python",
"bytes": "345982"
},
{
"name": "Ruby",
"bytes": "352"
}
],
"symlink_target": ""
} |
import ast
import re
import six
import pep8
"""
Guidelines for writing new hacking checks
- Use only for Manila specific tests. OpenStack general tests
should be submitted to the common 'hacking' module.
- Pick numbers in the range M3xx. Find the current test with
the highest allocated number and then pick the next value.
- Keep the test method code in the source file ordered based
on the M3xx value.
- List the new rule in the top level HACKING.rst file
- Add test cases for each new rule to manila/tests/test_hacking.py
"""
# Files that explicitly import the _() translation function; populated as
# check_explicit_underscore_import() scans each file.
UNDERSCORE_IMPORT_FILES = []
# LOG calls at each level that are not wrapped in a bare string / quote
# (i.e. missing a translation helper).
log_translation = re.compile(
    r"(.)*LOG\.(audit|error|info|critical|exception)\(\s*('|\")")
log_translation_LC = re.compile(
    r"(.)*LOG\.(critical)\(\s*(_\(|'|\")")
log_translation_LE = re.compile(
    r"(.)*LOG\.(error|exception)\(\s*(_\(|'|\")")
log_translation_LI = re.compile(
    r"(.)*LOG\.(info)\(\s*(_\(|'|\")")
log_translation_LW = re.compile(
    r"(.)*LOG\.(warning|warn)\(\s*(_\(|'|\")")
# A LOG call whose message IS wrapped in _() — used to detect use of _
# without an explicit import.
translated_log = re.compile(
    r"(.)*LOG\.(audit|error|info|warn|warning|critical|exception)"
    "\(\s*_\(\s*('|\")")
string_translation = re.compile(r"[^_]*_\(\s*('|\")")
# Import forms that bring _ into scope.
underscore_import_check = re.compile(r"(.)*import _$")
underscore_import_check_multi = re.compile(r"(.)*import (.)*_, (.)*")
# We need this for cases where they have created their own _ function.
custom_underscore_check = re.compile(r"(.)*_\s*=\s*(.)*")
# Legacy "oslo." namespace imports (should be "oslo_").
oslo_namespace_imports = re.compile(r"from[\s]*oslo[.](.*)")
dict_constructor_with_list_copy_re = re.compile(r".*\bdict\((\[)?(\(|\[)")
assert_no_xrange_re = re.compile(r"\s*xrange\s*\(")
assert_True = re.compile(r".*assertEqual\(True, .*\)")
assert_None = re.compile(r".*assertEqual\(None, .*\)")
class BaseASTChecker(ast.NodeVisitor):
    """Provides a simple framework for writing AST-based checks.

    Subclasses implement ``visit_*`` methods like any other AST visitor
    and call ``self.add_error(offending_node)`` when they detect a
    problem; the error location is taken from the node itself.
    Subclasses should also set the ``CHECK_DESC`` class attribute to a
    human readable error message.
    """

    CHECK_DESC = 'No check message specified'

    def __init__(self, tree, filename):
        """Created automatically by pep8.

        :param tree: an AST tree
        :param filename: name of the file being analyzed
            (ignored by our checks)
        """
        self._tree = tree
        self._errors = []

    def run(self):
        """Called automatically by pep8; return the accumulated errors."""
        self.visit(self._tree)
        return self._errors

    def add_error(self, node, message=None):
        """Record an error (for pep8) at *node*'s position."""
        self._errors.append((
            node.lineno,
            node.col_offset,
            message or self.CHECK_DESC,
            self.__class__,
        ))

    def _check_call_names(self, call_node, names):
        """Return True when *call_node* is a plain call to one of *names*."""
        return (isinstance(call_node, ast.Call)
                and isinstance(call_node.func, ast.Name)
                and call_node.func.id in names)
def no_translate_debug_logs(logical_line, filename):
    """Check for 'LOG.debug(_('

    As per our translation policy,
    https://wiki.openstack.org/wiki/LoggingStandards#Log_Translation
    we shouldn't translate debug level logs.

    * This check assumes that 'LOG' is a logger.
    * Use filename so we can start enforcing this in specific folders instead
      of needing to do so all at once.

    M319
    """
    prefix = "LOG.debug(_("
    if logical_line.startswith(prefix):
        yield (0, "M319 Don't translate debug level logs")
class CheckLoggingFormatArgs(BaseASTChecker):
    """Check for improper use of logging format arguments.

    LOG.debug("Volume %s caught fire and is at %d degrees C and climbing.",
              ('volume1', 500))

    The format arguments should not be a tuple as it is easy to miss.
    """

    CHECK_DESC = 'M310 Log method arguments should not be a tuple.'
    LOG_METHODS = [
        'debug', 'info',
        'warn', 'warning',
        'error', 'exception',
        'critical', 'fatal',
        'trace', 'log'
    ]

    def _find_name(self, node):
        """Return the fully qualified name of a Name or Attribute node."""
        if isinstance(node, ast.Name):
            return node.id
        if (isinstance(node, ast.Attribute)
                and isinstance(node.value, (ast.Name, ast.Attribute))):
            parent = self._find_name(node.value)
            if parent is None:
                return None
            return parent + '.' + node.attr
        if isinstance(node, six.string_types):
            return node
        # Anything else (Subscript, Call, ...) has no simple dotted name.
        return None

    def visit_Call(self, node):
        """Look for 'LOG.*(...)' calls whose arguments include a tuple."""
        func = node.func
        # Only attribute calls on a Name/Attribute (like LOG.debug(...))
        # are interesting; anything else is passed through untouched.
        if not (isinstance(func, ast.Attribute)
                and isinstance(func.value, (ast.Name, ast.Attribute))):
            return super(CheckLoggingFormatArgs, self).generic_visit(node)

        obj_name = self._find_name(func.value)
        method_name = func.attr

        # obj must be a logger instance and method must be a log helper
        if obj_name != 'LOG' or method_name not in self.LOG_METHODS:
            return super(CheckLoggingFormatArgs, self).generic_visit(node)

        # the call must have arguments
        if not len(node.args):
            return super(CheckLoggingFormatArgs, self).generic_visit(node)

        # any argument should not be a tuple
        for arg in node.args:
            if isinstance(arg, ast.Tuple):
                self.add_error(arg)

        return super(CheckLoggingFormatArgs, self).generic_visit(node)
def validate_log_translations(logical_line, physical_line, filename):
    """Ensure LOG.* calls are wrapped in the matching translation helper."""
    # Translations are not required in the test and tempest
    # directories.
    if ("manila/tests" in filename or "manila_tempest_tests" in filename or
            "contrib/tempest" in filename):
        return
    if pep8.noqa(physical_line):
        return
    # (pattern, message) pairs checked in order; each match yields its own
    # error, exactly as the original sequence of ifs did.
    checks = (
        (log_translation_LC,
         "M327: LOG.critical messages require translations `_LC()`!"),
        (log_translation_LE,
         "M328: LOG.error and LOG.exception messages require translations "
         "`_LE()`!"),
        (log_translation_LI,
         "M329: LOG.info messages require translations `_LI()`!"),
        (log_translation_LW,
         "M330: LOG.warning messages require translations `_LW()`!"),
        (log_translation,
         "M331: Log messages require translations!"),
    )
    for pattern, msg in checks:
        if pattern.match(logical_line):
            yield (0, msg)
def check_explicit_underscore_import(logical_line, filename):
    """Check for explicit import of the _ function

    We need to ensure that any files that are using the _() function
    to translate logs are explicitly importing the _ function. We
    can't trust unit test to catch whether the import has been
    added so we need to check for it here.
    """
    # Build a list of the files that have _ imported. No further
    # checking needed once it is found.
    # NOTE: relies on the module-level UNDERSCORE_IMPORT_FILES list being
    # shared across calls — once a file's import is seen, later lines in
    # that file are never flagged.
    if filename in UNDERSCORE_IMPORT_FILES:
        pass
    elif (underscore_import_check.match(logical_line) or
          underscore_import_check_multi.match(logical_line) or
          custom_underscore_check.match(logical_line)):
        UNDERSCORE_IMPORT_FILES.append(filename)
    elif (translated_log.match(logical_line) or
          string_translation.match(logical_line)):
        yield(0, "M323: Found use of _() without explicit import of _ !")
class CheckForStrUnicodeExc(BaseASTChecker):
    """Checks for the use of str() or unicode() on an exception.

    This currently only handles the case where str() or unicode()
    is used in the scope of an exception handler. If the exception
    is passed into a function, returned from an assertRaises, or
    used on an exception created in the same scope, this does not
    catch it.
    """
    CHECK_DESC = ('M325 str() and unicode() cannot be used on an '
                  'exception. Remove or use six.text_type()')
    def __init__(self, tree, filename):
        super(CheckForStrUnicodeExc, self).__init__(tree, filename)
        # Stack of names bound by enclosing `except ... as name` handlers.
        self.name = []
        # Call nodes already reported, to avoid duplicate errors.
        self.already_checked = []
    # Python 2
    def visit_TryExcept(self, node):
        # On Python 2 handler.name is a Name node (hence .id); ast.TryExcept
        # does not exist on Python 3, so this visitor only fires under py2.
        for handler in node.handlers:
            if handler.name:
                self.name.append(handler.name.id)
                super(CheckForStrUnicodeExc, self).generic_visit(node)
                # Pop the handler's name once its subtree has been visited.
                self.name = self.name[:-1]
            else:
                super(CheckForStrUnicodeExc, self).generic_visit(node)
    # Python 3
    def visit_ExceptHandler(self, node):
        # On Python 3 the handler name is a plain string.
        if node.name:
            self.name.append(node.name)
            super(CheckForStrUnicodeExc, self).generic_visit(node)
            self.name = self.name[:-1]
        else:
            super(CheckForStrUnicodeExc, self).generic_visit(node)
    def visit_Call(self, node):
        # Flag str(e)/unicode(e) where e is a live exception-handler name.
        if self._check_call_names(node, ['str', 'unicode']):
            if node not in self.already_checked:
                self.already_checked.append(node)
                if isinstance(node.args[0], ast.Name):
                    if node.args[0].id in self.name:
                        self.add_error(node.args[0])
        super(CheckForStrUnicodeExc, self).generic_visit(node)
class CheckForTransAdd(BaseASTChecker):
    """Checks for the use of concatenation on a translated string.

    Translations should not be concatenated with other strings, but
    should instead include the string being added to the translated
    string to give the translators the most information.
    """

    CHECK_DESC = ('M326 Translated messages cannot be concatenated. '
                  'String should be included in translated message.')
    TRANS_FUNC = ['_', '_LI', '_LW', '_LE', '_LC']

    def visit_BinOp(self, node):
        """Flag `_('...') + x` and `x + _('...')` additions (one per node)."""
        if isinstance(node.op, ast.Add):
            for operand in (node.left, node.right):
                if self._check_call_names(operand, self.TRANS_FUNC):
                    self.add_error(operand)
                    break
        super(CheckForTransAdd, self).generic_visit(node)
def check_oslo_namespace_imports(logical_line, physical_line, filename):
    """M333: require 'oslo_*' package names instead of the 'oslo.' namespace."""
    if pep8.noqa(physical_line):
        return
    if oslo_namespace_imports.match(logical_line):
        corrected = logical_line.replace('oslo.', 'oslo_')
        msg = ("M333: '%s' must be used instead of '%s'.") % (
            corrected, logical_line)
        yield(0, msg)
def dict_constructor_with_list_copy(logical_line):
    """M336: prefer a dict comprehension over dict() on key-value pairs."""
    if dict_constructor_with_list_copy_re.match(logical_line):
        yield (0, "M336: Must use a dict comprehension instead of a dict constructor"
                  " with a sequence of key-value pairs.")
def no_xrange(logical_line):
    """M337: xrange() is Python 2 only; use range() instead."""
    match = assert_no_xrange_re.match(logical_line)
    if match:
        yield (0, "M337: Do not use xrange().")
def validate_assertTrue(logical_line):
    """M313: prefer assertTrue(value) over assertEqual(True, value)."""
    if assert_True.match(logical_line):
        yield (0, "M313: Unit tests should use assertTrue(value) instead"
                  " of using assertEqual(True, value).")
def validate_assertIsNone(logical_line):
    """M312: prefer assertIsNone(value) over assertEqual(None, value)."""
    if assert_None.match(logical_line):
        yield (0, "M312: Unit tests should use assertIsNone(value) instead"
                  " of using assertEqual(None, value).")
def factory(register):
    """Register all Manila hacking checks with the pep8/flake8 framework."""
    all_checks = (
        validate_log_translations,
        check_explicit_underscore_import,
        no_translate_debug_logs,
        CheckForStrUnicodeExc,
        CheckLoggingFormatArgs,
        CheckForTransAdd,
        check_oslo_namespace_imports,
        dict_constructor_with_list_copy,
        no_xrange,
        validate_assertTrue,
        validate_assertIsNone,
    )
    for check in all_checks:
        register(check)
| {
"content_hash": "63b3c31b231d161e53f0ecb15b4749c6",
"timestamp": "",
"source": "github",
"line_count": 341,
"max_line_length": 78,
"avg_line_length": 36.63636363636363,
"alnum_prop": 0.628992235651965,
"repo_name": "NetApp/manila",
"id": "1778293ae2ad2f8049ce9b541a8b1d8c15fe2a96",
"size": "13099",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manila/hacking/checks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "953"
},
{
"name": "Python",
"bytes": "8111068"
},
{
"name": "Shell",
"bytes": "91643"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
# encoding: UTF-8
from tests.mock import Client
from tml.api.pagination import allpages
import unittest
from tml import Application
from tml.language import Language
from tml.translation import Key
from tml.dictionary.source import SourceDictionary
from tml.rules.contexts.gender import Gender
from tests.mock.fallback import Fallback
from json import dumps, loads
from tml.strings import to_string
class SourceTest(unittest.TestCase):
    """Test loading translations over API."""

    def setUp(self):
        self.client = Client.read_all()
        self.client.reloaded = []
        self.app = Application.load_default(self.client)
        self.lang = Language.load_by_locale(self.app, 'ru')

    def test_translate(self):
        """A source dictionary resolves a key to its translation options."""
        # Instantiated for its side effects only; the unused `f` binding
        # from the original is dropped but the construction is kept.
        Fallback()
        # Renamed from `dict`, which shadowed the builtin.
        source_dict = SourceDictionary(language=self.lang, source='index')
        translation = source_dict.get_translation(
            Key(label='{actor} give you {count}',
                description='somebody give you few apples',
                language=self.lang))
        # assertEqual: assertEquals is a deprecated unittest alias.
        self.assertEqual(3, len(translation.options), 'All options loaded')
        self.assertEqual(to_string('Маша любезно дала тебе 2 яблока'), translation.execute({'actor': Gender.female('Маша'), 'count': 2}, {}), 'Female few')

    def test_default(self):
        """An unknown key falls back to rendering its own label."""
        source_dict = SourceDictionary(language=self.lang, source='index')
        label = 'No translation'
        key = Key(label=label, language=self.lang, level=2)
        translation = source_dict.get_translation(key)
        self.assertEqual(label, translation.execute({}, {}), 'Use default translation')
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "fbb27e7c9ae8520ee38127d7811f8001",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 143,
"avg_line_length": 39.023809523809526,
"alnum_prop": 0.6571079926784624,
"repo_name": "translationexchange/tml-python",
"id": "30d5d2af49299b659330afa0ee03e616d7e35a7d",
"size": "1668",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/dictionary/source.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1262"
},
{
"name": "Python",
"bytes": "446575"
},
{
"name": "Shell",
"bytes": "294"
}
],
"symlink_target": ""
} |
import json
import tweepy
import sys
import os.path
import argparse
import datetime
import shutil
def get_file_name(data_path, file_name, create_path=False, warn=False):
# check the data path and file
data_file_name = os.path.join(data_path, file_name)
if create_path:
if not os.path.isdir(data_path):
if not os.path.exists(data_path):
# create the new path
os.makedirs(data_path)
else:
# path exists, but it's not a directory
raise ValueError('ERROR - data path (' + str(data_path) + ')already exists, but it is not a directory.')
if warn:
if not os.path.isfile(data_file_name):
print 'WARNING - file does not exist: ' + data_file_name
return data_file_name
def convert_tweet_v0_to_v0_1(v0_tweet, collection_datetime, collector_config):
    """Wrap a v0 tweet record in the v0.1 envelope format.

    The envelope records when the tweet was collected and by which
    consumer-key/access-token pair, and stores the original status
    untouched under 'status'.
    """
    collector_id = (collector_config['consumer']['key'] + '-' +
                    collector_config['access']['token'])
    envelope = {
        'collected at': collection_datetime.strftime('%Y-%m-%d %H:%M:%S'),
        'collected by': collector_id,
        'version': 0.1,
        'status': v0_tweet,
    }
    return envelope
def observed_tweets(data_file_name):
    """Return the set of tweet ids already present in the data file.

    Each line of the file is one JSON-encoded status. A missing file
    yields an empty set (with a warning) so collection can start fresh.
    """
    # get the set of tweets already in the data file to avoid duplicates
    tweet_ids = set()
    if os.path.isfile(data_file_name):
        with open(data_file_name, 'r') as data_file:
            for line in data_file:
                status = json.loads(line)
                tweet_ids.add(status['id'])
    else:
        print 'WARNING - Did not find an existing data file. Will create new file: ' + str(data_file_name)
    return tweet_ids
def main(args):
    """Convert a v0 tweet dump into the v0.1 envelope format.

    Reads the old collector config (for the collector id), backs up the
    old data file, then appends a converted copy of every old tweet to
    the new data file.
    """
    # load config file with Twitter account details
    if os.path.isfile(args.old_config_file):
        with open(args.old_config_file, 'r') as old_config_file:
            config = json.load(old_config_file)
    else:
        raise ValueError('ERROR - config file not found:' + str(args.old_config_file))
    # check the data path and file
    old_data_file_name = get_file_name(args.data_path, args.old_data_file, warn=True)
    new_data_file_name = get_file_name(args.data_path, args.new_data_file, create_path=True, warn=True)
    # create backup of old file
    backup_file_name = old_data_file_name + '.bak'
    shutil.copy(old_data_file_name, backup_file_name)
    # set the collection time that will added to all old tweets
    # since v0 didn't have a collection time, use a single value for all old tweets
    collection_time = datetime.datetime.strptime(args.collection_time, '%Y-%m-%d %H:%M:%S')
    # append (not overwrite): the new file may already hold converted tweets
    with open(new_data_file_name, 'a') as new_data_file:
        with open(old_data_file_name, 'r') as old_data_file:
            for line in old_data_file:
                old_status = json.loads(line)
                new_status = convert_tweet_v0_to_v0_1(old_status, collection_time, config)
                print >> new_data_file, json.dumps(new_status)
# Command-line entry point: parse arguments and run the conversion.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--old-config-file", help="config file name of the config used to collect the old tweets")
    parser.add_argument("--collection-time", help="collection time of the old tweets (YYYY-MM-DD HH:MM:SS)")
    parser.add_argument("--data-path", default="data", help="path to data file")
    parser.add_argument("--old-data-file", help="data file name containing old version tweets")
    parser.add_argument("--new-data-file", default="trump_dump.json", help="data file name for new version tweets - does not need to be empty")
    args = parser.parse_args()
    main(args)
"content_hash": "a1fd8b30bd4aeaf8134a5bbbb8839a89",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 145,
"avg_line_length": 38.702127659574465,
"alnum_prop": 0.6297416162726773,
"repo_name": "rwsharp/statement_collector",
"id": "a59ae5b6a944a81002c367c14c1c74ba97c3efb9",
"size": "3638",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "update_old_tweets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "51084"
},
{
"name": "Shell",
"bytes": "6667"
}
],
"symlink_target": ""
} |
import re
import os
import os.path
import pathlib
import json
import hashlib
import random
import itertools
from . import topologies
from . import tests
from . import analyze
from lib.bash import CommandBlock
from pydblite import Base
import warnings
# Compact one-line warning format: "Category: message".
warnings.formatwarning = lambda message, category, *a: '{}: {}\n'.format(category.__name__, message)
# Directory where generated test scripts, configs and results are stored.
results_dir = 'results/'
def generate_combinations(constants, variables, skip_fn=lambda x: False):
    """Generate a test case for every combination of the variable settings.

    :param constants: dict of settings shared by every combination
    :param variables: dict mapping setting name -> iterable of values;
        the cartesian product of the value iterables is enumerated
    :param skip_fn: predicate called with each settings dict; a truthy
        return skips that combination
    """
    n = 0
    variables_keys = tuple(variables.keys())
    for combi in itertools.product(*variables.values()):
        settings = constants.copy()
        for i, value in enumerate(combi):
            settings[variables_keys[i]] = value
        # Fixed: the original used `len(settings) is 0`, an identity
        # comparison on an int (implementation-dependent; SyntaxWarning
        # on modern Pythons). Truthiness is the correct emptiness test.
        if not settings:
            continue
        if not skip_fn(settings):
            generate(**settings)
            n += 1
    print('Generated {} cases. Now go and run run_test.py in order to run them!'.format(n))
def generate(**settings):
    """Write the shell script and config file for one test case.

    The sha1 of the JSON-encoded settings names the result files under
    results_dir. Returns (settings_hash, script).
    """
    settings_json = json.dumps(settings, sort_keys=True)
    settings_hash = hashlib.sha1(settings_json.encode('utf-8')).hexdigest()
    # The named topology function builds the network and returns the
    # script builder plus the two namespace endpoints.
    m, ns1, ns2 = getattr(topologies, settings['topology'])(**settings)
    settings['ns1'] = ns1
    settings['ns2'] = ns2
    settings['result_file'] = results_dir
    # if settings['collection'] is not None:
    # settings['result_file'] += settings['collection'] + '/'
    settings['result_file'] += settings_hash
    script = tests.begin()
    script += m.get_script()
    # Append the iperf invocation matching the requested test type.
    script += getattr(tests, settings['iperf_name'])(**settings)
    with open(settings['result_file'] + '.config', 'w') as f:
        f.write(settings_json)
    with open(settings['result_file'] + '.sh', 'w') as f:
        f.write(str(script))
    # Scripts are executed directly later, so make them executable.
    os.chmod(settings['result_file'] + '.sh', 0o777)
    return settings_hash, script
def run_all(target_repetitions=0, dry_run=False, debug=False, recursion_limit=10):
    """Run every generated script until each has target_repetitions results.

    With target_repetitions == 0 the current maximum per-script count is
    re-used as the target. Returns (runs scheduled, forecast time,
    effective target_repetitions).
    """
    # Cache of parsed scripts kept as a function attribute so recursive
    # calls do not re-read the files.
    if not hasattr(run_all, "scripts"):
        run_all.scripts = {} # hash => CommandBlock instance
    to_run = [] # each script will appear N times, so to reach the target_repetitions
    p = pathlib.Path(results_dir)
    max_count = 0
    forecast_time = 0
    for script_file in p.glob('*.sh'):
        settings_hash = script_file.stem
        count = 0
        try:
            # The .count sidecar file records completed repetitions.
            with script_file.parent.joinpath(settings_hash + '.count').open() as count_fh:
                count = int(count_fh.read())
        except (FileNotFoundError, ValueError):
            pass
        max_count = max(max_count, count)
        needed_repetitions = target_repetitions - count
        if needed_repetitions > 0:
            with script_file.open() as script_fh:
                run_all.scripts[settings_hash] = CommandBlock() + script_fh.read()
            to_run.extend([settings_hash] * needed_repetitions)
            forecast_time += run_all.scripts[settings_hash].execution_time() * needed_repetitions
    if target_repetitions == 0 and max_count > 0:
        # No explicit target: bring everything up to the current maximum.
        return run_all(max_count, dry_run, debug, recursion_limit)
    if not dry_run and len(to_run) > 0:
        random.shuffle(to_run) # the order becomes unpredictable: I think it's a good idea
        for current, settings_hash in enumerate(to_run, start=1):
            script = run_all.scripts[settings_hash]
            print("Running {} ({}/{})...".format(settings_hash, current, len(to_run)))
            script.run(add_bash=settings_hash if debug else False)
        if recursion_limit <= 0:
            warnings.warn("Hit recursion limit. Some tests didn't run correctly!")
        else:
            # Re-check: some runs may have failed to record a result.
            run_all(target_repetitions, False, debug, recursion_limit - 1)
    return len(to_run), forecast_time, target_repetitions
def get_results_db(clear_cache=False, skip=[]):
    """Load (or rebuild) the pydblite database of test results.

    The cache is rebuilt when it is missing, older than results_dir, or
    clear_cache is True. `skip` lists settings hashes to leave out.
    NOTE(review): the mutable default for `skip` is shared between calls;
    harmless while it is only read, but worth confirming.
    """
    cache_file = 'cache/results.pdl'
    db = Base(cache_file)
    if clear_cache or not db.exists() or os.path.getmtime(cache_file) < os.path.getmtime(results_dir):
        warnings.warn('Rebuilding results cache...')
        columns = set()
        rows = []
        p = pathlib.Path(results_dir)
        for config_file in p.glob('*.config'):
            with config_file.open() as config_fh:
                settings_hash = config_file.stem
                row = json.loads(config_fh.read())
                if settings_hash in skip:
                    continue
                row['hash'] = settings_hash
                tests_count = analyze.count(config_file.parent, settings_hash)
                row['iostat_cpu'], len_cpu_values = analyze.iostat_cpu(config_file.parent, settings_hash)
                row['iperf_result'], len_iperf_values = getattr(analyze, row['iperf_name'])(config_file.parent, settings_hash, row)
                # Every analyzer must report the same number of runs.
                if tests_count != len_cpu_values or tests_count != len_iperf_values:
                    raise analyze.AnalysisException('For test {}, mismatch in cardinality of tests between count ({}), iostat ({}) and iperf ({})'.format(settings_hash, tests_count, len_cpu_values, len_iperf_values), settings_hash)
                if len_iperf_values > 0:
                    # Warn when measured fairness could dip below the
                    # theoretical minimum for the configured parallelism.
                    min_fairness = row['iperf_result']['fairness'][0] - row['iperf_result']['fairness'][1]
                    if min_fairness < (1 - 1 / (2 * row['parallelism'])):
                        warnings.warn('For test {}, fairness has a critical value: {}.'.format(settings_hash, row['iperf_result']['fairness']), RuntimeWarning)
                columns = columns | set(row.keys())
                rows.append(row)
        db.create(*columns, mode='override')
        for r in rows:
            db.insert(**r)
        db.commit()
        warnings.warn('Results cache built.')
    else:
        warnings.warn('Reusing results cache.')
        db.open()
    return db
| {
"content_hash": "d5bda39c5228b311b7b3f15ad7d5fbfa",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 227,
"avg_line_length": 41.696296296296296,
"alnum_prop": 0.6180493871025049,
"repo_name": "serl/topoblocktest",
"id": "ac8e3539a9bbf2e7f01df1d065bc62009f5bfb2a",
"size": "5629",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/test_master.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "107096"
},
{
"name": "Shell",
"bytes": "6347"
}
],
"symlink_target": ""
} |
from q2_diversity_lib.alpha import METRICS
from ._pipeline import alpha, alpha_phylogenetic
from ._visualizer import (alpha_group_significance, alpha_correlation,
alpha_rarefaction,
alpha_rarefaction_unsupported_metrics)
# Public API of the alpha-diversity subpackage.
__all__ = [
    'alpha', 'alpha_phylogenetic', 'alpha_group_significance',
    'alpha_correlation', 'alpha_rarefaction', 'METRICS',
    'alpha_rarefaction_unsupported_metrics',
]
| {
"content_hash": "514cdd19abffe5fbfc4557cd17099c62",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 70,
"avg_line_length": 35,
"alnum_prop": 0.6681318681318681,
"repo_name": "jakereps/q2-diversity",
"id": "191456d9f4875ea96bc74602b384e6038c26e906",
"size": "805",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "q2_diversity/_alpha/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "15437"
},
{
"name": "JavaScript",
"bytes": "36122"
},
{
"name": "Makefile",
"bytes": "1523"
},
{
"name": "Python",
"bytes": "314381"
},
{
"name": "TeX",
"bytes": "5073"
}
],
"symlink_target": ""
} |
import sys, os, io, yaml, re, functools
# all open() calls have an implied encoding parameter, UTF-8 by default
# NOTE(review): __builtins__ is the builtins module only in the __main__
# module; in an imported module it is a dict, and __builtins__.open would
# fail — confirm this file is only ever run as a script.
open = functools.partial(__builtins__.open,
    encoding=os.environ.get("SOURCE_ENCODING", "utf8"))
# some constants to make our lives easier
TAB = "\t"
EOL = "\n"
# regular expression for matching dedent specifier: 1 or 2 digits
DIGITS = re.compile("[0-9][0-9]?")
# returns cached contents of a file if it exists, or reads it into the cache and
# returns it if not. cache is stored as a default parameter value.
#
# the cache is used only when there are duplicate snippets in two or more source files.
# only one copy of the file is ever cached (the first one that was found) so this shouldn't
# run up memory too much if you don't have many duplicate snippets.
def cached(path, cache={}):
    """Return the contents of *path*, memoized in the default-arg cache.

    The mutable default is deliberate: it persists across calls so each
    file is read at most once. Only duplicate snippets ever populate the
    cache, so memory use stays modest.
    """
    try:
        return cache[path]
    except KeyError:
        with open(path) as infile:
            cache[path] = infile.read().rstrip()
        return cache[path]
# a file-like object used to avoid writing duplicate snippets we've already extracted
# in situations where this is not an error
class DummyFile:
    """File-like sink that accepts and discards all writes.

    Used in place of a real file when a duplicate snippet has already
    been extracted and re-writing it is not an error.
    """

    def __init__(self, *args, **kwargs):
        pass

    def write(self, text):
        """Discard *text*."""

    def close(self):
        """Nothing to release."""
# auto-vivifying dict (like DefaultDict but we don't need to import it)
class AutoDict(dict):
    """Auto-vivifying dict: a missing key is created as an empty T()."""

    def __init__(self, T):
        self.T = T

    def __missing__(self, key):
        value = self.T()
        self[key] = value
        return value
# the class that does the snippet extraction. instantiate it passing the directory to
# which snippets should be extracted. call the instance with each source file.
class Snipper:
    # initialize Snipper
    def __init__(self, snippetdir):
        """Create a Snipper that extracts snippets into *snippetdir*."""
        self.dir = snippetdir # directory where snippets will be extracted
        self.source = {} # source file of each snippet
        self.count = 0 # number of snippets extracted
        self.errors = 0 # processing errors
        self.issues = AutoDict(set) # files with issues
        self.index = AutoDict(list) # index of snippets to files (this should probably be merged with self.source)
        # captured output when used as a context manager (see __enter__)
        self.log = io.StringIO()
    # if used as context manager, we capture the log instead of printing it as we go
    # by switching print() to print to a StringIO object
    def __enter__(self):
        """Redirect this module's print() into self.log for the context."""
        global print
        # rebind the module-level name so all subsequent output is captured
        print = functools.partial(__builtins__.print, file=self.log)
        return self
def __exit__(self, *args):
global print
print = __builtins__.print
# extract snippets from a single file
def __call__(self, path, markers):
print(path)
self.started = set() # snippets we've started in this source file
self.duplicates = set() # snippets we've determined are duplicates so we won't append/echo
tag = re.compile(f" *({'|'.join(markers)}) ?snippet-") # e.g. if ext is "// #" end up with regex: " *(#|//) ?snippet-"
self.files = {} # files currently open to write snippets
self.dedent = {} # amount of whitespace to strip from each line of snippet
self.path = path # source file we are working with (store it on instance so we can use it in error messages)
self.markers = markers
try:
with open(path) as infile: # read source file entirely into memory
self.text = infile.read().rstrip()
except IOError as ex:
print("ERROR reading file", ex)
self.errors += 1
return
if TAB in self.text and "snippet-start" in self.text:
print(" WARNING tab(s) found in %s may cause formatting problems in docs" % path)
# process each line in source file. self.i is the line we're on (for error messages)
for self.i, self.line in enumerate(self.text.splitlines(keepends=False), start=1):
line = self.line # use a local variable for a bit more performance
if tag.match(line): # line is a snippet directive, parse and process it
self.directive = line.split("snippet-")[1].split(":")[0].rstrip() # get e.g. append fron snippet-append
self.arg = line.split("[")[1].split("]")[0].rstrip() # get e.g. snippet-name from [snippet-name]
func = getattr(self, self.directive.lstrip("_"), None)
if func and callable(func):
func(self.arg) # call our method named same as directive (e.g. start(..) for snippet-start)
else:
print(" ERROR invalid directive snippet-%s at %s in %s" % (self.directive, self.i, self.path))
self.errors += 1
self.issues[path].add("invalid directive snippet-%s" % self.directive)
else: # line is NOT a snippet directive. write it to any open snippet files
for snip, file in self.files.items(): # for each snippet file we're writing, write the line
dedent = self.dedent[snip]
if dedent and line[:dedent].strip(): # is the text we want to strip to dedent all whitespace? error if not
print((" ERROR unable to dedent %s space(s) " % dedent) +
("in snippet %s at line %s in %s " % self._where) +
f"(only indented {len(line) - len(line.lstrip())} spaces)")
self.errors += 1
file.write(line[dedent:].rstrip() + EOL) # write it (strip whitespace at end just to be neat)
# done processing this file. make sure all snippets had snippet-end tags
for snip, file in self.files.items():
print(" ERROR snippet-end tag for %s missing in %s, extracted to end of file" % (snip, path))
file.close()
self.issues[path].add("snippet-end tag for %s missing" % snip)
self.errors += 1
# directive: beginning of snippet
def start(self, arg):
path = os.path.join(self.dir, f"{arg}.txt")
indicator = "EXTRACT"
opener = open
printer = print
if arg in self.files:
printer = lambda *a: print(" ERROR snippet %s already open at line %s in %s" % self._where)
self.issues[self.path].add("snippet %s opened multiple times")
self.errors += 1
elif os.path.isfile(path):
# if snippet output already exists, this is OK only if it source file has the same name and identical content
if self.path != self.source[arg] and self.path.rpartition("/")[2] == self.source[arg].rpartition("/")[2] and self.text == cached(self.source[arg]):
printer = lambda *a: print("WARNING redundant snippet %s at line %s in %s" % self._where)
self.duplicates.add(arg)
else:
printer = lambda *a: print(" ERROR duplicate snippet %s at line %s in %s" % self._where,
"(also in %s)" % self.source[arg])
pfxlen = len(os.path.commonprefix([self.path, self.source[arg]]))
path1 = self.source[arg][pfxlen:]
if "/" not in path1: path1 = self.source[arg]
path2 = self.path[pfxlen:]
if "/" not in path2: path2 = self.path
self.issues[self.path].add("%s also declared in %s" % (arg, path1))
self.issues[self.source[arg]].add("%s also declared in %s" % (arg, path2))
self.errors += 1
opener = DummyFile # don't write to the file, but still track it so we can detect missing snippet-end
else:
self.count += 1
# parse number at end of line as dedent value
self.dedent[arg] = int(DIGITS.search(self.line.rpartition("]")[2] + " 0").group(0))
self.files[arg] = opener(path, "w") # open real file or dummy
self.index[arg].append(self.path)
self.started.add(arg) # record that we started this snippet in this source file
if arg not in self.source: # record that we *first* saw this snippet in this source file
self.source[arg] = self.path
printer(" ", indicator, arg)
# directive: append to given file (for extracting multiple chunks of code to a single snippet)
def append(self, arg):
if arg in self.files: # is the file already open?
print(" ERROR snippet %s already open at line %s in %s" % self._where)
self.issues[self,path].add("snippet %s opened multiple times" % arg)
self.errors += 1
return
if arg not in self.started: # did we start this snippet in current source file?
print(" ERROR snippet file %s not found at line %s in %s" % self._where)
self.issues[self.path].add("snippet %s doesn't exist" % arg)
self.errors += 1
return
self.files[arg] = DummyFile() if arg in self.duplicates else open(os.path.join(self.dir, arg) + ".txt", "a")
print(" APPEND", arg)
# directive: end of snippet
def end(self, arg):
if arg in self.files:
self.files[arg].close()
del self.files[arg]
else:
print(" ERROR snippet file %s not open at %s in %s" % self._where)
self.issues[self.path].add("snippet-end tag for %s which is not open" % arg)
self.errors += 1
# directive: insert arg verbatim as a line into all currently open snippets
# useful for e.g. adding closing brackets to partial code block (could also use append for that)
def echo(self, arg):
arg = arg.rstrip() + EOL
if self.files:
for file in self.files.values():
file.write(arg)
else:
print(" ERROR echo '%s' outside snippet at %s in %s" % self._where)
self.issues[self.path].add("echo outside snippet")
self.errors += 1
# do-nothing handler used for directives that we ignore
def _nop(self, arg): return
# the aforementioned ignored directives
service = comment = keyword = sourceauthor = sourcedate = sourcedescription = sourcetype = sourcesyntax = _nop
# convenience property for returning error location tuple (used in error messages)
@property
def _where(self):
return self.arg, self.i, self.path
def err_exit(msg):
    """Print a fatal error message and terminate with exit status 1."""
    print("ERROR", msg)
    raise SystemExit(1)
# ----------------------------------------------------------------------------
if __name__ == "__main__":
    # read list of filenames from stdin first, so we don't get broken pipe if we error out
    stdin_lines = []
    if not sys.stdin.isatty():
        stdin_lines = sys.stdin.readlines()
    # get output directory from command line, or error
    if len(sys.argv) > 1 and os.path.isdir(sys.argv[1]):
        snippetdir = sys.argv[1]
    else:
        err_exit("snippet output directory not passed or does not exist")
    # get filename of extensions list from command line, or use default, then load it
    if len(sys.argv) > 2:
        commentfile = sys.argv[2]
    else:
        commentfile = "snippet-extensions.yml"
    # reports to be printed can be passed in via environment variable REPORTS
    # if this value is not set, print all reports
    reports = os.environ.get("REPORTS", "log issues index").lower().split()
    # if no directory specified, file is in same directory as script
    if "/" not in commentfile and "\\" not in commentfile:
        commentfile = os.path.join(os.path.dirname(__file__), commentfile)
    if not os.path.isfile(commentfile):
        err_exit("source file extension map %s not found" % commentfile)
    # the YAML map is {extension: "space-separated comment markers"}; split the
    # marker strings into lists up front so matching below is cheap
    with open(commentfile) as comments:
        MAP_EXT_MARKER = yaml.safe_load(comments)
        if not isinstance(MAP_EXT_MARKER, dict):
            err_exit("source map is not a key-value store (dictionary)")
        for k, v in MAP_EXT_MARKER.items():
            if isinstance(k, str) and isinstance(v, str):
                MAP_EXT_MARKER[k] = v.split()
            else:
                err_exit("key, value must both be strings; got %s, %s (%s, %s)" %
                         (k, v, type(k).__name__, type(v).__name__))
    print("==== extracting snippets in source files",
          " ".join(ex for ex in MAP_EXT_MARKER if ex and MAP_EXT_MARKER[ex]), "\n")
    print("reports:", " ".join(reports).upper(), end="\n\n")
    # initialize snipper instance and our counters
    with Snipper(snippetdir) as snipper:
        seen = processed = 0
        # main loop: for each file named on stdin, check to see if we should process it, and if so, do so
        for path in sorted(stdin_lines):
            path = path.strip()
            if not path:  # skip blank lines in input
                continue
            # make sure relative path starts with ./ so that e.g. /Makefile in the extensions map
            # can be used to match an entire filename.
            if not (path.startswith(("./", "/", "\\")) or  # already relative or Linux/Mac absolute path or UNC path
                    (path[0].isalpha() and path[1] == ":")):  # already Windows absolute path
                path = "./" + path
            if "/." in path or "\\." in path:  # skip hidden file or directory
                continue
            seen += 1  # count files seen (not hidden)
            # find first extension from extension map that matches current file
            # replace backslashes with forward slashes for purposes of matching so it works with Windows or UNC paths
            ext = next((ext for ext in MAP_EXT_MARKER if path.replace("\\", "/").endswith(ext)), None)
            markers = MAP_EXT_MARKER.get(ext, ())
            if markers:  # process it if we know its comment markers
                snipper(path, markers)
                processed += 1
    # files with issues report (files with most issues first)
    if "issues" in reports:
        if snipper.issues:
            print("====", len(snipper.issues), "file(s) with issues:", end="\n\n")
            for issue, details in sorted(snipper.issues.items(), key=lambda item: -len(item[1])):
                print(issue, end="\n    ")
                print(*sorted(details), sep="\n    ", end="\n\n")
        else:
            print("---- no issues found\n")
    # snippet index report (snippets that appear in the most files first)
    if "index" in reports:
        if snipper.index:
            print("====", len(snipper.index), "snippet(s) extracted from", processed, "files:", end="\n\n")
            for snippet, files in sorted(snipper.index.items(), key=lambda item: -len(item[1])):
                print(snippet, "declared in:", end="\n    ")
                print(*sorted(files), sep="\n    ", end="\n\n")
        else:
            print("--- no snippets were extracted\n")
    # print log
    if "log" in reports:
        print("==== Complete processing log\n")
        if processed:
            print(snipper.log.getvalue(), end="\n\n")
        else:
            print("No files were processed\n")
    # print summary
    print("====", snipper.count, "snippet(s) extracted from", processed,
          "source file(s) processed of", seen, "candidate(s) with", snipper.errors,
          "error(s) in", len(snipper.issues), "file(s)\n")
    # exit with nonzero status if we found any errors, so caller won't commit the snippets
    sys.exit(snipper.errors > 0)
| {
"content_hash": "537890388b476e740604f5f8eb7186b9",
"timestamp": "",
"source": "github",
"line_count": 311,
"max_line_length": 159,
"avg_line_length": 50.533762057877816,
"alnum_prop": 0.5775642657164672,
"repo_name": "awsdocs/aws-doc-sdk-examples",
"id": "234f6cefce096cea1a1367f58bdaf841815a93ac",
"size": "17362",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": ".github/extract-snippets/extract-snippets.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "476653"
},
{
"name": "Batchfile",
"bytes": "900"
},
{
"name": "C",
"bytes": "3852"
},
{
"name": "C#",
"bytes": "2051923"
},
{
"name": "C++",
"bytes": "943634"
},
{
"name": "CMake",
"bytes": "82068"
},
{
"name": "CSS",
"bytes": "33378"
},
{
"name": "Dockerfile",
"bytes": "2243"
},
{
"name": "Go",
"bytes": "1764292"
},
{
"name": "HTML",
"bytes": "319090"
},
{
"name": "Java",
"bytes": "4966853"
},
{
"name": "JavaScript",
"bytes": "1655476"
},
{
"name": "Jupyter Notebook",
"bytes": "9749"
},
{
"name": "Kotlin",
"bytes": "1099902"
},
{
"name": "Makefile",
"bytes": "4922"
},
{
"name": "PHP",
"bytes": "1220594"
},
{
"name": "Python",
"bytes": "2507509"
},
{
"name": "Ruby",
"bytes": "500331"
},
{
"name": "Rust",
"bytes": "558811"
},
{
"name": "Shell",
"bytes": "63776"
},
{
"name": "Swift",
"bytes": "267325"
},
{
"name": "TypeScript",
"bytes": "119632"
}
],
"symlink_target": ""
} |
DOCUMENTATION = '''
---
module: hashivault_policy_delete
version_added: "2.1.0"
short_description: Hashicorp Vault policy delete module
description:
- Module to delete a policy from Hashicorp Vault.
options:
url:
description:
- url for vault
default: to environment variable VAULT_ADDR
ca_cert:
description:
- "path to a PEM-encoded CA cert file to use to verify the Vault server TLS certificate"
default: to environment variable VAULT_CACERT
ca_path:
description:
- "path to a directory of PEM-encoded CA cert files to verify the Vault server TLS certificate : if ca_cert is specified, its value will take precedence"
default: to environment variable VAULT_CAPATH
client_cert:
description:
- "path to a PEM-encoded client certificate for TLS authentication to the Vault server"
default: to environment variable VAULT_CLIENT_CERT
client_key:
description:
- "path to an unencrypted PEM-encoded private key matching the client certificate"
default: to environment variable VAULT_CLIENT_KEY
verify:
description:
- "if set, do not verify presented TLS certificate before communicating with Vault server : setting this variable is not recommended except during testing"
default: to environment variable VAULT_SKIP_VERIFY
authtype:
description:
- "authentication type to use: token, userpass, github, ldap"
default: token
token:
description:
- token for vault
default: to environment variable VAULT_TOKEN
username:
description:
- username to login to vault.
password:
description:
- password to login to vault.
name:
description:
- policy name.
'''
EXAMPLES = '''
---
- hosts: localhost
tasks:
- hashivault_policy_delete:
name: 'annie'
register: 'vault_policy_delete'
- debug: msg="User policy is {{vault_policy_delete.policy}}"
'''
def main():
    """Ansible entry point: build the argument spec and run the policy delete."""
    argspec = hashivault_argspec()
    argspec['name'] = dict(required=True, type='str')
    module = hashivault_init(argspec)
    result = hashivault_policy_delete(module.params)
    # report through fail_json on failure, exit_json otherwise
    reporter = module.fail_json if result.get('failed') else module.exit_json
    reporter(**result)
from ansible.module_utils.basic import *
from ansible.module_utils.hashivault import *
@hashiwrapper
def hashivault_policy_delete(params):
    """Delete the named policy from Vault; a no-op when it does not exist."""
    client = hashivault_auth_client(params)
    policy_name = params.get('name')
    if policy_name in client.list_policies():
        client.delete_policy(policy_name)
        return {'changed': True}
    return {'changed': False}
if __name__ == '__main__':
main()
| {
"content_hash": "f939fa5dae1fdd5cab6692c251f703c6",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 167,
"avg_line_length": 31.568181818181817,
"alnum_prop": 0.6501079913606912,
"repo_name": "cloudvisory/ansible-modules-hashivault",
"id": "e394df2858ca7a88550d948d6a693deac83afc7e",
"size": "2800",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ansible/modules/hashivault/hashivault_policy_delete.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "88628"
},
{
"name": "Shell",
"bytes": "2831"
}
],
"symlink_target": ""
} |
import os, sys, copy, types
import Utilities, random
from math import *
import numpy as np
class NestedData(object):
"""This class manages a multiply nested dictionary and derives properties such the
levels of nesting. It also allows the dictionary to be re-arranged by secondary
keys. The methods assume that the lowest level children are lists or tuples"""
def __init__(self, data=None):
if data != None:
self.data = data
else:
self.data = {}
self.initialdata = copy.deepcopy(data)
return
def add(self, rec, name):
return
def levels(self):
"""Level of nesting"""
return self.getDictNesting(self.data)
def getDictNesting(self, data, level=1):
"""Get level of nesting for a dictionary"""
keys = data.keys()
if len(keys) == 0:
return 0
if type(data[keys[0]]) is types.DictType:
return self.getDictNesting(data[keys[0]], level+1)
else:
return level
def __setitem__(self, key, data):
self.data[key] = data
def __repr__(self):
if len(self.data)==0:
s = 'dataset with no data'
else:
s = 'dataset with %s primary keys and ' %len(self.data.keys())
s += '%s levels of nesting' %self.levels()
return s
def show(self):
self.printStructure(self.data)
def printStructure(self, data):
"""Preview the dict structure"""
if type(data) is types.DictType:
keys = data.keys()
print keys
self.printStructure(data[keys[0]])
else:
print type(data)
return
def restoreData(self):
"""Reset data to the original form"""
self.data = copy.deepcopy(self.initialdata)
return
def arrangeDictbySecondaryKey(self):
self.getLabels()
self.data = Utilities.arrangeDictbySecondaryKey(self.data, self.namelabels)
print self.data
return
def buildNestedStructure(self, indexes=[0], match='both'):
"""Rebuild a nested dictionary from a flat sructure according to labels
extracted from the key names, which must be separated by some symbol"""
print indexes
data = self.data
keys = data.keys()
labels = []
#get set of labels in the correct order
for i in indexes:
labels.append(Utilities.parseNames(keys, ind=i, sep='', match=match))
#print labels
self.data = self.recursiveBuild(labels, data, 0)
return
def recursiveBuild(self, labels, data, i):
"""Recursively build a new dictionary based on the labels extracted
from a flat dict keys, this is order dependant"""
names = labels[i]
newdata = {}
if i==len(labels)-1:
for key in data:
n = names[key]
#print key, n
#tuple expected
if type(data[key]) is types.DictType:
item = data[key][data[key].keys()[0]]
else:
item = data[key]
newdata[n] = item
else:
for j in names:
n = names[j]
newdata[n] = self.recursiveBuild(labels, data, i+1)
return newdata
def flatten(self):
"""Flatten a nested dictionary by creating one key from each
childs sub-keys"""
return
def averageReplicates(self, level=0):
"""Average replicates"""
#self.data = Utilities.addReplicates(self.data)
data = self.data
newdata = {}
#labels = self.parseNames(keys, ind=0, match='words')
print 'processing replicates..'
self.data = newdata
return
def getKeys(keytypes=0):
    """Return a random set of test keys.

    keytypes 1 -> random strings, 2 -> a run of integers; 0 picks one of the
    two flavors at random."""
    if keytypes == 0:
        keytypes = random.randint(1, 2)
    if keytypes == 1:
        keys = Utilities.createRandomStrings(3, 6)
    elif keytypes == 2:
        start = random.randint(1, 4)
        stop = random.randint(6, 9)
        keys = list(range(start, stop))
    return keys
def tempData(slope=0.2, noise=0.01):
    """Generate a synthetic x/y series: x over 250..350, y = slope*x scaled by
    gaussian noise around 10, rounded to 2 decimal places."""
    x = range(250, 360, 10)
    vals = []
    for xi in x:
        vals.append(round(slope * xi / random.normalvariate(10, noise), 2))
    return x, vals
def createNestedData(level=1, current=None, keytypes=0, keys=None):
    """create test nested data

    level    -- remaining depth of nesting; level 0 yields tempData() leaves
    current  -- unused (kept for backward compatibility)
    keytypes -- passed through to getKeys() to pick key flavor
    keys     -- per-level key cache; FIX: default was a shared mutable dict,
                so successive top-level calls silently reused the first call's
                random keys
    """
    if keys is None:
        keys = {}
    data = {}
    if level == 0:
        data = tempData()
    else:
        # FIX: "keys.has_key(level)" is Python-2-only; "in" works on both
        if level not in keys:
            keys[level] = getKeys()
        for k in keys[level]:
            data[k] = createNestedData(level=level-1, keytypes=keytypes, keys=keys)
    return data
def multiFileData():
    """Simulate a dict representing imported multiple file data with replicates"""
    data = {}
    for name in Utilities.createRandomStrings(3, 6):
        for rep in range(1, 3):
            for i in np.arange(2, 10, 1.0):
                key = name + '_ph' + str(i) + '_rep' + str(rep) + '.txt'
                # logistic curve in pH, centred on 5
                val = 1 / (1 + exp((i - 5) / 1.2))
                data[key] = {'ph': tempData(val)}
    #print data
    return data
def test():
    """Smoke-test NestedData against simulated multi-file replicate data."""
    #data = createNestedData(2)
    data = multiFileData()
    #print data
    D = NestedData(data)
    # NOTE(review): buildNestedStructure's signature is (indexes=[0],
    # match='both'), so the second positional argument here becomes
    # match=[0, 1] -- presumably a second index list was intended; verify
    # against Utilities.parseNames before relying on this call
    D.buildNestedStructure([0],[0,1])
    print D
    D.show()
    D.averageReplicates()
if __name__ == '__main__':
test()
| {
"content_hash": "3b3425877f1eafd30ed992a39e109d97",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 86,
"avg_line_length": 30.262569832402235,
"alnum_prop": 0.5659959387114639,
"repo_name": "dmnfarrell/peat",
"id": "eddab2ed6978eac3aa71caeb611fb3bea0cfdb33",
"size": "6328",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DataPipeline/Data.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "243"
},
{
"name": "C",
"bytes": "744763"
},
{
"name": "C++",
"bytes": "999138"
},
{
"name": "CSS",
"bytes": "10879"
},
{
"name": "Gnuplot",
"bytes": "311"
},
{
"name": "JavaScript",
"bytes": "60380"
},
{
"name": "Makefile",
"bytes": "12428"
},
{
"name": "Mathematica",
"bytes": "964"
},
{
"name": "Matlab",
"bytes": "820"
},
{
"name": "Mercury",
"bytes": "26238794"
},
{
"name": "PHP",
"bytes": "92905"
},
{
"name": "Python",
"bytes": "5466696"
},
{
"name": "Shell",
"bytes": "2984"
}
],
"symlink_target": ""
} |
"""Tests for Pruning callbacks."""
import os
import tempfile
import numpy as np
import tensorflow as tf
# TODO(b/139939526): move to public API.
from tensorflow_model_optimization.python.core.keras import test_utils as keras_test_utils
from tensorflow_model_optimization.python.core.sparsity.keras import prune
from tensorflow_model_optimization.python.core.sparsity.keras import pruning_callbacks
keras = tf.keras
errors_impl = tf.errors
class PruneCallbacksTest(tf.test.TestCase):
  """Tests for UpdatePruningStep and PruningSummaries pruning callbacks."""

  # single training batch; also the size of the generated data set
  _BATCH_SIZE = 20

  def _assertLogsExist(self, log_dir):
    """Assert that at least one summary file was written under log_dir."""
    self.assertNotEmpty(os.listdir(log_dir))

  def _pruned_model_setup(self, custom_training_loop=False):
    """Build a small pruned dense model plus random training data.

    Returns (model, x, y) for fit()-style training, or
    (model, loss, optimizer, x, y) when custom_training_loop is True
    (the model is left uncompiled in that case).
    """
    pruned_model = prune.prune_low_magnitude(
        keras_test_utils.build_simple_dense_model())
    x_train = np.random.rand(self._BATCH_SIZE, 10)
    y_train = keras.utils.to_categorical(
        np.random.randint(5, size=(self._BATCH_SIZE, 1)), 5)
    loss = keras.losses.categorical_crossentropy
    optimizer = keras.optimizers.SGD()
    if custom_training_loop:
      return pruned_model, loss, optimizer, x_train, y_train
    else:
      pruned_model.compile(loss=loss, optimizer=optimizer, metrics=['accuracy'])
      return pruned_model, x_train, y_train

  def testUpdatePruningStepsAndLogsSummaries(self):
    """fit() with both callbacks advances pruning_step and writes summaries."""
    log_dir = tempfile.mkdtemp()
    pruned_model, x_train, y_train = self._pruned_model_setup()
    pruned_model.fit(
        x_train,
        y_train,
        batch_size=self._BATCH_SIZE,
        epochs=3,
        callbacks=[
            pruning_callbacks.UpdatePruningStep(),
            pruning_callbacks.PruningSummaries(log_dir=log_dir)
        ])
    # one step per epoch (single batch), so the step counter reaches 3
    self.assertEqual(
        3, tf.keras.backend.get_value(pruned_model.layers[0].pruning_step))
    self.assertEqual(
        3, tf.keras.backend.get_value(pruned_model.layers[1].pruning_step))
    self._assertLogsExist(log_dir)

  # This style of custom training loop isn't available in graph mode.
  def testUpdatePruningStepsAndLogsSummaries_CustomTrainingLoop(self):
    """Manually-driven callbacks behave the same in a custom training loop."""
    log_dir = tempfile.mkdtemp()
    pruned_model, loss, optimizer, x_train, y_train = self._pruned_model_setup(
        custom_training_loop=True)
    unused_arg = -1
    step_callback = pruning_callbacks.UpdatePruningStep()
    log_callback = pruning_callbacks.PruningSummaries(log_dir=log_dir)
    # TODO(tfmot): we need a separate API for custom training loops
    # that doesn't rely on users setting the model and optimizer.
    #
    # Example is currently based on callbacks.py configure_callbacks
    # and model.compile internals.
    step_callback.set_model(pruned_model)
    log_callback.set_model(pruned_model)
    pruned_model.optimizer = optimizer
    step_callback.on_train_begin()
    for _ in range(3):
      log_callback.on_epoch_begin(epoch=unused_arg)
      # only one batch given batch_size = 20 and input shape.
      step_callback.on_train_batch_begin(batch=unused_arg)
      inp = np.reshape(x_train,
                       [self._BATCH_SIZE, 10])  # original shape: from [10].
      with tf.GradientTape() as tape:
        logits = pruned_model(inp, training=True)
        loss_value = loss(y_train, logits)
        grads = tape.gradient(loss_value, pruned_model.trainable_variables)
        optimizer.apply_gradients(zip(grads, pruned_model.trainable_variables))
      step_callback.on_epoch_end(batch=unused_arg)
    self.assertEqual(
        3, tf.keras.backend.get_value(pruned_model.layers[0].pruning_step))
    self.assertEqual(
        3, tf.keras.backend.get_value(pruned_model.layers[1].pruning_step))
    self._assertLogsExist(log_dir)

  def testUpdatePruningStepsAndLogsSummaries_RunInference(self):
    """Pure inference must not advance the pruning step (stays at -1)."""
    pruned_model, _, _, x_train, _ = self._pruned_model_setup(
        custom_training_loop=True)
    model_output = pruned_model(x_train)
    del model_output
    self.assertEqual(
        -1, tf.keras.backend.get_value(pruned_model.layers[0].pruning_step))
    self.assertEqual(
        -1, tf.keras.backend.get_value(pruned_model.layers[1].pruning_step))

  def testPruneTrainingRaisesError_PruningStepCallbackMissing(self):
    """Training without UpdatePruningStep is an error."""
    pruned_model, x_train, y_train = self._pruned_model_setup()
    # Throws an error since UpdatePruningStep is missing.
    with self.assertRaises(errors_impl.InvalidArgumentError):
      pruned_model.fit(x_train, y_train)

  # This style of custom training loop isn't available in graph mode.
  def testPruneTrainingLoopRaisesError_PruningStepCallbackMissing_CustomTrainingLoop(
      self):
    """Same missing-callback error in a custom training loop."""
    pruned_model, _, _, x_train, _ = self._pruned_model_setup(
        custom_training_loop=True)
    # Throws an error since UpdatePruningStep is missing.
    with self.assertRaises(errors_impl.InvalidArgumentError):
      inp = np.reshape(x_train, [self._BATCH_SIZE, 10])  # original shape: [10].
      with tf.GradientTape():
        pruned_model(inp, training=True)

  def testPruningSummariesRaisesError_LogDirNotNonEmptyString(self):
    """PruningSummaries rejects empty/None/non-string log_dir values."""
    with self.assertRaises(ValueError):
      pruning_callbacks.PruningSummaries(log_dir='')
    with self.assertRaises(ValueError):
      pruning_callbacks.PruningSummaries(log_dir=None)
    with self.assertRaises(ValueError):
      pruning_callbacks.PruningSummaries(log_dir=object())
if __name__ == '__main__':
tf.test.main()
| {
"content_hash": "72d6ab8cd4141df041a8184f21be93ba",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 90,
"avg_line_length": 36.71328671328671,
"alnum_prop": 0.7022857142857143,
"repo_name": "tensorflow/model-optimization",
"id": "31106100d935770a9a960ab72604e81700984462",
"size": "5939",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow_model_optimization/python/core/sparsity/keras/pruning_callbacks_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1770"
},
{
"name": "Jupyter Notebook",
"bytes": "285964"
},
{
"name": "Python",
"bytes": "1700675"
},
{
"name": "Shell",
"bytes": "8525"
},
{
"name": "Starlark",
"bytes": "84517"
}
],
"symlink_target": ""
} |
"""Run all tests."""
from __future__ import absolute_import, division, print_function, unicode_literals
from os import path
import sys
import unittest
from six import PY3
# Python 2 needs the third-party mock backport; fail fast with a hint
if not PY3:
    try:
        from mock import __version__
    except ImportError:
        print('Please run `pip install mock`')
        raise
# Allow to run from console as ./tests/run.py
# (put the project root on sys.path so the package under test imports)
BASEPATH = path.abspath(path.join(path.dirname(__file__), '..'))
if BASEPATH not in sys.path:
    sys.path.append(BASEPATH)
def main():
    """Discover and run all test_*.py suites located next to this file.

    Exits with status 1 when any test fails so callers (CI, shell scripts)
    can detect failure; previously the result of runner.run() was discarded
    and the process always exited 0.
    """
    loader = unittest.TestLoader()
    suite = loader.discover(path.abspath(path.dirname(__file__)), pattern='test_*.py')
    runner = unittest.TextTestRunner(buffer=True)
    result = runner.run(suite)
    if not result.wasSuccessful():
        sys.exit(1)
if __name__ == '__main__':
main()
| {
"content_hash": "689b710713c2fe21e14fd2c40b020bc8",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 86,
"avg_line_length": 24.5,
"alnum_prop": 0.6594387755102041,
"repo_name": "frost-nzcr4/find_forks",
"id": "364b6a31163b03ba2ba020ed92632c6aa47de558",
"size": "822",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/run.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16725"
},
{
"name": "Shell",
"bytes": "671"
}
],
"symlink_target": ""
} |
'''
Use parameters from Diskfit in the Galaxy class
'''
from galaxies import Galaxy
from astropy.table import Table
import astropy.units as u
from cube_analysis.rotation_curves import update_galaxy_params
from paths import fourteenB_HI_data_path, fourteenB_HI_data_wGBT_path
# The models from the peak velocity aren't as biased, based on comparing
# the VLA and VLA+GBT velocity curves. Using these as the defaults
folder_name = "diskfit_peakvels_noasymm_noradial_nowarp_output"
param_name = \
    fourteenB_HI_data_path("{}/rad.out.params.csv".format(folder_name))
param_table = Table.read(param_name)
# VLA-only model: M33 galaxy object updated with the DiskFit parameters
gal = Galaxy("M33")
update_galaxy_params(gal, param_table)
# Load in the model from the feathered data as well.
folder_name = "diskfit_peakvels_noasymm_noradial_nowarp_output"
param_name = \
    fourteenB_HI_data_wGBT_path("{}/rad.out.params.csv".format(folder_name))
param_table = Table.read(param_name)
# VLA+GBT (feathered) model
gal_feath = Galaxy("M33")
update_galaxy_params(gal_feath, param_table)
# Force 840 kpc for the distance
gal.distance = 840 * u.kpc
gal_feath.distance = 840 * u.kpc
| {
"content_hash": "9a43c060940db16e3292ca25368ebaa1",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 76,
"avg_line_length": 25.904761904761905,
"alnum_prop": 0.75,
"repo_name": "e-koch/VLA_Lband",
"id": "c831661a4478f0ae697ba155875ba48e79c5b6e8",
"size": "1089",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "galaxy_params.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2740022"
},
{
"name": "Shell",
"bytes": "98570"
}
],
"symlink_target": ""
} |
import requests
from bs4 import BeautifulSoup
import collections
import validation
def get_api_key(admin_domain, username, password):
    """Fetch the CAKE API key for the given admin user.

    admin_domain -- hostname of the CAKE admin instance (no scheme)
    username, password -- admin credentials

    Returns the API key string, or '' when the call fails validation.
    """
    # NOTE(review): plain http means the credentials travel unencrypted --
    # confirm whether the admin endpoint supports https before switching
    endpoint_string = 'http://{0}/api/1/get.asmx/GetAPIKey'.format(admin_domain)
    payload = dict(username=username, password=password)
    valid_call, soup = validation.soup_validation(endpoint_string, payload)
    if valid_call:
        api_key = soup.find('string').get_text()
    else:
        api_key = ''
    return api_key
| {
"content_hash": "413aa02168d5f2dfd1bc89cc5c1639c9",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 76,
"avg_line_length": 25,
"alnum_prop": 0.6863157894736842,
"repo_name": "cake-tools/ConversionExportTool",
"id": "62f2159edae91112865caf05bd6997c6d2a9e88c",
"size": "475",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ckapi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1065"
},
{
"name": "HTML",
"bytes": "4752"
},
{
"name": "PHP",
"bytes": "172065"
},
{
"name": "Python",
"bytes": "30573"
}
],
"symlink_target": ""
} |
'''
网易云音乐 Menu
'''
import curses
import locale
import threading
import sys
import os
import time
import webbrowser
import platform
from api import NetEase
from player import Player
from ui import Ui
from const import Constant
from config import Config
import logger
import signal
from storage import Storage
from cache import Cache
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
log = logger.getLogger(__name__)
try:
import keybinder
bind_global = True
except ImportError:
bind_global = False
log.warn("keybinder module not installed.")
log.warn("Not binding global hotkeys.")
home = os.path.expanduser("~")
# create the configuration directory on first run
if os.path.isdir(Constant.conf_dir) is False:
    os.mkdir(Constant.conf_dir)
# honour the user's locale so curses renders wide/CJK characters correctly
locale.setlocale(locale.LC_ALL, "")
code = locale.getpreferredencoding()
# carousel x in [left, right]
def carousel(left, right, x):
    """Wrap *x* into the inclusive range [left, right].

    Values past ``right`` wrap around to ``left`` and values before ``left``
    wrap to ``right``; anything already inside the range is returned
    unchanged.  Used to cycle menu selection indices.
    (Was a lambda assigned to a name -- PEP 8 E731.)
    """
    if x > right:
        return left
    if x < left:
        return right
    return x
shortcut = [
['j', 'Down ', '下移'],
['k', 'Up ', '上移'],
['h', 'Back ', '后退'],
['l', 'Forward ', '前进'],
['u', 'Prev page ', '上一页'],
['d', 'Next page ', '下一页'],
['f', 'Search ', '快速搜索'],
['[', 'Prev song ', '上一曲'],
[']', 'Next song ', '下一曲'],
[' ', 'Play/Pause', '播放/暂停'],
['?', 'Shuffle ', '手气不错'],
['=', 'Volume+ ', '音量增加'],
['-', 'Volume- ', '音量减少'],
['m', 'Menu ', '主菜单'],
['p', 'Present/History ', '当前/历史播放列表'],
["i", 'Music Info ', '当前音乐信息'],
['Shift+p', 'Playing Mode ', '播放模式切换'],
['a', 'Add ', '添加曲目到打碟'],
['z', 'DJ list ', '打碟列表'],
['s', 'Star ', '添加到收藏'],
['c', 'Collection', '收藏列表'],
['r', 'Remove ', '删除当前条目'],
['Shift+j', 'Move Down ', '向下移动当前条目'],
['Shift+k', 'Move Up ', '向上移动当前条目'],
[',', 'Like ', '喜爱'],
['Shfit+c', 'Cache ', '缓存歌曲到本地'],
['.', 'Trash FM ', '删除 FM'],
['/', 'Next FM ', '下一 FM'],
['q', 'Quit ', '退出'],
["w", 'Quit&Clear', '退出并清除用户信息']
]
class Menu:
    def __init__(self):
        """Initialise player, storage, network client and the curses UI."""
        # Python-2-only hack to force UTF-8 as the default string encoding
        reload(sys)
        sys.setdefaultencoding('UTF-8')
        self.config = Config()
        self.datatype = 'main'
        self.title = '网易云音乐'
        self.datalist = ['排行榜', '艺术家', '新碟上架', '精选歌单', '我的歌单', 'DJ节目', '每日推荐', '私人FM', '搜索', '帮助']
        self.offset = 0          # index of first visible menu entry
        self.index = 0           # currently highlighted entry
        self.storage = Storage()
        self.storage.load()
        self.collection = self.storage.database['collections'][0]
        self.player = Player()
        self.player.playing_song_changed_callback = self.song_changed_callback
        self.cache = Cache()
        self.ui = Ui()
        self.netease = NetEase()
        self.screen = curses.initscr()
        self.screen.keypad(1)
        self.step = 10           # entries shown per page
        self.stack = []          # navigation history (back/forward)
        self.djstack = []        # queued DJ tracks
        self.userid = self.storage.database["user"]["user_id"]
        self.username = self.storage.database["user"]["nickname"]
        self.resume_play = True
        self.at_playing_list = False
        # redraw on terminal resize, clean shutdown on Ctrl-C
        signal.signal(signal.SIGWINCH, self.change_term)
        signal.signal(signal.SIGINT, self.send_kill)
        self.START = time.time()
def notify(self, msg, type):
if type == 0:
if platform.system() == 'Darwin':
os.system('/usr/bin/osascript -e \'display notification "' + msg + '"\'')
else:
os.system('/usr/bin/notify-send "' + msg + '"')
else:
if platform.system() == 'Darwin':
os.system('/usr/bin/osascript -e \'display notification "' + msg + '"sound name "/System/Library/Sounds/Ping.aiff"\'')
else:
os.system('/usr/bin/notify-send "' + msg + '"')
def change_term(self, signum, frame):
self.ui.screen.clear()
self.ui.screen.refresh()
    def send_kill(self, signum, fram):
        """SIGINT handler: persist state, tear curses down and exit."""
        self.player.stop()
        self.cache.quit()
        self.storage.save()
        curses.endwin()
        sys.exit()
    def update_alert(self, version):
        """Notify the user when a newer MusicBox release is available.

        :param version: locally installed version string
        """
        latest = Menu().check_version()
        # check_version() returns 0 on any failure; only alert on a real,
        # different version string.
        if latest != version and latest != 0:
            self.notify("MusicBox Update is available", 1)
            time.sleep(0.5)
            self.notify("NetEase-MusicBox installed version:" + version + "\nNetEase-MusicBox latest version:" + latest, 0)
def check_version(self):
# 检查更新 && 签到
try:
mobilesignin = self.netease.daily_signin(0)
if mobilesignin != -1 and mobilesignin['code'] != -2:
self.notify("Mobile signin success", 1)
time.sleep(0.5)
pcsignin = self.netease.daily_signin(1)
if pcsignin != -1 and pcsignin['code'] != -2:
self.notify("PC signin success", 1)
tree = ET.ElementTree(ET.fromstring(str(self.netease.get_version())))
root = tree.getroot()
return root[0][4][0][0].text
except:
return 0
    def start_fork(self, version):
        """Fork: the child checks for updates, the parent runs the UI loop."""
        pid = os.fork()
        if pid == 0:
            Menu().update_alert(version)
        else:
            Menu().start()
def play_pause(self):
if len(self.storage.database["player_info"]["player_list"]) == 0:
return
if self.player.pause_flag:
self.player.resume()
else:
self.player.pause()
time.sleep(0.1)
def next_song(self):
if len(self.storage.database["player_info"]["player_list"]) == 0:
return
self.player.next()
time.sleep(0.1)
def previous_song(self):
if len(self.storage.database["player_info"]["player_list"]) == 0:
return
self.player.prev()
time.sleep(0.1)
    def start(self):
        """Run the main event loop: draw the UI and dispatch key presses.

        Blocks until the user quits ('q' or 'w'), then shuts the player,
        cache, storage and curses down.
        """
        self.START = time.time() // 1
        self.ui.build_menu(self.datatype, self.title, self.datalist, self.offset, self.index, self.step, self.START)
        self.ui.build_process_bar(self.player.process_location, self.player.process_length, self.player.playing_flag,
                                  self.player.pause_flag, self.storage.database['player_info']['playing_mode'])
        self.stack.append([self.datatype, self.title, self.datalist, self.offset, self.index])
        if bind_global:
            keybinder.bind(self.config.get_item("global_play_pause"), self.play_pause)
            keybinder.bind(self.config.get_item("global_next"), self.next_song)
            keybinder.bind(self.config.get_item("global_previous"), self.previous_song)
        while True:
            datatype = self.datatype
            title = self.title
            datalist = self.datalist
            offset = self.offset
            idx = index = self.index
            step = self.step
            stack = self.stack
            djstack = self.djstack
            # getch() returns -1 after the 500 ms timeout, which doubles
            # as the redraw tick.
            self.screen.timeout(500)
            key = self.screen.getch()
            if bind_global:
                keybinder.gtk.main_iteration(False)
            self.ui.screen.refresh()
            # term resize
            if key == -1:
                self.ui.update_size()
                self.player.update_size()
            # quit
            if key == ord('q'):
                break
            # quit and wipe the stored user info
            if key == ord('w'):
                self.storage.database['user'] = {
                    "username": "",
                    "password": "",
                    "user_id": "",
                    "nickname": "",
                }
                try:
                    os.remove(self.storage.cookie_path)
                except:
                    break
                break
            # move up
            elif key == ord('k'):
                # turn page if at beginning
                if idx == offset:
                    if offset == 0:
                        continue
                    self.offset -= step
                    # move the cursor to the last row of the page
                    self.index = offset - 1
                else:
                    self.index = carousel(offset, min(len(datalist), offset + step) - 1, idx - 1)
                self.START = time.time()
            # move down
            elif key == ord('j'):
                # turn page if at end
                if idx == min(len(datalist), offset + step) - 1:
                    if offset + step >= len(datalist):
                        continue
                    self.offset += step
                    # move the cursor to the first row of the page
                    self.index = offset + step
                else:
                    self.index = carousel(offset, min(len(datalist), offset + step) - 1, idx + 1)
                self.START = time.time()
            # numeric shortcut keys
            elif ord('0') <= key <= ord('9'):
                if self.datatype == 'songs' or self.datatype == 'djchannels' or self.datatype == 'help':
                    continue
                idx = key - ord('0')
                self.ui.build_menu(self.datatype, self.title, self.datalist, self.offset, idx, self.step, self.START)
                self.ui.build_loading()
                self.dispatch_enter(idx)
                self.index = 0
                self.offset = 0
            # previous page
            elif key == ord('u'):
                if offset == 0:
                    continue
                self.START = time.time()
                self.offset -= step
                # e.g. 23 - 10 = 13 --> 10
                self.index = (index - step) // step * step
            # next page
            elif key == ord('d'):
                if offset + step >= len(datalist):
                    continue
                self.START = time.time()
                self.offset += step
                # e.g. 23 + 10 = 33 --> 30
                self.index = (index + step) // step * step
            # forward (enter the selected item; key 10 is the Enter key)
            elif key == ord('l') or key == 10:
                if self.datatype == 'songs' or self.datatype == 'djchannels' or self.datatype == 'help' or len(self.datalist) <= 0:
                    continue
                self.START = time.time()
                self.ui.build_loading()
                self.dispatch_enter(idx)
                self.index = 0
                self.offset = 0
            # back
            elif key == ord('h'):
                # if not main menu
                if len(self.stack) == 1:
                    continue
                self.START = time.time()
                up = stack.pop()
                self.datatype = up[0]
                self.title = up[1]
                self.datalist = up[2]
                self.offset = up[3]
                self.index = up[4]
                self.at_playing_list = False
            # search
            elif key == ord('f'):
                # 8 is the 'search' menu
                self.dispatch_enter(8)
            # play the next song
            elif key == ord(']'):
                self.next_song()
            # play the previous song
            elif key == ord('['):
                self.previous_song()
            # volume up
            elif key == ord('='):
                self.player.volume_up()
            # volume down
            elif key == ord('-'):
                self.player.volume_down()
            # shuffle
            elif key == ord('?'):
                if len(self.storage.database["player_info"]["player_list"]) == 0:
                    continue
                self.player.shuffle()
                time.sleep(0.1)
            # like the playing song
            elif key == ord(','):
                return_data = self.request_api(self.netease.fm_like, self.player.get_playing_id())
                if return_data != -1:
                    self.notify("Added successfully!", 0)
                else:
                    self.notify("Existing song!", 0)
            # trash the current FM song
            elif key == ord('.'):
                if self.datatype == 'fmsongs':
                    if len(self.storage.database["player_info"]["player_list"]) == 0:
                        continue
                    self.player.next()
                    return_data = self.request_api(self.netease.fm_trash, self.player.get_playing_id())
                    if return_data != -1:
                        self.notify("Deleted successfully!", 0)
                time.sleep(0.1)
            # next FM song
            elif key == ord('/'):
                if self.datatype == 'fmsongs':
                    if len(self.storage.database["player_info"]["player_list"]) == 0:
                        continue
                    self.player.next()
                    time.sleep(0.1)
            # play / pause
            elif key == ord(' '):
                # If not open a new playing list, just play and pause.
                try:
                    if self.datalist[idx]['song_id'] == self.player.playing_id:
                        self.player.play_and_pause(self.storage.database['player_info']['idx'])
                        time.sleep(0.1)
                        continue
                except:
                    pass
                # If change to a new playing list. Add playing list and play.
                if datatype == 'songs':
                    self.resume_play = False
                    self.player.new_player_list('songs', self.title, self.datalist, -1)
                    self.player.end_callback = None
                    self.player.play_and_pause(idx)
                    self.at_playing_list = True
                elif datatype == 'djchannels':
                    self.resume_play = False
                    self.player.new_player_list('djchannels', self.title, self.datalist, -1)
                    self.player.end_callback = None
                    self.player.play_and_pause(idx)
                    self.at_playing_list = True
                elif datatype == 'fmsongs':
                    self.resume_play = False
                    self.storage.database['player_info']['playing_mode'] = 0
                    self.player.new_player_list('fmsongs', self.title, self.datalist, -1)
                    self.player.end_callback = self.fm_callback
                    self.player.play_and_pause(idx)
                    self.at_playing_list = True
                else:
                    self.player.play_and_pause(self.storage.database['player_info']['idx'])
                    time.sleep(0.1)
            # show the current / history playing list
            elif key == ord('p'):
                self.show_playing_song()
            # cycle the playing mode
            elif key == ord('P'):
                self.storage.database['player_info']['playing_mode'] = \
                    (self.storage.database['player_info']['playing_mode'] + 1) % 5
            # add the song to the DJ list
            elif key == ord('a'):
                if datatype == 'songs' and len(datalist) != 0:
                    self.djstack.append(datalist[idx])
                elif datatype == 'artists':
                    pass
            # open the DJ list
            elif key == ord('z'):
                self.stack.append([datatype, title, datalist, offset, index])
                self.datatype = 'songs'
                self.title = '网易云音乐 > 打碟'
                self.datalist = self.djstack
                self.offset = 0
                self.index = 0
            # star the song (add to the collection)
            elif key == ord('s'):
                if (datatype == 'songs' or datatype == 'djchannels') and len(datalist) != 0:
                    self.collection.append(datalist[idx])
                    self.notify("Added successfully", 0)
            # open the collection
            elif key == ord('c'):
                self.stack.append([datatype, title, datalist, offset, index])
                self.datatype = 'songs'
                self.title = '网易云音乐 > 收藏'
                self.datalist = self.collection
                self.offset = 0
                self.index = 0
            # remove the entry from the current list
            elif key == ord('r'):
                if (datatype == 'songs' or datatype == 'djchannels') and len(datalist) != 0:
                    self.datalist.pop(idx)
                    self.index = carousel(offset, min(len(datalist), offset + step) - 1, idx)
            # move the current entry down
            elif key == ord("J"):
                if datatype != 'main' and len(datalist) != 0 and idx + 1 != len(self.datalist):
                    self.START = time.time()
                    song = self.datalist.pop(idx)
                    self.datalist.insert(idx + 1, song)
                    self.index = idx + 1
                    # turn the page when the cursor leaves it
                    if self.index >= offset + step:
                        self.offset = offset + step
            # move the current entry up
            elif key == ord("K"):
                if datatype != 'main' and len(datalist) != 0 and idx != 0:
                    self.START = time.time()
                    song = self.datalist.pop(idx)
                    self.datalist.insert(idx - 1, song)
                    self.index = idx - 1
                    # turn the page when the cursor leaves it
                    if self.index < offset:
                        self.offset = offset - step
            elif key == ord('m'):
                if datatype != 'main':
                    self.stack.append([datatype, title, datalist, offset, index])
                    self.datatype = self.stack[0][0]
                    self.title = self.stack[0][1]
                    self.datalist = self.stack[0][2]
                    self.offset = 0
                    self.index = 0
            elif key == ord('g'):
                if datatype == 'help':
                    webbrowser.open_new_tab('https://github.com/darknessomi/musicbox')
            # start caching the song locally
            elif key == ord("C"):
                s = self.datalist[idx]
                cache_thread = threading.Thread(target=self.player.cacheSong1time, args=(
                    s['song_id'], s['song_name'], s['artist'], s['mp3_url']))
                cache_thread.start()
            elif key == ord('i'):
                if self.player.playing_id != -1:
                    webbrowser.open_new_tab('http://music.163.com/#/song?id=' + str(self.player.playing_id))
            self.ui.build_process_bar(self.player.process_location, self.player.process_length,
                                      self.player.playing_flag,
                                      self.player.pause_flag, self.storage.database['player_info']['playing_mode'])
            self.ui.build_menu(self.datatype, self.title, self.datalist, self.offset, self.index, self.step, self.START)
        self.player.stop()
        self.cache.quit()
        self.storage.save()
        curses.endwin()
    def dispatch_enter(self, idx):
        """Open the item selected in the current view.

        Pushes the current view onto the navigation stack, then replaces
        datatype/datalist/title according to what was selected.

        :param idx: index of the selected row in self.datalist
        :return: False when idx is out of range, otherwise None
        """
        # The end of stack
        netease = self.netease
        datatype = self.datatype
        title = self.title
        datalist = self.datalist
        offset = self.offset
        index = self.index
        self.stack.append([datatype, title, datalist, offset, index])
        # NOTE(review): ``>`` lets idx == len(datalist) through; looks
        # like it should be ``>=`` — confirm before changing.
        if idx > len(self.datalist):
            return False
        if datatype == 'main':
            self.choice_channel(idx)
        # the artist's hot songs
        elif datatype == 'artists':
            artist_id = datalist[idx]['artist_id']
            songs = netease.artists(artist_id)
            self.datatype = 'songs'
            self.datalist = netease.dig_info(songs, 'songs')
            self.title += ' > ' + datalist[idx]['artists_name']
        # the songs on the album
        elif datatype == 'albums':
            album_id = datalist[idx]['album_id']
            songs = netease.album(album_id)
            self.datatype = 'songs'
            self.datalist = netease.dig_info(songs, 'songs')
            self.title += ' > ' + datalist[idx]['albums_name']
        # featured play-list menu entry
        elif datatype == 'playlists':
            data = self.datalist[idx]
            self.datatype = data['datatype']
            self.datalist = netease.dig_info(data['callback'](), self.datatype)
            self.title += ' > ' + data['title']
        # songs of a site-wide top play list
        elif datatype == 'top_playlists':
            log.debug(datalist)
            playlist_id = datalist[idx]['playlist_id']
            songs = netease.playlist_detail(playlist_id)
            self.datatype = 'songs'
            self.datalist = netease.dig_info(songs, 'songs')
            self.title += ' > ' + datalist[idx]['playlists_name']
        # featured play-list categories
        elif datatype == 'playlist_classes':
            # category name
            data = self.datalist[idx]
            self.datatype = 'playlist_class_detail'
            self.datalist = netease.dig_info(data, self.datatype)
            self.title += ' > ' + data
            log.debug(self.datalist)
        # detail of one category
        elif datatype == 'playlist_class_detail':
            # sub-category
            data = self.datalist[idx]
            self.datatype = 'top_playlists'
            self.datalist = netease.dig_info(netease.top_playlists(data), self.datatype)
            log.debug(self.datalist)
            self.title += ' > ' + data
        # song charts
        elif datatype == 'toplists':
            songs = netease.top_songlist(idx)
            self.title += ' > ' + self.datalist[idx]
            self.datalist = netease.dig_info(songs, 'songs')
            self.datatype = 'songs'
        # search menu
        elif datatype == 'search':
            ui = self.ui
            # no need to do stack.append, Otherwise there will be a bug when you input key 'h' to return
            # if idx in range(1, 5):
            #     self.stack.append([self.datatype, self.title, self.datalist, self.offset, self.index])
            self.index = 0
            self.offset = 0
            if idx == 0:
                # search results can be rendered through 'top_playlists'
                self.datatype = 'top_playlists'
                self.datalist = ui.build_search('search_playlist')
                self.title = '精选歌单搜索列表'
            elif idx == 1:
                self.datatype = 'songs'
                self.datalist = ui.build_search('songs')
                self.title = '歌曲搜索列表'
            elif idx == 2:
                self.datatype = 'artists'
                self.datalist = ui.build_search('artists')
                self.title = '艺术家搜索列表'
            elif idx == 3:
                self.datatype = 'albums'
                self.datalist = ui.build_search('albums')
                self.title = '专辑搜索列表'
    def show_playing_song(self):
        """Switch the view to the list that is currently playing.

        Rebuilds datalist from the stored player list and, on the first
        call after start-up (resume_play), restarts playback at the saved
        position.
        """
        if len(self.storage.database['player_info']['player_list']) == 0:
            return
        if not self.at_playing_list:
            # Remember where we were so 'h' can navigate back.
            self.stack.append([self.datatype, self.title, self.datalist, self.offset, self.index])
            self.at_playing_list = True
        self.datatype = self.storage.database['player_info']['player_list_type']
        self.title = self.storage.database['player_info']['player_list_title']
        self.datalist = []
        for i in self.storage.database['player_info']['player_list']:
            self.datalist.append(self.storage.database['songs'][i])
        self.index = self.storage.database['player_info']['idx']
        # Jump to the page that contains the playing song (Python 2
        # integer division).
        self.offset = self.storage.database['player_info']['idx'] / self.step * self.step
        if self.resume_play:
            if self.datatype == "fmsongs":
                self.player.end_callback = self.fm_callback
            else:
                self.player.end_callback = None
            self.storage.database['player_info']['idx'] = -1
            self.player.play_and_pause(self.index)
            self.resume_play = False
def song_changed_callback(self):
if self.at_playing_list:
self.show_playing_song()
    def fm_callback(self):
        """Player end-of-list callback: fetch and append a new FM batch.

        When the FM view is on screen, also rebuilds the visible list
        from the updated stored player list.
        """
        log.debug("FM CallBack.")
        data = self.get_new_fm()
        self.player.append_songs(data)
        if self.datatype == 'fmsongs':
            if len(self.storage.database['player_info']['player_list']) == 0:
                return
            self.datatype = self.storage.database['player_info']['player_list_type']
            self.title = self.storage.database['player_info']['player_list_title']
            self.datalist = []
            for i in self.storage.database['player_info']['player_list']:
                self.datalist.append(self.storage.database['songs'][i])
            self.index = self.storage.database['player_info']['idx']
            # Python 2 integer division keeps offset on a page boundary.
            self.offset = self.storage.database['player_info']['idx'] / self.step * self.step
    def request_api(self, func, *args):
        """Call an API function, logging the user in (again) when needed.

        :param func: NetEase API method to call
        :param args: positional arguments for func
        :return: the API result, or -1 when the user cancelled the login
        """
        if self.storage.database['user']['user_id'] != "":
            result = func(*args)
            if result != -1:
                return result
        # Either nobody is logged in or the session expired:
        # re-authenticate with stored credentials, or prompt.
        log.debug("Re Login.")
        user_info = {}
        if self.storage.database['user']['username'] != "":
            user_info = self.netease.login(self.storage.database['user']['username'],
                                           self.storage.database['user']['password'])
        if self.storage.database['user']['username'] == "" or user_info['code'] != 200:
            data = self.ui.build_login()
            # login cancelled by the user
            if data == -1:
                return -1
            user_info = data[0]
            self.storage.database['user']['username'] = data[1][0]
            self.storage.database['user']['password'] = data[1][1]
        self.storage.database['user']['user_id'] = user_info['account']['id']
        self.storage.database['user']['nickname'] = user_info['profile']['nickname']
        self.userid = self.storage.database["user"]["user_id"]
        self.username = self.storage.database["user"]["nickname"]
        return func(*args)
def get_new_fm(self):
myplaylist = []
for count in range(0, 1):
data = self.request_api(self.netease.personal_fm)
if data == -1:
break
myplaylist += data
time.sleep(0.2)
return self.netease.dig_info(myplaylist, "fmsongs")
    def choice_channel(self, idx):
        """Handle a selection made on the main menu.

        :param idx: index of the chosen main-menu entry (0-9)
        """
        # top charts
        netease = self.netease
        if idx == 0:
            self.datalist = netease.return_toplists()
            self.title += ' > 排行榜'
            self.datatype = 'toplists'
        # artists
        elif idx == 1:
            artists = netease.top_artists()
            self.datalist = netease.dig_info(artists, 'artists')
            self.title += ' > 艺术家'
            self.datatype = 'artists'
        # new albums
        elif idx == 2:
            albums = netease.new_albums()
            self.datalist = netease.dig_info(albums, 'albums')
            self.title += ' > 新碟上架'
            self.datatype = 'albums'
        # featured play lists
        elif idx == 3:
            self.datalist = [
                {
                    'title': '全站置顶',
                    'datatype': 'top_playlists',
                    'callback': netease.top_playlists
                },
                {
                    'title': '分类精选',
                    'datatype': 'playlist_classes',
                    'callback': netease.playlist_classes
                }
            ]
            self.title += ' > 精选歌单'
            self.datatype = 'playlists'
        # my play lists
        elif idx == 4:
            myplaylist = self.request_api(self.netease.user_playlist, self.userid)
            if myplaylist == -1:
                return
            self.datatype = 'top_playlists'
            self.datalist = netease.dig_info(myplaylist, self.datatype)
            self.title += ' > ' + self.username + ' 的歌单'
        # DJ programmes
        elif idx == 5:
            self.datatype = 'djchannels'
            self.title += ' > DJ节目'
            self.datalist = netease.djchannels()
        # daily recommendation
        elif idx == 6:
            self.datatype = 'songs'
            self.title += ' > 每日推荐'
            myplaylist = self.request_api(self.netease.recommend_playlist)
            if myplaylist == -1:
                return
            self.datalist = self.netease.dig_info(myplaylist, self.datatype)
        # personal FM
        elif idx == 7:
            self.datatype = 'fmsongs'
            self.title += ' > 私人FM'
            self.datalist = self.get_new_fm()
        # search
        elif idx == 8:
            self.datatype = 'search'
            self.title += ' > 搜索'
            self.datalist = ['歌曲', '艺术家', '专辑', '网易精选集']
        # help
        elif idx == 9:
            self.datatype = 'help'
            self.title += ' > 帮助'
            self.datalist = shortcut
        self.offset = 0
        self.index = 0
| {
"content_hash": "a1a1d6889199b4a77b594ad684a0241d",
"timestamp": "",
"source": "github",
"line_count": 765,
"max_line_length": 134,
"avg_line_length": 36.840522875816994,
"alnum_prop": 0.48444097505588474,
"repo_name": "Ma233/musicbox",
"id": "de97be3f19d61d614f2d5a27691f87355cad47b5",
"size": "29385",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "NEMbox/menu.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "109672"
}
],
"symlink_target": ""
} |
# Python 2 print-statement exercises, demonstrating quote styles and escaping.
print "Hello World!"
# print "Hello Again"
print "I like typing this."
print "This is fun."
print 'Yay! Printing.'
print "I'd much rather you 'not'."
print 'I "said" do not touch this.'
print "I \"said\" do not touch this." | {
"content_hash": "dbd50e38516b43167b90e850133830e4",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 37,
"avg_line_length": 27.875,
"alnum_prop": 0.6771300448430493,
"repo_name": "pratikdesai/learnpython",
"id": "4e808f523c6d0ae9388fcef2c28de43e0b639624",
"size": "223",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ex01.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5755"
}
],
"symlink_target": ""
} |
"""
Copyright 2015 Reverb Technologies, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class BuildExecution(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually."""

    def __init__(self):
        """
        Attributes:
          swaggerTypes (dict): The key is attribute name and the value is attribute type.
          attributeMap (dict): The key is attribute name and the value is json key in definition.
        """
        # Single source of truth: attribute name -> swagger type.
        field_types = (
            ('buildSetContentId', 'str'),
            ('topContentId', 'str'),
            ('projectName', 'str'),
            ('buildExecutionType', 'BuildExecutionType'),
            ('buildContentId', 'str'),
        )
        self.swaggerTypes = dict(field_types)
        # Every attribute maps to a JSON key with the identical name.
        self.attributeMap = dict((name, name) for name, _ in field_types)
        # All fields start out unset.
        for name, _ in field_types:
            setattr(self, name, None)
| {
"content_hash": "ec29f5d1449b1e471670d6b961fe278e",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 97,
"avg_line_length": 27.746666666666666,
"alnum_prop": 0.5444497837578087,
"repo_name": "ahmedlawi92/pnc-cli",
"id": "f0f244d56511941d02c7bd6c47487bdf74e32816",
"size": "2103",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pnc/client/models/BuildExecution.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "211446"
}
],
"symlink_target": ""
} |
import argparse
import json
import multiprocessing
import os
import re
import subprocess
import sys
import threading
from pathlib import Path
import queue as queue
def get_format_invocation(f, cmake_format_binary, check=False):
    """Build the cmake-format command line for one file.

    :param f: path of the CMakeLists.txt to process
    :param cmake_format_binary: path to the cmake-format executable
    :param check: when True, only verify formatting (cmake-format exits
        non-zero on differences) instead of rewriting the file in place
    :return: argv list suitable for subprocess
    """
    # run_format() passes args.check as a third argument; the original
    # two-parameter signature made every invocation raise TypeError.
    mode = "--check" if check else "-i"
    return [cmake_format_binary, mode, f]
def run_format(args, file_queue, lock, return_codes):
    """Worker loop: pull file names off the queue and run cmake-format.

    Runs forever; intended to be started as a daemon thread.  Return
    codes and console output are serialised under the shared lock.
    (The docstring previously said clang-format; this runs cmake-format.)
    """
    while True:
        name = file_queue.get()
        # NOTE(review): three arguments are passed here, so
        # get_format_invocation must accept the check flag as well.
        invocation = get_format_invocation(name, args.cmake_format_binary, args.check)
        proc = subprocess.Popen(
            invocation, stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )
        output, err = proc.communicate()
        with lock:
            return_codes.append(proc.returncode)
            sys.stdout.write(" ".join(invocation) + "\n" + output.decode("utf-8"))
            if len(err) > 0:
                sys.stdout.flush()
                sys.stderr.write(err.decode("utf-8"))
        file_queue.task_done()
def main():
    """Find every CMakeLists.txt under the CWD and format them in parallel."""
    parser = argparse.ArgumentParser(
        description="Runs cmake-format over all CMakeLists.txt"
    )
    parser.add_argument(
        "-cmake-format-binary",
        metavar="PATH",
        default="cmake-format",
        help="path to cmake-format binary",
    )
    # NOTE(review): parsed but never read — cmake-format is always run
    # with -i unless --check is given; confirm whether -i can be dropped.
    parser.add_argument(
        "-i", action="store_true", help="Inplace edit <file>s, if specified"
    )
    parser.add_argument(
        "-j",
        type=int,
        default=0,
        help="number of format instances to be run in parallel.",
    )
    parser.add_argument(
        "files",
        nargs="*",
        default=[".*"],
        help="files to be processed (regex on path)",
    )
    parser.add_argument(
        "-p",
        dest="build_path",
        help="Path used to read a compile command database.",
    )
    parser.add_argument(
        "--check",
        action="store_true",
        help="Exit with status code 0 if formatting would not change "
        "file contents, or status code 1 if it would",
    )
    args = parser.parse_args()
    cwd = Path.cwd()
    # Build up a big regexy filter from all command line arguments.
    file_name_re = re.compile("|".join(args.files))
    files = []
    for file in cwd.glob("**/CMakeLists.txt"):
        relative_file = file.relative_to(cwd)
        if file_name_re.search(str(relative_file)):
            files.append(str(file))
            print(relative_file)
        # print(file)
    # 0 means "one worker per CPU".
    max_task = args.j
    if max_task == 0:
        max_task = multiprocessing.cpu_count()
    return_codes = []
    try:
        # Spin up a bunch of tidy-launching threads.
        task_queue = queue.Queue(max_task)
        # List of files with a non-zero return code.
        lock = threading.Lock()
        for _ in range(max_task):
            t = threading.Thread(
                target=run_format, args=(args, task_queue, lock, return_codes)
            )
            t.daemon = True
            t.start()
        # Fill the queue with files.
        for name in files:
            task_queue.put(name)
        # Wait for all threads to be done.
        task_queue.join()
    except KeyboardInterrupt:
        # This is a sad hack. Unfortunately subprocess goes
        # bonkers with ctrl-c and we start forking merrily.
        print("\nCtrl-C detected, goodbye.")
        os.kill(0, 9)
    # Propagate the first failing cmake-format exit code, if any.
    for return_code in return_codes:
        if return_code != 0:
            sys.exit(return_code)
    sys.exit(0)


if __name__ == "__main__":
    main()
| {
"content_hash": "c84e0294cf16731ded20da507b19a626",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 86,
"avg_line_length": 28.007874015748033,
"alnum_prop": 0.5805454034298566,
"repo_name": "RoboJackets/robocup-software",
"id": "400d15b65b11995e16de0403815601e0feb90e28",
"size": "3580",
"binary": false,
"copies": "1",
"ref": "refs/heads/ros2",
"path": "util/run-cmake-format.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "1174090"
},
{
"name": "CMake",
"bytes": "62217"
},
{
"name": "Dockerfile",
"bytes": "2286"
},
{
"name": "MATLAB",
"bytes": "18772"
},
{
"name": "Makefile",
"bytes": "9066"
},
{
"name": "Python",
"bytes": "334582"
},
{
"name": "Shell",
"bytes": "21042"
}
],
"symlink_target": ""
} |
import logging
import sqlite3
import logger
import problemdb
import teamdb
from api import api
from flask import Flask, jsonify, make_response, render_template, session, redirect, url_for
from flask_limiter import Limiter
from decorators import admins_only, redirect_if_not_logged_in
# Flask application object and its request rate limiter.
app = Flask(__name__)
limiter = Limiter(app)
@app.errorhandler(429)
def error_handler(optional_argument=""):
    """Handle HTTP 429 (rate limit exceeded).

    Logs the offending team when one is logged in and returns a JSON
    "Slow down!" body.  NOTE(review): Flask passes the raised error as
    the positional argument; it is accepted but unused.  The response
    status is 200, not 429 — confirm that is intentional.
    """
    if "tid" in session:
        logger.log("spam", logger.CRITICAL, "%s is using the api too quickly!", session["tid"])
    return make_response(jsonify(message="Slow down!"), 200)
app.debug = True
# Read the session-signing key once at startup; the context manager closes
# the file handle (the original ``open(...).read()`` leaked it).
with open(".secret_key", "r") as key_file:
    app.secret_key = key_file.read()
app.jinja_env.trim_blocks = True
# Rate-limit the API blueprint; admins are exempt.
limiter.limit("10/minute", error_message=error_handler, exempt_when=lambda: is_admin())(api)
app.register_blueprint(api)
# Shared SQLite connection; check_same_thread=False allows use from
# Flask's worker threads.
conn = sqlite3.connect("introctf.db", check_same_thread=False)
conn.text_factory = str
@app.route('/')
def index():
    """Render the landing page with the visitor's session flags."""
    context = {"logged_in": is_logged_in(), "admin": is_admin()}
    return render_template("index.html", **context)
@app.route("/scoreboard")
def scoreboard():
    """Render the public scoreboard."""
    standings = teamdb.get_scoreboard_data()
    return render_template("scoreboard.html", logged_in=is_logged_in(),
                           admin=is_admin(), scoreboard=standings)
@app.route("/problems")
@redirect_if_not_logged_in
def problems():
    """Render the challenge list, marking the team's solved problems."""
    challenge_list = problemdb.get_problems()
    if not challenge_list:
        # Nothing published yet: render the empty page.
        return render_template("problems.html", logged_in=is_logged_in(),
                               admin=is_admin())
    solved = teamdb.get_solves(session["tid"])
    return render_template("problems.html", logged_in=is_logged_in(),
                           admin=is_admin(), problems=challenge_list,
                           solved=solved)
@app.route("/login", methods=["GET"])
def login():
    """Show the login form."""
    context = {"logged_in": is_logged_in(), "admin": is_admin()}
    return render_template("login.html", **context)
@app.route("/register", methods=["GET"])
def register():
    """Show the registration form."""
    page = render_template("register.html")
    return page
@app.route("/logout", methods=["GET"])
def logout():
    """Drop the whole session and bounce back to the landing page."""
    target = url_for("index")
    session.clear()
    return redirect(target)
@app.route("/admin", methods=["GET"])
@admins_only
def admin():
    """Admin dashboard listing every problem."""
    return render_template("admin_dashboard.html",
                           problems=problemdb.get_problems(),
                           logged_in=is_logged_in(), admin=is_admin())
def is_logged_in():
    """Return the session's logged_in flag, or False when absent."""
    return session.get("logged_in", False)
def is_admin():
    """Return the session's admin flag, or False when absent."""
    return session.get("admin", False)
@app.errorhandler(404)
def page_not_found(e):
    """Render the custom 404 page with the matching status code."""
    body = render_template('404.html')
    return body, 404
if __name__ == '__main__':
    # Initialise the log files before serving; listen on all interfaces.
    logger.initalize_logs()
    app.run(host="0.0.0.0", port=5000)
| {
"content_hash": "ea0ede09265762df91955f68fe52e0c1",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 121,
"avg_line_length": 31.14814814814815,
"alnum_prop": 0.6963931827189853,
"repo_name": "james9909/IntroCTF",
"id": "e1edba96886e4fb441e3239ccf4eb4a34f2c711f",
"size": "2523",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1646"
},
{
"name": "HTML",
"bytes": "21714"
},
{
"name": "JavaScript",
"bytes": "7518"
},
{
"name": "Makefile",
"bytes": "118"
},
{
"name": "PHP",
"bytes": "6995"
},
{
"name": "Python",
"bytes": "40825"
},
{
"name": "Shell",
"bytes": "4260"
}
],
"symlink_target": ""
} |
"""
Utility module to handle Java Manifest.mf files
:author: Thomas Calmant
:license: Apache Software License 2.0
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import contextlib
import shlex
import sys
# Pick the right in-memory text-stream class for the running interpreter.
PYTHON3 = (sys.version_info[0] == 3)
if PYTHON3:
    # Python 3
    import io
    StringIO = io.StringIO
else:
    # Python 2
    import StringIO
    StringIO = StringIO.StringIO

# ------------------------------------------------------------------------------

# Documentation strings format
__docformat__ = "restructuredtext en"

# Version
__version_info__ = (1, 1, 0)
__version__ = ".".join(str(x) for x in __version_info__)

# ------------------------------------------------------------------------------

# iPOJO components description key
IPOJO_COMPONENTS_KEY = 'iPOJO-Components'

# ------------------------------------------------------------------------------
class Manifest(object):
    """
    Java Manifest parser
    """
    def __init__(self):
        """
        Sets up the parser
        """
        # Manifest entries
        self.entries = {}
        # get() shortcut
        self.get = self.entries.get

    def extract_packages_list(self, manifest_key):
        """
        Retrieves a list of packages and their attributes

        :param manifest_key: Name of the package list in the manifest
        :return: A dictionary: package -> dictionary of attributes
        """
        parsed_list = {}
        packages_list = self.entries.get(manifest_key, '').strip()
        if packages_list:
            # Use shlex to handle quotes (commas inside quoted attribute
            # values must not split packages)
            parser = shlex.shlex(packages_list, posix=True)
            parser.whitespace = ','
            parser.whitespace_split = True
            for package_str in parser:
                # Extract import values: name;attr=value;attr:=value...
                package_info = package_str.strip().split(';')
                name = package_info[0]
                attributes = {}
                for value in package_info[1:]:
                    if value:
                        attr_name, attr_value = value.split('=', 1)
                        if attr_name[-1] == ':':
                            # Remove the ':' of ':=' in some attributes
                            attr_name = attr_name[:-1].strip()
                        attributes[attr_name] = attr_value.strip()
                parsed_list[name] = attributes
        return parsed_list

    def format(self):
        """
        Formats the entries to be Manifest format compliant
        """
        # First line: Manifest version
        lines = [': '.join(('Manifest-Version',
                            self.entries.get('Manifest-Version', '1.0')))]
        # Sort keys, except the version
        keys = [key.strip() for key in self.entries.keys()
                if key != 'Manifest-Version']
        keys.sort()
        # Wrap values
        for key in keys:
            line = ': '.join((key, self.entries[key].strip()))
            lines.extend(self._wrap_line(line))
        return '\n'.join(lines)

    def parse(self, manifest):
        """
        Parses the given Manifest file content to fill this Manifest
        representation

        :param manifest: The content of a Manifest file
        """
        # Clear current entries
        self.entries.clear()
        if PYTHON3 and not isinstance(manifest, str):
            # Python 3 doesn't like bytes
            manifest = str(manifest, 'UTF-8')
        # Read the manifest, line by line
        with contextlib.closing(StringIO(manifest)) as manifest_io:
            key = None
            for line in manifest_io.readlines():
                if key is not None and line[0] == ' ':
                    # Line continuation: a leading space appends the
                    # stripped remainder to the previous entry's value
                    self.entries[key] += line.strip()
                else:
                    # Strip the line
                    line = line.strip()
                    if not line:
                        # Empty line ends the current entry
                        key = None
                        continue
                    # We have a key
                    key, value = line.split(':', 1)
                    # Strip values
                    self.entries[key] = value.strip()

    @staticmethod
    def _wrap_line(line):
        """
        Wraps a line, Manifest style

        :param line: The line to wrap
        :return: The wrapped line
        """
        # 70 chars for the first line
        lines = [line[:70]]
        # space + 69 chars for the others (continuation lines start with
        # a single space, per the JAR manifest specification)
        chunk = line[70:]
        while chunk:
            lines.append(' ' + chunk[:69])
            chunk = chunk[69:]
        return lines
| {
"content_hash": "a7a54ff666cd9b0efc64d56b42855ee5",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 80,
"avg_line_length": 29.48,
"alnum_prop": 0.5150222911416942,
"repo_name": "isandlaTech/cohorte-devtools",
"id": "7af3b3e7bd3e5f571e0db547f1f6442b5ecd5384",
"size": "5209",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "org.cohorte.eclipse.runner.basic/files/test/cohorte/repositories/java/manifest.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "151318"
},
{
"name": "HTML",
"bytes": "113064"
},
{
"name": "Java",
"bytes": "172793"
},
{
"name": "JavaScript",
"bytes": "2165497"
},
{
"name": "Python",
"bytes": "13926564"
},
{
"name": "Shell",
"bytes": "1490"
}
],
"symlink_target": ""
} |
from datetime import datetime
from itertools import izip
from stdnet.test import TestCase
from stdnet.utils import populate
from examples.models import Calendar, DateValue
# Random test fixtures: NUM_DATES dates paired with random string payloads.
NUM_DATES = 100
dates = populate('date',NUM_DATES)
values = populate('string', NUM_DATES, min_len = 10, max_len = 120)
class TestOrderedSet(TestCase):
    """Checks that calendar events come back in chronological order."""

    def setUp(self):
        # Register the models and fill one calendar with the fixture data.
        self.orm.register(Calendar)
        self.orm.register(DateValue)
        calendar = Calendar(name = 'MyCalendar').save()
        for when, payload in izip(dates, values):
            calendar.add(when, payload)
        calendar.save()

    def testOrder(self):
        calendar = Calendar.objects.get(name = 'MyCalendar')
        self.assertEqual(calendar.data.size(), NUM_DATES)
        previous = None
        for event in calendar.data:
            if previous:
                # Events must be sorted by their timestamp.
                self.assertTrue(event.dt >= previous)
            previous = event.dt
| {
"content_hash": "d1d884cad3c097aa4cdd766c049cfe44",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 67,
"avg_line_length": 26.333333333333332,
"alnum_prop": 0.569620253164557,
"repo_name": "TheProjecter/python-stdnet",
"id": "a99696404c68d0ffcd57af96bc296e3de18ea706",
"size": "948",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stdnet/tests/ordered_set.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "479"
},
{
"name": "Python",
"bytes": "203665"
}
],
"symlink_target": ""
} |
import os
import weakref
from ConfigParser import SafeConfigParser
class SingletonMixin(object):
    """
    Adds a singleton behaviour to an existing class.

    Instances are cached per (class, init arguments) key.  weakrefs are
    used in order to keep a low memory footprint: once no strong
    reference to a cached instance remains, its cache entry vanishes.
    """
    # Maps (cls, args, sorted kwargs) -> live instance (weakly held).
    _instances = weakref.WeakValueDictionary()

    def __new__(cls, *args, **kwargs):
        # Sort the kwargs so that f(a=1, b=2) and f(b=2, a=1) share one
        # cache entry regardless of dict iteration order.
        key = (cls, args, tuple(sorted(kwargs.items())))
        if key in cls._instances:
            return cls._instances[key]
        # Fix: the original called super(type(cls), cls), which resolves
        # against the *metaclass* and only worked by accident; it also
        # forwarded *args/**kwargs, which object.__new__ rejects on
        # modern Pythons.  __init__ still receives the arguments.
        new_instance = super(SingletonMixin, cls).__new__(cls)
        cls._instances[key] = new_instance
        return new_instance
class Configuration(SingletonMixin):
    """
    Acts as a proxy to the ConfigParser module
    """

    def __init__(self, fp, parser_dep=SafeConfigParser):
        # Eagerly parse the given file-like object.
        self.conf = parser_dep()
        self.conf.readfp(fp)

    @classmethod
    def from_env(cls):
        """
        Returns an instance built from the path held by the
        CITEDBY_SETTINGS_FILE environment variable.
        """
        filepath = os.environ.get('CITEDBY_SETTINGS_FILE')
        if filepath is None:
            if not __debug__:
                raise ValueError('missing env variable CITEDBY_SETTINGS_FILE')
            # load the test configurations
            filepath = os.path.join(os.path.dirname(__file__),
                                    '..', 'config-test.ini')
        return cls.from_file(filepath)

    @classmethod
    def from_file(cls, filepath):
        """
        Returns an instance of Configuration

        ``filepath`` is a text string.
        """
        return cls(open(filepath, 'rb'))

    def __getattr__(self, attr):
        # Delegate any unknown attribute to the wrapped parser.
        return getattr(self.conf, attr)

    def items(self):
        """Settings as key-value pair.
        """
        return [(name, dict(self.conf.items(name)))
                for name in self.conf.sections()]
"content_hash": "61fe1ec7012c3f3af2815363f8d1eb07",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 78,
"avg_line_length": 28.666666666666668,
"alnum_prop": 0.5919661733615222,
"repo_name": "jamilatta/citedby",
"id": "750fdefd0563ccedb799e3feebca6dc0a2a0b0f2",
"size": "1892",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "citedby/utils.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [],
"symlink_target": ""
} |
def test_resources(e2e_plan_runner):
"Test that plan works and the numbers of resources is as expected."
modules, resources = e2e_plan_runner()
assert len(modules) > 0
assert len(resources) > 0
| {
"content_hash": "cbfc27e8d29d6d918ec2bd1d6433c5d9",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 69,
"avg_line_length": 40.4,
"alnum_prop": 0.7277227722772277,
"repo_name": "GoogleCloudPlatform/cloud-foundation-fabric",
"id": "74705e423efa5a9095ab21fafe9925f445ef97dc",
"size": "778",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/blueprints/factories/bigquery_factory/test_plan.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "6486"
},
{
"name": "Go",
"bytes": "16234"
},
{
"name": "HCL",
"bytes": "1945131"
},
{
"name": "JavaScript",
"bytes": "9392"
},
{
"name": "PowerShell",
"bytes": "16024"
},
{
"name": "Python",
"bytes": "479385"
},
{
"name": "Shell",
"bytes": "17367"
},
{
"name": "Smarty",
"bytes": "7294"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.