column           type            min    max
commit           stringlengths   40     40
old_file         stringlengths   4      118
new_file         stringlengths   4      118
old_contents     stringlengths   0      2.94k
new_contents     stringlengths   1      4.43k
subject          stringlengths   15     444
message          stringlengths   16     3.45k
lang             stringclasses   1 value
license          stringclasses   13 values
repos            stringlengths   5      43.2k
prompt           stringlengths   17     4.58k
response         stringlengths   1      4.43k
prompt_tagged    stringlengths   58     4.62k
response_tagged  stringlengths   1      4.43k
text             stringlengths   132    7.29k
text_tagged      stringlengths   173    7.33k
89249f0b6663745172a6842291ffed9805f07f83
test/test_Table_class.py
test/test_Table_class.py
#!/usr/bin/env python
# ScraperWiki Limited
# Ian Hopkinson, 2013-07-30
# -*- coding: utf-8 -*-
"""
Tests the Table class which contains metadata
"""

import sys
sys.path.append('code')

from pdftables import get_tables
from nose.tools import *


def test_it_includes_page_numbers():
    fh = open('fixtures/sample_data/AnimalExampleTables.pdf', 'rb')
    result = get_tables(fh)
    assert_equals(result[0].page_total, 4)
    assert_equals(result[0].page, 2)
    assert_equals(result[1].page_total, 4)
    assert_equals(result[1].page, 3)
    assert_equals(result[2].page_total, 4)
    assert_equals(result[2].page, 4)
Test for adding page numbers using Table class
Test for adding page numbers using Table class
Python
bsd-2-clause
davidastephens/pdftables,okfn/pdftables,okfn/pdftables,davidastephens/pdftables
2291964afbbb8f2d896bb069bb37f24afc2fb081
snippet_parser/cs.py
snippet_parser/cs.py
from base import *

class SnippetParser(SnippetParserBase):
    def strip_template(self, template, normalize, collapse):
        if self.is_citation_needed(template):
            # These templates often contain other information
            # (date/justification), so we drop it all here
            return CITATION_NEEDED_MARKER
        return ''
Drop dates and comments from Czech templates.
Drop dates and comments from Czech templates.
Python
mit
Stryn/citationhunt,Stryn/citationhunt,Stryn/citationhunt,jhsoby/citationhunt,jhsoby/citationhunt,jhsoby/citationhunt,jhsoby/citationhunt,Stryn/citationhunt
68d4b700fe1ba8e0e7324a32821e31cdc899a66f
scriptserver.py
scriptserver.py
#!/usr/bin/env python
'''ZoneScriptServer

A server that runs scripts for all the objects in a zone.
'''

import threading
from threading import Timer

import sched, time
s = sched.scheduler(time.time, time.sleep)

from settings import CLIENT_UPDATE_FREQ


class ZoneScriptRunner(object):
    '''This is a class that holds all sorts of methods for running scripts
    for a zone. It does not talk to the HTTP handler(s) directly, but instead
    uses the same database. It might take player movement updates directly
    in the future for speed, but this is unlikely.'''

    def __init__(self):
        self.scriptnames = []
        self.scripts = {}
        # Query DB for a list of all objects' script names,
        #   ordered according to proximity to players
        # Store list of script names in self
        # For each script name in the list:
        #   Import those by name via __import__
        #   For each class object in each one's dir()
        #       call class()
        #       store object instance in a dict like {scriptname: classinstance}

    def tick(self):
        '''Iterate through all known scripts and call their tick method.'''
        # Tick all the things
        print "tick"
        for script in self.scriptnames:
            # TODO: Pass some locals or somesuch so that they can query the db
            self.scripts[script].tick()

    def start(self):
        print "Running scriptserver"
        dilation = 1.0
        maxframelen = (CLIENT_UPDATE_FREQ/200.0)
        print "Max frame length is %f seconds." % maxframelen
        lasttick = time.time()
        while True:
            maxframelength = maxframelen*dilation
            # If there are too many scripts running that are taking up too many
            # resources, change the time dilation so that they have more time to
            # run before the next loop.
            # This may not be needed since ScriptServer is a separate process.
            utilization = (time.time()-lasttick)/maxframelength
            if utilization > 1:
                dilation = dilation*1.05
                print "Changing dilation to %f" % dilation
            elif utilization < 0.80:
                dilation = dilation*0.95
                print "Changing dilation to %f" % dilation

            # Max frame length minus the Time taken ticking is how much to sleep
            # 0.10s - (0.03s) = sleep for 0.7s.
            sleeptime = max(maxframelength-(time.time()-lasttick), 0)
            print "Sleeping for %f seconds. (%3.4f%% utilization.)" % (sleeptime, utilization*100)
            time.sleep(sleeptime)

            # TODO: Trigger any events for scripts. May need some more DB queries.
            # TODO: Trigger object proximity events, scripts can filter down to players

            # Finally, tick all the scripts.
            self.tick()
            lasttick = time.time()


if __name__ == "__main__":
    zsr = ZoneScriptRunner()
    zsr.start()
Add a rudimentary script server. Does nothing at the moment.
Add a rudimentary script server. Does nothing at the moment.
Python
agpl-3.0
cnelsonsic/SimpleMMO,cnelsonsic/SimpleMMO,cnelsonsic/SimpleMMO
dfc51c72149f437d8e3f0064d2b8da65adb0ee49
migrations/versions/201608231135_5596683819c9_enable_abstract_feature_for_conferences.py
migrations/versions/201608231135_5596683819c9_enable_abstract_feature_for_conferences.py
"""Enable abstract feature for conferences

Revision ID: 5596683819c9
Revises: ccd9d0858ff
Create Date: 2016-08-23 11:35:26.018462
"""

import json

import sqlalchemy as sa
from alembic import context, op


# revision identifiers, used by Alembic.
revision = '5596683819c9'
down_revision = 'ccd9d0858ff'

_update_setting_query = 'UPDATE events.settings SET value = :value WHERE id = :id'


def upgrade():
    if context.is_offline_mode():
        raise Exception('This upgrade is only possible in online mode')
    conn = op.get_bind()
    query = '''
        SELECT s.id, s.value
        FROM events.settings s
        JOIN events.events e ON (e.id = s.event_id)
        WHERE module = 'features' AND name = 'enabled' AND
              value::jsonb != 'null'::jsonb AND
              NOT (value::jsonb ? 'abstracts') AND e.type = 3;
    '''
    res = conn.execute(query)
    for id_, value in res:
        value = sorted(value + ['abstracts'])
        conn.execute(sa.text(_update_setting_query).bindparams(id=id_, value=json.dumps(value)))


def downgrade():
    if context.is_offline_mode():
        raise Exception('This downgrade is only possible in online mode')
    conn = op.get_bind()
    query = '''
        SELECT id, value
        FROM events.settings
        WHERE module = 'features' AND name = 'enabled' AND
              value::jsonb != 'null'::jsonb AND value::jsonb ? 'abstracts';
    '''
    res = conn.execute(query)
    for id_, value in res:
        value = sorted(set(value) - {'abstracts'})
        conn.execute(sa.text(_update_setting_query).bindparams(id=id_, value=json.dumps(value)))
Add alembic revision to enable abstract feature
Add alembic revision to enable abstract feature
Python
mit
DirkHoffmann/indico,pferreir/indico,OmeGak/indico,mvidalgarcia/indico,mic4ael/indico,DirkHoffmann/indico,DirkHoffmann/indico,pferreir/indico,mvidalgarcia/indico,indico/indico,ThiefMaster/indico,mvidalgarcia/indico,pferreir/indico,OmeGak/indico,pferreir/indico,DirkHoffmann/indico,ThiefMaster/indico,OmeGak/indico,mic4ael/indico,indico/indico,mvidalgarcia/indico,indico/indico,OmeGak/indico,mic4ael/indico,ThiefMaster/indico,mic4ael/indico,ThiefMaster/indico,indico/indico
0bad9bbe99296044a631780d76e390f009cb977a
setup.py
setup.py
#!/usr/bin/env python

"""Setup script for the kconfiglib module."""

from distutils.core import setup

setup(  # Distribution meta-data
    name = "kconfiglib",
    version = "0.0.1",
    description = "A flexible Python Kconfig parser",
    author = "Ulfalizer Magnusson",
    author_email = "kconfiglib@gmail.com",
    url = "https://github.com/ulfalizer/Kconfiglib",
    # Description of the modules and packages in the distribution
    py_modules = ['kconfiglib'],
)
Add dist-utils support for gentoo ebuild
Add dist-utils support for gentoo ebuild
Python
isc
ulfalizer/Kconfiglib,ulfalizer/Kconfiglib
74e6406b8a7e04b5092d5673e9911b89b0861bc1
sk_lr.py
sk_lr.py
import numpy as np
import pylab as pl
import feature_extractor as fe
from sklearn import linear_model

(features, targets) = fe.extract_train()
(features_test, targets_test) = fe.extract_test()

classifier = linear_model.LogisticRegression(C = 1e5, tol = 0.0001)
classifier.fit(features, targets)

target_test_hat = classifier.predict(features_test)
accuracy = (1.0 * (target_test_hat == targets_test)).sum(0) / targets_test.shape

print accuracy
Implement first version of LR classifier
Implement first version of LR classifier
Python
mit
trein/quora-classifier
65af5543d02ccbfb04a3137a03ed0ff3874bc0ca
app/migrations/versions/4673b3fa0d68_.py
app/migrations/versions/4673b3fa0d68_.py
"""Add geom field to zones to hold regions.

Revision ID: 4673b3fa0d68
Revises: 4ef20b76cab1
Create Date: 2015-02-11 21:12:26.578588

"""

# revision identifiers, used by Alembic.
revision = '4673b3fa0d68'
down_revision = '4ef20b76cab1'

from alembic import op
import sqlalchemy as sa
import geoalchemy2 as geo


def upgrade():
    op.add_column('zones',
                  sa.Column('geog',
                            geo.Geography(geometry_type='MULTIPOLYGON',
                                          srid=4326)))


def downgrade():
    op.drop_column('zones', 'geog')
Add geography column to zones.
Add geography column to zones.
Python
mit
openchattanooga/cpd-zones-old,openchattanooga/cpd-zones-old
13619ef1404ca88ba08e74e0aa2cfa7d418c4c6b
tests/rules_tests/grammarManipulation_tests/InactiveRulesTest.py
tests/rules_tests/grammarManipulation_tests/InactiveRulesTest.py
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy

"""

from unittest import main, TestCase
from grammpy import Grammar, Nonterminal, Rule as _R
from ..grammar import *


class InactiveRulesTest(TestCase):
    def __init__(self, *args):
        super().__init__(*args)
        self.g = Grammar()

    def setUp(self):
        g = Grammar()
        g.add_term([0, 1, 2, 'a', 'b', 'c'])
        g.add_nonterm([NFirst, NSecond, NThird, NFourth])
        self.g = g


if __name__ == '__main__':
    main()
Add file for inactive rules tests
Add file for inactive rules tests
Python
mit
PatrikValkovic/grammpy
4e1985a0f69da6b8e2261795d2d710d86382596a
create_ands_rif_cs_xml.py
create_ands_rif_cs_xml.py
"""
Create an ANDS RIF-CS XML file.

Links
-----

- http://ands.org.au/guides/cpguide/cpgrifcs.html
- http://services.ands.org.au/documentation/rifcs/guidelines/rif-cs.html
- http://www.ands.org.au/resource/rif-cs.html
"""

import logging
import os

from settings import (
    ANDS_XML_FILE_NAME, ANDS_XML_FOLDER_PATH, ANDS_XML_START, ANDS_XML_STOP
)

logger = logging.getLogger(__name__)


def main():
    with open(ANDS_XML_FILE_NAME, 'w') as w:
        w.write(ANDS_XML_START)
        for file_path in os.listdir(ANDS_XML_FOLDER_PATH):
            with open(file_path) as r:
                w.write(r.read())
        w.write(ANDS_XML_STOP)


if '__main__' == __name__:
    logging.basicConfig(level=logging.DEBUG)
    main()
Add ANDS RIF-CS conversion script
Add ANDS RIF-CS conversion script
Python
mit
AustralianAntarcticDataCentre/metadata_xml_convert,AustralianAntarcticDataCentre/metadata_xml_convert
fa6b42061c4aaa9354922c6942511892f3ff6386
extra/update_comments_from_spec.py
extra/update_comments_from_spec.py
import sys, os
import re

default_source_file = os.path.join(os.path.dirname(__file__), '../amqp/channel.py')


def update_comments(comments_file, impl_file, result_file):
    text_file = open(impl_file, 'r')
    source = text_file.read()
    comments = get_comments(comments_file)
    for def_name, comment in comments.items():
        source = repalce_comment_per_def(source, result_file, def_name, comment)
    new_file = open(result_file, 'w+')
    new_file.write(source)


def get_comments(filename):
    text_file = open(filename, 'r')
    whole_source = text_file.read()
    comments = {}
    regex = '(?P<methodsig>def\s+(?P<mname>[a-zA-Z0-9_]+)\(.*?\):\n+\s+""")(?P<comment>.*?)(?=""")'
    all_matches = re.finditer(regex, whole_source, re.MULTILINE | re.DOTALL)
    for match in all_matches:
        comments[match.group('mname')] = match.group('comment')
        #print 'method: %s \ncomment: %s' % (match.group('mname'), match.group('comment'))
    return comments


def repalce_comment_per_def(source, result_file, def_name, new_comment):
    regex = '(?P<methodsig>def\s+' + def_name + '\(.*?\):\n+\s+""".*?\n).*?(?=""")'
    #print 'method and comment:' + def_name + new_comment
    result = re.sub(regex, '\g<methodsig>' + new_comment, source, 0, re.MULTILINE | re.DOTALL)
    return result


def main(argv=None):
    if argv is None:
        argv = sys.argv
    if len(argv) < 3:
        print 'Usage: %s <comments-file> <output-file> [<source-file>]' % argv[0]
        return 1
    impl_file = default_source_file
    if len(argv) >= 4:
        impl_file = argv[3]
    update_comments(argv[1], impl_file, argv[2])


if __name__ == '__main__':
    sys.exit(main())
Add a script for updating comments(in methods). The script is used to sync the channnel.py comments with the AMQP spec.
Add a script for updating comments(in methods). The script is used to sync the channnel.py comments with the AMQP spec.
Python
lgpl-2.1
dims/py-amqp,yetone/py-amqp,yetone/py-amqp,jonahbull/py-amqp,dallasmarlow/py-amqp,smurfix/aio-py-amqp,dims/py-amqp,dallasmarlow/py-amqp,smurfix/aio-py-amqp,jonahbull/py-amqp
e81e36304412e5cd86d91e3f79a17b845ec7a90d
django/neverlate/tasks.py
django/neverlate/tasks.py
from __future__ import absolute_import

from celery import shared_task

from calparser.tasks import parse_ical_from_url
from calparser.models import CalendarEntry
from django.contrib.auth.models import User


@shared_task
def reload_all_user_calendars():
    """Reload calendars for all users using Celery"""
    for user in User.objects.all():
        reload_user_calendars.delay(user)


@shared_task
def reload_user_calendars(user):
    """Delete all CalendarEntries for user and reload each calendar"""
    CalendarEntry.objects.all().filter(user=user).delete()
    for url in [str(icalurl.url) for icalurl in user.userprofile.icalurl_set.all()]:
        parse_ical_from_url(url, user)
Integrate calparser app with neverlate
Integrate calparser app with neverlate
Python
mit
myrjola/neverlate,myrjola/neverlate,myrjola/neverlate,myrjola/neverlate
8da10de91645206c6e47afbc7a955f5d69efa552
tests/font/BULLET.py
tests/font/BULLET.py
#!/usr/bin/env python

'''Test that font.Text horizontal alignment works.

Three labels will be rendered aligned left, center and right.
'''

__docformat__ = 'restructuredtext'
__version__ = '$Id: $'

import unittest

from pyglet import font
import base_text


class TEST_HALIGN(base_text.TextTestBase):
    font_name = ''
    font_size = 60
    text = u'\u2022'*5


if __name__ == '__main__':
    unittest.main()
Test case for incorrect rendering of bullet noticed in a wydget test case.
Test case for incorrect rendering of bullet noticed in a wydget test case. git-svn-id: d4fdfcd4de20a449196f78acc655f735742cd30d@1197 14d46d22-621c-0410-bb3d-6f67920f7d95
Python
bsd-3-clause
regular/pyglet-avbin-optimizations,regular/pyglet-avbin-optimizations,regular/pyglet-avbin-optimizations,regular/pyglet-avbin-optimizations
dbe689882ad80e68750812950082d741e097b499
ipmi_constants.py
ipmi_constants.py
payload_types = {
    'ipmi': 0x0,
    'sol' : 0x1,
    'rmcpplusopenreq': 0x10,
    'rmcpplusopenresponse': 0x11,
    'rakp1': 0x12,
    'rakp2': 0x13,
    'rakp3': 0x14,
    'rakp4': 0x15,
}

rmcp_codes = {
    1: 'Insufficient resources to create new session (wait for existing sessions to timeout)',
    2: 'Invalid Session ID',
    3: 'Invalid payload type',
    4: 'Invalid authentication algorithm',
    5: 'Invalid integrity algorithm',
    6: 'No matching integrity payload',
    7: 'No matching integrity payload',
    8: 'Inactive Session ID',
    9: 'Invalid role',
    0xa: 'Unauthorized role or privilege level requested',
    0xb: 'Insufficient resources tocreate a session at the requested role',
    0xc: 'Invalid username length',
    0xd: 'Unauthorized name',
    0xe: 'Unauthorized GUID',
    0xf: 'Invalid integrity check value',
    0x10: 'Invalid confidentiality algorithm',
    0x11: 'No Cipher suite match with proposed security algorithms',
    0x12: 'Illegal or unrecognized parameter',
}
Break out constantns to a diff file
Break out constantns to a diff file
Python
apache-2.0
openstack/pyghmi,openstack/pyghmi,benoit-canet/pyghmi,stackforge/pyghmi
Break out constantns to a diff file
payload_types = {
    'ipmi': 0x0,
    'sol' : 0x1,
    'rmcpplusopenreq': 0x10,
    'rmcpplusopenresponse': 0x11,
    'rakp1': 0x12,
    'rakp2': 0x13,
    'rakp3': 0x14,
    'rakp4': 0x15,
}

rmcp_codes = {
    1: 'Insufficient resources to create new session (wait for existing sessions to timeout)',
    2: 'Invalid Session ID',
    3: 'Invalid payload type',
    4: 'Invalid authentication algorithm',
    5: 'Invalid integrity algorithm',
    6: 'No matching integrity payload',
    7: 'No matching integrity payload',
    8: 'Inactive Session ID',
    9: 'Invalid role',
    0xa: 'Unauthorized role or privilege level requested',
    0xb: 'Insufficient resources tocreate a session at the requested role',
    0xc: 'Invalid username length',
    0xd: 'Unauthorized name',
    0xe: 'Unauthorized GUID',
    0xf: 'Invalid integrity check value',
    0x10: 'Invalid confidentiality algorithm',
    0x11: 'No Cipher suite match with proposed security algorithms',
    0x12: 'Illegal or unrecognized parameter',
}
<commit_before><commit_msg>Break out constantns to a diff file<commit_after>
payload_types = {
    'ipmi': 0x0,
    'sol' : 0x1,
    'rmcpplusopenreq': 0x10,
    'rmcpplusopenresponse': 0x11,
    'rakp1': 0x12,
    'rakp2': 0x13,
    'rakp3': 0x14,
    'rakp4': 0x15,
}

rmcp_codes = {
    1: 'Insufficient resources to create new session (wait for existing sessions to timeout)',
    2: 'Invalid Session ID',
    3: 'Invalid payload type',
    4: 'Invalid authentication algorithm',
    5: 'Invalid integrity algorithm',
    6: 'No matching integrity payload',
    7: 'No matching integrity payload',
    8: 'Inactive Session ID',
    9: 'Invalid role',
    0xa: 'Unauthorized role or privilege level requested',
    0xb: 'Insufficient resources tocreate a session at the requested role',
    0xc: 'Invalid username length',
    0xd: 'Unauthorized name',
    0xe: 'Unauthorized GUID',
    0xf: 'Invalid integrity check value',
    0x10: 'Invalid confidentiality algorithm',
    0x11: 'No Cipher suite match with proposed security algorithms',
    0x12: 'Illegal or unrecognized parameter',
}
Break out constantns to a diff file
payload_types = {
    'ipmi': 0x0,
    'sol' : 0x1,
    'rmcpplusopenreq': 0x10,
    'rmcpplusopenresponse': 0x11,
    'rakp1': 0x12,
    'rakp2': 0x13,
    'rakp3': 0x14,
    'rakp4': 0x15,
}

rmcp_codes = {
    1: 'Insufficient resources to create new session (wait for existing sessions to timeout)',
    2: 'Invalid Session ID',
    3: 'Invalid payload type',
    4: 'Invalid authentication algorithm',
    5: 'Invalid integrity algorithm',
    6: 'No matching integrity payload',
    7: 'No matching integrity payload',
    8: 'Inactive Session ID',
    9: 'Invalid role',
    0xa: 'Unauthorized role or privilege level requested',
    0xb: 'Insufficient resources tocreate a session at the requested role',
    0xc: 'Invalid username length',
    0xd: 'Unauthorized name',
    0xe: 'Unauthorized GUID',
    0xf: 'Invalid integrity check value',
    0x10: 'Invalid confidentiality algorithm',
    0x11: 'No Cipher suite match with proposed security algorithms',
    0x12: 'Illegal or unrecognized parameter',
}
<commit_before><commit_msg>Break out constantns to a diff file<commit_after>
payload_types = {
    'ipmi': 0x0,
    'sol' : 0x1,
    'rmcpplusopenreq': 0x10,
    'rmcpplusopenresponse': 0x11,
    'rakp1': 0x12,
    'rakp2': 0x13,
    'rakp3': 0x14,
    'rakp4': 0x15,
}

rmcp_codes = {
    1: 'Insufficient resources to create new session (wait for existing sessions to timeout)',
    2: 'Invalid Session ID',
    3: 'Invalid payload type',
    4: 'Invalid authentication algorithm',
    5: 'Invalid integrity algorithm',
    6: 'No matching integrity payload',
    7: 'No matching integrity payload',
    8: 'Inactive Session ID',
    9: 'Invalid role',
    0xa: 'Unauthorized role or privilege level requested',
    0xb: 'Insufficient resources tocreate a session at the requested role',
    0xc: 'Invalid username length',
    0xd: 'Unauthorized name',
    0xe: 'Unauthorized GUID',
    0xf: 'Invalid integrity check value',
    0x10: 'Invalid confidentiality algorithm',
    0x11: 'No Cipher suite match with proposed security algorithms',
    0x12: 'Illegal or unrecognized parameter',
}
e3ba99b74661f1b22340394b7e2b058371a0ed0e
rst2pdf/tests/input/test_180.py
rst2pdf/tests/input/test_180.py
# -*- coding: utf-8 -*-
from reportlab.platypus import SimpleDocTemplate
from reportlab.platypus.paragraph import Paragraph
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.colors import Color
from reportlab.platypus.flowables import _listWrapOn, _FUZZ

from wordaxe.rl.NewParagraph import Paragraph
from wordaxe.rl.styles import ParagraphStyle, getSampleStyleSheet


def go():
    styles = getSampleStyleSheet()
    style=styles['Normal']
    p1 = Paragraph('This is a paragraph', style )
    print p1.wrap(500,701)
    print p1._cache['avail']
    print len(p1.split(500,701))
    print len(p1.split(500,700))

go()
Test case for wordaxe bug
Test case for wordaxe bug
Python
mit
rafaelmartins/rst2pdf,rafaelmartins/rst2pdf
Test case for wordaxe bug
# -*- coding: utf-8 -*-
from reportlab.platypus import SimpleDocTemplate
from reportlab.platypus.paragraph import Paragraph
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.colors import Color
from reportlab.platypus.flowables import _listWrapOn, _FUZZ

from wordaxe.rl.NewParagraph import Paragraph
from wordaxe.rl.styles import ParagraphStyle, getSampleStyleSheet


def go():
    styles = getSampleStyleSheet()
    style=styles['Normal']
    p1 = Paragraph('This is a paragraph', style )
    print p1.wrap(500,701)
    print p1._cache['avail']
    print len(p1.split(500,701))
    print len(p1.split(500,700))

go()
<commit_before><commit_msg>Test case for wordaxe bug<commit_after>
# -*- coding: utf-8 -*-
from reportlab.platypus import SimpleDocTemplate
from reportlab.platypus.paragraph import Paragraph
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.colors import Color
from reportlab.platypus.flowables import _listWrapOn, _FUZZ

from wordaxe.rl.NewParagraph import Paragraph
from wordaxe.rl.styles import ParagraphStyle, getSampleStyleSheet


def go():
    styles = getSampleStyleSheet()
    style=styles['Normal']
    p1 = Paragraph('This is a paragraph', style )
    print p1.wrap(500,701)
    print p1._cache['avail']
    print len(p1.split(500,701))
    print len(p1.split(500,700))

go()
Test case for wordaxe bug# -*- coding: utf-8 -*-
from reportlab.platypus import SimpleDocTemplate
from reportlab.platypus.paragraph import Paragraph
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.colors import Color
from reportlab.platypus.flowables import _listWrapOn, _FUZZ

from wordaxe.rl.NewParagraph import Paragraph
from wordaxe.rl.styles import ParagraphStyle, getSampleStyleSheet


def go():
    styles = getSampleStyleSheet()
    style=styles['Normal']
    p1 = Paragraph('This is a paragraph', style )
    print p1.wrap(500,701)
    print p1._cache['avail']
    print len(p1.split(500,701))
    print len(p1.split(500,700))

go()
<commit_before><commit_msg>Test case for wordaxe bug<commit_after># -*- coding: utf-8 -*-
from reportlab.platypus import SimpleDocTemplate
from reportlab.platypus.paragraph import Paragraph
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.colors import Color
from reportlab.platypus.flowables import _listWrapOn, _FUZZ

from wordaxe.rl.NewParagraph import Paragraph
from wordaxe.rl.styles import ParagraphStyle, getSampleStyleSheet


def go():
    styles = getSampleStyleSheet()
    style=styles['Normal']
    p1 = Paragraph('This is a paragraph', style )
    print p1.wrap(500,701)
    print p1._cache['avail']
    print len(p1.split(500,701))
    print len(p1.split(500,700))

go()
0b5d7edd0be2301ea6442c4537b09b9bd38957b2
string/test2.py
string/test2.py
#!/usr/local/bin/python
width=input('Please input width: ')
price_width=10
item_width=width-price_width
header='%-*s%*s'
format='%-*s%*.2f'
print width*'='
print header %(item_width,'Item',price_width,'Price')
print width*'-'
print format%(item_width,'Apple',price_width,0.442)
print format%(item_width,'Pears',price_width,0.367)
Add a string format test.
Add a string format test.
Python
apache-2.0
Vayne-Lover/Python
Add a string format test.
#!/usr/local/bin/python
width=input('Please input width: ')
price_width=10
item_width=width-price_width
header='%-*s%*s'
format='%-*s%*.2f'
print width*'='
print header %(item_width,'Item',price_width,'Price')
print width*'-'
print format%(item_width,'Apple',price_width,0.442)
print format%(item_width,'Pears',price_width,0.367)
<commit_before><commit_msg>Add a string format test.<commit_after>
#!/usr/local/bin/python
width=input('Please input width: ')
price_width=10
item_width=width-price_width
header='%-*s%*s'
format='%-*s%*.2f'
print width*'='
print header %(item_width,'Item',price_width,'Price')
print width*'-'
print format%(item_width,'Apple',price_width,0.442)
print format%(item_width,'Pears',price_width,0.367)
Add a string format test.#!/usr/local/bin/python
width=input('Please input width: ')
price_width=10
item_width=width-price_width
header='%-*s%*s'
format='%-*s%*.2f'
print width*'='
print header %(item_width,'Item',price_width,'Price')
print width*'-'
print format%(item_width,'Apple',price_width,0.442)
print format%(item_width,'Pears',price_width,0.367)
<commit_before><commit_msg>Add a string format test.<commit_after>#!/usr/local/bin/python
width=input('Please input width: ')
price_width=10
item_width=width-price_width
header='%-*s%*s'
format='%-*s%*.2f'
print width*'='
print header %(item_width,'Item',price_width,'Price')
print width*'-'
print format%(item_width,'Apple',price_width,0.442)
print format%(item_width,'Pears',price_width,0.367)
824c89ff8e6276271d6df57f5df80fbebc097ddc
backend/api_calls_test.py
backend/api_calls_test.py
#!/bin/python2.7

import mock
import unittest

import api_calls


class FakeFirebase(object):
  def get(self, path, item):
    return None

  def put(self, path, item, data):
    return None

  def patch(self, path, data):
    return None


class TestApiCalls(unittest.TestCase):
  def setUp(self):
    self.db = FakeFirebase()
    self.mdb = mock.create_autospec(FakeFirebase)

  def testValidateInputs(self):
    request = {}
    api_calls.ValidateInputs(request, self.db, [], [])
    # If we have a fooId, the value must start foo-
    request = {'gunId': 'gunFoo'}
    self.assertRaises(api_calls.InvalidInputError,
                      api_calls.ValidateInputs, request, self.db, [], [])
    request = {'gunId': 'gun-Foo'}
    api_calls.ValidateInputs(request, self.db, [], [])

  def testRegister(self):
    """Register does a get/put and fails when the user already exists."""
    self.mdb.get.return_value = None
    api_calls.Register({'userToken': 'foo'}, self.mdb)
    self.mdb.get.assert_called_once_with('/users/foo', 'a')
    self.mdb.put.assert_called_once_with('/users', 'foo', {'a': True})
    self.mdb.get.return_value = True
    self.assertRaises(api_calls.InvalidInputError,
                      api_calls.Register, {'userToken': 'foo'}, self.mdb)

  def testCreateGame(self):
    pass

  def testUpdateGame(self):
    pass

  def testCreateGroup(self):
    pass

  def testUpdateGroup(self):
    pass

  def testCreatePlayer(self):
    pass

  def testAddGun(self):
    pass

  def testAssignGun(self):
    pass

  def testUpdatePlayer(self):
    pass

  def testAddMission(self):
    pass

  def testUpdateMission(self):
    pass

  def testCreateChatRoom(self):
    pass

  def testAddPlayerToChat(self):
    pass

  def testSendChatMessage(self):
    pass

  def testAddRewardCategory(self):
    pass

  def testUpdateRewardCategory(self):
    pass

  def testAddReward(self):
    pass

  def testClaimReward(self):
    pass


if __name__ == '__main__':
  unittest.main()

# vim:ts=2:sw=2:expandtab
Add mostly empty unit tests
Add mostly empty unit tests
Python
apache-2.0
google/playhvz,google/playhvz,google/playhvz,google/playhvz,google/playhvz,google/playhvz,google/playhvz,google/playhvz
Add mostly empty unit tests
#!/bin/python2.7

import mock
import unittest

import api_calls


class FakeFirebase(object):
  def get(self, path, item):
    return None

  def put(self, path, item, data):
    return None

  def patch(self, path, data):
    return None


class TestApiCalls(unittest.TestCase):
  def setUp(self):
    self.db = FakeFirebase()
    self.mdb = mock.create_autospec(FakeFirebase)

  def testValidateInputs(self):
    request = {}
    api_calls.ValidateInputs(request, self.db, [], [])
    # If we have a fooId, the value must start foo-
    request = {'gunId': 'gunFoo'}
    self.assertRaises(api_calls.InvalidInputError,
                      api_calls.ValidateInputs, request, self.db, [], [])
    request = {'gunId': 'gun-Foo'}
    api_calls.ValidateInputs(request, self.db, [], [])

  def testRegister(self):
    """Register does a get/put and fails when the user already exists."""
    self.mdb.get.return_value = None
    api_calls.Register({'userToken': 'foo'}, self.mdb)
    self.mdb.get.assert_called_once_with('/users/foo', 'a')
    self.mdb.put.assert_called_once_with('/users', 'foo', {'a': True})
    self.mdb.get.return_value = True
    self.assertRaises(api_calls.InvalidInputError,
                      api_calls.Register, {'userToken': 'foo'}, self.mdb)

  def testCreateGame(self):
    pass

  def testUpdateGame(self):
    pass

  def testCreateGroup(self):
    pass

  def testUpdateGroup(self):
    pass

  def testCreatePlayer(self):
    pass

  def testAddGun(self):
    pass

  def testAssignGun(self):
    pass

  def testUpdatePlayer(self):
    pass

  def testAddMission(self):
    pass

  def testUpdateMission(self):
    pass

  def testCreateChatRoom(self):
    pass

  def testAddPlayerToChat(self):
    pass

  def testSendChatMessage(self):
    pass

  def testAddRewardCategory(self):
    pass

  def testUpdateRewardCategory(self):
    pass

  def testAddReward(self):
    pass

  def testClaimReward(self):
    pass


if __name__ == '__main__':
  unittest.main()

# vim:ts=2:sw=2:expandtab
<commit_before><commit_msg>Add mostly empty unit tests<commit_after>
#!/bin/python2.7

import mock
import unittest

import api_calls


class FakeFirebase(object):
  def get(self, path, item):
    return None

  def put(self, path, item, data):
    return None

  def patch(self, path, data):
    return None


class TestApiCalls(unittest.TestCase):
  def setUp(self):
    self.db = FakeFirebase()
    self.mdb = mock.create_autospec(FakeFirebase)

  def testValidateInputs(self):
    request = {}
    api_calls.ValidateInputs(request, self.db, [], [])
    # If we have a fooId, the value must start foo-
    request = {'gunId': 'gunFoo'}
    self.assertRaises(api_calls.InvalidInputError,
                      api_calls.ValidateInputs, request, self.db, [], [])
    request = {'gunId': 'gun-Foo'}
    api_calls.ValidateInputs(request, self.db, [], [])

  def testRegister(self):
    """Register does a get/put and fails when the user already exists."""
    self.mdb.get.return_value = None
    api_calls.Register({'userToken': 'foo'}, self.mdb)
    self.mdb.get.assert_called_once_with('/users/foo', 'a')
    self.mdb.put.assert_called_once_with('/users', 'foo', {'a': True})
    self.mdb.get.return_value = True
    self.assertRaises(api_calls.InvalidInputError,
                      api_calls.Register, {'userToken': 'foo'}, self.mdb)

  def testCreateGame(self):
    pass

  def testUpdateGame(self):
    pass

  def testCreateGroup(self):
    pass

  def testUpdateGroup(self):
    pass

  def testCreatePlayer(self):
    pass

  def testAddGun(self):
    pass

  def testAssignGun(self):
    pass

  def testUpdatePlayer(self):
    pass

  def testAddMission(self):
    pass

  def testUpdateMission(self):
    pass

  def testCreateChatRoom(self):
    pass

  def testAddPlayerToChat(self):
    pass

  def testSendChatMessage(self):
    pass

  def testAddRewardCategory(self):
    pass

  def testUpdateRewardCategory(self):
    pass

  def testAddReward(self):
    pass

  def testClaimReward(self):
    pass


if __name__ == '__main__':
  unittest.main()

# vim:ts=2:sw=2:expandtab
Add mostly empty unit tests#!/bin/python2.7

import mock
import unittest

import api_calls


class FakeFirebase(object):
  def get(self, path, item):
    return None

  def put(self, path, item, data):
    return None

  def patch(self, path, data):
    return None


class TestApiCalls(unittest.TestCase):
  def setUp(self):
    self.db = FakeFirebase()
    self.mdb = mock.create_autospec(FakeFirebase)

  def testValidateInputs(self):
    request = {}
    api_calls.ValidateInputs(request, self.db, [], [])
    # If we have a fooId, the value must start foo-
    request = {'gunId': 'gunFoo'}
    self.assertRaises(api_calls.InvalidInputError,
                      api_calls.ValidateInputs, request, self.db, [], [])
    request = {'gunId': 'gun-Foo'}
    api_calls.ValidateInputs(request, self.db, [], [])

  def testRegister(self):
    """Register does a get/put and fails when the user already exists."""
    self.mdb.get.return_value = None
    api_calls.Register({'userToken': 'foo'}, self.mdb)
    self.mdb.get.assert_called_once_with('/users/foo', 'a')
    self.mdb.put.assert_called_once_with('/users', 'foo', {'a': True})
    self.mdb.get.return_value = True
    self.assertRaises(api_calls.InvalidInputError,
                      api_calls.Register, {'userToken': 'foo'}, self.mdb)

  def testCreateGame(self):
    pass

  def testUpdateGame(self):
    pass

  def testCreateGroup(self):
    pass

  def testUpdateGroup(self):
    pass

  def testCreatePlayer(self):
    pass

  def testAddGun(self):
    pass

  def testAssignGun(self):
    pass

  def testUpdatePlayer(self):
    pass

  def testAddMission(self):
    pass

  def testUpdateMission(self):
    pass

  def testCreateChatRoom(self):
    pass

  def testAddPlayerToChat(self):
    pass

  def testSendChatMessage(self):
    pass

  def testAddRewardCategory(self):
    pass

  def testUpdateRewardCategory(self):
    pass

  def testAddReward(self):
    pass

  def testClaimReward(self):
    pass


if __name__ == '__main__':
  unittest.main()

# vim:ts=2:sw=2:expandtab
<commit_before><commit_msg>Add mostly empty unit tests<commit_after>#!/bin/python2.7

import mock
import unittest

import api_calls


class FakeFirebase(object):
  def get(self, path, item):
    return None

  def put(self, path, item, data):
    return None

  def patch(self, path, data):
    return None


class TestApiCalls(unittest.TestCase):
  def setUp(self):
    self.db = FakeFirebase()
    self.mdb = mock.create_autospec(FakeFirebase)

  def testValidateInputs(self):
    request = {}
    api_calls.ValidateInputs(request, self.db, [], [])
    # If we have a fooId, the value must start foo-
    request = {'gunId': 'gunFoo'}
    self.assertRaises(api_calls.InvalidInputError,
                      api_calls.ValidateInputs, request, self.db, [], [])
    request = {'gunId': 'gun-Foo'}
    api_calls.ValidateInputs(request, self.db, [], [])

  def testRegister(self):
    """Register does a get/put and fails when the user already exists."""
    self.mdb.get.return_value = None
    api_calls.Register({'userToken': 'foo'}, self.mdb)
    self.mdb.get.assert_called_once_with('/users/foo', 'a')
    self.mdb.put.assert_called_once_with('/users', 'foo', {'a': True})
    self.mdb.get.return_value = True
    self.assertRaises(api_calls.InvalidInputError,
                      api_calls.Register, {'userToken': 'foo'}, self.mdb)

  def testCreateGame(self):
    pass

  def testUpdateGame(self):
    pass

  def testCreateGroup(self):
    pass

  def testUpdateGroup(self):
    pass

  def testCreatePlayer(self):
    pass

  def testAddGun(self):
    pass

  def testAssignGun(self):
    pass

  def testUpdatePlayer(self):
    pass

  def testAddMission(self):
    pass

  def testUpdateMission(self):
    pass

  def testCreateChatRoom(self):
    pass

  def testAddPlayerToChat(self):
    pass

  def testSendChatMessage(self):
    pass

  def testAddRewardCategory(self):
    pass

  def testUpdateRewardCategory(self):
    pass

  def testAddReward(self):
    pass

  def testClaimReward(self):
    pass


if __name__ == '__main__':
  unittest.main()

# vim:ts=2:sw=2:expandtab
606018600e1a6162ff343af10b706d62dba705a2
webrtc/api/android/PRESUBMIT.py
webrtc/api/android/PRESUBMIT.py
# Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.


def CheckChangeOnUpload(input_api, output_api):
  results = []
  results.extend(CheckPatchFormatted(input_api, output_api))
  return results


def CheckPatchFormatted(input_api, output_api):
  import git_cl
  cmd = ['cl', 'format', '--dry-run', input_api.PresubmitLocalPath()]
  code, _ = git_cl.RunGitWithCode(cmd, suppress_stderr=True)
  if code == 2:
    short_path = input_api.basename(input_api.PresubmitLocalPath())
    full_path = input_api.os_path.relpath(input_api.PresubmitLocalPath(),
                                          input_api.change.RepositoryRoot())
    return [output_api.PresubmitPromptWarning(
        'The %s directory requires source formatting. '
        'Please run git cl format %s' % (short_path, full_path))]
  # As this is just a warning, ignore all other errors if the user
  # happens to have a broken clang-format, doesn't use git, etc etc.
  return []
Add presubmit format requirement for webrtc/api/android
Add presubmit format requirement for webrtc/api/android

BUG=webrtc:6419
NOTRY=True
TBR=kjellander@webrtc.org

Review-Url: https://codereview.webrtc.org/2377113003
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#14435}
Python
bsd-3-clause
ShiftMediaProject/libilbc,TimothyGu/libilbc,ShiftMediaProject/libilbc,TimothyGu/libilbc,TimothyGu/libilbc,TimothyGu/libilbc,ShiftMediaProject/libilbc,TimothyGu/libilbc,ShiftMediaProject/libilbc,ShiftMediaProject/libilbc
Add presubmit format requirement for webrtc/api/android

BUG=webrtc:6419
NOTRY=True
TBR=kjellander@webrtc.org

Review-Url: https://codereview.webrtc.org/2377113003
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#14435}
# Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.


def CheckChangeOnUpload(input_api, output_api):
  results = []
  results.extend(CheckPatchFormatted(input_api, output_api))
  return results


def CheckPatchFormatted(input_api, output_api):
  import git_cl
  cmd = ['cl', 'format', '--dry-run', input_api.PresubmitLocalPath()]
  code, _ = git_cl.RunGitWithCode(cmd, suppress_stderr=True)
  if code == 2:
    short_path = input_api.basename(input_api.PresubmitLocalPath())
    full_path = input_api.os_path.relpath(input_api.PresubmitLocalPath(),
                                          input_api.change.RepositoryRoot())
    return [output_api.PresubmitPromptWarning(
        'The %s directory requires source formatting. '
        'Please run git cl format %s' % (short_path, full_path))]
  # As this is just a warning, ignore all other errors if the user
  # happens to have a broken clang-format, doesn't use git, etc etc.
  return []
<commit_before><commit_msg>Add presubmit format requirement for webrtc/api/android

BUG=webrtc:6419
NOTRY=True
TBR=kjellander@webrtc.org

Review-Url: https://codereview.webrtc.org/2377113003
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#14435}<commit_after>
# Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.


def CheckChangeOnUpload(input_api, output_api):
  results = []
  results.extend(CheckPatchFormatted(input_api, output_api))
  return results


def CheckPatchFormatted(input_api, output_api):
  import git_cl
  cmd = ['cl', 'format', '--dry-run', input_api.PresubmitLocalPath()]
  code, _ = git_cl.RunGitWithCode(cmd, suppress_stderr=True)
  if code == 2:
    short_path = input_api.basename(input_api.PresubmitLocalPath())
    full_path = input_api.os_path.relpath(input_api.PresubmitLocalPath(),
                                          input_api.change.RepositoryRoot())
    return [output_api.PresubmitPromptWarning(
        'The %s directory requires source formatting. '
        'Please run git cl format %s' % (short_path, full_path))]
  # As this is just a warning, ignore all other errors if the user
  # happens to have a broken clang-format, doesn't use git, etc etc.
  return []
Add presubmit format requirement for webrtc/api/android

BUG=webrtc:6419
NOTRY=True
TBR=kjellander@webrtc.org

Review-Url: https://codereview.webrtc.org/2377113003
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#14435}# Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.


def CheckChangeOnUpload(input_api, output_api):
  results = []
  results.extend(CheckPatchFormatted(input_api, output_api))
  return results


def CheckPatchFormatted(input_api, output_api):
  import git_cl
  cmd = ['cl', 'format', '--dry-run', input_api.PresubmitLocalPath()]
  code, _ = git_cl.RunGitWithCode(cmd, suppress_stderr=True)
  if code == 2:
    short_path = input_api.basename(input_api.PresubmitLocalPath())
    full_path = input_api.os_path.relpath(input_api.PresubmitLocalPath(),
                                          input_api.change.RepositoryRoot())
    return [output_api.PresubmitPromptWarning(
        'The %s directory requires source formatting. '
        'Please run git cl format %s' % (short_path, full_path))]
  # As this is just a warning, ignore all other errors if the user
  # happens to have a broken clang-format, doesn't use git, etc etc.
  return []
<commit_before><commit_msg>Add presubmit format requirement for webrtc/api/android

BUG=webrtc:6419
NOTRY=True
TBR=kjellander@webrtc.org

Review-Url: https://codereview.webrtc.org/2377113003
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#14435}<commit_after># Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.


def CheckChangeOnUpload(input_api, output_api):
  results = []
  results.extend(CheckPatchFormatted(input_api, output_api))
  return results


def CheckPatchFormatted(input_api, output_api):
  import git_cl
  cmd = ['cl', 'format', '--dry-run', input_api.PresubmitLocalPath()]
  code, _ = git_cl.RunGitWithCode(cmd, suppress_stderr=True)
  if code == 2:
    short_path = input_api.basename(input_api.PresubmitLocalPath())
    full_path = input_api.os_path.relpath(input_api.PresubmitLocalPath(),
                                          input_api.change.RepositoryRoot())
    return [output_api.PresubmitPromptWarning(
        'The %s directory requires source formatting. '
        'Please run git cl format %s' % (short_path, full_path))]
  # As this is just a warning, ignore all other errors if the user
  # happens to have a broken clang-format, doesn't use git, etc etc.
  return []
552289fd86446416eefec6356659a8333d091f79
util/plot_dh.py
util/plot_dh.py
#!/usr/bin/env python
#
# Script for plotting ECTester ECDH results.
#
# Example usage:
#
# > java -jar ECTesterReader.jar -dh 10000 -b 192 -fp -o dh.csv
# ...
# > ./plot_dh.py dh.csv
# ...
#

import numpy as np
import matplotlib.pyplot as plt
import argparse
from operator import itemgetter

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Plot ECTester ECDH timing.")
    parser.add_argument("-o", "--output", dest="output",
                        type=argparse.FileType("wb"),
                        help="Write image to [file], do not display.",
                        metavar="file")
    parser.add_argument("file", type=str, help="The file to plot(csv).")
    opts = parser.parse_args()

    hx = lambda x: int(x, 16)
    data = np.genfromtxt(opts.file, delimiter=";", skip_header=1,
                         converters={2: hx, 3: hx, 4: hx},
                         dtype=np.dtype([("index","u4"), ("time","u4"),
                                         ("pub", "O"), ("priv", "O"),
                                         ("secret","O")]))

    time_data = map(itemgetter(1), data)
    priv_data = map(itemgetter(2), data)
    pub_data = map(itemgetter(3), data)
    secret_data = map(itemgetter(4), data)

    fig = plt.figure(tight_layout=True)
    fig.suptitle(opts.file)

    axe_hist = fig.add_subplot(1,1,1)
    axe_hist.hist(time_data, bins=400, log=True)
    axe_hist.set_ylabel("count\n(log)")
    axe_hist.set_xlabel("time (ms)")

    if opts.output is None:
        plt.show()
    else:
        plt.savefig(opts.output, dpi=400)
Add script to plot ECDH timing.
Add script to plot ECDH timing.
Python
mit
petrs/ECTester,petrs/ECTester,petrs/ECTester
Add script to plot ECDH timing.
#!/usr/bin/env python
#
# Script for plotting ECTester ECDH results.
#
# Example usage:
#
# > java -jar ECTesterReader.jar -dh 10000 -b 192 -fp -o dh.csv
# ...
# > ./plot_dh.py dh.csv
# ...
#

import numpy as np
import matplotlib.pyplot as plt
import argparse
from operator import itemgetter

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Plot ECTester ECDH timing.")
    parser.add_argument("-o", "--output", dest="output",
                        type=argparse.FileType("wb"),
                        help="Write image to [file], do not display.",
                        metavar="file")
    parser.add_argument("file", type=str, help="The file to plot(csv).")
    opts = parser.parse_args()

    hx = lambda x: int(x, 16)
    data = np.genfromtxt(opts.file, delimiter=";", skip_header=1,
                         converters={2: hx, 3: hx, 4: hx},
                         dtype=np.dtype([("index","u4"), ("time","u4"),
                                         ("pub", "O"), ("priv", "O"),
                                         ("secret","O")]))

    time_data = map(itemgetter(1), data)
    priv_data = map(itemgetter(2), data)
    pub_data = map(itemgetter(3), data)
    secret_data = map(itemgetter(4), data)

    fig = plt.figure(tight_layout=True)
    fig.suptitle(opts.file)

    axe_hist = fig.add_subplot(1,1,1)
    axe_hist.hist(time_data, bins=400, log=True)
    axe_hist.set_ylabel("count\n(log)")
    axe_hist.set_xlabel("time (ms)")

    if opts.output is None:
        plt.show()
    else:
        plt.savefig(opts.output, dpi=400)
<commit_before><commit_msg>Add script to plot ECDH timing.<commit_after>
#!/usr/bin/env python
#
# Script for plotting ECTester ECDH results.
#
# Example usage:
#
# > java -jar ECTesterReader.jar -dh 10000 -b 192 -fp -o dh.csv
# ...
# > ./plot_dh.py dh.csv
# ...
#

import numpy as np
import matplotlib.pyplot as plt
import argparse
from operator import itemgetter

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Plot ECTester ECDH timing.")
    parser.add_argument("-o", "--output", dest="output",
                        type=argparse.FileType("wb"),
                        help="Write image to [file], do not display.",
                        metavar="file")
    parser.add_argument("file", type=str, help="The file to plot(csv).")
    opts = parser.parse_args()

    hx = lambda x: int(x, 16)
    data = np.genfromtxt(opts.file, delimiter=";", skip_header=1,
                         converters={2: hx, 3: hx, 4: hx},
                         dtype=np.dtype([("index","u4"), ("time","u4"),
                                         ("pub", "O"), ("priv", "O"),
                                         ("secret","O")]))

    time_data = map(itemgetter(1), data)
    priv_data = map(itemgetter(2), data)
    pub_data = map(itemgetter(3), data)
    secret_data = map(itemgetter(4), data)

    fig = plt.figure(tight_layout=True)
    fig.suptitle(opts.file)

    axe_hist = fig.add_subplot(1,1,1)
    axe_hist.hist(time_data, bins=400, log=True)
    axe_hist.set_ylabel("count\n(log)")
    axe_hist.set_xlabel("time (ms)")

    if opts.output is None:
        plt.show()
    else:
        plt.savefig(opts.output, dpi=400)
Add script to plot ECDH timing.#!/usr/bin/env python
#
# Script for plotting ECTester ECDH results.
#
# Example usage:
#
# > java -jar ECTesterReader.jar -dh 10000 -b 192 -fp -o dh.csv
# ...
# > ./plot_dh.py dh.csv
# ...
#

import numpy as np
import matplotlib.pyplot as plt
import argparse
from operator import itemgetter

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Plot ECTester ECDH timing.")
    parser.add_argument("-o", "--output", dest="output",
                        type=argparse.FileType("wb"),
                        help="Write image to [file], do not display.",
                        metavar="file")
    parser.add_argument("file", type=str, help="The file to plot(csv).")
    opts = parser.parse_args()

    hx = lambda x: int(x, 16)
    data = np.genfromtxt(opts.file, delimiter=";", skip_header=1,
                         converters={2: hx, 3: hx, 4: hx},
                         dtype=np.dtype([("index","u4"), ("time","u4"),
                                         ("pub", "O"), ("priv", "O"),
                                         ("secret","O")]))

    time_data = map(itemgetter(1), data)
    priv_data = map(itemgetter(2), data)
    pub_data = map(itemgetter(3), data)
    secret_data = map(itemgetter(4), data)

    fig = plt.figure(tight_layout=True)
    fig.suptitle(opts.file)

    axe_hist = fig.add_subplot(1,1,1)
    axe_hist.hist(time_data, bins=400, log=True)
    axe_hist.set_ylabel("count\n(log)")
    axe_hist.set_xlabel("time (ms)")

    if opts.output is None:
        plt.show()
    else:
        plt.savefig(opts.output, dpi=400)
<commit_before><commit_msg>Add script to plot ECDH timing.<commit_after>#!/usr/bin/env python
#
# Script for plotting ECTester ECDH results.
#
# Example usage:
#
# > java -jar ECTesterReader.jar -dh 10000 -b 192 -fp -o dh.csv
# ...
# > ./plot_dh.py dh.csv
# ...
#

import numpy as np
import matplotlib.pyplot as plt
import argparse
from operator import itemgetter

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Plot ECTester ECDH timing.")
    parser.add_argument("-o", "--output", dest="output",
                        type=argparse.FileType("wb"),
                        help="Write image to [file], do not display.",
                        metavar="file")
    parser.add_argument("file", type=str, help="The file to plot(csv).")
    opts = parser.parse_args()

    hx = lambda x: int(x, 16)
    data = np.genfromtxt(opts.file, delimiter=";", skip_header=1,
                         converters={2: hx, 3: hx, 4: hx},
                         dtype=np.dtype([("index","u4"), ("time","u4"),
                                         ("pub", "O"), ("priv", "O"),
                                         ("secret","O")]))

    time_data = map(itemgetter(1), data)
    priv_data = map(itemgetter(2), data)
    pub_data = map(itemgetter(3), data)
    secret_data = map(itemgetter(4), data)

    fig = plt.figure(tight_layout=True)
    fig.suptitle(opts.file)

    axe_hist = fig.add_subplot(1,1,1)
    axe_hist.hist(time_data, bins=400, log=True)
    axe_hist.set_ylabel("count\n(log)")
    axe_hist.set_xlabel("time (ms)")

    if opts.output is None:
        plt.show()
    else:
        plt.savefig(opts.output, dpi=400)
cfb7d50943a2d410d088b83b1d601305b6eb9283
i8c/tests/test_empty_function.py
i8c/tests/test_empty_function.py
from i8c.tests import TestCase

SOURCE = "define test::pretty_minimal"

class TestEmptyFunction(TestCase):
    def test_empty_function(self):
        """Check that empty functions can be compiled."""
        tree, output = self.compile(SOURCE)
        self.assertEqual([], output.operations)
Check that empty functions can be compiled
Check that empty functions can be compiled
Python
lgpl-2.1
gbenson/i8c
Check that empty functions can be compiled
from i8c.tests import TestCase

SOURCE = "define test::pretty_minimal"

class TestEmptyFunction(TestCase):
    def test_empty_function(self):
        """Check that empty functions can be compiled."""
        tree, output = self.compile(SOURCE)
        self.assertEqual([], output.operations)
<commit_before><commit_msg>Check that empty functions can be compiled<commit_after>
from i8c.tests import TestCase

SOURCE = "define test::pretty_minimal"

class TestEmptyFunction(TestCase):
    def test_empty_function(self):
        """Check that empty functions can be compiled."""
        tree, output = self.compile(SOURCE)
        self.assertEqual([], output.operations)
Check that empty functions can be compiledfrom i8c.tests import TestCase

SOURCE = "define test::pretty_minimal"

class TestEmptyFunction(TestCase):
    def test_empty_function(self):
        """Check that empty functions can be compiled."""
        tree, output = self.compile(SOURCE)
        self.assertEqual([], output.operations)
<commit_before><commit_msg>Check that empty functions can be compiled<commit_after>from i8c.tests import TestCase

SOURCE = "define test::pretty_minimal"

class TestEmptyFunction(TestCase):
    def test_empty_function(self):
        """Check that empty functions can be compiled."""
        tree, output = self.compile(SOURCE)
        self.assertEqual([], output.operations)
408ba2172deee7d775d37f22e9f7f377682940fa
heat/tests/functional/test_WordPress_With_LB.py
heat/tests/functional/test_WordPress_With_LB.py
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
#

import util
import verify
import nose
from nose.plugins.attrib import attr
import unittest


@attr(speed='slow')
@attr(tag=['func', 'wordpress', 'LB', 'WordPress_With_LB.template'])
class WordPressWithLBFunctionalTest(unittest.TestCase):
    def setUp(self):
        template = 'WordPress_With_LB.template'
        self.func_utils = util.FuncUtils()
        self.func_utils.prepare_jeos('F17', 'x86_64', 'cfntools')
        self.func_utils.create_stack(template, 'F17')
        self.func_utils.check_cfntools()
        self.func_utils.wait_for_provisioning()
        self.func_utils.check_user_data(template)
        self.ssh = self.func_utils.get_ssh_client()

    def test_instance(self):
        # ensure wordpress was installed by checking for expected
        # configuration file over ssh
        wp_file = '/etc/wordpress/wp-config.php'
        stdin, stdout, sterr = self.ssh.exec_command('ls ' + wp_file)
        result = stdout.readlines().pop().rstrip()
        self.assertTrue(result == wp_file)
        print "Wordpress installation detected"

        # Verify the output URL parses as expected, ie check that
        # the wordpress installation is operational
        stack_url = self.func_utils.get_stack_output("WebsiteURL")
        print "Got stack output WebsiteURL=%s, verifying" % stack_url
        ver = verify.VerifyStack()
        self.assertTrue(ver.verify_wordpress(stack_url))

        self.func_utils.cleanup()
Add test case for WordPress_With_LB.template
Add test case for WordPress_With_LB.template

Change-Id: I324da126b5a775a00b97d868cbd347dd65cb16aa
Signed-off-by: Steven Dake <8638f3fce5db0278cfbc239bd581dfc00c29ec9d@redhat.com>
Python
apache-2.0
jasondunsmore/heat,cryptickp/heat,maestro-hybrid-cloud/heat,citrix-openstack-build/heat,jasondunsmore/heat,steveb/heat,JioCloud/heat,srznew/heat,srznew/heat,NeCTAR-RC/heat,takeshineshiro/heat,openstack/heat,noironetworks/heat,pratikmallya/heat,Triv90/Heat,takeshineshiro/heat,miguelgrinberg/heat,rickerc/heat_audit,gonzolino/heat,citrix-openstack-build/heat,cwolferh/heat-scratch,redhat-openstack/heat,dragorosson/heat,ntt-sic/heat,rdo-management/heat,cwolferh/heat-scratch,maestro-hybrid-cloud/heat,varunarya10/heat,dims/heat,Triv90/Heat,rdo-management/heat,dragorosson/heat,JioCloud/heat,gonzolino/heat,rickerc/heat_audit,NeCTAR-RC/heat,steveb/heat,noironetworks/heat,pshchelo/heat,miguelgrinberg/heat,openstack/heat,dims/heat,cryptickp/heat,pshchelo/heat,rh-s/heat,redhat-openstack/heat,ntt-sic/heat,pratikmallya/heat,Triv90/Heat,rh-s/heat,varunarya10/heat
Add test case for WordPress_With_LB.template

Change-Id: I324da126b5a775a00b97d868cbd347dd65cb16aa
Signed-off-by: Steven Dake <8638f3fce5db0278cfbc239bd581dfc00c29ec9d@redhat.com>
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
#

import util
import verify
import nose
from nose.plugins.attrib import attr
import unittest


@attr(speed='slow')
@attr(tag=['func', 'wordpress', 'LB', 'WordPress_With_LB.template'])
class WordPressWithLBFunctionalTest(unittest.TestCase):
    def setUp(self):
        template = 'WordPress_With_LB.template'
        self.func_utils = util.FuncUtils()
        self.func_utils.prepare_jeos('F17', 'x86_64', 'cfntools')
        self.func_utils.create_stack(template, 'F17')
        self.func_utils.check_cfntools()
        self.func_utils.wait_for_provisioning()
        self.func_utils.check_user_data(template)
        self.ssh = self.func_utils.get_ssh_client()

    def test_instance(self):
        # ensure wordpress was installed by checking for expected
        # configuration file over ssh
        wp_file = '/etc/wordpress/wp-config.php'
        stdin, stdout, sterr = self.ssh.exec_command('ls ' + wp_file)
        result = stdout.readlines().pop().rstrip()
        self.assertTrue(result == wp_file)
        print "Wordpress installation detected"

        # Verify the output URL parses as expected, ie check that
        # the wordpress installation is operational
        stack_url = self.func_utils.get_stack_output("WebsiteURL")
        print "Got stack output WebsiteURL=%s, verifying" % stack_url
        ver = verify.VerifyStack()
        self.assertTrue(ver.verify_wordpress(stack_url))

        self.func_utils.cleanup()
<commit_before><commit_msg>Add test case for WordPress_With_LB.template

Change-Id: I324da126b5a775a00b97d868cbd347dd65cb16aa
Signed-off-by: Steven Dake <8638f3fce5db0278cfbc239bd581dfc00c29ec9d@redhat.com><commit_after>
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
#

import util
import verify
import nose
from nose.plugins.attrib import attr
import unittest


@attr(speed='slow')
@attr(tag=['func', 'wordpress', 'LB', 'WordPress_With_LB.template'])
class WordPressWithLBFunctionalTest(unittest.TestCase):
    def setUp(self):
        template = 'WordPress_With_LB.template'
        self.func_utils = util.FuncUtils()
        self.func_utils.prepare_jeos('F17', 'x86_64', 'cfntools')
        self.func_utils.create_stack(template, 'F17')
        self.func_utils.check_cfntools()
        self.func_utils.wait_for_provisioning()
        self.func_utils.check_user_data(template)
        self.ssh = self.func_utils.get_ssh_client()

    def test_instance(self):
        # ensure wordpress was installed by checking for expected
        # configuration file over ssh
        wp_file = '/etc/wordpress/wp-config.php'
        stdin, stdout, sterr = self.ssh.exec_command('ls ' + wp_file)
        result = stdout.readlines().pop().rstrip()
        self.assertTrue(result == wp_file)
        print "Wordpress installation detected"

        # Verify the output URL parses as expected, ie check that
        # the wordpress installation is operational
        stack_url = self.func_utils.get_stack_output("WebsiteURL")
        print "Got stack output WebsiteURL=%s, verifying" % stack_url
        ver = verify.VerifyStack()
        self.assertTrue(ver.verify_wordpress(stack_url))

        self.func_utils.cleanup()
Add test case for WordPress_With_LB.template

Change-Id: I324da126b5a775a00b97d868cbd347dd65cb16aa
Signed-off-by: Steven Dake <8638f3fce5db0278cfbc239bd581dfc00c29ec9d@redhat.com># vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
#

import util
import verify
import nose
from nose.plugins.attrib import attr
import unittest


@attr(speed='slow')
@attr(tag=['func', 'wordpress', 'LB', 'WordPress_With_LB.template'])
class WordPressWithLBFunctionalTest(unittest.TestCase):
    def setUp(self):
        template = 'WordPress_With_LB.template'
        self.func_utils = util.FuncUtils()
        self.func_utils.prepare_jeos('F17', 'x86_64', 'cfntools')
        self.func_utils.create_stack(template, 'F17')
        self.func_utils.check_cfntools()
        self.func_utils.wait_for_provisioning()
        self.func_utils.check_user_data(template)
        self.ssh = self.func_utils.get_ssh_client()

    def test_instance(self):
        # ensure wordpress was installed by checking for expected
        # configuration file over ssh
        wp_file = '/etc/wordpress/wp-config.php'
        stdin, stdout, sterr = self.ssh.exec_command('ls ' + wp_file)
        result = stdout.readlines().pop().rstrip()
        self.assertTrue(result == wp_file)
        print "Wordpress installation detected"

        # Verify the output URL parses as expected, ie check that
        # the wordpress installation is operational
        stack_url = self.func_utils.get_stack_output("WebsiteURL")
        print "Got stack output WebsiteURL=%s, verifying" % stack_url
        ver = verify.VerifyStack()
        self.assertTrue(ver.verify_wordpress(stack_url))

        self.func_utils.cleanup()
<commit_before><commit_msg>Add test case for WordPress_With_LB.template

Change-Id: I324da126b5a775a00b97d868cbd347dd65cb16aa
Signed-off-by: Steven Dake <8638f3fce5db0278cfbc239bd581dfc00c29ec9d@redhat.com><commit_after># vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
#

import util
import verify
import nose
from nose.plugins.attrib import attr
import unittest


@attr(speed='slow')
@attr(tag=['func', 'wordpress', 'LB', 'WordPress_With_LB.template'])
class WordPressWithLBFunctionalTest(unittest.TestCase):
    def setUp(self):
        template = 'WordPress_With_LB.template'
        self.func_utils = util.FuncUtils()
        self.func_utils.prepare_jeos('F17', 'x86_64', 'cfntools')
        self.func_utils.create_stack(template, 'F17')
        self.func_utils.check_cfntools()
        self.func_utils.wait_for_provisioning()
        self.func_utils.check_user_data(template)
        self.ssh = self.func_utils.get_ssh_client()

    def test_instance(self):
        # ensure wordpress was installed by checking for expected
        # configuration file over ssh
        wp_file = '/etc/wordpress/wp-config.php'
        stdin, stdout, sterr = self.ssh.exec_command('ls ' + wp_file)
        result = stdout.readlines().pop().rstrip()
        self.assertTrue(result == wp_file)
        print "Wordpress installation detected"

        # Verify the output URL parses as expected, ie check that
        # the wordpress installation is operational
        stack_url = self.func_utils.get_stack_output("WebsiteURL")
        print "Got stack output WebsiteURL=%s, verifying" % stack_url
        ver = verify.VerifyStack()
        self.assertTrue(ver.verify_wordpress(stack_url))

        self.func_utils.cleanup()
7824a38ee1038790adbc7209920a9112e2231bd2
src/toil/test/src/systemTest.py
src/toil/test/src/systemTest.py
import errno
import multiprocessing
import os
import tempfile
from functools import partial

from toil.test import ToilTest


class SystemTest(ToilTest):
    """
    Test various assumptions about the operating system's behavior
    """

    def testAtomicityOfNonEmptyDirectoryRenames(self):
        for _ in range(100):
            parent = self._createTempDir(purpose='parent')
            child = os.path.join(parent, 'child')
            # Use processes (as opposed to threads) to prevent GIL from ordering things artificially
            pool = multiprocessing.Pool()
            try:
                numTasks = multiprocessing.cpu_count() * 10
                grandChildIds = pool.map_async(
                    func=partial(_testAtomicityOfNonEmptyDirectoryRenamesTask, parent, child),
                    iterable=range(numTasks))
                grandChildIds = grandChildIds.get()
            finally:
                pool.close()
                pool.join()
            self.assertEquals(len(grandChildIds), numTasks)
            # Assert that we only had one winner
            grandChildIds = [n for n in grandChildIds if n is not None]
            self.assertEquals(len(grandChildIds), 1)
            # Assert that the winner's grandChild wasn't silently overwritten by a looser
            expectedGrandChildId = grandChildIds[0]
            actualGrandChild = os.path.join(child, 'grandChild')
            actualGrandChildId = os.stat(actualGrandChild).st_ino
            self.assertEquals(actualGrandChildId, expectedGrandChildId)


def _testAtomicityOfNonEmptyDirectoryRenamesTask(parent, child, _):
    tmpChildDir = tempfile.mkdtemp(dir=parent, prefix='child', suffix='.tmp')
    grandChild = os.path.join(tmpChildDir, 'grandChild')
    open(grandChild, 'w').close()
    grandChildId = os.stat(grandChild).st_ino
    try:
        os.rename(tmpChildDir, child)
    except OSError as e:
        if e.errno == errno.ENOTEMPTY:
            os.unlink(grandChild)
            os.rmdir(tmpChildDir)
            return None
        else:
            raise
    else:
        # We won the race
        return grandChildId
Add test for atomicity of renames of non-empty directories
Add test for atomicity of renames of non-empty directories
Python
apache-2.0
BD2KGenomics/slugflow,BD2KGenomics/slugflow
Add test for atomicity of renames of non-empty directories
import errno
import multiprocessing
import os
import tempfile
from functools import partial

from toil.test import ToilTest


class SystemTest(ToilTest):
    """
    Test various assumptions about the operating system's behavior
    """

    def testAtomicityOfNonEmptyDirectoryRenames(self):
        for _ in range(100):
            parent = self._createTempDir(purpose='parent')
            child = os.path.join(parent, 'child')
            # Use processes (as opposed to threads) to prevent GIL from ordering things artificially
            pool = multiprocessing.Pool()
            try:
                numTasks = multiprocessing.cpu_count() * 10
                grandChildIds = pool.map_async(
                    func=partial(_testAtomicityOfNonEmptyDirectoryRenamesTask, parent, child),
                    iterable=range(numTasks))
                grandChildIds = grandChildIds.get()
            finally:
                pool.close()
                pool.join()
            self.assertEquals(len(grandChildIds), numTasks)
            # Assert that we only had one winner
            grandChildIds = [n for n in grandChildIds if n is not None]
            self.assertEquals(len(grandChildIds), 1)
            # Assert that the winner's grandChild wasn't silently overwritten by a looser
            expectedGrandChildId = grandChildIds[0]
            actualGrandChild = os.path.join(child, 'grandChild')
            actualGrandChildId = os.stat(actualGrandChild).st_ino
            self.assertEquals(actualGrandChildId, expectedGrandChildId)


def _testAtomicityOfNonEmptyDirectoryRenamesTask(parent, child, _):
    tmpChildDir = tempfile.mkdtemp(dir=parent, prefix='child', suffix='.tmp')
    grandChild = os.path.join(tmpChildDir, 'grandChild')
    open(grandChild, 'w').close()
    grandChildId = os.stat(grandChild).st_ino
    try:
        os.rename(tmpChildDir, child)
    except OSError as e:
        if e.errno == errno.ENOTEMPTY:
            os.unlink(grandChild)
            os.rmdir(tmpChildDir)
            return None
        else:
            raise
    else:
        # We won the race
        return grandChildId
<commit_before><commit_msg>Add test for atomicity of renames of non-empty directories<commit_after>
import errno
import multiprocessing
import os
import tempfile
from functools import partial

from toil.test import ToilTest


class SystemTest(ToilTest):
    """
    Test various assumptions about the operating system's behavior
    """

    def testAtomicityOfNonEmptyDirectoryRenames(self):
        for _ in range(100):
            parent = self._createTempDir(purpose='parent')
            child = os.path.join(parent, 'child')
            # Use processes (as opposed to threads) to prevent GIL from ordering things artificially
            pool = multiprocessing.Pool()
            try:
                numTasks = multiprocessing.cpu_count() * 10
                grandChildIds = pool.map_async(
                    func=partial(_testAtomicityOfNonEmptyDirectoryRenamesTask, parent, child),
                    iterable=range(numTasks))
                grandChildIds = grandChildIds.get()
            finally:
                pool.close()
                pool.join()
            self.assertEquals(len(grandChildIds), numTasks)
            # Assert that we only had one winner
            grandChildIds = [n for n in grandChildIds if n is not None]
            self.assertEquals(len(grandChildIds), 1)
            # Assert that the winner's grandChild wasn't silently overwritten by a looser
            expectedGrandChildId = grandChildIds[0]
            actualGrandChild = os.path.join(child, 'grandChild')
            actualGrandChildId = os.stat(actualGrandChild).st_ino
            self.assertEquals(actualGrandChildId, expectedGrandChildId)


def _testAtomicityOfNonEmptyDirectoryRenamesTask(parent, child, _):
    tmpChildDir = tempfile.mkdtemp(dir=parent, prefix='child', suffix='.tmp')
    grandChild = os.path.join(tmpChildDir, 'grandChild')
    open(grandChild, 'w').close()
    grandChildId = os.stat(grandChild).st_ino
    try:
        os.rename(tmpChildDir, child)
    except OSError as e:
        if e.errno == errno.ENOTEMPTY:
            os.unlink(grandChild)
            os.rmdir(tmpChildDir)
            return None
        else:
            raise
    else:
        # We won the race
        return grandChildId
Add test for atomicity of renames of non-empty directoriesimport errno
import multiprocessing
import os
import tempfile
from functools import partial

from toil.test import ToilTest


class SystemTest(ToilTest):
    """
    Test various assumptions about the operating system's behavior
    """

    def testAtomicityOfNonEmptyDirectoryRenames(self):
        for _ in range(100):
            parent = self._createTempDir(purpose='parent')
            child = os.path.join(parent, 'child')
            # Use processes (as opposed to threads) to prevent GIL from ordering things artificially
            pool = multiprocessing.Pool()
            try:
                numTasks = multiprocessing.cpu_count() * 10
                grandChildIds = pool.map_async(
                    func=partial(_testAtomicityOfNonEmptyDirectoryRenamesTask, parent, child),
                    iterable=range(numTasks))
                grandChildIds = grandChildIds.get()
            finally:
                pool.close()
                pool.join()
            self.assertEquals(len(grandChildIds), numTasks)
            # Assert that we only had one winner
            grandChildIds = [n for n in grandChildIds if n is not None]
            self.assertEquals(len(grandChildIds), 1)
            # Assert that the winner's grandChild wasn't silently overwritten by a looser
            expectedGrandChildId = grandChildIds[0]
            actualGrandChild = os.path.join(child, 'grandChild')
            actualGrandChildId = os.stat(actualGrandChild).st_ino
            self.assertEquals(actualGrandChildId, expectedGrandChildId)


def _testAtomicityOfNonEmptyDirectoryRenamesTask(parent, child, _):
    tmpChildDir = tempfile.mkdtemp(dir=parent, prefix='child', suffix='.tmp')
    grandChild = os.path.join(tmpChildDir, 'grandChild')
    open(grandChild, 'w').close()
    grandChildId = os.stat(grandChild).st_ino
    try:
        os.rename(tmpChildDir, child)
    except OSError as e:
        if e.errno == errno.ENOTEMPTY:
            os.unlink(grandChild)
            os.rmdir(tmpChildDir)
            return None
        else:
            raise
    else:
        # We won the race
        return grandChildId
<commit_before><commit_msg>Add test for atomicity of renames of non-empty directories<commit_after>import errno
import multiprocessing
import os
import tempfile
from functools import partial

from toil.test import ToilTest


class SystemTest(ToilTest):
    """
    Test various assumptions about the operating system's behavior
    """

    def testAtomicityOfNonEmptyDirectoryRenames(self):
        for _ in range(100):
            parent = self._createTempDir(purpose='parent')
            child = os.path.join(parent, 'child')
            # Use processes (as opposed to threads) to prevent GIL from ordering things artificially
            pool = multiprocessing.Pool()
            try:
                numTasks = multiprocessing.cpu_count() * 10
                grandChildIds = pool.map_async(
                    func=partial(_testAtomicityOfNonEmptyDirectoryRenamesTask, parent, child),
                    iterable=range(numTasks))
                grandChildIds = grandChildIds.get()
            finally:
                pool.close()
                pool.join()
            self.assertEquals(len(grandChildIds), numTasks)
            # Assert that we only had one winner
            grandChildIds = [n for n in grandChildIds if n is not None]
            self.assertEquals(len(grandChildIds), 1)
            # Assert that the winner's grandChild wasn't silently overwritten by a looser
            expectedGrandChildId = grandChildIds[0]
            actualGrandChild = os.path.join(child, 'grandChild')
            actualGrandChildId = os.stat(actualGrandChild).st_ino
            self.assertEquals(actualGrandChildId, expectedGrandChildId)


def _testAtomicityOfNonEmptyDirectoryRenamesTask(parent, child, _):
    tmpChildDir = tempfile.mkdtemp(dir=parent, prefix='child', suffix='.tmp')
    grandChild = os.path.join(tmpChildDir, 'grandChild')
    open(grandChild, 'w').close()
    grandChildId = os.stat(grandChild).st_ino
    try:
        os.rename(tmpChildDir, child)
    except OSError as e:
        if e.errno == errno.ENOTEMPTY:
            os.unlink(grandChild)
            os.rmdir(tmpChildDir)
            return None
        else:
            raise
    else:
        # We won the race
        return grandChildId
716553051e35bddfab67a28679f2a28c0a454ad3
oslo_concurrency/_i18n.py
oslo_concurrency/_i18n.py
# Copyright 2014 Mirantis Inc.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo import i18n

_translators = i18n.TranslatorFactory(domain='oslo_concurrency')

# The primary translation function using the well-known name "_"
_ = _translators.primary

# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
# Copyright 2014 Mirantis Inc. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo import i18n _translators = i18n.TranslatorFactory(domain='oslo.concurrency') # The primary translation function using the well-known name "_" _ = _translators.primary # Translators for log levels. # # The abbreviated names are meant to reflect the usual use of a short # name like '_'. The "L" is for "log" and the other letter comes from # the level. _LI = _translators.log_info _LW = _translators.log_warning _LE = _translators.log_error _LC = _translators.log_critical
Correct the translation domain for loading messages
Correct the translation domain for loading messages Change-Id: If7fa8fd1915378bda3fc6e361049c2d90cdec8af
Python
apache-2.0
varunarya10/oslo.concurrency,JioCloud/oslo.concurrency
# Copyright 2014 Mirantis Inc. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo import i18n _translators = i18n.TranslatorFactory(domain='oslo_concurrency') # The primary translation function using the well-known name "_" _ = _translators.primary # Translators for log levels. # # The abbreviated names are meant to reflect the usual use of a short # name like '_'. The "L" is for "log" and the other letter comes from # the level. _LI = _translators.log_info _LW = _translators.log_warning _LE = _translators.log_error _LC = _translators.log_critical Correct the translation domain for loading messages Change-Id: If7fa8fd1915378bda3fc6e361049c2d90cdec8af
# Copyright 2014 Mirantis Inc. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo import i18n _translators = i18n.TranslatorFactory(domain='oslo.concurrency') # The primary translation function using the well-known name "_" _ = _translators.primary # Translators for log levels. # # The abbreviated names are meant to reflect the usual use of a short # name like '_'. The "L" is for "log" and the other letter comes from # the level. _LI = _translators.log_info _LW = _translators.log_warning _LE = _translators.log_error _LC = _translators.log_critical
<commit_before># Copyright 2014 Mirantis Inc. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo import i18n _translators = i18n.TranslatorFactory(domain='oslo_concurrency') # The primary translation function using the well-known name "_" _ = _translators.primary # Translators for log levels. # # The abbreviated names are meant to reflect the usual use of a short # name like '_'. The "L" is for "log" and the other letter comes from # the level. _LI = _translators.log_info _LW = _translators.log_warning _LE = _translators.log_error _LC = _translators.log_critical <commit_msg>Correct the translation domain for loading messages Change-Id: If7fa8fd1915378bda3fc6e361049c2d90cdec8af<commit_after>
# Copyright 2014 Mirantis Inc. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo import i18n _translators = i18n.TranslatorFactory(domain='oslo.concurrency') # The primary translation function using the well-known name "_" _ = _translators.primary # Translators for log levels. # # The abbreviated names are meant to reflect the usual use of a short # name like '_'. The "L" is for "log" and the other letter comes from # the level. _LI = _translators.log_info _LW = _translators.log_warning _LE = _translators.log_error _LC = _translators.log_critical
# Copyright 2014 Mirantis Inc. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo import i18n _translators = i18n.TranslatorFactory(domain='oslo_concurrency') # The primary translation function using the well-known name "_" _ = _translators.primary # Translators for log levels. # # The abbreviated names are meant to reflect the usual use of a short # name like '_'. The "L" is for "log" and the other letter comes from # the level. _LI = _translators.log_info _LW = _translators.log_warning _LE = _translators.log_error _LC = _translators.log_critical Correct the translation domain for loading messages Change-Id: If7fa8fd1915378bda3fc6e361049c2d90cdec8af# Copyright 2014 Mirantis Inc. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo import i18n _translators = i18n.TranslatorFactory(domain='oslo.concurrency') # The primary translation function using the well-known name "_" _ = _translators.primary # Translators for log levels. # # The abbreviated names are meant to reflect the usual use of a short # name like '_'. The "L" is for "log" and the other letter comes from # the level. _LI = _translators.log_info _LW = _translators.log_warning _LE = _translators.log_error _LC = _translators.log_critical
<commit_before># Copyright 2014 Mirantis Inc. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo import i18n _translators = i18n.TranslatorFactory(domain='oslo_concurrency') # The primary translation function using the well-known name "_" _ = _translators.primary # Translators for log levels. # # The abbreviated names are meant to reflect the usual use of a short # name like '_'. The "L" is for "log" and the other letter comes from # the level. _LI = _translators.log_info _LW = _translators.log_warning _LE = _translators.log_error _LC = _translators.log_critical <commit_msg>Correct the translation domain for loading messages Change-Id: If7fa8fd1915378bda3fc6e361049c2d90cdec8af<commit_after># Copyright 2014 Mirantis Inc. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo import i18n _translators = i18n.TranslatorFactory(domain='oslo.concurrency') # The primary translation function using the well-known name "_" _ = _translators.primary # Translators for log levels. # # The abbreviated names are meant to reflect the usual use of a short # name like '_'. The "L" is for "log" and the other letter comes from # the level. _LI = _translators.log_info _LW = _translators.log_warning _LE = _translators.log_error _LC = _translators.log_critical
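The one-character fix in this row ('oslo_concurrency' to 'oslo.concurrency') matters because gettext resolves message catalogs by the domain string. A minimal stdlib sketch of that lookup, with an illustrative locale directory and language; oslo's TranslatorFactory wraps the same mechanism.

import gettext

def get_translator(domain, localedir='locale', languages=('de',)):
    try:
        # Looks for locale/<lang>/LC_MESSAGES/<domain>.mo; a domain typo means
        # the catalog is never found and translation silently becomes a no-op.
        return gettext.translation(domain, localedir, languages=list(languages)).gettext
    except (IOError, OSError):
        return gettext.NullTranslations().gettext

_ = get_translator('oslo.concurrency')
print(_('lock acquired'))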
5d77e0d84e6b22c0d4accd895ae664736b7a27a5
lily/management/commands/tags.py
lily/management/commands/tags.py
from optparse import make_option

from django.core.management.base import BaseCommand

from lily.tags.models import Tag

import difflib


class Command(BaseCommand):
    help = """Find similar tags based on the difflib module, useful to identify duplicates & typos."""

    ratio_cut_off_default = 0.65

    option_list = BaseCommand.option_list + (
        make_option('-c', '--cutoff',
                    action='store',
                    dest='cutoff',
                    default='',
                    help='Choose similarity cut-off value. Decimal in range [0,1]. Defaults to 0.65.'
                    ),
    )

    def handle(self, *args, **options):
        if options['cutoff']:
            cutoff = float(options['cutoff'])
        else:
            cutoff = self.ratio_cut_off_default

        tags1 = Tag.objects.all()
        tags2 = Tag.objects.all()
        tag_list = []

        # Loop over each tag and compare with all other tags.
        for tag1 in tags1:
            for tag2 in tags2:
                if ((tag1.name, tag2.name) not in tag_list) and ((tag2.name, tag1.name) not in tag_list):
                    # Determine similarity ratio between the two tag names.
                    diffl = difflib.SequenceMatcher(a=tag1.name, b=tag2.name).ratio()
                    if diffl > cutoff and diffl != 1.0:
                        # Encode & decode to handle special characters.
                        # This is a workaround for encoding problems in outputting to docker shell.
                        n1 = tag1.name.encode('utf-8')
                        n1 = n1.decode('ascii', 'ignore')
                        n2 = tag2.name.encode('utf-8')
                        n2 = n2.decode('ascii', 'ignore')
                        tag_list.insert(0, (tag1.name, tag2.name))
                        print u"{0}\t{1}\t{2:.3f}".format(n1, n2, diffl)
Add management script to identify similar tag names.
LILY-1099: Add management script to identify similar tag names.
Python
agpl-3.0
HelloLily/hellolily,HelloLily/hellolily,HelloLily/hellolily,HelloLily/hellolily
LILY-1099: Add management script to identify similar tag names.
from optparse import make_option

from django.core.management.base import BaseCommand

from lily.tags.models import Tag

import difflib


class Command(BaseCommand):
    help = """Find similar tags based on the difflib module, useful to identify duplicates & typos."""

    ratio_cut_off_default = 0.65

    option_list = BaseCommand.option_list + (
        make_option('-c', '--cutoff',
                    action='store',
                    dest='cutoff',
                    default='',
                    help='Choose similarity cut-off value. Decimal in range [0,1]. Defaults to 0.65.'
                    ),
    )

    def handle(self, *args, **options):
        if options['cutoff']:
            cutoff = float(options['cutoff'])
        else:
            cutoff = self.ratio_cut_off_default

        tags1 = Tag.objects.all()
        tags2 = Tag.objects.all()
        tag_list = []

        # Loop over each tag and compare with all other tags.
        for tag1 in tags1:
            for tag2 in tags2:
                if ((tag1.name, tag2.name) not in tag_list) and ((tag2.name, tag1.name) not in tag_list):
                    # Determine similarity ratio between the two tag names.
                    diffl = difflib.SequenceMatcher(a=tag1.name, b=tag2.name).ratio()
                    if diffl > cutoff and diffl != 1.0:
                        # Encode & decode to handle special characters.
                        # This is a workaround for encoding problems in outputting to docker shell.
                        n1 = tag1.name.encode('utf-8')
                        n1 = n1.decode('ascii', 'ignore')
                        n2 = tag2.name.encode('utf-8')
                        n2 = n2.decode('ascii', 'ignore')
                        tag_list.insert(0, (tag1.name, tag2.name))
                        print u"{0}\t{1}\t{2:.3f}".format(n1, n2, diffl)
<commit_before><commit_msg>LILY-1099: Add management script to identify similar tag names.<commit_after>
from optparse import make_option

from django.core.management.base import BaseCommand

from lily.tags.models import Tag

import difflib


class Command(BaseCommand):
    help = """Find similar tags based on the difflib module, useful to identify duplicates & typos."""

    ratio_cut_off_default = 0.65

    option_list = BaseCommand.option_list + (
        make_option('-c', '--cutoff',
                    action='store',
                    dest='cutoff',
                    default='',
                    help='Choose similarity cut-off value. Decimal in range [0,1]. Defaults to 0.65.'
                    ),
    )

    def handle(self, *args, **options):
        if options['cutoff']:
            cutoff = float(options['cutoff'])
        else:
            cutoff = self.ratio_cut_off_default

        tags1 = Tag.objects.all()
        tags2 = Tag.objects.all()
        tag_list = []

        # Loop over each tag and compare with all other tags.
        for tag1 in tags1:
            for tag2 in tags2:
                if ((tag1.name, tag2.name) not in tag_list) and ((tag2.name, tag1.name) not in tag_list):
                    # Determine similarity ratio between the two tag names.
                    diffl = difflib.SequenceMatcher(a=tag1.name, b=tag2.name).ratio()
                    if diffl > cutoff and diffl != 1.0:
                        # Encode & decode to handle special characters.
                        # This is a workaround for encoding problems in outputting to docker shell.
                        n1 = tag1.name.encode('utf-8')
                        n1 = n1.decode('ascii', 'ignore')
                        n2 = tag2.name.encode('utf-8')
                        n2 = n2.decode('ascii', 'ignore')
                        tag_list.insert(0, (tag1.name, tag2.name))
                        print u"{0}\t{1}\t{2:.3f}".format(n1, n2, diffl)
LILY-1099: Add management script to identify similar tag names.from optparse import make_option

from django.core.management.base import BaseCommand

from lily.tags.models import Tag

import difflib


class Command(BaseCommand):
    help = """Find similar tags based on the difflib module, useful to identify duplicates & typos."""

    ratio_cut_off_default = 0.65

    option_list = BaseCommand.option_list + (
        make_option('-c', '--cutoff',
                    action='store',
                    dest='cutoff',
                    default='',
                    help='Choose similarity cut-off value. Decimal in range [0,1]. Defaults to 0.65.'
                    ),
    )

    def handle(self, *args, **options):
        if options['cutoff']:
            cutoff = float(options['cutoff'])
        else:
            cutoff = self.ratio_cut_off_default

        tags1 = Tag.objects.all()
        tags2 = Tag.objects.all()
        tag_list = []

        # Loop over each tag and compare with all other tags.
        for tag1 in tags1:
            for tag2 in tags2:
                if ((tag1.name, tag2.name) not in tag_list) and ((tag2.name, tag1.name) not in tag_list):
                    # Determine similarity ratio between the two tag names.
                    diffl = difflib.SequenceMatcher(a=tag1.name, b=tag2.name).ratio()
                    if diffl > cutoff and diffl != 1.0:
                        # Encode & decode to handle special characters.
                        # This is a workaround for encoding problems in outputting to docker shell.
                        n1 = tag1.name.encode('utf-8')
                        n1 = n1.decode('ascii', 'ignore')
                        n2 = tag2.name.encode('utf-8')
                        n2 = n2.decode('ascii', 'ignore')
                        tag_list.insert(0, (tag1.name, tag2.name))
                        print u"{0}\t{1}\t{2:.3f}".format(n1, n2, diffl)
<commit_before><commit_msg>LILY-1099: Add management script to identify similar tag names.<commit_after>from optparse import make_option

from django.core.management.base import BaseCommand

from lily.tags.models import Tag

import difflib


class Command(BaseCommand):
    help = """Find similar tags based on the difflib module, useful to identify duplicates & typos."""

    ratio_cut_off_default = 0.65

    option_list = BaseCommand.option_list + (
        make_option('-c', '--cutoff',
                    action='store',
                    dest='cutoff',
                    default='',
                    help='Choose similarity cut-off value. Decimal in range [0,1]. Defaults to 0.65.'
                    ),
    )

    def handle(self, *args, **options):
        if options['cutoff']:
            cutoff = float(options['cutoff'])
        else:
            cutoff = self.ratio_cut_off_default

        tags1 = Tag.objects.all()
        tags2 = Tag.objects.all()
        tag_list = []

        # Loop over each tag and compare with all other tags.
        for tag1 in tags1:
            for tag2 in tags2:
                if ((tag1.name, tag2.name) not in tag_list) and ((tag2.name, tag1.name) not in tag_list):
                    # Determine similarity ratio between the two tag names.
                    diffl = difflib.SequenceMatcher(a=tag1.name, b=tag2.name).ratio()
                    if diffl > cutoff and diffl != 1.0:
                        # Encode & decode to handle special characters.
                        # This is a workaround for encoding problems in outputting to docker shell.
                        n1 = tag1.name.encode('utf-8')
                        n1 = n1.decode('ascii', 'ignore')
                        n2 = tag2.name.encode('utf-8')
                        n2 = n2.decode('ascii', 'ignore')
                        tag_list.insert(0, (tag1.name, tag2.name))
                        print u"{0}\t{1}\t{2:.3f}".format(n1, n2, diffl)
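The pairwise SequenceMatcher loop in this row is quadratic in the number of tags; the stdlib also exposes the same ratio test through difflib.get_close_matches, which takes the cutoff directly. A sketch with made-up tag names rather than the Lily models:

import difflib

tags = ['invoice', 'invoices', 'inovice', 'customer', 'custmer']
reported = set()
for tag in tags:
    candidates = [t for t in tags if t != tag and (t, tag) not in reported]
    for match in difflib.get_close_matches(tag, candidates, n=5, cutoff=0.65):
        reported.add((tag, match))
        ratio = difflib.SequenceMatcher(a=tag, b=match).ratio()
        print('%s\t%s\t%.3f' % (tag, match, ratio))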
bf0b3cb27fa2b518fcc3f5116da0e4dbde25aae8
src/django_richenum/__init__.py
src/django_richenum/__init__.py
import forms # noqa import models # noqa __all__ = ( 'forms', 'models', ) __version__ = 'unknown' try: __version__ = __import__('pkg_resources').get_distribution('django_richenum').version except Exception as e: pass
__version__ = 'unknown' try: __version__ = __import__('pkg_resources').get_distribution('django_richenum').version except Exception as e: pass
Remove unnecessary import of form submodule
Remove unnecessary import of form submodule
Python
mit
hearsaycorp/django-richenum,dhui/django-richenum,hearsaycorp/django-richenum,asherf/django-richenum,adepue/django-richenum
import forms # noqa import models # noqa __all__ = ( 'forms', 'models', ) __version__ = 'unknown' try: __version__ = __import__('pkg_resources').get_distribution('django_richenum').version except Exception as e: pass Remove unnecessary import of form submodule
__version__ = 'unknown' try: __version__ = __import__('pkg_resources').get_distribution('django_richenum').version except Exception as e: pass
<commit_before>import forms # noqa import models # noqa __all__ = ( 'forms', 'models', ) __version__ = 'unknown' try: __version__ = __import__('pkg_resources').get_distribution('django_richenum').version except Exception as e: pass <commit_msg>Remove unnecessary import of form submodule<commit_after>
__version__ = 'unknown' try: __version__ = __import__('pkg_resources').get_distribution('django_richenum').version except Exception as e: pass
import forms # noqa import models # noqa __all__ = ( 'forms', 'models', ) __version__ = 'unknown' try: __version__ = __import__('pkg_resources').get_distribution('django_richenum').version except Exception as e: pass Remove unnecessary import of form submodule__version__ = 'unknown' try: __version__ = __import__('pkg_resources').get_distribution('django_richenum').version except Exception as e: pass
<commit_before>import forms # noqa import models # noqa __all__ = ( 'forms', 'models', ) __version__ = 'unknown' try: __version__ = __import__('pkg_resources').get_distribution('django_richenum').version except Exception as e: pass <commit_msg>Remove unnecessary import of form submodule<commit_after>__version__ = 'unknown' try: __version__ = __import__('pkg_resources').get_distribution('django_richenum').version except Exception as e: pass
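The pkg_resources lookup kept by this row is the setuptools-era idiom; on Python 3.8 and later, importlib.metadata answers the same question without importing setuptools. A sketch mirroring the row's broad exception handling:

__version__ = 'unknown'
try:
    from importlib.metadata import version  # Python 3.8+
    __version__ = version('django_richenum')
except Exception:
    # Distribution not installed, or interpreter too old: keep the default.
    pass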
c833f56f70c6d539f9d01308d2e118ef0557557b
src/excel_sheet_column_title.py
src/excel_sheet_column_title.py
""" Source : https://oj.leetcode.com/problems/excel-sheet-column-number/ Author : Changxi Wu Date : 2015-01-21 Given a column title as appear in an Excel sheet, return its corresponding column number. For example: 1 -> A 2 -> B 3 -> C ... 26 -> Z 27 -> AA 28 -> AB """ # @return a string def convertToTitle(num): if __name__ == '__main__': test = [1:'A', 2:'B', 3:'C', 26:'Z', 27:'AA', 28:'AB']
Add excel sheet column title question description
Add excel sheet column title question description
Python
mit
chancyWu/leetcode
Add excel sheet column title question description
""" Source : https://oj.leetcode.com/problems/excel-sheet-column-number/ Author : Changxi Wu Date : 2015-01-21 Given a column title as appear in an Excel sheet, return its corresponding column number. For example: 1 -> A 2 -> B 3 -> C ... 26 -> Z 27 -> AA 28 -> AB """ # @return a string def convertToTitle(num): if __name__ == '__main__': test = [1:'A', 2:'B', 3:'C', 26:'Z', 27:'AA', 28:'AB']
<commit_before><commit_msg>Add excel sheet column title question description<commit_after>
""" Source : https://oj.leetcode.com/problems/excel-sheet-column-number/ Author : Changxi Wu Date : 2015-01-21 Given a column title as appear in an Excel sheet, return its corresponding column number. For example: 1 -> A 2 -> B 3 -> C ... 26 -> Z 27 -> AA 28 -> AB """ # @return a string def convertToTitle(num): if __name__ == '__main__': test = [1:'A', 2:'B', 3:'C', 26:'Z', 27:'AA', 28:'AB']
Add excel sheet column title question description"""
Source : https://oj.leetcode.com/problems/excel-sheet-column-number/
Author : Changxi Wu
Date : 2015-01-21

Given a positive column number, return its corresponding column title as it appears in an Excel sheet.

For example:
    1 -> A
    2 -> B
    3 -> C
    ...
    26 -> Z
    27 -> AA
    28 -> AB
"""

# @return a string
def convertToTitle(num):
    # Excel titles are bijective base-26: the digits A..Z stand for 1..26,
    # so shift the range down to 0..25 before each divmod.
    title = ''
    while num > 0:
        num, remainder = divmod(num - 1, 26)
        title = chr(ord('A') + remainder) + title
    return title

if __name__ == '__main__':
    test = {1: 'A', 2: 'B', 3: 'C', 26: 'Z', 27: 'AA', 28: 'AB'}
    for num, title in test.items():
        assert convertToTitle(num) == title
<commit_before><commit_msg>Add excel sheet column title question description<commit_after>"""
Source : https://oj.leetcode.com/problems/excel-sheet-column-number/
Author : Changxi Wu
Date : 2015-01-21

Given a positive column number, return its corresponding column title as it appears in an Excel sheet.

For example:
    1 -> A
    2 -> B
    3 -> C
    ...
    26 -> Z
    27 -> AA
    28 -> AB
"""

# @return a string
def convertToTitle(num):
    # Excel titles are bijective base-26: the digits A..Z stand for 1..26,
    # so shift the range down to 0..25 before each divmod.
    title = ''
    while num > 0:
        num, remainder = divmod(num - 1, 26)
        title = chr(ord('A') + remainder) + title
    return title

if __name__ == '__main__':
    test = {1: 'A', 2: 'B', 3: 'C', 26: 'Z', 27: 'AA', 28: 'AB'}
    for num, title in test.items():
        assert convertToTitle(num) == title
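Note that the URL in the row's docstring names the inverse problem, title to number. That direction is a plain positional read of the same bijective base-26 digits, sketched here for contrast:

def titleToNumber(title):
    # Read the title left to right as bijective base-26 digits, A=1 .. Z=26.
    num = 0
    for ch in title:
        num = num * 26 + (ord(ch) - ord('A') + 1)
    return num

if __name__ == '__main__':
    for num, title in {1: 'A', 26: 'Z', 27: 'AA', 28: 'AB', 701: 'ZY'}.items():
        assert titleToNumber(title) == num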
c509ef35fb0ba77fbcb66dd968daa0a19a617867
tests/sim/test_skills.py
tests/sim/test_skills.py
import unittest import unittest.mock as mock from hunting.level.map import LevelMap, LevelTile from hunting.sim.entities import GameObject, Fighter from hunting.sim.skills import PowerStrike class TestPowerStrike(unittest.TestCase): def setUp(self): level = LevelMap() level.set_map([[LevelTile()], [LevelTile()]]) self.power_strike = PowerStrike(-25, 50, 25) self.attacker = GameObject('0', level.log, 0, 0, 'attacker', faction='0', fighter=Fighter(100, 100, 0, 0, 0, speed=1, skills=[self.power_strike])) level.add_faction('0', {}) level.add_object(self.attacker) self.defender = GameObject('1', level.log, 1, 0, 'defender', faction='1', fighter=Fighter(100, 100, defense=0, power=10, xp=0, speed=100)) level.add_faction('1', {}) level.add_object(self.defender) @mock.patch('random.randint', return_value=40) def test_reduces_accuracy(self, _): self.power_strike.use(self.attacker, self.defender) self.assertEqual(self.defender.fighter.hp, 100) @mock.patch('random.randint', return_value=85) def test_increases_power(self, _): self.power_strike.use(self.attacker, self.defender) self.assertEqual(self.defender.fighter.hp, 50) def test_effect_is_removed(self): self.power_strike.use(self.attacker, self.defender) self.assertEqual(len(self.attacker.fighter.effect_list), 1) self.attacker.fighter.pass_time(1) self.assertEqual(len(self.attacker.fighter.effect_list), 0)
Add test for PowerStrike ability
Add test for PowerStrike ability
Python
mit
MoyTW/RL_Arena_Experiment
Add test for PowerStrike ability
import unittest import unittest.mock as mock from hunting.level.map import LevelMap, LevelTile from hunting.sim.entities import GameObject, Fighter from hunting.sim.skills import PowerStrike class TestPowerStrike(unittest.TestCase): def setUp(self): level = LevelMap() level.set_map([[LevelTile()], [LevelTile()]]) self.power_strike = PowerStrike(-25, 50, 25) self.attacker = GameObject('0', level.log, 0, 0, 'attacker', faction='0', fighter=Fighter(100, 100, 0, 0, 0, speed=1, skills=[self.power_strike])) level.add_faction('0', {}) level.add_object(self.attacker) self.defender = GameObject('1', level.log, 1, 0, 'defender', faction='1', fighter=Fighter(100, 100, defense=0, power=10, xp=0, speed=100)) level.add_faction('1', {}) level.add_object(self.defender) @mock.patch('random.randint', return_value=40) def test_reduces_accuracy(self, _): self.power_strike.use(self.attacker, self.defender) self.assertEqual(self.defender.fighter.hp, 100) @mock.patch('random.randint', return_value=85) def test_increases_power(self, _): self.power_strike.use(self.attacker, self.defender) self.assertEqual(self.defender.fighter.hp, 50) def test_effect_is_removed(self): self.power_strike.use(self.attacker, self.defender) self.assertEqual(len(self.attacker.fighter.effect_list), 1) self.attacker.fighter.pass_time(1) self.assertEqual(len(self.attacker.fighter.effect_list), 0)
<commit_before><commit_msg>Add test for PowerStrike ability<commit_after>
import unittest import unittest.mock as mock from hunting.level.map import LevelMap, LevelTile from hunting.sim.entities import GameObject, Fighter from hunting.sim.skills import PowerStrike class TestPowerStrike(unittest.TestCase): def setUp(self): level = LevelMap() level.set_map([[LevelTile()], [LevelTile()]]) self.power_strike = PowerStrike(-25, 50, 25) self.attacker = GameObject('0', level.log, 0, 0, 'attacker', faction='0', fighter=Fighter(100, 100, 0, 0, 0, speed=1, skills=[self.power_strike])) level.add_faction('0', {}) level.add_object(self.attacker) self.defender = GameObject('1', level.log, 1, 0, 'defender', faction='1', fighter=Fighter(100, 100, defense=0, power=10, xp=0, speed=100)) level.add_faction('1', {}) level.add_object(self.defender) @mock.patch('random.randint', return_value=40) def test_reduces_accuracy(self, _): self.power_strike.use(self.attacker, self.defender) self.assertEqual(self.defender.fighter.hp, 100) @mock.patch('random.randint', return_value=85) def test_increases_power(self, _): self.power_strike.use(self.attacker, self.defender) self.assertEqual(self.defender.fighter.hp, 50) def test_effect_is_removed(self): self.power_strike.use(self.attacker, self.defender) self.assertEqual(len(self.attacker.fighter.effect_list), 1) self.attacker.fighter.pass_time(1) self.assertEqual(len(self.attacker.fighter.effect_list), 0)
Add test for PowerStrike abilityimport unittest import unittest.mock as mock from hunting.level.map import LevelMap, LevelTile from hunting.sim.entities import GameObject, Fighter from hunting.sim.skills import PowerStrike class TestPowerStrike(unittest.TestCase): def setUp(self): level = LevelMap() level.set_map([[LevelTile()], [LevelTile()]]) self.power_strike = PowerStrike(-25, 50, 25) self.attacker = GameObject('0', level.log, 0, 0, 'attacker', faction='0', fighter=Fighter(100, 100, 0, 0, 0, speed=1, skills=[self.power_strike])) level.add_faction('0', {}) level.add_object(self.attacker) self.defender = GameObject('1', level.log, 1, 0, 'defender', faction='1', fighter=Fighter(100, 100, defense=0, power=10, xp=0, speed=100)) level.add_faction('1', {}) level.add_object(self.defender) @mock.patch('random.randint', return_value=40) def test_reduces_accuracy(self, _): self.power_strike.use(self.attacker, self.defender) self.assertEqual(self.defender.fighter.hp, 100) @mock.patch('random.randint', return_value=85) def test_increases_power(self, _): self.power_strike.use(self.attacker, self.defender) self.assertEqual(self.defender.fighter.hp, 50) def test_effect_is_removed(self): self.power_strike.use(self.attacker, self.defender) self.assertEqual(len(self.attacker.fighter.effect_list), 1) self.attacker.fighter.pass_time(1) self.assertEqual(len(self.attacker.fighter.effect_list), 0)
<commit_before><commit_msg>Add test for PowerStrike ability<commit_after>import unittest import unittest.mock as mock from hunting.level.map import LevelMap, LevelTile from hunting.sim.entities import GameObject, Fighter from hunting.sim.skills import PowerStrike class TestPowerStrike(unittest.TestCase): def setUp(self): level = LevelMap() level.set_map([[LevelTile()], [LevelTile()]]) self.power_strike = PowerStrike(-25, 50, 25) self.attacker = GameObject('0', level.log, 0, 0, 'attacker', faction='0', fighter=Fighter(100, 100, 0, 0, 0, speed=1, skills=[self.power_strike])) level.add_faction('0', {}) level.add_object(self.attacker) self.defender = GameObject('1', level.log, 1, 0, 'defender', faction='1', fighter=Fighter(100, 100, defense=0, power=10, xp=0, speed=100)) level.add_faction('1', {}) level.add_object(self.defender) @mock.patch('random.randint', return_value=40) def test_reduces_accuracy(self, _): self.power_strike.use(self.attacker, self.defender) self.assertEqual(self.defender.fighter.hp, 100) @mock.patch('random.randint', return_value=85) def test_increases_power(self, _): self.power_strike.use(self.attacker, self.defender) self.assertEqual(self.defender.fighter.hp, 50) def test_effect_is_removed(self): self.power_strike.use(self.attacker, self.defender) self.assertEqual(len(self.attacker.fighter.effect_list), 1) self.attacker.fighter.pass_time(1) self.assertEqual(len(self.attacker.fighter.effect_list), 0)
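The tests in this row pin random.randint with unittest.mock so a to-hit roll becomes deterministic. The same technique in isolation, with a toy combat function rather than the project's Fighter class:

import random
import unittest.mock as mock

def attack_hits(accuracy):
    # A plain percentile to-hit roll: hits when the roll is at or under accuracy.
    return random.randint(1, 100) <= accuracy

with mock.patch('random.randint', return_value=40):
    assert attack_hits(50)       # roll pinned low: guaranteed hit
with mock.patch('random.randint', return_value=85):
    assert not attack_hits(50)   # roll pinned high: guaranteed miss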
9bb9894b154a7a6bd6a8725e23cea4dbe78f84e0
generate_historic_liwc.py
generate_historic_liwc.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
from time import sleep

import json

import sys


def get_spelling_variants(term, categories, y_from, y_to):
    """Retrieve historic spelling variants from the INL Lexicon service.
    """
    # options for service:
    # get_wordforms
    # expand
    # get_lemma
    service = 'get_wordforms'
    url = 'http://sk.taalbanknederlands.inl.nl/LexiconService/lexicon/{s}'. \
          format(s=service)
    params = {
        'database': 'lexicon_service_db',
        'lemma': term,
        'year_from': y_from,
        'year_to': y_to
    }

    # Expand numbers by setting the pos tag to NUM
    if '11' in categories:
        params['pos'] = 'NUM'

    r = requests.get(url, params=params)

    if r.status_code == requests.codes.ok:
        #print r.encoding
        r.encoding = 'utf-8'
        #print r.text
        soup = BeautifulSoup(r.text, 'xml')
        words = soup.find_all('wordform')
        result = []
        for word in words:
            result.append(word.text)
        return result
    else:
        r.raise_for_status()


if __name__ == '__main__':
    #ws = get_spelling_variants('actueel', [], 1600, 1830)
    #print ws
    #for w in ws:
    #    print w.encode('utf-8')
    #sys.exit()

    with open('LIWC_Dutch_dictionary.dic', 'r') as f:
        lines = f.readlines()

    liwc_output = {}

    for line in lines:
        # legend
        if line[0].isdigit() or line.startswith(('%', '\r')):
            print line.strip()
        # word
        else:
            entry = line.split()
            term = entry[0]
            categories = entry[1:]

            t = term.decode('latin-1')
            words = get_spelling_variants(t, categories, 1600, 1830)
            words.append(t)
            #print term, words
            sleep(1)

            for word in words:
                if liwc_output.get(word) and not categories == liwc_output[word]:
                    new_c = list(set(categories + liwc_output.get(word)))
                    new_c.sort()
                    liwc_output[word] = new_c
                else:
                    liwc_output[word] = categories

    json.dump(liwc_output, open('liwc_output.json', 'w'), sort_keys=True)

    #liwc_output = json.load(open('liwc_output.json', 'r'), encoding='utf-8')

    entries = liwc_output.keys()
    entries.sort()

    for entry in entries:
        print '{e}\t\t{l}'.format(e=entry.encode('utf8'),
                                  l='\t'.join(liwc_output[entry]))
Add script for generating a historic version of the Dutch LIWC
Add script for generating a historic version of the Dutch LIWC Added the script for generating a historic version of the Dutch LIWC. This script was not yet committed in the old liwc project. The same holds for the files historic_Dutch_LIWC.dic and liwc_output.json.
Python
apache-2.0
NLeSC/embodied-emotions-scripts,NLeSC/embodied-emotions-scripts
Add script for generating a historic version of the Dutch LIWC Added the script for generating a historic version of the Dutch LIWC. This script was not yet committed in the old liwc project. The same holds for the files historic_Dutch_LIWC.dic and liwc_output.json.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
from time import sleep

import json

import sys


def get_spelling_variants(term, categories, y_from, y_to):
    """Retrieve historic spelling variants from the INL Lexicon service.
    """
    # options for service:
    # get_wordforms
    # expand
    # get_lemma
    service = 'get_wordforms'
    url = 'http://sk.taalbanknederlands.inl.nl/LexiconService/lexicon/{s}'. \
          format(s=service)
    params = {
        'database': 'lexicon_service_db',
        'lemma': term,
        'year_from': y_from,
        'year_to': y_to
    }

    # Expand numbers by setting the pos tag to NUM
    if '11' in categories:
        params['pos'] = 'NUM'

    r = requests.get(url, params=params)

    if r.status_code == requests.codes.ok:
        #print r.encoding
        r.encoding = 'utf-8'
        #print r.text
        soup = BeautifulSoup(r.text, 'xml')
        words = soup.find_all('wordform')
        result = []
        for word in words:
            result.append(word.text)
        return result
    else:
        r.raise_for_status()


if __name__ == '__main__':
    #ws = get_spelling_variants('actueel', [], 1600, 1830)
    #print ws
    #for w in ws:
    #    print w.encode('utf-8')
    #sys.exit()

    with open('LIWC_Dutch_dictionary.dic', 'r') as f:
        lines = f.readlines()

    liwc_output = {}

    for line in lines:
        # legend
        if line[0].isdigit() or line.startswith(('%', '\r')):
            print line.strip()
        # word
        else:
            entry = line.split()
            term = entry[0]
            categories = entry[1:]

            t = term.decode('latin-1')
            words = get_spelling_variants(t, categories, 1600, 1830)
            words.append(t)
            #print term, words
            sleep(1)

            for word in words:
                if liwc_output.get(word) and not categories == liwc_output[word]:
                    new_c = list(set(categories + liwc_output.get(word)))
                    new_c.sort()
                    liwc_output[word] = new_c
                else:
                    liwc_output[word] = categories

    json.dump(liwc_output, open('liwc_output.json', 'w'), sort_keys=True)

    #liwc_output = json.load(open('liwc_output.json', 'r'), encoding='utf-8')

    entries = liwc_output.keys()
    entries.sort()

    for entry in entries:
        print '{e}\t\t{l}'.format(e=entry.encode('utf8'),
                                  l='\t'.join(liwc_output[entry]))
<commit_before><commit_msg>Add script for generating a historic version of the Dutch LIWC Added the script for generating a historic version of the Dutch LIWC. This script was not yet committed in the old liwc project. The same holds for the files historic_Dutch_LIWC.dic and liwc_output.json.<commit_after>
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
from time import sleep

import json

import sys


def get_spelling_variants(term, categories, y_from, y_to):
    """Retrieve historic spelling variants from the INL Lexicon service.
    """
    # options for service:
    # get_wordforms
    # expand
    # get_lemma
    service = 'get_wordforms'
    url = 'http://sk.taalbanknederlands.inl.nl/LexiconService/lexicon/{s}'. \
          format(s=service)
    params = {
        'database': 'lexicon_service_db',
        'lemma': term,
        'year_from': y_from,
        'year_to': y_to
    }

    # Expand numbers by setting the pos tag to NUM
    if '11' in categories:
        params['pos'] = 'NUM'

    r = requests.get(url, params=params)

    if r.status_code == requests.codes.ok:
        #print r.encoding
        r.encoding = 'utf-8'
        #print r.text
        soup = BeautifulSoup(r.text, 'xml')
        words = soup.find_all('wordform')
        result = []
        for word in words:
            result.append(word.text)
        return result
    else:
        r.raise_for_status()


if __name__ == '__main__':
    #ws = get_spelling_variants('actueel', [], 1600, 1830)
    #print ws
    #for w in ws:
    #    print w.encode('utf-8')
    #sys.exit()

    with open('LIWC_Dutch_dictionary.dic', 'r') as f:
        lines = f.readlines()

    liwc_output = {}

    for line in lines:
        # legend
        if line[0].isdigit() or line.startswith(('%', '\r')):
            print line.strip()
        # word
        else:
            entry = line.split()
            term = entry[0]
            categories = entry[1:]

            t = term.decode('latin-1')
            words = get_spelling_variants(t, categories, 1600, 1830)
            words.append(t)
            #print term, words
            sleep(1)

            for word in words:
                if liwc_output.get(word) and not categories == liwc_output[word]:
                    new_c = list(set(categories + liwc_output.get(word)))
                    new_c.sort()
                    liwc_output[word] = new_c
                else:
                    liwc_output[word] = categories

    json.dump(liwc_output, open('liwc_output.json', 'w'), sort_keys=True)

    #liwc_output = json.load(open('liwc_output.json', 'r'), encoding='utf-8')

    entries = liwc_output.keys()
    entries.sort()

    for entry in entries:
        print '{e}\t\t{l}'.format(e=entry.encode('utf8'),
                                  l='\t'.join(liwc_output[entry]))
Add script for generating a historic version of the Dutch LIWC

Added the script for generating a historic version of the Dutch LIWC. This script was not yet committed in the old liwc project. The same holds for the files historic_Dutch_LIWC.dic and liwc_output.json.#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
from time import sleep

import json

import sys


def get_spelling_variants(term, categories, y_from, y_to):
    """Retrieve historic spelling variants from the INL Lexicon service.
    """
    # options for service:
    # get_wordforms
    # expand
    # get_lemma
    service = 'get_wordforms'
    url = 'http://sk.taalbanknederlands.inl.nl/LexiconService/lexicon/{s}'. \
          format(s=service)
    params = {
        'database': 'lexicon_service_db',
        'lemma': term,
        'year_from': y_from,
        'year_to': y_to
    }

    # Expand numbers by setting the pos tag to NUM
    if '11' in categories:
        params['pos'] = 'NUM'

    r = requests.get(url, params=params)

    if r.status_code == requests.codes.ok:
        #print r.encoding
        r.encoding = 'utf-8'
        #print r.text
        soup = BeautifulSoup(r.text, 'xml')
        words = soup.find_all('wordform')
        result = []
        for word in words:
            result.append(word.text)
        return result
    else:
        r.raise_for_status()


if __name__ == '__main__':
    #ws = get_spelling_variants('actueel', [], 1600, 1830)
    #print ws
    #for w in ws:
    #    print w.encode('utf-8')
    #sys.exit()

    with open('LIWC_Dutch_dictionary.dic', 'r') as f:
        lines = f.readlines()

    liwc_output = {}

    for line in lines:
        # legend
        if line[0].isdigit() or line.startswith(('%', '\r')):
            print line.strip()
        # word
        else:
            entry = line.split()
            term = entry[0]
            categories = entry[1:]

            t = term.decode('latin-1')
            words = get_spelling_variants(t, categories, 1600, 1830)
            words.append(t)
            #print term, words
            sleep(1)

            for word in words:
                if liwc_output.get(word) and not categories == liwc_output[word]:
                    new_c = list(set(categories + liwc_output.get(word)))
                    new_c.sort()
                    liwc_output[word] = new_c
                else:
                    liwc_output[word] = categories

    json.dump(liwc_output, open('liwc_output.json', 'w'), sort_keys=True)

    #liwc_output = json.load(open('liwc_output.json', 'r'), encoding='utf-8')

    entries = liwc_output.keys()
    entries.sort()

    for entry in entries:
        print '{e}\t\t{l}'.format(e=entry.encode('utf8'),
                                  l='\t'.join(liwc_output[entry]))
<commit_before><commit_msg>Add script for generating a historic version of the Dutch LIWC

Added the script for generating a historic version of the Dutch LIWC. This script was not yet committed in the old liwc project. The same holds for the files historic_Dutch_LIWC.dic and liwc_output.json.<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
from time import sleep

import json

import sys


def get_spelling_variants(term, categories, y_from, y_to):
    """Retrieve historic spelling variants from the INL Lexicon service.
    """
    # options for service:
    # get_wordforms
    # expand
    # get_lemma
    service = 'get_wordforms'
    url = 'http://sk.taalbanknederlands.inl.nl/LexiconService/lexicon/{s}'. \
          format(s=service)
    params = {
        'database': 'lexicon_service_db',
        'lemma': term,
        'year_from': y_from,
        'year_to': y_to
    }

    # Expand numbers by setting the pos tag to NUM
    if '11' in categories:
        params['pos'] = 'NUM'

    r = requests.get(url, params=params)

    if r.status_code == requests.codes.ok:
        #print r.encoding
        r.encoding = 'utf-8'
        #print r.text
        soup = BeautifulSoup(r.text, 'xml')
        words = soup.find_all('wordform')
        result = []
        for word in words:
            result.append(word.text)
        return result
    else:
        r.raise_for_status()


if __name__ == '__main__':
    #ws = get_spelling_variants('actueel', [], 1600, 1830)
    #print ws
    #for w in ws:
    #    print w.encode('utf-8')
    #sys.exit()

    with open('LIWC_Dutch_dictionary.dic', 'r') as f:
        lines = f.readlines()

    liwc_output = {}

    for line in lines:
        # legend
        if line[0].isdigit() or line.startswith(('%', '\r')):
            print line.strip()
        # word
        else:
            entry = line.split()
            term = entry[0]
            categories = entry[1:]

            t = term.decode('latin-1')
            words = get_spelling_variants(t, categories, 1600, 1830)
            words.append(t)
            #print term, words
            sleep(1)

            for word in words:
                if liwc_output.get(word) and not categories == liwc_output[word]:
                    new_c = list(set(categories + liwc_output.get(word)))
                    new_c.sort()
                    liwc_output[word] = new_c
                else:
                    liwc_output[word] = categories

    json.dump(liwc_output, open('liwc_output.json', 'w'), sort_keys=True)

    #liwc_output = json.load(open('liwc_output.json', 'r'), encoding='utf-8')

    entries = liwc_output.keys()
    entries.sort()

    for entry in entries:
        print '{e}\t\t{l}'.format(e=entry.encode('utf8'),
                                  l='\t'.join(liwc_output[entry]))
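json.dump and json.dumps are easy to confuse: dumps returns a string, while dump writes to a file object, and passing a file to dumps merely binds it to the skipkeys parameter, so nothing reaches disk. The distinction in miniature, with an illustrative entry:

import json

data = {'aandoenlijk': ['12', '13']}
text = json.dumps(data, sort_keys=True)        # dumps: returns a str
with open('liwc_output.json', 'w') as f:
    json.dump(data, f, sort_keys=True)         # dump: writes to the file
with open('liwc_output.json') as f:
    assert json.load(f) == data                # load/loads mirror the split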
2ef2d15fe305ade82fc760f0cc29f530458528fe
pronto_feedback/feedback/migrations/0005_remove_feedback_last_modification_date.py
pronto_feedback/feedback/migrations/0005_remove_feedback_last_modification_date.py
# -*- coding: utf-8 -*- # Generated by Django 1.10 on 2016-09-01 01:27 from __future__ import unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('feedback', '0004_auto_20160827_0453'), ] operations = [ migrations.RemoveField( model_name='feedback', name='last_modification_date', ), ]
Add migration file that removes last_modification_date
Add migration file that removes last_modification_date
Python
mit
zkan/pronto-feedback,zkan/pronto-feedback
Add migration file that removes last_modification_date
# -*- coding: utf-8 -*- # Generated by Django 1.10 on 2016-09-01 01:27 from __future__ import unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('feedback', '0004_auto_20160827_0453'), ] operations = [ migrations.RemoveField( model_name='feedback', name='last_modification_date', ), ]
<commit_before><commit_msg>Add migration file that removes last_modification_date<commit_after>
# -*- coding: utf-8 -*- # Generated by Django 1.10 on 2016-09-01 01:27 from __future__ import unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('feedback', '0004_auto_20160827_0453'), ] operations = [ migrations.RemoveField( model_name='feedback', name='last_modification_date', ), ]
Add migration file that removes last_modification_date# -*- coding: utf-8 -*- # Generated by Django 1.10 on 2016-09-01 01:27 from __future__ import unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('feedback', '0004_auto_20160827_0453'), ] operations = [ migrations.RemoveField( model_name='feedback', name='last_modification_date', ), ]
<commit_before><commit_msg>Add migration file that removes last_modification_date<commit_after># -*- coding: utf-8 -*- # Generated by Django 1.10 on 2016-09-01 01:27 from __future__ import unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('feedback', '0004_auto_20160827_0453'), ] operations = [ migrations.RemoveField( model_name='feedback', name='last_modification_date', ), ]
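A RemoveField migration like this one is what makemigrations emits after a field is deleted from the model class. A hypothetical sketch of the triggering model edit; it assumes a configured Django project with a feedback app and is not runnable standalone:

from django.db import models

class Feedback(models.Model):
    # Deleting the line below and running `manage.py makemigrations feedback`
    # produces migrations.RemoveField(model_name='feedback',
    # name='last_modification_date'), as in the migration above.
    last_modification_date = models.DateTimeField(auto_now=True)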
b7cf20cea38f28f1f488e2667ea43ee579c0ed95
testserver.py
testserver.py
#!/usr/bin/python

# Author: sc0tfree
# Twitter: @sc0tfree
# Email: henry@sc0tfree.com

import os
import socket


def generate_random_hex(length):
    '''
    Generates a string of (length - 1) random bytes, ending in a newline.
    '''
    hex_string = os.urandom(length - 1)
    hex_string += '\x0a'
    return hex_string

host = '127.0.0.1'
port = 12345

s = socket.socket()
s.bind((host, port))

s.listen(5)

try:
    while True:
        c, addr = s.accept()
        print 'Connection established from', addr[0], ':', addr[1]
        c.send('Hello from Test Server\n')

        # Echo Test
        c.send('Echo Test - enter string:')
        data = c.recv(1024)
        print 'Echo Test - received: ', data
        c.send('Echo Test - received: ' + data + '\n')

        # Hex Test
        c.send('Hex Test - enter length:')
        data = c.recv(1024)

        try:
            hex_length = int(data)
        except ValueError:
            c.send('You must enter a number. Defaulting to 10.\n')
            hex_length = 10

        hex_string = generate_random_hex(hex_length)

        c.send('Sending hex string...\n\n')
        print 'Hex Test - sending: ', hex_string
        c.send(hex_string)

        c.close()
        print 'Closed connection to ', addr[0], ':', addr[1]

except KeyboardInterrupt:
    c.close()
    print '\nExiting...'
    exit(0)
Add test server to demo functionality
Add test server to demo functionality
Python
mit
sc0tfree/netbyte
Add test server to demo functionality
#!/usr/bin/python

# Author: sc0tfree
# Twitter: @sc0tfree
# Email: henry@sc0tfree.com

import os
import socket


def generate_random_hex(length):
    '''
    Generates a string of (length - 1) random bytes, ending in a newline.
    '''
    hex_string = os.urandom(length - 1)
    hex_string += '\x0a'
    return hex_string

host = '127.0.0.1'
port = 12345

s = socket.socket()
s.bind((host, port))

s.listen(5)

try:
    while True:
        c, addr = s.accept()
        print 'Connection established from', addr[0], ':', addr[1]
        c.send('Hello from Test Server\n')

        # Echo Test
        c.send('Echo Test - enter string:')
        data = c.recv(1024)
        print 'Echo Test - received: ', data
        c.send('Echo Test - received: ' + data + '\n')

        # Hex Test
        c.send('Hex Test - enter length:')
        data = c.recv(1024)

        try:
            hex_length = int(data)
        except ValueError:
            c.send('You must enter a number. Defaulting to 10.\n')
            hex_length = 10

        hex_string = generate_random_hex(hex_length)

        c.send('Sending hex string...\n\n')
        print 'Hex Test - sending: ', hex_string
        c.send(hex_string)

        c.close()
        print 'Closed connection to ', addr[0], ':', addr[1]

except KeyboardInterrupt:
    c.close()
    print '\nExiting...'
    exit(0)
<commit_before><commit_msg>Add test server to demo functionality<commit_after>
#!/usr/bin/python

# Author: sc0tfree
# Twitter: @sc0tfree
# Email: henry@sc0tfree.com

import os
import socket


def generate_random_hex(length):
    '''
    Generates a string of (length - 1) random bytes, ending in a newline.
    '''
    hex_string = os.urandom(length - 1)
    hex_string += '\x0a'
    return hex_string

host = '127.0.0.1'
port = 12345

s = socket.socket()
s.bind((host, port))

s.listen(5)

try:
    while True:
        c, addr = s.accept()
        print 'Connection established from', addr[0], ':', addr[1]
        c.send('Hello from Test Server\n')

        # Echo Test
        c.send('Echo Test - enter string:')
        data = c.recv(1024)
        print 'Echo Test - received: ', data
        c.send('Echo Test - received: ' + data + '\n')

        # Hex Test
        c.send('Hex Test - enter length:')
        data = c.recv(1024)

        try:
            hex_length = int(data)
        except ValueError:
            c.send('You must enter a number. Defaulting to 10.\n')
            hex_length = 10

        hex_string = generate_random_hex(hex_length)

        c.send('Sending hex string...\n\n')
        print 'Hex Test - sending: ', hex_string
        c.send(hex_string)

        c.close()
        print 'Closed connection to ', addr[0], ':', addr[1]

except KeyboardInterrupt:
    c.close()
    print '\nExiting...'
    exit(0)
Add test server to demo functionality#!/usr/bin/python

# Author: sc0tfree
# Twitter: @sc0tfree
# Email: henry@sc0tfree.com

import os
import socket


def generate_random_hex(length):
    '''
    Generates a string of (length - 1) random bytes, ending in a newline.
    '''
    hex_string = os.urandom(length - 1)
    hex_string += '\x0a'
    return hex_string

host = '127.0.0.1'
port = 12345

s = socket.socket()
s.bind((host, port))

s.listen(5)

try:
    while True:
        c, addr = s.accept()
        print 'Connection established from', addr[0], ':', addr[1]
        c.send('Hello from Test Server\n')

        # Echo Test
        c.send('Echo Test - enter string:')
        data = c.recv(1024)
        print 'Echo Test - received: ', data
        c.send('Echo Test - received: ' + data + '\n')

        # Hex Test
        c.send('Hex Test - enter length:')
        data = c.recv(1024)

        try:
            hex_length = int(data)
        except ValueError:
            c.send('You must enter a number. Defaulting to 10.\n')
            hex_length = 10

        hex_string = generate_random_hex(hex_length)

        c.send('Sending hex string...\n\n')
        print 'Hex Test - sending: ', hex_string
        c.send(hex_string)

        c.close()
        print 'Closed connection to ', addr[0], ':', addr[1]

except KeyboardInterrupt:
    c.close()
    print '\nExiting...'
    exit(0)
<commit_before><commit_msg>Add test server to demo functionality<commit_after>#!/usr/bin/python

# Author: sc0tfree
# Twitter: @sc0tfree
# Email: henry@sc0tfree.com

import os
import socket


def generate_random_hex(length):
    '''
    Generates a string of (length - 1) random bytes, ending in a newline.
    '''
    hex_string = os.urandom(length - 1)
    hex_string += '\x0a'
    return hex_string

host = '127.0.0.1'
port = 12345

s = socket.socket()
s.bind((host, port))

s.listen(5)

try:
    while True:
        c, addr = s.accept()
        print 'Connection established from', addr[0], ':', addr[1]
        c.send('Hello from Test Server\n')

        # Echo Test
        c.send('Echo Test - enter string:')
        data = c.recv(1024)
        print 'Echo Test - received: ', data
        c.send('Echo Test - received: ' + data + '\n')

        # Hex Test
        c.send('Hex Test - enter length:')
        data = c.recv(1024)

        try:
            hex_length = int(data)
        except ValueError:
            c.send('You must enter a number. Defaulting to 10.\n')
            hex_length = 10

        hex_string = generate_random_hex(hex_length)

        c.send('Sending hex string...\n\n')
        print 'Hex Test - sending: ', hex_string
        c.send(hex_string)

        c.close()
        print 'Closed connection to ', addr[0], ':', addr[1]

except KeyboardInterrupt:
    c.close()
    print '\nExiting...'
    exit(0)
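A minimal Python 3 client for exercising the server above, assuming it is already listening on 127.0.0.1:12345 as coded; TCP recv boundaries are not guaranteed, so the prints are best-effort:

import socket

with socket.create_connection(('127.0.0.1', 12345)) as conn:
    print(conn.recv(1024).decode())   # greeting plus echo prompt
    conn.sendall(b'hello')
    print(conn.recv(1024).decode())   # echoed string plus hex prompt
    conn.sendall(b'16')
    print(repr(conn.recv(1024)))      # 15 raw bytes plus a trailing newline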
b7887c3e2729605d50372f51178ec3f86a09b52a
tastycrust/api.py
tastycrust/api.py
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import unicode_literals from importlib import import_module from inspect import isclass, getmembers from django.conf import settings from tastypie.api import Api as VanillaApi from tastypie.resources import Resource __all__ = ['Api'] def _is_resource_class(obj): return isclass(obj) and issubclass(obj, Resource) class Api(VanillaApi): def register(self, resource, canonical=True, module_name='resources'): if isinstance(resource, Resource): return super(Api, self).register(resource, canonical) elif _is_resource_class(resource): return super(Api, self).register(resource(), canonical) app_name, resource_name = resource.split('.') module = import_module('.'.join([app_name, module_name])) resource = getattr(module, resource_name)() return super(Api, self).register(resource, canonical) def autodiscover(self): for app_name in settings.INSTALLED_APPS: if app_name == 'tastypie': continue try: module = import_module('.'.join([app_name, 'resources'])) except ImportError: continue for name, klass in getmembers(module, _is_resource_class): resource = klass() if not getattr(resource._meta, 'abstract', False): self.register(resource)
Implement Api addons (not tested)
Implement Api addons (not tested)
Python
bsd-3-clause
uranusjr/django-tastypie-crust
Implement Api addons (not tested)
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import unicode_literals from importlib import import_module from inspect import isclass, getmembers from django.conf import settings from tastypie.api import Api as VanillaApi from tastypie.resources import Resource __all__ = ['Api'] def _is_resource_class(obj): return isclass(obj) and issubclass(obj, Resource) class Api(VanillaApi): def register(self, resource, canonical=True, module_name='resources'): if isinstance(resource, Resource): return super(Api, self).register(resource, canonical) elif _is_resource_class(resource): return super(Api, self).register(resource(), canonical) app_name, resource_name = resource.split('.') module = import_module('.'.join([app_name, module_name])) resource = getattr(module, resource_name)() return super(Api, self).register(resource, canonical) def autodiscover(self): for app_name in settings.INSTALLED_APPS: if app_name == 'tastypie': continue try: module = import_module('.'.join([app_name, 'resources'])) except ImportError: continue for name, klass in getmembers(module, _is_resource_class): resource = klass() if not getattr(resource._meta, 'abstract', False): self.register(resource)
<commit_before><commit_msg>Implement Api addons (not tested)<commit_after>
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import unicode_literals from importlib import import_module from inspect import isclass, getmembers from django.conf import settings from tastypie.api import Api as VanillaApi from tastypie.resources import Resource __all__ = ['Api'] def _is_resource_class(obj): return isclass(obj) and issubclass(obj, Resource) class Api(VanillaApi): def register(self, resource, canonical=True, module_name='resources'): if isinstance(resource, Resource): return super(Api, self).register(resource, canonical) elif _is_resource_class(resource): return super(Api, self).register(resource(), canonical) app_name, resource_name = resource.split('.') module = import_module('.'.join([app_name, module_name])) resource = getattr(module, resource_name)() return super(Api, self).register(resource, canonical) def autodiscover(self): for app_name in settings.INSTALLED_APPS: if app_name == 'tastypie': continue try: module = import_module('.'.join([app_name, 'resources'])) except ImportError: continue for name, klass in getmembers(module, _is_resource_class): resource = klass() if not getattr(resource._meta, 'abstract', False): self.register(resource)
Implement Api addons (not tested)#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import unicode_literals from importlib import import_module from inspect import isclass, getmembers from django.conf import settings from tastypie.api import Api as VanillaApi from tastypie.resources import Resource __all__ = ['Api'] def _is_resource_class(obj): return isclass(obj) and issubclass(obj, Resource) class Api(VanillaApi): def register(self, resource, canonical=True, module_name='resources'): if isinstance(resource, Resource): return super(Api, self).register(resource, canonical) elif _is_resource_class(resource): return super(Api, self).register(resource(), canonical) app_name, resource_name = resource.split('.') module = import_module('.'.join([app_name, module_name])) resource = getattr(module, resource_name)() return super(Api, self).register(resource, canonical) def autodiscover(self): for app_name in settings.INSTALLED_APPS: if app_name == 'tastypie': continue try: module = import_module('.'.join([app_name, 'resources'])) except ImportError: continue for name, klass in getmembers(module, _is_resource_class): resource = klass() if not getattr(resource._meta, 'abstract', False): self.register(resource)
<commit_before><commit_msg>Implement Api addons (not tested)<commit_after>#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import unicode_literals from importlib import import_module from inspect import isclass, getmembers from django.conf import settings from tastypie.api import Api as VanillaApi from tastypie.resources import Resource __all__ = ['Api'] def _is_resource_class(obj): return isclass(obj) and issubclass(obj, Resource) class Api(VanillaApi): def register(self, resource, canonical=True, module_name='resources'): if isinstance(resource, Resource): return super(Api, self).register(resource, canonical) elif _is_resource_class(resource): return super(Api, self).register(resource(), canonical) app_name, resource_name = resource.split('.') module = import_module('.'.join([app_name, module_name])) resource = getattr(module, resource_name)() return super(Api, self).register(resource, canonical) def autodiscover(self): for app_name in settings.INSTALLED_APPS: if app_name == 'tastypie': continue try: module = import_module('.'.join([app_name, 'resources'])) except ImportError: continue for name, klass in getmembers(module, _is_resource_class): resource = klass() if not getattr(resource._meta, 'abstract', False): self.register(resource)
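A sketch of how the extended register and autodiscover above would plug into a project URLconf; the blog app and EntryResource are hypothetical, and the snippet assumes a configured Django settings module with tastypie installed:

from tastycrust.api import Api

v1_api = Api(api_name='v1')
v1_api.register('blog.EntryResource')   # dotted-path form resolved by register()
v1_api.autodiscover()                   # sweeps INSTALLED_APPS for Resource classes

# urlpatterns = [url(r'^api/', include(v1_api.urls))]   # project URLconf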
9f9db6242c3bac5626caf922487209f6f9af1cd8
vultr/v1_firewall.py
vultr/v1_firewall.py
'''Partial class to handle Vultr Firewall API calls''' from .utils import VultrBase class VultrFirewall(VultrBase): '''Handles Bultr Firewall API calls''' def __init__(self, api_key): VultrBase.__init__(self, api_key) def group_list(self, params=None): ''' /v1/firewall/group_list GET - account List all firewall groups on the current account. Link: https://www.vultr.com/api/#firewall_group_list ''' params = params if params else dict() return self.request('/v1/firewall/group_list', params, 'GET')
'''Partial class to handle Vultr Firewall API calls''' from .utils import VultrBase class VultrFirewall(VultrBase): '''Handles Vultr Firewall API calls''' def __init__(self, api_key): VultrBase.__init__(self, api_key) def group_list(self, params=None): ''' /v1/firewall/group_list GET - account List all firewall groups on the current account. Link: https://www.vultr.com/api/#firewall_group_list ''' params = params if params else dict() return self.request('/v1/firewall/group_list', params, 'GET')
Add partial support for firewall
Add partial support for firewall
Python
mit
spry-group/python-vultr
'''Partial class to handle Vultr Firewall API calls''' from .utils import VultrBase class VultrFirewall(VultrBase): '''Handles Bultr Firewall API calls''' def __init__(self, api_key): VultrBase.__init__(self, api_key) def group_list(self, params=None): ''' /v1/firewall/group_list GET - account List all firewall groups on the current account. Link: https://www.vultr.com/api/#firewall_group_list ''' params = params if params else dict() return self.request('/v1/firewall/group_list', params, 'GET') Add partial support for firewall
'''Partial class to handle Vultr Firewall API calls''' from .utils import VultrBase class VultrFirewall(VultrBase): '''Handles Vultr Firewall API calls''' def __init__(self, api_key): VultrBase.__init__(self, api_key) def group_list(self, params=None): ''' /v1/firewall/group_list GET - account List all firewall groups on the current account. Link: https://www.vultr.com/api/#firewall_group_list ''' params = params if params else dict() return self.request('/v1/firewall/group_list', params, 'GET')
<commit_before>'''Partial class to handle Vultr Firewall API calls''' from .utils import VultrBase class VultrFirewall(VultrBase): '''Handles Bultr Firewall API calls''' def __init__(self, api_key): VultrBase.__init__(self, api_key) def group_list(self, params=None): ''' /v1/firewall/group_list GET - account List all firewall groups on the current account. Link: https://www.vultr.com/api/#firewall_group_list ''' params = params if params else dict() return self.request('/v1/firewall/group_list', params, 'GET') <commit_msg>Add partial support for firewall<commit_after>
'''Partial class to handle Vultr Firewall API calls''' from .utils import VultrBase class VultrFirewall(VultrBase): '''Handles Vultr Firewall API calls''' def __init__(self, api_key): VultrBase.__init__(self, api_key) def group_list(self, params=None): ''' /v1/firewall/group_list GET - account List all firewall groups on the current account. Link: https://www.vultr.com/api/#firewall_group_list ''' params = params if params else dict() return self.request('/v1/firewall/group_list', params, 'GET')
'''Partial class to handle Vultr Firewall API calls''' from .utils import VultrBase class VultrFirewall(VultrBase): '''Handles Bultr Firewall API calls''' def __init__(self, api_key): VultrBase.__init__(self, api_key) def group_list(self, params=None): ''' /v1/firewall/group_list GET - account List all firewall groups on the current account. Link: https://www.vultr.com/api/#firewall_group_list ''' params = params if params else dict() return self.request('/v1/firewall/group_list', params, 'GET') Add partial support for firewall'''Partial class to handle Vultr Firewall API calls''' from .utils import VultrBase class VultrFirewall(VultrBase): '''Handles Vultr Firewall API calls''' def __init__(self, api_key): VultrBase.__init__(self, api_key) def group_list(self, params=None): ''' /v1/firewall/group_list GET - account List all firewall groups on the current account. Link: https://www.vultr.com/api/#firewall_group_list ''' params = params if params else dict() return self.request('/v1/firewall/group_list', params, 'GET')
<commit_before>'''Partial class to handle Vultr Firewall API calls''' from .utils import VultrBase class VultrFirewall(VultrBase): '''Handles Bultr Firewall API calls''' def __init__(self, api_key): VultrBase.__init__(self, api_key) def group_list(self, params=None): ''' /v1/firewall/group_list GET - account List all firewall groups on the current account. Link: https://www.vultr.com/api/#firewall_group_list ''' params = params if params else dict() return self.request('/v1/firewall/group_list', params, 'GET') <commit_msg>Add partial support for firewall<commit_after>'''Partial class to handle Vultr Firewall API calls''' from .utils import VultrBase class VultrFirewall(VultrBase): '''Handles Vultr Firewall API calls''' def __init__(self, api_key): VultrBase.__init__(self, api_key) def group_list(self, params=None): ''' /v1/firewall/group_list GET - account List all firewall groups on the current account. Link: https://www.vultr.com/api/#firewall_group_list ''' params = params if params else dict() return self.request('/v1/firewall/group_list', params, 'GET')
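The firewall record adds a single wrapper for `GET /v1/firewall/group_list`. A minimal usage sketch, assuming the package's `VultrBase.request()` handles authentication and JSON decoding; the API key is a placeholder, and the exact shape of the returned dict follows the v1 API docs rather than this commit:

```python
# Hypothetical call against the partial firewall support above.
fw = VultrFirewall('EXAMPLE_API_KEY')   # placeholder key, not a credential

groups = fw.group_list()                # GET /v1/firewall/group_list
print(groups)                           # per the v1 docs: a dict of firewall
                                        # groups keyed by FIREWALLGROUPID
```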
2c932a14fbf6a468176af77b39360be108777389
markers/mark_error_104.py
markers/mark_error_104.py
"""Marks all fixed errors #104 on ruwiki's CheckWikipedia.""" import re import pywikibot from checkwiki import load_page_list, mark_error_done, log NUMBER = "104" REGEXP = r"<ref\s+name=\s*(.*?)\s*(?:group=.*?)?\s*/?>" FLAGS = re.I def main(): """Downloads list from server and marks relevant errors as done.""" site = pywikibot.Site() for line in load_page_list(NUMBER): page = pywikibot.Page(site, line) mark = True for name in re.findall(REGEXP, page.text, flags=FLAGS): if re.match(r"^'.*'$|^\".*\"$", name): continue if re.search(r"[\"'/\\=?#>\s]", name): mark = False break if mark: mark_error_done(NUMBER, page.title()) log(line, success=True) else: log(line, success=False) if __name__ == "__main__": main()
Add marker for 104th error
Add marker for 104th error
Python
mit
Facenapalm/NapalmBot
Add marker for 104th error
"""Marks all fixed errors #104 on ruwiki's CheckWikipedia.""" import re import pywikibot from checkwiki import load_page_list, mark_error_done, log NUMBER = "104" REGEXP = r"<ref\s+name=\s*(.*?)\s*(?:group=.*?)?\s*/?>" FLAGS = re.I def main(): """Downloads list from server and marks relevant errors as done.""" site = pywikibot.Site() for line in load_page_list(NUMBER): page = pywikibot.Page(site, line) mark = True for name in re.findall(REGEXP, page.text, flags=FLAGS): if re.match(r"^'.*'$|^\".*\"$", name): continue if re.search(r"[\"'/\\=?#>\s]", name): mark = False break if mark: mark_error_done(NUMBER, page.title()) log(line, success=True) else: log(line, success=False) if __name__ == "__main__": main()
<commit_before><commit_msg>Add marker for 104th error<commit_after>
"""Marks all fixed errors #104 on ruwiki's CheckWikipedia.""" import re import pywikibot from checkwiki import load_page_list, mark_error_done, log NUMBER = "104" REGEXP = r"<ref\s+name=\s*(.*?)\s*(?:group=.*?)?\s*/?>" FLAGS = re.I def main(): """Downloads list from server and marks relevant errors as done.""" site = pywikibot.Site() for line in load_page_list(NUMBER): page = pywikibot.Page(site, line) mark = True for name in re.findall(REGEXP, page.text, flags=FLAGS): if re.match(r"^'.*'$|^\".*\"$", name): continue if re.search(r"[\"'/\\=?#>\s]", name): mark = False break if mark: mark_error_done(NUMBER, page.title()) log(line, success=True) else: log(line, success=False) if __name__ == "__main__": main()
Add marker for 104th error"""Marks all fixed errors #104 on ruwiki's CheckWikipedia.""" import re import pywikibot from checkwiki import load_page_list, mark_error_done, log NUMBER = "104" REGEXP = r"<ref\s+name=\s*(.*?)\s*(?:group=.*?)?\s*/?>" FLAGS = re.I def main(): """Downloads list from server and marks relevant errors as done.""" site = pywikibot.Site() for line in load_page_list(NUMBER): page = pywikibot.Page(site, line) mark = True for name in re.findall(REGEXP, page.text, flags=FLAGS): if re.match(r"^'.*'$|^\".*\"$", name): continue if re.search(r"[\"'/\\=?#>\s]", name): mark = False break if mark: mark_error_done(NUMBER, page.title()) log(line, success=True) else: log(line, success=False) if __name__ == "__main__": main()
<commit_before><commit_msg>Add marker for 104th error<commit_after>"""Marks all fixed errors #104 on ruwiki's CheckWikipedia.""" import re import pywikibot from checkwiki import load_page_list, mark_error_done, log NUMBER = "104" REGEXP = r"<ref\s+name=\s*(.*?)\s*(?:group=.*?)?\s*/?>" FLAGS = re.I def main(): """Downloads list from server and marks relevant errors as done.""" site = pywikibot.Site() for line in load_page_list(NUMBER): page = pywikibot.Page(site, line) mark = True for name in re.findall(REGEXP, page.text, flags=FLAGS): if re.match(r"^'.*'$|^\".*\"$", name): continue if re.search(r"[\"'/\\=?#>\s]", name): mark = False break if mark: mark_error_done(NUMBER, page.title()) log(line, success=True) else: log(line, success=False) if __name__ == "__main__": main()
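The marker's core logic is the pair of regexes: a `<ref name=...>` is acceptable if the captured name is fully quoted, or if it contains none of `" ' / \ = ? # >` or whitespace. A self-contained demo of that decision on sample wikitext (the sample strings are invented for illustration):

```python
import re

REGEXP = r"<ref\s+name=\s*(.*?)\s*(?:group=.*?)?\s*/?>"

# Which captured names would keep the page counted as fixed?
text = '<ref name=alpha /> <ref name="beta two" /> <ref name=bad name />'
for name in re.findall(REGEXP, text, flags=re.I):
    quoted = bool(re.match(r"^'.*'$|^\".*\"$", name))
    risky = bool(re.search(r"[\"'/\\=?#>\s]", name))
    print(repr(name), 'ok' if quoted or not risky else 'still broken')
# 'alpha'        ok            (no forbidden characters)
# '"beta two"'   ok            (quoted names are skipped outright)
# 'bad name'     still broken  (unquoted name containing a space)
```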
a0adf63c1f942f4cdbe839ea367772c5ae08fbfc
CodeFights/fixTree.py
CodeFights/fixTree.py
#!/usr/local/bin/python # Code Fights Fix Tree Problem def fixTree(tree): return [s.strip().center(len(s)) for s in tree] def main(): tests = [ [ [" * ", " * ", "*** ", " *****", " *******", "*********", " *** "], [" * ", " * ", " *** ", " ***** ", " ******* ", "*********", " *** "] ], [ [" * ", " * ", " *** ", " ***** ", " ******* ", "*********", " *** "], [" * ", " * ", " *** ", " ***** ", " ******* ", "*********", " *** "] ], [ ["*", "*", "*", "*"], ["*", "*", "*", "*"] ], [ [" *** "], [" *** "] ], [ [" * ", "* ", " *** ", " ***** ", " ******* ", " ********* ", "******* ", " ********* ", " ***********", " *********", "*********** ", "*************", " *** ", " *** ", " *** "], [" * ", " * ", " *** ", " ***** ", " ******* ", " ********* ", " ******* ", " ********* ", " *********** ", " ********* ", " *********** ", "*************", " *** ", " *** ", " *** "] ] ] for t in tests: res = fixTree(t[0]) ans = t[1] if ans == res: print("PASSED: fixTree({}) returned {}" .format(t[0], res)) else: print(("FAILED: fixTree({}) returned {}," "answer: {}").format(t[0], res, ans)) if __name__ == '__main__': main()
Solve Code Fights fix tree problem
Solve Code Fights fix tree problem
Python
mit
HKuz/Test_Code
Solve Code Fights fix tree problem
#!/usr/local/bin/python # Code Fights Fix Tree Problem def fixTree(tree): return [s.strip().center(len(s)) for s in tree] def main(): tests = [ [ [" * ", " * ", "*** ", " *****", " *******", "*********", " *** "], [" * ", " * ", " *** ", " ***** ", " ******* ", "*********", " *** "] ], [ [" * ", " * ", " *** ", " ***** ", " ******* ", "*********", " *** "], [" * ", " * ", " *** ", " ***** ", " ******* ", "*********", " *** "] ], [ ["*", "*", "*", "*"], ["*", "*", "*", "*"] ], [ [" *** "], [" *** "] ], [ [" * ", "* ", " *** ", " ***** ", " ******* ", " ********* ", "******* ", " ********* ", " ***********", " *********", "*********** ", "*************", " *** ", " *** ", " *** "], [" * ", " * ", " *** ", " ***** ", " ******* ", " ********* ", " ******* ", " ********* ", " *********** ", " ********* ", " *********** ", "*************", " *** ", " *** ", " *** "] ] ] for t in tests: res = fixTree(t[0]) ans = t[1] if ans == res: print("PASSED: fixTree({}) returned {}" .format(t[0], res)) else: print(("FAILED: fixTree({}) returned {}," "answer: {}").format(t[0], res, ans)) if __name__ == '__main__': main()
<commit_before><commit_msg>Solve Code Fights fix tree problem<commit_after>
#!/usr/local/bin/python # Code Fights Fix Tree Problem def fixTree(tree): return [s.strip().center(len(s)) for s in tree] def main(): tests = [ [ [" * ", " * ", "*** ", " *****", " *******", "*********", " *** "], [" * ", " * ", " *** ", " ***** ", " ******* ", "*********", " *** "] ], [ [" * ", " * ", " *** ", " ***** ", " ******* ", "*********", " *** "], [" * ", " * ", " *** ", " ***** ", " ******* ", "*********", " *** "] ], [ ["*", "*", "*", "*"], ["*", "*", "*", "*"] ], [ [" *** "], [" *** "] ], [ [" * ", "* ", " *** ", " ***** ", " ******* ", " ********* ", "******* ", " ********* ", " ***********", " *********", "*********** ", "*************", " *** ", " *** ", " *** "], [" * ", " * ", " *** ", " ***** ", " ******* ", " ********* ", " ******* ", " ********* ", " *********** ", " ********* ", " *********** ", "*************", " *** ", " *** ", " *** "] ] ] for t in tests: res = fixTree(t[0]) ans = t[1] if ans == res: print("PASSED: fixTree({}) returned {}" .format(t[0], res)) else: print(("FAILED: fixTree({}) returned {}," "answer: {}").format(t[0], res, ans)) if __name__ == '__main__': main()
Solve Code Fights fix tree problem#!/usr/local/bin/python # Code Fights Fix Tree Problem def fixTree(tree): return [s.strip().center(len(s)) for s in tree] def main(): tests = [ [ [" * ", " * ", "*** ", " *****", " *******", "*********", " *** "], [" * ", " * ", " *** ", " ***** ", " ******* ", "*********", " *** "] ], [ [" * ", " * ", " *** ", " ***** ", " ******* ", "*********", " *** "], [" * ", " * ", " *** ", " ***** ", " ******* ", "*********", " *** "] ], [ ["*", "*", "*", "*"], ["*", "*", "*", "*"] ], [ [" *** "], [" *** "] ], [ [" * ", "* ", " *** ", " ***** ", " ******* ", " ********* ", "******* ", " ********* ", " ***********", " *********", "*********** ", "*************", " *** ", " *** ", " *** "], [" * ", " * ", " *** ", " ***** ", " ******* ", " ********* ", " ******* ", " ********* ", " *********** ", " ********* ", " *********** ", "*************", " *** ", " *** ", " *** "] ] ] for t in tests: res = fixTree(t[0]) ans = t[1] if ans == res: print("PASSED: fixTree({}) returned {}" .format(t[0], res)) else: print(("FAILED: fixTree({}) returned {}," "answer: {}").format(t[0], res, ans)) if __name__ == '__main__': main()
<commit_before><commit_msg>Solve Code Fights fix tree problem<commit_after>#!/usr/local/bin/python # Code Fights Fix Tree Problem def fixTree(tree): return [s.strip().center(len(s)) for s in tree] def main(): tests = [ [ [" * ", " * ", "*** ", " *****", " *******", "*********", " *** "], [" * ", " * ", " *** ", " ***** ", " ******* ", "*********", " *** "] ], [ [" * ", " * ", " *** ", " ***** ", " ******* ", "*********", " *** "], [" * ", " * ", " *** ", " ***** ", " ******* ", "*********", " *** "] ], [ ["*", "*", "*", "*"], ["*", "*", "*", "*"] ], [ [" *** "], [" *** "] ], [ [" * ", "* ", " *** ", " ***** ", " ******* ", " ********* ", "******* ", " ********* ", " ***********", " *********", "*********** ", "*************", " *** ", " *** ", " *** "], [" * ", " * ", " *** ", " ***** ", " ******* ", " ********* ", " ******* ", " ********* ", " *********** ", " ********* ", " *********** ", "*************", " *** ", " *** ", " *** "] ] ] for t in tests: res = fixTree(t[0]) ans = t[1] if ans == res: print("PASSED: fixTree({}) returned {}" .format(t[0], res)) else: print(("FAILED: fixTree({}) returned {}," "answer: {}").format(t[0], res, ans)) if __name__ == '__main__': main()
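The one-line solution leans entirely on `str.strip()` plus `str.center(len(s))`: every row keeps its original width, and the stars are re-centred within it. A quick runnable check of that behavior (rows invented for illustration):

```python
def fixTree(tree):
    return [s.strip().center(len(s)) for s in tree]

for row in fixTree(["  *    ", "***    ", " ***** "]):
    print(repr(row))
# '   *   '
# '  ***  '
# ' ***** '
```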
8ad29d6563fd282b18884c345451a44a71eba890
tests/test_midi_note_numbers.py
tests/test_midi_note_numbers.py
# TODO: test if method to convert Note to a MIDI note number works import pytest from music_essentials import Note def test_midi_0(): n = Note.from_note_string('C-1') mid = n.midi_note_number() assert mid == 0 def test_midi_1_sharp(): n = Note.from_note_string('C-1#') mid = n.midi_note_number() assert mid == 1 def test_midi_1_flat(): n = Note.from_note_string('D-1b') mid = n.midi_note_number() assert mid == 1 def test_midi_2(): n = Note.from_note_string('D-1') mid = n.midi_note_number() assert mid == 2 def test_midi_3_sharp(): n = Note.from_note_string('D-1#') mid = n.midi_note_number() assert mid == 3 def test_midi_3_flat(): n = Note.from_note_string('E-1b') mid = n.midi_note_number() assert mid == 3 def test_midi_4(): n = Note.from_note_string('E-1') mid = n.midi_note_number() assert mid == 4 def test_midi_5(): n = Note.from_note_string('F-1') mid = n.midi_note_number() assert mid == 5 def test_midi_6_sharp(): n = Note.from_note_string('F-1#') mid = n.midi_note_number() assert mid == 6 def test_midi_6_flat(): n = Note.from_note_string('G-1b') mid = n.midi_note_number() assert mid == 6 def test_midi_7(): n = Note.from_note_string('G-1') mid = n.midi_note_number() assert mid == 7 def test_midi_8_sharp(): n = Note.from_note_string('G-1#') mid = n.midi_note_number() assert mid == 8 def test_midi_8_flat(): n = Note.from_note_string('A-1b') mid = n.midi_note_number() assert mid == 8 def test_midi_9(): n = Note.from_note_string('A-1') mid = n.midi_note_number() assert mid == 9 def test_midi_10_sharp(): n = Note.from_note_string('A-1#') mid = n.midi_note_number() assert mid == 10 def test_midi_10_flat(): n = Note.from_note_string('B-1b') mid = n.midi_note_number() assert mid == 10 def test_midi_11(): n = Note.from_note_string('B-1') mid = n.midi_note_number() assert mid == 11
Add tests for calculating MIDI numbers - octave -1.
Add tests for calculating MIDI numbers - octave -1. Signed-off-by: Charlotte Pierce <351429ca27f6e4bff2dbb77adb5046c88cd12fae@malformed-bits.com>
Python
mit
charlottepierce/music_essentials
Add tests for calculating MIDI numbers - octave -1. Signed-off-by: Charlotte Pierce <351429ca27f6e4bff2dbb77adb5046c88cd12fae@malformed-bits.com>
# TODO: test if method to convert Note to a MIDI note number works import pytest from music_essentials import Note def test_midi_0(): n = Note.from_note_string('C-1') mid = n.midi_note_number() assert mid == 0 def test_midi_1_sharp(): n = Note.from_note_string('C-1#') mid = n.midi_note_number() assert mid == 1 def test_midi_1_flat(): n = Note.from_note_string('D-1b') mid = n.midi_note_number() assert mid == 1 def test_midi_2(): n = Note.from_note_string('D-1') mid = n.midi_note_number() assert mid == 2 def test_midi_3_sharp(): n = Note.from_note_string('D-1#') mid = n.midi_note_number() assert mid == 3 def test_midi_3_flat(): n = Note.from_note_string('E-1b') mid = n.midi_note_number() assert mid == 3 def test_midi_4(): n = Note.from_note_string('E-1') mid = n.midi_note_number() assert mid == 4 def test_midi_5(): n = Note.from_note_string('F-1') mid = n.midi_note_number() assert mid == 5 def test_midi_6_sharp(): n = Note.from_note_string('F-1#') mid = n.midi_note_number() assert mid == 6 def test_midi_6_flat(): n = Note.from_note_string('G-1b') mid = n.midi_note_number() assert mid == 6 def test_midi_7(): n = Note.from_note_string('G-1') mid = n.midi_note_number() assert mid == 7 def test_midi_8_sharp(): n = Note.from_note_string('G-1#') mid = n.midi_note_number() assert mid == 8 def test_midi_8_flat(): n = Note.from_note_string('A-1b') mid = n.midi_note_number() assert mid == 8 def test_midi_9(): n = Note.from_note_string('A-1') mid = n.midi_note_number() assert mid == 9 def test_midi_10_sharp(): n = Note.from_note_string('A-1#') mid = n.midi_note_number() assert mid == 10 def test_midi_10_flat(): n = Note.from_note_string('B-1b') mid = n.midi_note_number() assert mid == 10 def test_midi_11(): n = Note.from_note_string('B-1') mid = n.midi_note_number() assert mid == 11
<commit_before><commit_msg>Add tests for calculating MIDI numbers - octave -1. Signed-off-by: Charlotte Pierce <351429ca27f6e4bff2dbb77adb5046c88cd12fae@malformed-bits.com><commit_after>
# TODO: test if method to convert Note to a MIDI note number works import pytest from music_essentials import Note def test_midi_0(): n = Note.from_note_string('C-1') mid = n.midi_note_number() assert mid == 0 def test_midi_1_sharp(): n = Note.from_note_string('C-1#') mid = n.midi_note_number() assert mid == 1 def test_midi_1_flat(): n = Note.from_note_string('D-1b') mid = n.midi_note_number() assert mid == 1 def test_midi_2(): n = Note.from_note_string('D-1') mid = n.midi_note_number() assert mid == 2 def test_midi_3_sharp(): n = Note.from_note_string('D-1#') mid = n.midi_note_number() assert mid == 3 def test_midi_3_flat(): n = Note.from_note_string('E-1b') mid = n.midi_note_number() assert mid == 3 def test_midi_4(): n = Note.from_note_string('E-1') mid = n.midi_note_number() assert mid == 4 def test_midi_5(): n = Note.from_note_string('F-1') mid = n.midi_note_number() assert mid == 5 def test_midi_6_sharp(): n = Note.from_note_string('F-1#') mid = n.midi_note_number() assert mid == 6 def test_midi_6_flat(): n = Note.from_note_string('G-1b') mid = n.midi_note_number() assert mid == 6 def test_midi_7(): n = Note.from_note_string('G-1') mid = n.midi_note_number() assert mid == 7 def test_midi_8_sharp(): n = Note.from_note_string('G-1#') mid = n.midi_note_number() assert mid == 8 def test_midi_8_flat(): n = Note.from_note_string('A-1b') mid = n.midi_note_number() assert mid == 8 def test_midi_9(): n = Note.from_note_string('A-1') mid = n.midi_note_number() assert mid == 9 def test_midi_10_sharp(): n = Note.from_note_string('A-1#') mid = n.midi_note_number() assert mid == 10 def test_midi_10_flat(): n = Note.from_note_string('B-1b') mid = n.midi_note_number() assert mid == 10 def test_midi_11(): n = Note.from_note_string('B-1') mid = n.midi_note_number() assert mid == 11
Add tests for calculating MIDI numbers - octave -1. Signed-off-by: Charlotte Pierce <351429ca27f6e4bff2dbb77adb5046c88cd12fae@malformed-bits.com># TODO: test if method to convert Note to a MIDI note number works import pytest from music_essentials import Note def test_midi_0(): n = Note.from_note_string('C-1') mid = n.midi_note_number() assert mid == 0 def test_midi_1_sharp(): n = Note.from_note_string('C-1#') mid = n.midi_note_number() assert mid == 1 def test_midi_1_flat(): n = Note.from_note_string('D-1b') mid = n.midi_note_number() assert mid == 1 def test_midi_2(): n = Note.from_note_string('D-1') mid = n.midi_note_number() assert mid == 2 def test_midi_3_sharp(): n = Note.from_note_string('D-1#') mid = n.midi_note_number() assert mid == 3 def test_midi_3_flat(): n = Note.from_note_string('E-1b') mid = n.midi_note_number() assert mid == 3 def test_midi_4(): n = Note.from_note_string('E-1') mid = n.midi_note_number() assert mid == 4 def test_midi_5(): n = Note.from_note_string('F-1') mid = n.midi_note_number() assert mid == 5 def test_midi_6_sharp(): n = Note.from_note_string('F-1#') mid = n.midi_note_number() assert mid == 6 def test_midi_6_flat(): n = Note.from_note_string('G-1b') mid = n.midi_note_number() assert mid == 6 def test_midi_7(): n = Note.from_note_string('G-1') mid = n.midi_note_number() assert mid == 7 def test_midi_8_sharp(): n = Note.from_note_string('G-1#') mid = n.midi_note_number() assert mid == 8 def test_midi_8_flat(): n = Note.from_note_string('A-1b') mid = n.midi_note_number() assert mid == 8 def test_midi_9(): n = Note.from_note_string('A-1') mid = n.midi_note_number() assert mid == 9 def test_midi_10_sharp(): n = Note.from_note_string('A-1#') mid = n.midi_note_number() assert mid == 10 def test_midi_10_flat(): n = Note.from_note_string('B-1b') mid = n.midi_note_number() assert mid == 10 def test_midi_11(): n = Note.from_note_string('B-1') mid = n.midi_note_number() assert mid == 11
<commit_before><commit_msg>Add tests for calculating MIDI numbers - octave -1. Signed-off-by: Charlotte Pierce <351429ca27f6e4bff2dbb77adb5046c88cd12fae@malformed-bits.com><commit_after># TODO: test if method to convert Note to a MIDI note number works import pytest from music_essentials import Note def test_midi_0(): n = Note.from_note_string('C-1') mid = n.midi_note_number() assert mid == 0 def test_midi_1_sharp(): n = Note.from_note_string('C-1#') mid = n.midi_note_number() assert mid == 1 def test_midi_1_flat(): n = Note.from_note_string('D-1b') mid = n.midi_note_number() assert mid == 1 def test_midi_2(): n = Note.from_note_string('D-1') mid = n.midi_note_number() assert mid == 2 def test_midi_3_sharp(): n = Note.from_note_string('D-1#') mid = n.midi_note_number() assert mid == 3 def test_midi_3_flat(): n = Note.from_note_string('E-1b') mid = n.midi_note_number() assert mid == 3 def test_midi_4(): n = Note.from_note_string('E-1') mid = n.midi_note_number() assert mid == 4 def test_midi_5(): n = Note.from_note_string('F-1') mid = n.midi_note_number() assert mid == 5 def test_midi_6_sharp(): n = Note.from_note_string('F-1#') mid = n.midi_note_number() assert mid == 6 def test_midi_6_flat(): n = Note.from_note_string('G-1b') mid = n.midi_note_number() assert mid == 6 def test_midi_7(): n = Note.from_note_string('G-1') mid = n.midi_note_number() assert mid == 7 def test_midi_8_sharp(): n = Note.from_note_string('G-1#') mid = n.midi_note_number() assert mid == 8 def test_midi_8_flat(): n = Note.from_note_string('A-1b') mid = n.midi_note_number() assert mid == 8 def test_midi_9(): n = Note.from_note_string('A-1') mid = n.midi_note_number() assert mid == 9 def test_midi_10_sharp(): n = Note.from_note_string('A-1#') mid = n.midi_note_number() assert mid == 10 def test_midi_10_flat(): n = Note.from_note_string('B-1b') mid = n.midi_note_number() assert mid == 10 def test_midi_11(): n = Note.from_note_string('B-1') mid = n.midi_note_number() assert mid == 11
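The expected values in these tests pin octave -1 to MIDI numbers 0 through 11, which is the standard mapping `number = 12 * (octave + 1) + semitone` with C = 0 and B = 11. A small reference implementation of that formula, independent of `music_essentials`, reproducing a few of the asserted cases:

```python
SEMITONE = {'C': 0, 'D': 2, 'E': 4, 'F': 5, 'G': 7, 'A': 9, 'B': 11}

def midi_number(letter, octave, accidental=0):
    """accidental: +1 for a sharp, -1 for a flat."""
    return 12 * (octave + 1) + SEMITONE[letter] + accidental

assert midi_number('C', -1) == 0        # C-1
assert midi_number('D', -1, -1) == 1    # D-1b, enharmonic with C-1#
assert midi_number('B', -1) == 11       # B-1
```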
f37ee7d25cd00ade44926590ae6b7feb82844b4e
scripts/update_famplex.py
scripts/update_famplex.py
import os import requests # NOTE: this URL should be updated once the reach_export branch is merged base_url = ('https://raw.githubusercontent.com/bgyori/famplex/reach_export/' 'export/') famplex_groundings = 'famplex_groundings.tsv' famplex_overrides = 'famplex_groundings_override.tsv' if __name__ == '__main__': # Basic positioning of folders here = os.path.dirname(os.path.abspath(__file__)) kb_dir = os.path.join(here, os.pardir, 'src', 'main', 'resources', 'org', 'clulab', 'reach', 'kb') groundings_fname = os.path.join(kb_dir, 'hgnc.tsv') # Download and write to groundings file with open(groundings_fname, 'w') as fh: fh.write(requests.get(base_url + famplex_groundings).text)
Add script to update FamPlex groundings
Add script to update FamPlex groundings
Python
apache-2.0
clulab/bioresources
Add script to update FamPlex groundings
import os import requests # NOTE: this URL should be updated once the reach_export branch is merged base_url = ('https://raw.githubusercontent.com/bgyori/famplex/reach_export/' 'export/') famplex_groundings = 'famplex_groundings.tsv' famplex_overrides = 'famplex_groundings_override.tsv' if __name__ == '__main__': # Basic positioning of folders here = os.path.dirname(os.path.abspath(__file__)) kb_dir = os.path.join(here, os.pardir, 'src', 'main', 'resources', 'org', 'clulab', 'reach', 'kb') groundings_fname = os.path.join(kb_dir, 'hgnc.tsv') # Download and write to groundings file with open(groundings_fname, 'w') as fh: fh.write(requests.get(base_url + famplex_groundings).text)
<commit_before><commit_msg>Add script to update FamPlex groundings<commit_after>
import os import requests # NOTE: this URL should be updated once the reach_export branch is merged base_url = ('https://raw.githubusercontent.com/bgyori/famplex/reach_export/' 'export/') famplex_groundings = 'famplex_groundings.tsv' famplex_overrides = 'famplex_groundings_override.tsv' if __name__ == '__main__': # Basic positioning of folders here = os.path.dirname(os.path.abspath(__file__)) kb_dir = os.path.join(here, os.pardir, 'src', 'main', 'resources', 'org', 'clulab', 'reach', 'kb') groundings_fname = os.path.join(kb_dir, 'hgnc.tsv') # Download and write to groundings file with open(groundings_fname, 'w') as fh: fh.write(requests.get(base_url + famplex_groundings).text)
Add script to update FamPlex groundingsimport os import requests # NOTE: this URL should be updated once the reach_export branch is merged base_url = ('https://raw.githubusercontent.com/bgyori/famplex/reach_export/' 'export/') famplex_groundings = 'famplex_groundings.tsv' famplex_overrides = 'famplex_groundings_override.tsv' if __name__ == '__main__': # Basic positioning of folders here = os.path.dirname(os.path.abspath(__file__)) kb_dir = os.path.join(here, os.pardir, 'src', 'main', 'resources', 'org', 'clulab', 'reach', 'kb') groundings_fname = os.path.join(kb_dir, 'hgnc.tsv') # Download and write to groundings file with open(groundings_fname, 'w') as fh: fh.write(requests.get(base_url + famplex_groundings).text)
<commit_before><commit_msg>Add script to update FamPlex groundings<commit_after>import os import requests # NOTE: this URL should be updated once the reach_export branch is merged base_url = ('https://raw.githubusercontent.com/bgyori/famplex/reach_export/' 'export/') famplex_groundings = 'famplex_groundings.tsv' famplex_overrides = 'famplex_groundings_override.tsv' if __name__ == '__main__': # Basic positioning of folders here = os.path.dirname(os.path.abspath(__file__)) kb_dir = os.path.join(here, os.pardir, 'src', 'main', 'resources', 'org', 'clulab', 'reach', 'kb') groundings_fname = os.path.join(kb_dir, 'hgnc.tsv') # Download and write to groundings file with open(groundings_fname, 'w') as fh: fh.write(requests.get(base_url + famplex_groundings).text)
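The script writes `requests.get(...).text` straight into the knowledge-base file. A slightly more defensive variant of the download step, offered as an assumption rather than part of the commit (and with the kb path handling simplified to the current directory): failing on HTTP errors avoids silently writing an error page into `hgnc.tsv`.

```python
import requests

base_url = ('https://raw.githubusercontent.com/bgyori/famplex/reach_export/'
            'export/')
resp = requests.get(base_url + 'famplex_groundings.tsv')
resp.raise_for_status()        # abort on 4xx/5xx instead of caching the error
with open('hgnc.tsv', 'w') as fh:
    fh.write(resp.text)
```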
bafd6eb44a0bbacbf11970e21435bd91c5ae610b
suitability/get_features.py
suitability/get_features.py
#!/usr/bin/env python2 import os import pickle import caffe caffe.set_mode_cpu() caffe_root = os.path.normpath(os.path.dirname('%s/../../../../' % caffe.__file__)) import numpy as np import util def main(): extractor = FeatureExtractor() cache_features(extractor, '../datasets/Michael') cache_features(extractor, '../datasets/Wookie') def cache_features(extractor, path): files = util.filesWithRe(path, r'.*\.(jpg|jpeg|png)$') for i, fpath in enumerate(files): feats = extractor.get_features(fpath) with open('%s.pickle' % fpath, 'w') as f: pickle.dump(feats, f) print('[%d/%d] Stored features for %s' % (i, len(files), fpath)) class FeatureExtractor: def __init__(self): caffe.set_mode_cpu() net = caffe.Net(caffe_root + '/models/bvlc_reference_caffenet/deploy.prototxt', caffe_root + '/models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel', caffe.TEST) # Input preprocessing: 'data' is the name of the input blob == net.inputs[0] transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape}) transformer.set_transpose('data', (2,0,1)) # Mean pixel transformer.set_mean('data', np.load(caffe_root + '/python/caffe/imagenet/ilsvrc_2012_mean.npy').mean(1).mean(1)) # The reference model operates on images in [0,255] range instead of [0,1] transformer.set_raw_scale('data', 255) # The reference model has channels in BGR order instead of RGB transformer.set_channel_swap('data', (2, 1, 0)) self.net = net self.transformer = transformer def get_features(self, img_path): # Reshape net for single image input self.net.blobs['data'].reshape(1, 3, 227, 227) img = self.transformer.preprocess('data', caffe.io.load_image(img_path)) self.net.blobs['data'].data[...] = img out = self.net.forward() return { 'fc6': self.net.blobs['fc6'].data.flatten(), 'fc7': self.net.blobs['fc7'].data.flatten(), } if __name__ == '__main__': main()
Use Caffe to cache feature vectors
Use Caffe to cache feature vectors
Python
mit
swook/autocrop,swook/autocrop,swook/autocrop,swook/autocrop
Use Caffe to cache feature vectors
#!/usr/bin/env python2 import os import pickle import caffe caffe.set_mode_cpu() caffe_root = os.path.normpath(os.path.dirname('%s/../../../../' % caffe.__file__)) import numpy as np import util def main(): extractor = FeatureExtractor() cache_features(extractor, '../datasets/Michael') cache_features(extractor, '../datasets/Wookie') def cache_features(extractor, path): files = util.filesWithRe(path, r'.*\.(jpg|jpeg|png)$') for i, fpath in enumerate(files): feats = extractor.get_features(fpath) with open('%s.pickle' % fpath, 'w') as f: pickle.dump(feats, f) print('[%d/%d] Stored features for %s' % (i, len(files), fpath)) class FeatureExtractor: def __init__(self): caffe.set_mode_cpu() net = caffe.Net(caffe_root + '/models/bvlc_reference_caffenet/deploy.prototxt', caffe_root + '/models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel', caffe.TEST) # Input preprocessing: 'data' is the name of the input blob == net.inputs[0] transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape}) transformer.set_transpose('data', (2,0,1)) # Mean pixel transformer.set_mean('data', np.load(caffe_root + '/python/caffe/imagenet/ilsvrc_2012_mean.npy').mean(1).mean(1)) # The reference model operates on images in [0,255] range instead of [0,1] transformer.set_raw_scale('data', 255) # The reference model has channels in BGR order instead of RGB transformer.set_channel_swap('data', (2, 1, 0)) self.net = net self.transformer = transformer def get_features(self, img_path): # Reshape net for single image input self.net.blobs['data'].reshape(1, 3, 227, 227) img = self.transformer.preprocess('data', caffe.io.load_image(img_path)) self.net.blobs['data'].data[...] = img out = self.net.forward() return { 'fc6': self.net.blobs['fc6'].data.flatten(), 'fc7': self.net.blobs['fc7'].data.flatten(), } if __name__ == '__main__': main()
<commit_before><commit_msg>Use Caffe to cache feature vectors<commit_after>
#!/usr/bin/env python2 import os import pickle import caffe caffe.set_mode_cpu() caffe_root = os.path.normpath(os.path.dirname('%s/../../../../' % caffe.__file__)) import numpy as np import util def main(): extractor = FeatureExtractor() cache_features(extractor, '../datasets/Michael') cache_features(extractor, '../datasets/Wookie') def cache_features(extractor, path): files = util.filesWithRe(path, r'.*\.(jpg|jpeg|png)$') for i, fpath in enumerate(files): feats = extractor.get_features(fpath) with open('%s.pickle' % fpath, 'w') as f: pickle.dump(feats, f) print('[%d/%d] Stored features for %s' % (i, len(files), fpath)) class FeatureExtractor: def __init__(self): caffe.set_mode_cpu() net = caffe.Net(caffe_root + '/models/bvlc_reference_caffenet/deploy.prototxt', caffe_root + '/models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel', caffe.TEST) # Input preprocessing: 'data' is the name of the input blob == net.inputs[0] transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape}) transformer.set_transpose('data', (2,0,1)) # Mean pixel transformer.set_mean('data', np.load(caffe_root + '/python/caffe/imagenet/ilsvrc_2012_mean.npy').mean(1).mean(1)) # The reference model operates on images in [0,255] range instead of [0,1] transformer.set_raw_scale('data', 255) # The reference model has channels in BGR order instead of RGB transformer.set_channel_swap('data', (2, 1, 0)) self.net = net self.transformer = transformer def get_features(self, img_path): # Reshape net for single image input self.net.blobs['data'].reshape(1, 3, 227, 227) img = self.transformer.preprocess('data', caffe.io.load_image(img_path)) self.net.blobs['data'].data[...] = img out = self.net.forward() return { 'fc6': self.net.blobs['fc6'].data.flatten(), 'fc7': self.net.blobs['fc7'].data.flatten(), } if __name__ == '__main__': main()
Use Caffe to cache feature vectors#!/usr/bin/env python2 import os import pickle import caffe caffe.set_mode_cpu() caffe_root = os.path.normpath(os.path.dirname('%s/../../../../' % caffe.__file__)) import numpy as np import util def main(): extractor = FeatureExtractor() cache_features(extractor, '../datasets/Michael') cache_features(extractor, '../datasets/Wookie') def cache_features(extractor, path): files = util.filesWithRe(path, r'.*\.(jpg|jpeg|png)$') for i, fpath in enumerate(files): feats = extractor.get_features(fpath) with open('%s.pickle' % fpath, 'w') as f: pickle.dump(feats, f) print('[%d/%d] Stored features for %s' % (i, len(files), fpath)) class FeatureExtractor: def __init__(self): caffe.set_mode_cpu() net = caffe.Net(caffe_root + '/models/bvlc_reference_caffenet/deploy.prototxt', caffe_root + '/models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel', caffe.TEST) # Input preprocessing: 'data' is the name of the input blob == net.inputs[0] transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape}) transformer.set_transpose('data', (2,0,1)) # Mean pixel transformer.set_mean('data', np.load(caffe_root + '/python/caffe/imagenet/ilsvrc_2012_mean.npy').mean(1).mean(1)) # The reference model operates on images in [0,255] range instead of [0,1] transformer.set_raw_scale('data', 255) # The reference model has channels in BGR order instead of RGB transformer.set_channel_swap('data', (2, 1, 0)) self.net = net self.transformer = transformer def get_features(self, img_path): # Reshape net for single image input self.net.blobs['data'].reshape(1, 3, 227, 227) img = self.transformer.preprocess('data', caffe.io.load_image(img_path)) self.net.blobs['data'].data[...] = img out = self.net.forward() return { 'fc6': self.net.blobs['fc6'].data.flatten(), 'fc7': self.net.blobs['fc7'].data.flatten(), } if __name__ == '__main__': main()
<commit_before><commit_msg>Use Caffe to cache feature vectors<commit_after>#!/usr/bin/env python2 import os import pickle import caffe caffe.set_mode_cpu() caffe_root = os.path.normpath(os.path.dirname('%s/../../../../' % caffe.__file__)) import numpy as np import util def main(): extractor = FeatureExtractor() cache_features(extractor, '../datasets/Michael') cache_features(extractor, '../datasets/Wookie') def cache_features(extractor, path): files = util.filesWithRe(path, r'.*\.(jpg|jpeg|png)$') for i, fpath in enumerate(files): feats = extractor.get_features(fpath) with open('%s.pickle' % fpath, 'w') as f: pickle.dump(feats, f) print('[%d/%d] Stored features for %s' % (i, len(files), fpath)) class FeatureExtractor: def __init__(self): caffe.set_mode_cpu() net = caffe.Net(caffe_root + '/models/bvlc_reference_caffenet/deploy.prototxt', caffe_root + '/models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel', caffe.TEST) # Input preprocessing: 'data' is the name of the input blob == net.inputs[0] transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape}) transformer.set_transpose('data', (2,0,1)) # Mean pixel transformer.set_mean('data', np.load(caffe_root + '/python/caffe/imagenet/ilsvrc_2012_mean.npy').mean(1).mean(1)) # The reference model operates on images in [0,255] range instead of [0,1] transformer.set_raw_scale('data', 255) # The reference model has channels in BGR order instead of RGB transformer.set_channel_swap('data', (2, 1, 0)) self.net = net self.transformer = transformer def get_features(self, img_path): # Reshape net for single image input self.net.blobs['data'].reshape(1, 3, 227, 227) img = self.transformer.preprocess('data', caffe.io.load_image(img_path)) self.net.blobs['data'].data[...] = img out = self.net.forward() return { 'fc6': self.net.blobs['fc6'].data.flatten(), 'fc7': self.net.blobs['fc7'].data.flatten(), } if __name__ == '__main__': main()
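Each cached file holds a dict of flattened CaffeNet activations, and both `fc6` and `fc7` are 4096-dimensional in the reference model. A sketch of reading one cache back; the path is a placeholder, and since the script pickled under Python 2 in text mode (protocol 0 is ASCII), reading in binary mode is still safe:

```python
import pickle

with open('../datasets/Michael/example.jpg.pickle', 'rb') as f:
    feats = pickle.load(f)

print(feats['fc6'].shape, feats['fc7'].shape)   # (4096,) (4096,)
```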
a03004c621e1d82edbfc6866f8f8f79483a9c1cc
test/test_number_parsing.py
test/test_number_parsing.py
from nex.tokens import PLYTokenMixin from nex.parsing import parsing import pytest class T(PLYTokenMixin): def __init__(self, type_, v=None): self.type = type_ self.value = v def __repr__(self): v = self.value if self.value is not None else '' return f'T<{self.type}>({v})' def test_numbers(): parser = parsing.get_parser(start='number') def p(s): return parser.parse(iter(s)) def basic_check(r): assert r.type == 'number' sgns, sz = r.value['signs'], r.value['size'] assert sz.type == 'size' return sgns, sz r = parser.parse(iter([T('ZERO'), T('ONE'), T('TWO')])) sgns, sz = basic_check(r) assert len(sgns.value) == 0 szv = sz.value assert szv.type == 'integer_constant' dig_collect = szv.value assert dig_collect.base == 10 number_makers = [ # Check signs. [T('MINUS_SIGN'), T('MINUS_SIGN'), T('ONE'), T('TWO')], # Check optional space. [T('ONE'), T('TWO'), T('SPACE')], # Check hex and octal constants. [T('SINGLE_QUOTE'), T('TWO')], [T('DOUBLE_QUOTE'), T('TWO')], [T('BACKTICK'), T('UNEXPANDED_CONTROL_SYMBOL')], [T('BACKTICK'), T('EQUALS')], [T('BACKTICK'), T('ACTIVE_CHARACTER')], [T('INTEGER_PARAMETER')], [T('SPECIAL_INTEGER')], [T('CHAR_DEF_TOKEN')], [T('MATH_CHAR_DEF_TOKEN')], [T('COUNT_DEF_TOKEN')], [T('COUNT'), T('ONE')] ] for number_maker in number_makers: r = parser.parse(iter(number_maker)) basic_check(r) s = [T('COUNT')] for number_maker in number_makers: cs = s + number_maker r = parser.parse(iter(cs)) basic_check(r)
Add tests for parsing numbers
Add tests for parsing numbers
Python
mit
eddiejessup/nex
Add tests for parsing numbers
from nex.tokens import PLYTokenMixin from nex.parsing import parsing import pytest class T(PLYTokenMixin): def __init__(self, type_, v=None): self.type = type_ self.value = v def __repr__(self): v = self.value if self.value is not None else '' return f'T<{self.type}>({v})' def test_numbers(): parser = parsing.get_parser(start='number') def p(s): return parser.parse(iter(s)) def basic_check(r): assert r.type == 'number' sgns, sz = r.value['signs'], r.value['size'] assert sz.type == 'size' return sgns, sz r = parser.parse(iter([T('ZERO'), T('ONE'), T('TWO')])) sgns, sz = basic_check(r) assert len(sgns.value) == 0 szv = sz.value assert szv.type == 'integer_constant' dig_collect = szv.value assert dig_collect.base == 10 number_makers = [ # Check signs. [T('MINUS_SIGN'), T('MINUS_SIGN'), T('ONE'), T('TWO')], # Check optional space. [T('ONE'), T('TWO'), T('SPACE')], # Check hex and octal constants. [T('SINGLE_QUOTE'), T('TWO')], [T('DOUBLE_QUOTE'), T('TWO')], [T('BACKTICK'), T('UNEXPANDED_CONTROL_SYMBOL')], [T('BACKTICK'), T('EQUALS')], [T('BACKTICK'), T('ACTIVE_CHARACTER')], [T('INTEGER_PARAMETER')], [T('SPECIAL_INTEGER')], [T('CHAR_DEF_TOKEN')], [T('MATH_CHAR_DEF_TOKEN')], [T('COUNT_DEF_TOKEN')], [T('COUNT'), T('ONE')] ] for number_maker in number_makers: r = parser.parse(iter(number_maker)) basic_check(r) s = [T('COUNT')] for number_maker in number_makers: cs = s + number_maker r = parser.parse(iter(cs)) basic_check(r)
<commit_before><commit_msg>Add tests for parsing numbers<commit_after>
from nex.tokens import PLYTokenMixin from nex.parsing import parsing import pytest class T(PLYTokenMixin): def __init__(self, type_, v=None): self.type = type_ self.value = v def __repr__(self): v = self.value if self.value is not None else '' return f'T<{self.type}>({v})' def test_numbers(): parser = parsing.get_parser(start='number') def p(s): return parser.parse(iter(s)) def basic_check(r): assert r.type == 'number' sgns, sz = r.value['signs'], r.value['size'] assert sz.type == 'size' return sgns, sz r = parser.parse(iter([T('ZERO'), T('ONE'), T('TWO')])) sgns, sz = basic_check(r) assert len(sgns.value) == 0 szv = sz.value assert szv.type == 'integer_constant' dig_collect = szv.value assert dig_collect.base == 10 number_makers = [ # Check signs. [T('MINUS_SIGN'), T('MINUS_SIGN'), T('ONE'), T('TWO')], # Check optional space. [T('ONE'), T('TWO'), T('SPACE')], # Check hex and octal constants. [T('SINGLE_QUOTE'), T('TWO')], [T('DOUBLE_QUOTE'), T('TWO')], [T('BACKTICK'), T('UNEXPANDED_CONTROL_SYMBOL')], [T('BACKTICK'), T('EQUALS')], [T('BACKTICK'), T('ACTIVE_CHARACTER')], [T('INTEGER_PARAMETER')], [T('SPECIAL_INTEGER')], [T('CHAR_DEF_TOKEN')], [T('MATH_CHAR_DEF_TOKEN')], [T('COUNT_DEF_TOKEN')], [T('COUNT'), T('ONE')] ] for number_maker in number_makers: r = parser.parse(iter(number_maker)) basic_check(r) s = [T('COUNT')] for number_maker in number_makers: cs = s + number_maker r = parser.parse(iter(cs)) basic_check(r)
Add tests for parsing numbersfrom nex.tokens import PLYTokenMixin from nex.parsing import parsing import pytest class T(PLYTokenMixin): def __init__(self, type_, v=None): self.type = type_ self.value = v def __repr__(self): v = self.value if self.value is not None else '' return f'T<{self.type}>({v})' def test_numbers(): parser = parsing.get_parser(start='number') def p(s): return parser.parse(iter(s)) def basic_check(r): assert r.type == 'number' sgns, sz = r.value['signs'], r.value['size'] assert sz.type == 'size' return sgns, sz r = parser.parse(iter([T('ZERO'), T('ONE'), T('TWO')])) sgns, sz = basic_check(r) assert len(sgns.value) == 0 szv = sz.value assert szv.type == 'integer_constant' dig_collect = szv.value assert dig_collect.base == 10 number_makers = [ # Check signs. [T('MINUS_SIGN'), T('MINUS_SIGN'), T('ONE'), T('TWO')], # Check optional space. [T('ONE'), T('TWO'), T('SPACE')], # Check hex and octal constants. [T('SINGLE_QUOTE'), T('TWO')], [T('DOUBLE_QUOTE'), T('TWO')], [T('BACKTICK'), T('UNEXPANDED_CONTROL_SYMBOL')], [T('BACKTICK'), T('EQUALS')], [T('BACKTICK'), T('ACTIVE_CHARACTER')], [T('INTEGER_PARAMETER')], [T('SPECIAL_INTEGER')], [T('CHAR_DEF_TOKEN')], [T('MATH_CHAR_DEF_TOKEN')], [T('COUNT_DEF_TOKEN')], [T('COUNT'), T('ONE')] ] for number_maker in number_makers: r = parser.parse(iter(number_maker)) basic_check(r) s = [T('COUNT')] for number_maker in number_makers: cs = s + number_maker r = parser.parse(iter(cs)) basic_check(r)
<commit_before><commit_msg>Add tests for parsing numbers<commit_after>from nex.tokens import PLYTokenMixin from nex.parsing import parsing import pytest class T(PLYTokenMixin): def __init__(self, type_, v=None): self.type = type_ self.value = v def __repr__(self): v = self.value if self.value is not None else '' return f'T<{self.type}>({v})' def test_numbers(): parser = parsing.get_parser(start='number') def p(s): return parser.parse(iter(s)) def basic_check(r): assert r.type == 'number' sgns, sz = r.value['signs'], r.value['size'] assert sz.type == 'size' return sgns, sz r = parser.parse(iter([T('ZERO'), T('ONE'), T('TWO')])) sgns, sz = basic_check(r) assert len(sgns.value) == 0 szv = sz.value assert szv.type == 'integer_constant' dig_collect = szv.value assert dig_collect.base == 10 number_makers = [ # Check signs. [T('MINUS_SIGN'), T('MINUS_SIGN'), T('ONE'), T('TWO')], # Check optional space. [T('ONE'), T('TWO'), T('SPACE')], # Check hex and octal constants. [T('SINGLE_QUOTE'), T('TWO')], [T('DOUBLE_QUOTE'), T('TWO')], [T('BACKTICK'), T('UNEXPANDED_CONTROL_SYMBOL')], [T('BACKTICK'), T('EQUALS')], [T('BACKTICK'), T('ACTIVE_CHARACTER')], [T('INTEGER_PARAMETER')], [T('SPECIAL_INTEGER')], [T('CHAR_DEF_TOKEN')], [T('MATH_CHAR_DEF_TOKEN')], [T('COUNT_DEF_TOKEN')], [T('COUNT'), T('ONE')] ] for number_maker in number_makers: r = parser.parse(iter(number_maker)) basic_check(r) s = [T('COUNT')] for number_maker in number_makers: cs = s + number_maker r = parser.parse(iter(cs)) basic_check(r)
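These tests drive the parser with hand-built token streams rather than lexed source. The helper class boils down to a `(type, value)` pair with a readable repr; a dependency-free equivalent for illustration (dropping the `PLYTokenMixin` base, which the real parser needs):

```python
class T:
    def __init__(self, type_, v=None):
        self.type = type_
        self.value = v

    def __repr__(self):
        v = self.value if self.value is not None else ''
        return f'T<{self.type}>({v})'

# Token stream for "-12", as fed to parser.parse(iter(...)) in the tests:
stream = [T('MINUS_SIGN'), T('ONE'), T('TWO')]
print(stream)   # [T<MINUS_SIGN>(), T<ONE>(), T<TWO>()]
```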
f873c340f5121aa63117314ea9906695463d0854
numba/tests/test_typingerror.py
numba/tests/test_typingerror.py
from __future__ import print_function import unittest from numba.compiler import compile_isolated from numba import types from numba.typeinfer import TypingError def what(): pass def foo(): return what() def bar(x): return x.a class TestTypingError(unittest.TestCase): def test_unknown_function(self): try: compile_isolated(foo, ()) except TypingError, e: self.assertTrue(e.msg.startswith("Untyped global name")) else: self.fail("Should raise error") def test_unknown_attrs(self): try: compile_isolated(bar, (types.int32,)) except TypingError, e: self.assertTrue(e.msg.startswith("Unknown attribute")) else: self.fail("Should raise error") if __name__ == '__main__': unittest.main()
Add tests for typing errors
Add tests for typing errors
Python
bsd-2-clause
sklam/numba,cpcloud/numba,gmarkall/numba,IntelLabs/numba,seibert/numba,jriehl/numba,GaZ3ll3/numba,cpcloud/numba,numba/numba,stuartarchibald/numba,stonebig/numba,stuartarchibald/numba,pombredanne/numba,ssarangi/numba,gmarkall/numba,gdementen/numba,jriehl/numba,gmarkall/numba,ssarangi/numba,pitrou/numba,pitrou/numba,stefanseefeld/numba,stonebig/numba,pitrou/numba,pombredanne/numba,pitrou/numba,numba/numba,IntelLabs/numba,seibert/numba,gdementen/numba,stefanseefeld/numba,stefanseefeld/numba,seibert/numba,jriehl/numba,jriehl/numba,stefanseefeld/numba,IntelLabs/numba,cpcloud/numba,IntelLabs/numba,IntelLabs/numba,GaZ3ll3/numba,seibert/numba,gdementen/numba,pombredanne/numba,stonebig/numba,GaZ3ll3/numba,GaZ3ll3/numba,ssarangi/numba,pombredanne/numba,seibert/numba,GaZ3ll3/numba,sklam/numba,sklam/numba,cpcloud/numba,numba/numba,jriehl/numba,gdementen/numba,ssarangi/numba,cpcloud/numba,gdementen/numba,stonebig/numba,pitrou/numba,stuartarchibald/numba,stonebig/numba,gmarkall/numba,numba/numba,ssarangi/numba,gmarkall/numba,stefanseefeld/numba,stuartarchibald/numba,stuartarchibald/numba,sklam/numba,pombredanne/numba,sklam/numba,numba/numba
Add tests for typing errors
from __future__ import print_function import unittest from numba.compiler import compile_isolated from numba import types from numba.typeinfer import TypingError def what(): pass def foo(): return what() def bar(x): return x.a class TestTypingError(unittest.TestCase): def test_unknown_function(self): try: compile_isolated(foo, ()) except TypingError, e: self.assertTrue(e.msg.startswith("Untyped global name")) else: self.fail("Should raise error") def test_unknown_attrs(self): try: compile_isolated(bar, (types.int32,)) except TypingError, e: self.assertTrue(e.msg.startswith("Unknown attribute")) else: self.fail("Should raise error") if __name__ == '__main__': unittest.main()
<commit_before><commit_msg>Add tests for typing errors<commit_after>
from __future__ import print_function import unittest from numba.compiler import compile_isolated from numba import types from numba.typeinfer import TypingError def what(): pass def foo(): return what() def bar(x): return x.a class TestTypingError(unittest.TestCase): def test_unknown_function(self): try: compile_isolated(foo, ()) except TypingError, e: self.assertTrue(e.msg.startswith("Untyped global name")) else: self.fail("Should raise error") def test_unknown_attrs(self): try: compile_isolated(bar, (types.int32,)) except TypingError, e: self.assertTrue(e.msg.startswith("Unknown attribute")) else: self.fail("Should raise error") if __name__ == '__main__': unittest.main()
Add tests for typing errorsfrom __future__ import print_function import unittest from numba.compiler import compile_isolated from numba import types from numba.typeinfer import TypingError def what(): pass def foo(): return what() def bar(x): return x.a class TestTypingError(unittest.TestCase): def test_unknown_function(self): try: compile_isolated(foo, ()) except TypingError, e: self.assertTrue(e.msg.startswith("Untyped global name")) else: self.fail("Should raise error") def test_unknown_attrs(self): try: compile_isolated(bar, (types.int32,)) except TypingError, e: self.assertTrue(e.msg.startswith("Unknown attribute")) else: self.fail("Should raise error") if __name__ == '__main__': unittest.main()
<commit_before><commit_msg>Add tests for typing errors<commit_after>from __future__ import print_function import unittest from numba.compiler import compile_isolated from numba import types from numba.typeinfer import TypingError def what(): pass def foo(): return what() def bar(x): return x.a class TestTypingError(unittest.TestCase): def test_unknown_function(self): try: compile_isolated(foo, ()) except TypingError, e: self.assertTrue(e.msg.startswith("Untyped global name")) else: self.fail("Should raise error") def test_unknown_attrs(self): try: compile_isolated(bar, (types.int32,)) except TypingError, e: self.assertTrue(e.msg.startswith("Unknown attribute")) else: self.fail("Should raise error") if __name__ == '__main__': unittest.main()
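This test module is Python-2-only: `except TypingError, e:` is a syntax error on Python 3. For comparison, a Python 3 rendering of the first test, reusing `foo`, `compile_isolated` and `TypingError` from the module above:

```python
import unittest

class TestTypingErrorPy3(unittest.TestCase):
    def test_unknown_function(self):
        try:
            compile_isolated(foo, ())
        except TypingError as e:   # py3 spelling of `except TypingError, e:`
            self.assertTrue(e.msg.startswith("Untyped global name"))
        else:
            self.fail("Should raise error")
```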
c49227c3a91fa173544d23d2133a94273187ae35
num-prediction.py
num-prediction.py
# Requires sci-kit learn and matplotlib import matplotlib.pyplot as pyplot from sklearn import datasets from sklearn import svm digits = datasets.load_digits() #Gamma breaks when greater than 0.01. Maintains high accuracy at 0.001 clf = svm.SVC(gamma=0.001, C=100) x,y = digits.data[:-1], digits.target[:-1] clf.fit(x,y) #Will return a prediction and display the last digit in dataset print('Prediction:',clf.predict(digits.data[-1])) pyplot.imshow(digits.images[-1], cmap=pyplot.cm.gray_r, interpolation="nearest") pyplot.show()
Add number guess SVM file
Add number guess SVM file
Python
mit
FrizzBolt/machine-learning-sandbox
Add number guess SVM file
# Requires sci-kit learn and matplotlib import matplotlib.pyplot as pyplot from sklearn import datasets from sklearn import svm digits = datasets.load_digits() #Gamma breaks when greater than 0.01. Maintains high accuracy at 0.001 clf = svm.SVC(gamma=0.001, C=100) x,y = digits.data[:-1], digits.target[:-1] clf.fit(x,y) #Will return a prediction and display the last digit in dataset print('Prediction:',clf.predict(digits.data[-1])) pyplot.imshow(digits.images[-1], cmap=pyplot.cm.gray_r, interpolation="nearest") pyplot.show()
<commit_before><commit_msg>Add number guess SVM file<commit_after>
# Requires sci-kit learn and matplotlib import matplotlib.pyplot as pyplot from sklearn import datasets from sklearn import svm digits = datasets.load_digits() #Gamma breaks when greater than 0.01. Maintains high accuracy at 0.001 clf = svm.SVC(gamma=0.001, C=100) x,y = digits.data[:-1], digits.target[:-1] clf.fit(x,y) #Will return a prediction and display the last digit in dataset print('Prediction:',clf.predict(digits.data[-1])) pyplot.imshow(digits.images[-1], cmap=pyplot.cm.gray_r, interpolation="nearest") pyplot.show()
Add number guess SVM file# Requires sci-kit learn and matplotlib import matplotlib.pyplot as pyplot from sklearn import datasets from sklearn import svm digits = datasets.load_digits() #Gamma breaks when greater than 0.01. Maintains high accuracy at 0.001 clf = svm.SVC(gamma=0.001, C=100) x,y = digits.data[:-1], digits.target[:-1] clf.fit(x,y) #Will return a prediction and display the last digit in dataset print('Prediction:',clf.predict(digits.data[-1])) pyplot.imshow(digits.images[-1], cmap=pyplot.cm.gray_r, interpolation="nearest") pyplot.show()
<commit_before><commit_msg>Add number guess SVM file<commit_after># Requires sci-kit learn and matplotlib import matplotlib.pyplot as pyplot from sklearn import datasets from sklearn import svm digits = datasets.load_digits() #Gamma breaks when greater than 0.01. Maintains high accuracy at 0.001 clf = svm.SVC(gamma=0.001, C=100) x,y = digits.data[:-1], digits.target[:-1] clf.fit(x,y) #Will return a prediction and display the last digit in dataset print('Prediction:',clf.predict(digits.data[-1])) pyplot.imshow(digits.images[-1], cmap=pyplot.cm.gray_r, interpolation="nearest") pyplot.show()
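The script trains on everything except the final digit and "tests" on that single held-out image. A common extension, offered as an assumption rather than part of the commit, is a proper train/test split with an accuracy score; this uses a modern scikit-learn (`model_selection` arrived in 0.18, so the script's era would have used `cross_validation` instead):

```python
from sklearn import datasets, svm
from sklearn.model_selection import train_test_split

digits = datasets.load_digits()
X_train, X_test, y_train, y_test = train_test_split(
    digits.data, digits.target, test_size=0.25, random_state=0)

clf = svm.SVC(gamma=0.001, C=100)   # same hyperparameters as the script
clf.fit(X_train, y_train)
print('held-out accuracy:', clf.score(X_test, y_test))
```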
b042b715aaab917073a2587db1b2eca3525422e4
platforms/m3_ctl/programming/fake_ice.py
platforms/m3_ctl/programming/fake_ice.py
#!/usr/bin/env python MAX_GPIO = 24 import serial s = serial.Serial('/tmp/com2', 115200) if not s.isOpen(): raise IOError, "Failed to open serial port" GPIO_INPUT = 0 GPIO_OUTPUT = 1 GPIO_TRISTATE = 2 event = 0 gpio_level = [False for x in xrange(MAX_GPIO)] gpio_dir = [GPIO_TRISTATE for x in xrange(MAX_GPIO)] def respond(msg, ack=True): global s global event if (ack): s.write(chr(0)) else: s.write(chr(1)) s.write(chr(event)) event += 1 s.write(chr(len(msg))) if len(msg): s.write(msg) print "Sent a response of length", len(msg) def ack(): respond('') while True: msg_type, event_id, length = s.read(3) print "Got a message of type", msg_type event_id = ord(event_id) length = ord(length) msg = s.read(length) if msg_type == 'V': respond('0001'.decode('hex')) elif msg_type == 'v': if msg == '0001'.decode('hex'): ack() else: print "Unknown version:", msg raise Exception elif msg_type == 'g': if len(msg) != 3: print "bad 'g' message length:", len(msg) raise Exception if msg[0] == 'l': gpio_level[ord(msg[1])] = ord(msg[2]) ack() elif msg[0] == 'd': gpio_dir[ord(msg[1])] = ord(msg[2]) ack() else: print "bad 'g' subtype:", msg[0] raise Exception else: print "Unknown msg type:", msg_type raise Exception
Add a stub fake ICE peripheral
Add a stub fake ICE peripheral
Python
apache-2.0
lab11/M-ulator,lab11/M-ulator,lab11/M-ulator,lab11/M-ulator,lab11/M-ulator,lab11/M-ulator,lab11/M-ulator
Add a stub fake ICE peripheral
#!/usr/bin/env python MAX_GPIO = 24 import serial s = serial.Serial('/tmp/com2', 115200) if not s.isOpen(): raise IOError, "Failed to open serial port" GPIO_INPUT = 0 GPIO_OUTPUT = 1 GPIO_TRISTATE = 2 event = 0 gpio_level = [False for x in xrange(MAX_GPIO)] gpio_dir = [GPIO_TRISTATE for x in xrange(MAX_GPIO)] def respond(msg, ack=True): global s global event if (ack): s.write(chr(0)) else: s.write(chr(1)) s.write(chr(event)) event += 1 s.write(chr(len(msg))) if len(msg): s.write(msg) print "Sent a response of length", len(msg) def ack(): respond('') while True: msg_type, event_id, length = s.read(3) print "Got a message of type", msg_type event_id = ord(event_id) length = ord(length) msg = s.read(length) if msg_type == 'V': respond('0001'.decode('hex')) elif msg_type == 'v': if msg == '0001'.decode('hex'): ack() else: print "Unknown version:", msg raise Exception elif msg_type == 'g': if len(msg) != 3: print "bad 'g' message length:", len(msg) raise Exception if msg[0] == 'l': gpio_level[ord(msg[1])] = ord(msg[2]) ack() elif msg[0] == 'd': gpio_dir[ord(msg[1])] = ord(msg[2]) ack() else: print "bad 'g' subtype:", msg[0] raise Exception else: print "Unknown msg type:", msg_type raise Exception
<commit_before><commit_msg>Add a stub fake ICE peripheral<commit_after>
#!/usr/bin/env python MAX_GPIO = 24 import serial s = serial.Serial('/tmp/com2', 115200) if not s.isOpen(): raise IOError, "Failed to open serial port" GPIO_INPUT = 0 GPIO_OUTPUT = 1 GPIO_TRISTATE = 2 event = 0 gpio_level = [False for x in xrange(MAX_GPIO)] gpio_dir = [GPIO_TRISTATE for x in xrange(MAX_GPIO)] def respond(msg, ack=True): global s global event if (ack): s.write(chr(0)) else: s.write(chr(1)) s.write(chr(event)) event += 1 s.write(chr(len(msg))) if len(msg): s.write(msg) print "Sent a response of length", len(msg) def ack(): respond('') while True: msg_type, event_id, length = s.read(3) print "Got a message of type", msg_type event_id = ord(event_id) length = ord(length) msg = s.read(length) if msg_type == 'V': respond('0001'.decode('hex')) elif msg_type == 'v': if msg == '0001'.decode('hex'): ack() else: print "Unknown version:", msg raise Exception elif msg_type == 'g': if len(msg) != 3: print "bad 'g' message length:", len(msg) raise Exception if msg[0] == 'l': gpio_level[ord(msg[1])] = ord(msg[2]) ack() elif msg[0] == 'd': gpio_dir[ord(msg[1])] = ord(msg[2]) ack() else: print "bad 'g' subtype:", msg[0] raise Exception else: print "Unknown msg type:", msg_type raise Exception
Add a stub fake ICE peripheral#!/usr/bin/env python

MAX_GPIO = 24

import serial

s = serial.Serial('/tmp/com2', 115200)
if not s.isOpen():
    raise IOError, "Failed to open serial port"

GPIO_INPUT = 0
GPIO_OUTPUT = 1
GPIO_TRISTATE = 2

event = 0
gpio_level = [False for x in xrange(MAX_GPIO)]
gpio_dir = [GPIO_TRISTATE for x in xrange(MAX_GPIO)]

def respond(msg, ack=True):
    global s
    global event
    if (ack):
        s.write(chr(0))
    else:
        s.write(chr(1))
    s.write(chr(event))
    event += 1
    s.write(chr(len(msg)))
    if len(msg):
        s.write(msg)
    print "Sent a response of length", len(msg)

def ack():
    respond('')

while True:
    msg_type, event_id, length = s.read(3)
    print "Got a message of type", msg_type
    event_id = ord(event_id)
    length = ord(length)
    msg = s.read(length)
    if msg_type == 'V':
        respond('0001'.decode('hex'))
    elif msg_type == 'v':
        if msg == '0001'.decode('hex'):
            ack()
        else:
            print "Unknown version:", msg
            raise Exception
    elif msg_type == 'g':
        if len(msg) != 3:
            print "bad 'g' message length:", len(msg)
            raise Exception
        if msg[0] == 'l':
            gpio_level[ord(msg[1])] = ord(msg[2])
            ack()
        elif msg[0] == 'd':
            gpio_dir[ord(msg[1])] = ord(msg[2])
            ack()
        else:
            print "bad 'g' subtype:", msg[0]
            raise Exception
    else:
        print "Unknown msg type:", msg_type
        raise Exception
<commit_before><commit_msg>Add a stub fake ICE peripheral<commit_after>#!/usr/bin/env python

MAX_GPIO = 24

import serial

s = serial.Serial('/tmp/com2', 115200)
if not s.isOpen():
    raise IOError, "Failed to open serial port"

GPIO_INPUT = 0
GPIO_OUTPUT = 1
GPIO_TRISTATE = 2

event = 0
gpio_level = [False for x in xrange(MAX_GPIO)]
gpio_dir = [GPIO_TRISTATE for x in xrange(MAX_GPIO)]

def respond(msg, ack=True):
    global s
    global event
    if (ack):
        s.write(chr(0))
    else:
        s.write(chr(1))
    s.write(chr(event))
    event += 1
    s.write(chr(len(msg)))
    if len(msg):
        s.write(msg)
    print "Sent a response of length", len(msg)

def ack():
    respond('')

while True:
    msg_type, event_id, length = s.read(3)
    print "Got a message of type", msg_type
    event_id = ord(event_id)
    length = ord(length)
    msg = s.read(length)
    if msg_type == 'V':
        respond('0001'.decode('hex'))
    elif msg_type == 'v':
        if msg == '0001'.decode('hex'):
            ack()
        else:
            print "Unknown version:", msg
            raise Exception
    elif msg_type == 'g':
        if len(msg) != 3:
            print "bad 'g' message length:", len(msg)
            raise Exception
        if msg[0] == 'l':
            gpio_level[ord(msg[1])] = ord(msg[2])
            ack()
        elif msg[0] == 'd':
            gpio_dir[ord(msg[1])] = ord(msg[2])
            ack()
        else:
            print "bad 'g' subtype:", msg[0]
            raise Exception
    else:
        print "Unknown msg type:", msg_type
        raise Exception
696309f33320b0e45ea4a6c7577c87be23cb4462
example_config.py
example_config.py
from roland import lazy, Mode

home_page = 'https://www.google.com'
search_page = 'https://www.google.com/search?q=%s'

commands = {
    'i': lazy.set_mode(Mode.Insert),
    'colon': lazy.set_mode(Mode.Command),
    'b': lambda browser: browser.roland.select_window(),
    'c': lazy.close(),
    'o': lazy.open_or_search(),
    'O': lazy.open_modify(),
    't': lazy.open_or_search(new_window=True),
    'T': lazy.open_modify(new_window=True),
    'r': lazy.reload(),
    'R': lazy.reload_bypass_cache(),
    'plus': lazy.zoom_in(),
    'minus': lazy.zoom_out(),
    'equal': lazy.zoom_reset(),
    'slash': lazy.search_page(forwards=True),
    'question': lazy.search_page(forwards=False),
    'n': lazy.next_search_result(forwards=True),
    'N': lazy.next_search_result(forwards=False),
    'C-o': lazy.back(),
    'C-i': lazy.forward(),
    'f': lazy.follow(),
    'F': lazy.follow(new_window=True),
    'C-f': lazy.run_javascript('window.scrollBy(0, window.innerHeight);'),
    'C-b': lazy.run_javascript('window.scrollBy(0, -window.innerHeight);'),
    'C-c': lazy.stop(),
    'C-w': lazy.shell(),
    'C-q': lazy.quit(),
    'h': lazy.move(x=-1),
    'j': lazy.move(y=1),
    'k': lazy.move(y=-1),
    'l': lazy.move(x=1),
    'y': lambda browser: browser.roland.set_clipboard(browser.get_uri()),
    'g': lazy.set_mode(Mode.SubCommand, 'g', {
        'g': lazy.run_javascript('window.scrollTo(0, 0);'),
    }),
    'd': lazy.set_mode(Mode.SubCommand, 'd', {
        'l': lazy.list_downloads(),
        'c': lazy.cancel_download(),
    }),
    'G': lazy.run_javascript('window.scrollBy(0, document.body.scrollHeight);'),
}
Add my config as an example config.
Add my config as an example config.
Python
bsd-3-clause
nathan-hoad/roland,nathan-hoad/roland
Add my config as an example config.
from roland import lazy, Mode

home_page = 'https://www.google.com'
search_page = 'https://www.google.com/search?q=%s'

commands = {
    'i': lazy.set_mode(Mode.Insert),
    'colon': lazy.set_mode(Mode.Command),
    'b': lambda browser: browser.roland.select_window(),
    'c': lazy.close(),
    'o': lazy.open_or_search(),
    'O': lazy.open_modify(),
    't': lazy.open_or_search(new_window=True),
    'T': lazy.open_modify(new_window=True),
    'r': lazy.reload(),
    'R': lazy.reload_bypass_cache(),
    'plus': lazy.zoom_in(),
    'minus': lazy.zoom_out(),
    'equal': lazy.zoom_reset(),
    'slash': lazy.search_page(forwards=True),
    'question': lazy.search_page(forwards=False),
    'n': lazy.next_search_result(forwards=True),
    'N': lazy.next_search_result(forwards=False),
    'C-o': lazy.back(),
    'C-i': lazy.forward(),
    'f': lazy.follow(),
    'F': lazy.follow(new_window=True),
    'C-f': lazy.run_javascript('window.scrollBy(0, window.innerHeight);'),
    'C-b': lazy.run_javascript('window.scrollBy(0, -window.innerHeight);'),
    'C-c': lazy.stop(),
    'C-w': lazy.shell(),
    'C-q': lazy.quit(),
    'h': lazy.move(x=-1),
    'j': lazy.move(y=1),
    'k': lazy.move(y=-1),
    'l': lazy.move(x=1),
    'y': lambda browser: browser.roland.set_clipboard(browser.get_uri()),
    'g': lazy.set_mode(Mode.SubCommand, 'g', {
        'g': lazy.run_javascript('window.scrollTo(0, 0);'),
    }),
    'd': lazy.set_mode(Mode.SubCommand, 'd', {
        'l': lazy.list_downloads(),
        'c': lazy.cancel_download(),
    }),
    'G': lazy.run_javascript('window.scrollBy(0, document.body.scrollHeight);'),
}
<commit_before><commit_msg>Add my config as an example config.<commit_after>
from roland import lazy, Mode

home_page = 'https://www.google.com'
search_page = 'https://www.google.com/search?q=%s'

commands = {
    'i': lazy.set_mode(Mode.Insert),
    'colon': lazy.set_mode(Mode.Command),
    'b': lambda browser: browser.roland.select_window(),
    'c': lazy.close(),
    'o': lazy.open_or_search(),
    'O': lazy.open_modify(),
    't': lazy.open_or_search(new_window=True),
    'T': lazy.open_modify(new_window=True),
    'r': lazy.reload(),
    'R': lazy.reload_bypass_cache(),
    'plus': lazy.zoom_in(),
    'minus': lazy.zoom_out(),
    'equal': lazy.zoom_reset(),
    'slash': lazy.search_page(forwards=True),
    'question': lazy.search_page(forwards=False),
    'n': lazy.next_search_result(forwards=True),
    'N': lazy.next_search_result(forwards=False),
    'C-o': lazy.back(),
    'C-i': lazy.forward(),
    'f': lazy.follow(),
    'F': lazy.follow(new_window=True),
    'C-f': lazy.run_javascript('window.scrollBy(0, window.innerHeight);'),
    'C-b': lazy.run_javascript('window.scrollBy(0, -window.innerHeight);'),
    'C-c': lazy.stop(),
    'C-w': lazy.shell(),
    'C-q': lazy.quit(),
    'h': lazy.move(x=-1),
    'j': lazy.move(y=1),
    'k': lazy.move(y=-1),
    'l': lazy.move(x=1),
    'y': lambda browser: browser.roland.set_clipboard(browser.get_uri()),
    'g': lazy.set_mode(Mode.SubCommand, 'g', {
        'g': lazy.run_javascript('window.scrollTo(0, 0);'),
    }),
    'd': lazy.set_mode(Mode.SubCommand, 'd', {
        'l': lazy.list_downloads(),
        'c': lazy.cancel_download(),
    }),
    'G': lazy.run_javascript('window.scrollBy(0, document.body.scrollHeight);'),
}
Add my config as an example config.from roland import lazy, Mode

home_page = 'https://www.google.com'
search_page = 'https://www.google.com/search?q=%s'

commands = {
    'i': lazy.set_mode(Mode.Insert),
    'colon': lazy.set_mode(Mode.Command),
    'b': lambda browser: browser.roland.select_window(),
    'c': lazy.close(),
    'o': lazy.open_or_search(),
    'O': lazy.open_modify(),
    't': lazy.open_or_search(new_window=True),
    'T': lazy.open_modify(new_window=True),
    'r': lazy.reload(),
    'R': lazy.reload_bypass_cache(),
    'plus': lazy.zoom_in(),
    'minus': lazy.zoom_out(),
    'equal': lazy.zoom_reset(),
    'slash': lazy.search_page(forwards=True),
    'question': lazy.search_page(forwards=False),
    'n': lazy.next_search_result(forwards=True),
    'N': lazy.next_search_result(forwards=False),
    'C-o': lazy.back(),
    'C-i': lazy.forward(),
    'f': lazy.follow(),
    'F': lazy.follow(new_window=True),
    'C-f': lazy.run_javascript('window.scrollBy(0, window.innerHeight);'),
    'C-b': lazy.run_javascript('window.scrollBy(0, -window.innerHeight);'),
    'C-c': lazy.stop(),
    'C-w': lazy.shell(),
    'C-q': lazy.quit(),
    'h': lazy.move(x=-1),
    'j': lazy.move(y=1),
    'k': lazy.move(y=-1),
    'l': lazy.move(x=1),
    'y': lambda browser: browser.roland.set_clipboard(browser.get_uri()),
    'g': lazy.set_mode(Mode.SubCommand, 'g', {
        'g': lazy.run_javascript('window.scrollTo(0, 0);'),
    }),
    'd': lazy.set_mode(Mode.SubCommand, 'd', {
        'l': lazy.list_downloads(),
        'c': lazy.cancel_download(),
    }),
    'G': lazy.run_javascript('window.scrollBy(0, document.body.scrollHeight);'),
}
<commit_before><commit_msg>Add my config as an example config.<commit_after>from roland import lazy, Mode

home_page = 'https://www.google.com'
search_page = 'https://www.google.com/search?q=%s'

commands = {
    'i': lazy.set_mode(Mode.Insert),
    'colon': lazy.set_mode(Mode.Command),
    'b': lambda browser: browser.roland.select_window(),
    'c': lazy.close(),
    'o': lazy.open_or_search(),
    'O': lazy.open_modify(),
    't': lazy.open_or_search(new_window=True),
    'T': lazy.open_modify(new_window=True),
    'r': lazy.reload(),
    'R': lazy.reload_bypass_cache(),
    'plus': lazy.zoom_in(),
    'minus': lazy.zoom_out(),
    'equal': lazy.zoom_reset(),
    'slash': lazy.search_page(forwards=True),
    'question': lazy.search_page(forwards=False),
    'n': lazy.next_search_result(forwards=True),
    'N': lazy.next_search_result(forwards=False),
    'C-o': lazy.back(),
    'C-i': lazy.forward(),
    'f': lazy.follow(),
    'F': lazy.follow(new_window=True),
    'C-f': lazy.run_javascript('window.scrollBy(0, window.innerHeight);'),
    'C-b': lazy.run_javascript('window.scrollBy(0, -window.innerHeight);'),
    'C-c': lazy.stop(),
    'C-w': lazy.shell(),
    'C-q': lazy.quit(),
    'h': lazy.move(x=-1),
    'j': lazy.move(y=1),
    'k': lazy.move(y=-1),
    'l': lazy.move(x=1),
    'y': lambda browser: browser.roland.set_clipboard(browser.get_uri()),
    'g': lazy.set_mode(Mode.SubCommand, 'g', {
        'g': lazy.run_javascript('window.scrollTo(0, 0);'),
    }),
    'd': lazy.set_mode(Mode.SubCommand, 'd', {
        'l': lazy.list_downloads(),
        'c': lazy.cancel_download(),
    }),
    'G': lazy.run_javascript('window.scrollBy(0, document.body.scrollHeight);'),
}
e9ba95a16530ed29e387f6197573cc688b670aa1
Python/171_ExcelSheetColumNumber.py
Python/171_ExcelSheetColumNumber.py
class Solution(object):
    def titleToNumber(self, s):
        """
        :type s: str
        :rtype: int
        """
        # A-Z:65-90 || a-z:97-122
        # ord('A') = 65, chr(65)=A
        n = len(s)
        if n == 0:
            return 0
        result = 0
        for x in xrange(0, n):
            asc2 = ord(s[n-1-x])
            if 96<asc2<123:
                asc2 -= 32
            result += (26**x)*(asc2-64)
        return result

s = ""
foo = Solution()
print foo.titleToNumber(s)
Add solution for 171:Excel sheet column number
Add solution for 171:Excel sheet column number
Python
mit
comicxmz001/LeetCode,comicxmz001/LeetCode
Add solution for 171:Excel sheet column number
class Solution(object):
    def titleToNumber(self, s):
        """
        :type s: str
        :rtype: int
        """
        # A-Z:65-90 || a-z:97-122
        # ord('A') = 65, chr(65)=A
        n = len(s)
        if n == 0:
            return 0
        result = 0
        for x in xrange(0, n):
            asc2 = ord(s[n-1-x])
            if 96<asc2<123:
                asc2 -= 32
            result += (26**x)*(asc2-64)
        return result

s = ""
foo = Solution()
print foo.titleToNumber(s)
<commit_before><commit_msg>Add solution for 171:Excel sheet column number<commit_after>
class Solution(object):
    def titleToNumber(self, s):
        """
        :type s: str
        :rtype: int
        """
        # A-Z:65-90 || a-z:97-122
        # ord('A') = 65, chr(65)=A
        n = len(s)
        if n == 0:
            return 0
        result = 0
        for x in xrange(0, n):
            asc2 = ord(s[n-1-x])
            if 96<asc2<123:
                asc2 -= 32
            result += (26**x)*(asc2-64)
        return result

s = ""
foo = Solution()
print foo.titleToNumber(s)
Add solution for 171:Excel sheet column numberclass Solution(object):
    def titleToNumber(self, s):
        """
        :type s: str
        :rtype: int
        """
        # A-Z:65-90 || a-z:97-122
        # ord('A') = 65, chr(65)=A
        n = len(s)
        if n == 0:
            return 0
        result = 0
        for x in xrange(0, n):
            asc2 = ord(s[n-1-x])
            if 96<asc2<123:
                asc2 -= 32
            result += (26**x)*(asc2-64)
        return result

s = ""
foo = Solution()
print foo.titleToNumber(s)
<commit_before><commit_msg>Add solution for 171:Excel sheet column number<commit_after>class Solution(object):
    def titleToNumber(self, s):
        """
        :type s: str
        :rtype: int
        """
        # A-Z:65-90 || a-z:97-122
        # ord('A') = 65, chr(65)=A
        n = len(s)
        if n == 0:
            return 0
        result = 0
        for x in xrange(0, n):
            asc2 = ord(s[n-1-x])
            if 96<asc2<123:
                asc2 -= 32
            result += (26**x)*(asc2-64)
        return result

s = ""
foo = Solution()
print foo.titleToNumber(s)
a6cdc37e5770afb8995d9e04ef9a54b790861c3a
generate_conf.py
generate_conf.py
#!/usr/bin/env python3

RATE = 150000.00
FORMAT = 'list'
PORT = '445'
OUTFILE = 'internet_{0}.list'.format(PORT)
BANNER = 'false'

with open('internet.conf', 'w') as f:
    f.write('# Internet Masscan Configuration.\n')
    f.write('rate = {0}\n'.format(RATE))
    f.write('output-format = {0}\n'.format(FORMAT))
    f.write('output-filename = {0}\n'.format(OUTFILE))
    f.write('port = {0}\n'.format(PORT))
    f.write('exclude-file = exclude.conf\n')
    f.write('banners = {0}\n'.format(BANNER))
    f.write('range = 0.0.0.0/0\n')
Create a conf file for the internet.
Create a conf file for the internet.
Python
bsd-3-clause
averagesecurityguy/research
Create a conf file for the internet.
#!/usr/bin/env python3

RATE = 150000.00
FORMAT = 'list'
PORT = '445'
OUTFILE = 'internet_{0}.list'.format(PORT)
BANNER = 'false'

with open('internet.conf', 'w') as f:
    f.write('# Internet Masscan Configuration.\n')
    f.write('rate = {0}\n'.format(RATE))
    f.write('output-format = {0}\n'.format(FORMAT))
    f.write('output-filename = {0}\n'.format(OUTFILE))
    f.write('port = {0}\n'.format(PORT))
    f.write('exclude-file = exclude.conf\n')
    f.write('banners = {0}\n'.format(BANNER))
    f.write('range = 0.0.0.0/0\n')
<commit_before><commit_msg>Create a conf file for the internet.<commit_after>
#!/usr/bin/env python3

RATE = 150000.00
FORMAT = 'list'
PORT = '445'
OUTFILE = 'internet_{0}.list'.format(PORT)
BANNER = 'false'

with open('internet.conf', 'w') as f:
    f.write('# Internet Masscan Configuration.\n')
    f.write('rate = {0}\n'.format(RATE))
    f.write('output-format = {0}\n'.format(FORMAT))
    f.write('output-filename = {0}\n'.format(OUTFILE))
    f.write('port = {0}\n'.format(PORT))
    f.write('exclude-file = exclude.conf\n')
    f.write('banners = {0}\n'.format(BANNER))
    f.write('range = 0.0.0.0/0\n')
Create a conf file for the internet.#!/usr/bin/env python3

RATE = 150000.00
FORMAT = 'list'
PORT = '445'
OUTFILE = 'internet_{0}.list'.format(PORT)
BANNER = 'false'

with open('internet.conf', 'w') as f:
    f.write('# Internet Masscan Configuration.\n')
    f.write('rate = {0}\n'.format(RATE))
    f.write('output-format = {0}\n'.format(FORMAT))
    f.write('output-filename = {0}\n'.format(OUTFILE))
    f.write('port = {0}\n'.format(PORT))
    f.write('exclude-file = exclude.conf\n')
    f.write('banners = {0}\n'.format(BANNER))
    f.write('range = 0.0.0.0/0\n')
<commit_before><commit_msg>Create a conf file for the internet.<commit_after>#!/usr/bin/env python3

RATE = 150000.00
FORMAT = 'list'
PORT = '445'
OUTFILE = 'internet_{0}.list'.format(PORT)
BANNER = 'false'

with open('internet.conf', 'w') as f:
    f.write('# Internet Masscan Configuration.\n')
    f.write('rate = {0}\n'.format(RATE))
    f.write('output-format = {0}\n'.format(FORMAT))
    f.write('output-filename = {0}\n'.format(OUTFILE))
    f.write('port = {0}\n'.format(PORT))
    f.write('exclude-file = exclude.conf\n')
    f.write('banners = {0}\n'.format(BANNER))
    f.write('range = 0.0.0.0/0\n')
69d573d3b544a6da8dd37f7605ab95fedbe63f6e
tests/test_channelfile.py
tests/test_channelfile.py
from mock import patch, MagicMock

from paramiko import Channel, ChannelFile


class TestChannelFile(object):
    @patch("paramiko.channel.ChannelFile._set_mode")
    def test_defaults_to_unbuffered_reading(self, setmode):
        cf = ChannelFile(Channel(None))
        setmode.assert_called_once_with("r", -1)

    @patch("paramiko.channel.ChannelFile._set_mode")
    def test_can_override_mode_and_bufsize(self, setmode):
        cf = ChannelFile(Channel(None), mode="w", bufsize=25)
        setmode.assert_called_once_with("w", 25)

    def test_read_recvs_from_channel(self):
        chan = MagicMock()
        cf = ChannelFile(chan)
        cf.read(100)
        chan.recv.assert_called_once_with(100)

    def test_write_calls_channel_sendall(self):
        chan = MagicMock()
        cf = ChannelFile(chan, mode="w")
        cf.write("ohai")
        chan.sendall.assert_called_once_with(b"ohai")


def TestChannelStderrFile(object):
    def test_read_calls_channel_recv_stderr(self):
        chan = MagicMock()
        cf = ChannelStderrFile(chan)
        cf.read(100)
        chan.recv_stderr.assert_called_once_with(100)

    def test_write_calls_channel_sendall(self):
        chan = MagicMock()
        cf = ChannelStderrFile(chan, mode="w")
        cf.write("ohai")
        chan.sendall_stderr.assert_called_once_with(b"ohai")
Add some basic tests for Channel(Stderr)File
Add some basic tests for Channel(Stderr)File
Python
lgpl-2.1
ameily/paramiko,paramiko/paramiko
Add some basic tests for Channel(Stderr)File
from mock import patch, MagicMock

from paramiko import Channel, ChannelFile


class TestChannelFile(object):
    @patch("paramiko.channel.ChannelFile._set_mode")
    def test_defaults_to_unbuffered_reading(self, setmode):
        cf = ChannelFile(Channel(None))
        setmode.assert_called_once_with("r", -1)

    @patch("paramiko.channel.ChannelFile._set_mode")
    def test_can_override_mode_and_bufsize(self, setmode):
        cf = ChannelFile(Channel(None), mode="w", bufsize=25)
        setmode.assert_called_once_with("w", 25)

    def test_read_recvs_from_channel(self):
        chan = MagicMock()
        cf = ChannelFile(chan)
        cf.read(100)
        chan.recv.assert_called_once_with(100)

    def test_write_calls_channel_sendall(self):
        chan = MagicMock()
        cf = ChannelFile(chan, mode="w")
        cf.write("ohai")
        chan.sendall.assert_called_once_with(b"ohai")


def TestChannelStderrFile(object):
    def test_read_calls_channel_recv_stderr(self):
        chan = MagicMock()
        cf = ChannelStderrFile(chan)
        cf.read(100)
        chan.recv_stderr.assert_called_once_with(100)

    def test_write_calls_channel_sendall(self):
        chan = MagicMock()
        cf = ChannelStderrFile(chan, mode="w")
        cf.write("ohai")
        chan.sendall_stderr.assert_called_once_with(b"ohai")
<commit_before><commit_msg>Add some basic tests for Channel(Stderr)File<commit_after>
from mock import patch, MagicMock

from paramiko import Channel, ChannelFile


class TestChannelFile(object):
    @patch("paramiko.channel.ChannelFile._set_mode")
    def test_defaults_to_unbuffered_reading(self, setmode):
        cf = ChannelFile(Channel(None))
        setmode.assert_called_once_with("r", -1)

    @patch("paramiko.channel.ChannelFile._set_mode")
    def test_can_override_mode_and_bufsize(self, setmode):
        cf = ChannelFile(Channel(None), mode="w", bufsize=25)
        setmode.assert_called_once_with("w", 25)

    def test_read_recvs_from_channel(self):
        chan = MagicMock()
        cf = ChannelFile(chan)
        cf.read(100)
        chan.recv.assert_called_once_with(100)

    def test_write_calls_channel_sendall(self):
        chan = MagicMock()
        cf = ChannelFile(chan, mode="w")
        cf.write("ohai")
        chan.sendall.assert_called_once_with(b"ohai")


def TestChannelStderrFile(object):
    def test_read_calls_channel_recv_stderr(self):
        chan = MagicMock()
        cf = ChannelStderrFile(chan)
        cf.read(100)
        chan.recv_stderr.assert_called_once_with(100)

    def test_write_calls_channel_sendall(self):
        chan = MagicMock()
        cf = ChannelStderrFile(chan, mode="w")
        cf.write("ohai")
        chan.sendall_stderr.assert_called_once_with(b"ohai")
Add some basic tests for Channel(Stderr)Filefrom mock import patch, MagicMock

from paramiko import Channel, ChannelFile


class TestChannelFile(object):
    @patch("paramiko.channel.ChannelFile._set_mode")
    def test_defaults_to_unbuffered_reading(self, setmode):
        cf = ChannelFile(Channel(None))
        setmode.assert_called_once_with("r", -1)

    @patch("paramiko.channel.ChannelFile._set_mode")
    def test_can_override_mode_and_bufsize(self, setmode):
        cf = ChannelFile(Channel(None), mode="w", bufsize=25)
        setmode.assert_called_once_with("w", 25)

    def test_read_recvs_from_channel(self):
        chan = MagicMock()
        cf = ChannelFile(chan)
        cf.read(100)
        chan.recv.assert_called_once_with(100)

    def test_write_calls_channel_sendall(self):
        chan = MagicMock()
        cf = ChannelFile(chan, mode="w")
        cf.write("ohai")
        chan.sendall.assert_called_once_with(b"ohai")


def TestChannelStderrFile(object):
    def test_read_calls_channel_recv_stderr(self):
        chan = MagicMock()
        cf = ChannelStderrFile(chan)
        cf.read(100)
        chan.recv_stderr.assert_called_once_with(100)

    def test_write_calls_channel_sendall(self):
        chan = MagicMock()
        cf = ChannelStderrFile(chan, mode="w")
        cf.write("ohai")
        chan.sendall_stderr.assert_called_once_with(b"ohai")
<commit_before><commit_msg>Add some basic tests for Channel(Stderr)File<commit_after>from mock import patch, MagicMock

from paramiko import Channel, ChannelFile


class TestChannelFile(object):
    @patch("paramiko.channel.ChannelFile._set_mode")
    def test_defaults_to_unbuffered_reading(self, setmode):
        cf = ChannelFile(Channel(None))
        setmode.assert_called_once_with("r", -1)

    @patch("paramiko.channel.ChannelFile._set_mode")
    def test_can_override_mode_and_bufsize(self, setmode):
        cf = ChannelFile(Channel(None), mode="w", bufsize=25)
        setmode.assert_called_once_with("w", 25)

    def test_read_recvs_from_channel(self):
        chan = MagicMock()
        cf = ChannelFile(chan)
        cf.read(100)
        chan.recv.assert_called_once_with(100)

    def test_write_calls_channel_sendall(self):
        chan = MagicMock()
        cf = ChannelFile(chan, mode="w")
        cf.write("ohai")
        chan.sendall.assert_called_once_with(b"ohai")


def TestChannelStderrFile(object):
    def test_read_calls_channel_recv_stderr(self):
        chan = MagicMock()
        cf = ChannelStderrFile(chan)
        cf.read(100)
        chan.recv_stderr.assert_called_once_with(100)

    def test_write_calls_channel_sendall(self):
        chan = MagicMock()
        cf = ChannelStderrFile(chan, mode="w")
        cf.write("ohai")
        chan.sendall_stderr.assert_called_once_with(b"ohai")
f2c6e9033b3ff33acdc0bd763a45ad6aa78b9f5e
generate_enums.py
generate_enums.py
#!/usr/bin/env python

import re

valgrind_home = "/usr"

input = open(valgrind_home + "/include/valgrind/libvex_ir.h").read()
input += open(valgrind_home + "/include/valgrind/libvex.h").read()

out = """
#include <libvex.h>
#include <stdio.h>
#include <string.h>
#include "pyvex_macros.h"
#include "pyvex_logging.h"
"""

errors = ["VexArchInfo"]
enums = [
    ("VexArch", r"VexArch\w+"),
    ("IRExprTag", r"Iex_\w+"),
    ("IRStmtTag", r"Ist_\w+"),
    ("IREndness", r"Iend_\w+"),
    ("IRMBusEvent", r"Imbe_\w+"),
    ("IREffect", r"Ifx_\w+"),
    ("IRJumpKind", r"Ijk_\w+"),
    ("IRConstTag", r"Ico_\w+"),
    ("IRType", r"Ity_\w+"),
    ("IROp", r"Iop_\w+"),
]

to_str = """
const char *{0}_to_str({0} e)
{{
    switch(e)
    {{
{1}
        default:
            error("PyVEX: Unknown {0}");
            return NULL;
    }}
}}
"""

from_str = """
{0} str_to_{0}(const char *s)
{{
{1}
    return -1;
}}
"""

# http://stackoverflow.com/questions/480214
def uniq(seq):
    seen = set()
    seen_add = seen.add
    return [ x for x in seq if x not in seen and not seen_add(x)]

for ty,enum in enums:
    insts = uniq(re.findall(enum, input))
    insts = [x for x in insts if x not in errors]

    to_strs = "\n".join("\t\tPYVEX_ENUMCONV_TOSTRCASE("+x+")" for x in insts)
    out += to_str.format(ty, to_strs)

    from_strs = "\n".join("\tPYVEX_ENUMCONV_FROMSTR("+x+")" for x in insts)
    out += from_str.format(ty, from_strs)

print out
Add script for generating enums.
Add script for generating enums.
Python
bsd-2-clause
chubbymaggie/pyvex,0xbc/pyvex,0xbc/pyvex,avain/pyvex,angr/pyvex,angr/pyvex,avain/pyvex,chubbymaggie/pyvex
Add script for generating enums.
#!/usr/bin/env python

import re

valgrind_home = "/usr"

input = open(valgrind_home + "/include/valgrind/libvex_ir.h").read()
input += open(valgrind_home + "/include/valgrind/libvex.h").read()

out = """
#include <libvex.h>
#include <stdio.h>
#include <string.h>
#include "pyvex_macros.h"
#include "pyvex_logging.h"
"""

errors = ["VexArchInfo"]
enums = [
    ("VexArch", r"VexArch\w+"),
    ("IRExprTag", r"Iex_\w+"),
    ("IRStmtTag", r"Ist_\w+"),
    ("IREndness", r"Iend_\w+"),
    ("IRMBusEvent", r"Imbe_\w+"),
    ("IREffect", r"Ifx_\w+"),
    ("IRJumpKind", r"Ijk_\w+"),
    ("IRConstTag", r"Ico_\w+"),
    ("IRType", r"Ity_\w+"),
    ("IROp", r"Iop_\w+"),
]

to_str = """
const char *{0}_to_str({0} e)
{{
    switch(e)
    {{
{1}
        default:
            error("PyVEX: Unknown {0}");
            return NULL;
    }}
}}
"""

from_str = """
{0} str_to_{0}(const char *s)
{{
{1}
    return -1;
}}
"""

# http://stackoverflow.com/questions/480214
def uniq(seq):
    seen = set()
    seen_add = seen.add
    return [ x for x in seq if x not in seen and not seen_add(x)]

for ty,enum in enums:
    insts = uniq(re.findall(enum, input))
    insts = [x for x in insts if x not in errors]

    to_strs = "\n".join("\t\tPYVEX_ENUMCONV_TOSTRCASE("+x+")" for x in insts)
    out += to_str.format(ty, to_strs)

    from_strs = "\n".join("\tPYVEX_ENUMCONV_FROMSTR("+x+")" for x in insts)
    out += from_str.format(ty, from_strs)

print out
<commit_before><commit_msg>Add script for generating enums.<commit_after>
#!/usr/bin/env python

import re

valgrind_home = "/usr"

input = open(valgrind_home + "/include/valgrind/libvex_ir.h").read()
input += open(valgrind_home + "/include/valgrind/libvex.h").read()

out = """
#include <libvex.h>
#include <stdio.h>
#include <string.h>
#include "pyvex_macros.h"
#include "pyvex_logging.h"
"""

errors = ["VexArchInfo"]
enums = [
    ("VexArch", r"VexArch\w+"),
    ("IRExprTag", r"Iex_\w+"),
    ("IRStmtTag", r"Ist_\w+"),
    ("IREndness", r"Iend_\w+"),
    ("IRMBusEvent", r"Imbe_\w+"),
    ("IREffect", r"Ifx_\w+"),
    ("IRJumpKind", r"Ijk_\w+"),
    ("IRConstTag", r"Ico_\w+"),
    ("IRType", r"Ity_\w+"),
    ("IROp", r"Iop_\w+"),
]

to_str = """
const char *{0}_to_str({0} e)
{{
    switch(e)
    {{
{1}
        default:
            error("PyVEX: Unknown {0}");
            return NULL;
    }}
}}
"""

from_str = """
{0} str_to_{0}(const char *s)
{{
{1}
    return -1;
}}
"""

# http://stackoverflow.com/questions/480214
def uniq(seq):
    seen = set()
    seen_add = seen.add
    return [ x for x in seq if x not in seen and not seen_add(x)]

for ty,enum in enums:
    insts = uniq(re.findall(enum, input))
    insts = [x for x in insts if x not in errors]

    to_strs = "\n".join("\t\tPYVEX_ENUMCONV_TOSTRCASE("+x+")" for x in insts)
    out += to_str.format(ty, to_strs)

    from_strs = "\n".join("\tPYVEX_ENUMCONV_FROMSTR("+x+")" for x in insts)
    out += from_str.format(ty, from_strs)

print out
Add script for generating enums.#!/usr/bin/env python

import re

valgrind_home = "/usr"

input = open(valgrind_home + "/include/valgrind/libvex_ir.h").read()
input += open(valgrind_home + "/include/valgrind/libvex.h").read()

out = """
#include <libvex.h>
#include <stdio.h>
#include <string.h>
#include "pyvex_macros.h"
#include "pyvex_logging.h"
"""

errors = ["VexArchInfo"]
enums = [
    ("VexArch", r"VexArch\w+"),
    ("IRExprTag", r"Iex_\w+"),
    ("IRStmtTag", r"Ist_\w+"),
    ("IREndness", r"Iend_\w+"),
    ("IRMBusEvent", r"Imbe_\w+"),
    ("IREffect", r"Ifx_\w+"),
    ("IRJumpKind", r"Ijk_\w+"),
    ("IRConstTag", r"Ico_\w+"),
    ("IRType", r"Ity_\w+"),
    ("IROp", r"Iop_\w+"),
]

to_str = """
const char *{0}_to_str({0} e)
{{
    switch(e)
    {{
{1}
        default:
            error("PyVEX: Unknown {0}");
            return NULL;
    }}
}}
"""

from_str = """
{0} str_to_{0}(const char *s)
{{
{1}
    return -1;
}}
"""

# http://stackoverflow.com/questions/480214
def uniq(seq):
    seen = set()
    seen_add = seen.add
    return [ x for x in seq if x not in seen and not seen_add(x)]

for ty,enum in enums:
    insts = uniq(re.findall(enum, input))
    insts = [x for x in insts if x not in errors]

    to_strs = "\n".join("\t\tPYVEX_ENUMCONV_TOSTRCASE("+x+")" for x in insts)
    out += to_str.format(ty, to_strs)

    from_strs = "\n".join("\tPYVEX_ENUMCONV_FROMSTR("+x+")" for x in insts)
    out += from_str.format(ty, from_strs)

print out
<commit_before><commit_msg>Add script for generating enums.<commit_after>#!/usr/bin/env python

import re

valgrind_home = "/usr"

input = open(valgrind_home + "/include/valgrind/libvex_ir.h").read()
input += open(valgrind_home + "/include/valgrind/libvex.h").read()

out = """
#include <libvex.h>
#include <stdio.h>
#include <string.h>
#include "pyvex_macros.h"
#include "pyvex_logging.h"
"""

errors = ["VexArchInfo"]
enums = [
    ("VexArch", r"VexArch\w+"),
    ("IRExprTag", r"Iex_\w+"),
    ("IRStmtTag", r"Ist_\w+"),
    ("IREndness", r"Iend_\w+"),
    ("IRMBusEvent", r"Imbe_\w+"),
    ("IREffect", r"Ifx_\w+"),
    ("IRJumpKind", r"Ijk_\w+"),
    ("IRConstTag", r"Ico_\w+"),
    ("IRType", r"Ity_\w+"),
    ("IROp", r"Iop_\w+"),
]

to_str = """
const char *{0}_to_str({0} e)
{{
    switch(e)
    {{
{1}
        default:
            error("PyVEX: Unknown {0}");
            return NULL;
    }}
}}
"""

from_str = """
{0} str_to_{0}(const char *s)
{{
{1}
    return -1;
}}
"""

# http://stackoverflow.com/questions/480214
def uniq(seq):
    seen = set()
    seen_add = seen.add
    return [ x for x in seq if x not in seen and not seen_add(x)]

for ty,enum in enums:
    insts = uniq(re.findall(enum, input))
    insts = [x for x in insts if x not in errors]

    to_strs = "\n".join("\t\tPYVEX_ENUMCONV_TOSTRCASE("+x+")" for x in insts)
    out += to_str.format(ty, to_strs)

    from_strs = "\n".join("\tPYVEX_ENUMCONV_FROMSTR("+x+")" for x in insts)
    out += from_str.format(ty, from_strs)

print out
8fe5d28b227ad0e59561e6be210eebfaa0f38ac8
paystackapi/tests/test_subaccount.py
paystackapi/tests/test_subaccount.py
import httpretty

from paystackapi.tests.base_test_case import BaseTestCase
from paystackapi.subaccount import SubAccount


class TestSubAccount(BaseTestCase):

    @httpretty.activate
    def test_subaccount_create(self):
        pass
Add initial test class for subaccount
Add initial test class for subaccount
Python
mit
andela-sjames/paystack-python
Add initial test class for subaccount
import httpretty

from paystackapi.tests.base_test_case import BaseTestCase
from paystackapi.subaccount import SubAccount


class TestSubAccount(BaseTestCase):

    @httpretty.activate
    def test_subaccount_create(self):
        pass
<commit_before><commit_msg>Add initial test class for subaccount<commit_after>
import httpretty

from paystackapi.tests.base_test_case import BaseTestCase
from paystackapi.subaccount import SubAccount


class TestSubAccount(BaseTestCase):

    @httpretty.activate
    def test_subaccount_create(self):
        pass
Add initial test class for subaccountimport httpretty

from paystackapi.tests.base_test_case import BaseTestCase
from paystackapi.subaccount import SubAccount


class TestSubAccount(BaseTestCase):

    @httpretty.activate
    def test_subaccount_create(self):
        pass
<commit_before><commit_msg>Add initial test class for subaccount<commit_after>import httpretty

from paystackapi.tests.base_test_case import BaseTestCase
from paystackapi.subaccount import SubAccount


class TestSubAccount(BaseTestCase):

    @httpretty.activate
    def test_subaccount_create(self):
        pass
503dd762c43f37b22407472271e9c4492827935a
pycoreutils/test/test_pycoreutils.py
pycoreutils/test/test_pycoreutils.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Copyright (c) 2009, 2010, 2011 Hans van Leeuwen.
# See LICENSE.txt for details.

from __future__ import unicode_literals
import unittest

import pycoreutils
from pycoreutils.test import BaseTestCase


class TestCase(BaseTestCase):
    def test_getcommand(self):
        for cmd in pycoreutils.command.__all__:
            pycoreutils.getcommand(cmd[4:])


if __name__ == '__main__':
    unittest.main()
Test if all commands can import
Test if all commands can import
Python
mit
davidfischer/pycoreutils
Test if all commands can import
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Copyright (c) 2009, 2010, 2011 Hans van Leeuwen.
# See LICENSE.txt for details.

from __future__ import unicode_literals
import unittest

import pycoreutils
from pycoreutils.test import BaseTestCase


class TestCase(BaseTestCase):
    def test_getcommand(self):
        for cmd in pycoreutils.command.__all__:
            pycoreutils.getcommand(cmd[4:])


if __name__ == '__main__':
    unittest.main()
<commit_before><commit_msg>Test if all commands can import<commit_after>
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Copyright (c) 2009, 2010, 2011 Hans van Leeuwen.
# See LICENSE.txt for details.

from __future__ import unicode_literals
import unittest

import pycoreutils
from pycoreutils.test import BaseTestCase


class TestCase(BaseTestCase):
    def test_getcommand(self):
        for cmd in pycoreutils.command.__all__:
            pycoreutils.getcommand(cmd[4:])


if __name__ == '__main__':
    unittest.main()
Test if all commands can import#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Copyright (c) 2009, 2010, 2011 Hans van Leeuwen.
# See LICENSE.txt for details.

from __future__ import unicode_literals
import unittest

import pycoreutils
from pycoreutils.test import BaseTestCase


class TestCase(BaseTestCase):
    def test_getcommand(self):
        for cmd in pycoreutils.command.__all__:
            pycoreutils.getcommand(cmd[4:])


if __name__ == '__main__':
    unittest.main()
<commit_before><commit_msg>Test if all commands can import<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Copyright (c) 2009, 2010, 2011 Hans van Leeuwen.
# See LICENSE.txt for details.

from __future__ import unicode_literals
import unittest

import pycoreutils
from pycoreutils.test import BaseTestCase


class TestCase(BaseTestCase):
    def test_getcommand(self):
        for cmd in pycoreutils.command.__all__:
            pycoreutils.getcommand(cmd[4:])


if __name__ == '__main__':
    unittest.main()
b1149ce5e0bc639a5118432a1fdfd8af546996f7
rasterio/tests/test_blocks.py
rasterio/tests/test_blocks.py
import unittest

import rasterio


class RasterBlocksTest(unittest.TestCase):
    def test_blocks(self):
        with rasterio.open('rasterio/tests/data/RGB.byte.tif') as s:
            blocks = list(s.blocks)
            self.assertEqual(len(blocks), 1)
Test of access to a dataset's blocks.
Test of access to a dataset's blocks. See #6.
Python
bsd-3-clause
perrygeo/rasterio,kapadia/rasterio,johanvdw/rasterio,sgillies/rasterio,njwilson23/rasterio,clembou/rasterio,snorfalorpagus/rasterio,youngpm/rasterio,njwilson23/rasterio,johanvdw/rasterio,youngpm/rasterio,brendan-ward/rasterio,clembou/rasterio,brendan-ward/rasterio,johanvdw/rasterio,clembou/rasterio,brendan-ward/rasterio,kapadia/rasterio,kapadia/rasterio,perrygeo/rasterio,youngpm/rasterio,perrygeo/rasterio,njwilson23/rasterio
Test of access to a dataset's blocks. See #6.
import unittest

import rasterio


class RasterBlocksTest(unittest.TestCase):
    def test_blocks(self):
        with rasterio.open('rasterio/tests/data/RGB.byte.tif') as s:
            blocks = list(s.blocks)
            self.assertEqual(len(blocks), 1)
<commit_before><commit_msg>Test of access to a dataset's blocks. See #6.<commit_after>
import unittest

import rasterio


class RasterBlocksTest(unittest.TestCase):
    def test_blocks(self):
        with rasterio.open('rasterio/tests/data/RGB.byte.tif') as s:
            blocks = list(s.blocks)
            self.assertEqual(len(blocks), 1)
Test of access to a dataset's blocks. See #6.import unittest

import rasterio


class RasterBlocksTest(unittest.TestCase):
    def test_blocks(self):
        with rasterio.open('rasterio/tests/data/RGB.byte.tif') as s:
            blocks = list(s.blocks)
            self.assertEqual(len(blocks), 1)
<commit_before><commit_msg>Test of access to a dataset's blocks. See #6.<commit_after>import unittest

import rasterio


class RasterBlocksTest(unittest.TestCase):
    def test_blocks(self):
        with rasterio.open('rasterio/tests/data/RGB.byte.tif') as s:
            blocks = list(s.blocks)
            self.assertEqual(len(blocks), 1)
b4a700dca980ef91247a0d0b0601fcb2c5166c07
laalaa/apps/advisers/tests/test_seed.py
laalaa/apps/advisers/tests/test_seed.py
from django.core.management import call_command
from django.test import TestCase

from advisers import models, tasks


class SeedTest(TestCase):
    def test_seed_loads_models_with_organisation_type(self):
        tasks.clear_db()
        call_command("seed")
        # OrganisationType model loaded from initial_categories.json
        self.assertGreater(models.OrganisationType.objects.count(), 0)
        # Location model loaded from initial_advisers.json
        self.assertGreater(models.Location.objects.count(), 0)
Create test case to ensure data from initial_categories is loaded
Create test case to ensure data from initial_categories is loaded

Had to run tasks.clear_db() first as one of the migration files includes importing the initial_categories.json file and saving this to the db. Tests check data from both fixture files, initial_categories.json and initial_advisers.json are both present
Python
mit
ministryofjustice/laa-legal-adviser-api,ministryofjustice/laa-legal-adviser-api,ministryofjustice/laa-legal-adviser-api
Create test case to ensure data from initial_categories is loaded

Had to run tasks.clear_db() first as one of the migration files includes importing the initial_categories.json file and saving this to the db. Tests check data from both fixture files, initial_categories.json and initial_advisers.json are both present
from django.core.management import call_command
from django.test import TestCase

from advisers import models, tasks


class SeedTest(TestCase):
    def test_seed_loads_models_with_organisation_type(self):
        tasks.clear_db()
        call_command("seed")
        # OrganisationType model loaded from initial_categories.json
        self.assertGreater(models.OrganisationType.objects.count(), 0)
        # Location model loaded from initial_advisers.json
        self.assertGreater(models.Location.objects.count(), 0)
<commit_before><commit_msg>Create test case to ensure data from initial_categories is loaded

Had to run tasks.clear_db() first as one of the migration files includes importing the initial_categories.json file and saving this to the db. Tests check data from both fixture files, initial_categories.json and initial_advisers.json are both present<commit_after>
from django.core.management import call_command
from django.test import TestCase

from advisers import models, tasks


class SeedTest(TestCase):
    def test_seed_loads_models_with_organisation_type(self):
        tasks.clear_db()
        call_command("seed")
        # OrganisationType model loaded from initial_categories.json
        self.assertGreater(models.OrganisationType.objects.count(), 0)
        # Location model loaded from initial_advisers.json
        self.assertGreater(models.Location.objects.count(), 0)
Create test case to ensure data from initial_categories is loaded

Had to run tasks.clear_db() first as one of the migration files includes importing the initial_categories.json file and saving this to the db. Tests check data from both fixture files, initial_categories.json and initial_advisers.json are both presentfrom django.core.management import call_command
from django.test import TestCase

from advisers import models, tasks


class SeedTest(TestCase):
    def test_seed_loads_models_with_organisation_type(self):
        tasks.clear_db()
        call_command("seed")
        # OrganisationType model loaded from initial_categories.json
        self.assertGreater(models.OrganisationType.objects.count(), 0)
        # Location model loaded from initial_advisers.json
        self.assertGreater(models.Location.objects.count(), 0)
<commit_before><commit_msg>Create test case to ensure data from initial_categories is loaded

Had to run tasks.clear_db() first as one of the migration files includes importing the initial_categories.json file and saving this to the db. Tests check data from both fixture files, initial_categories.json and initial_advisers.json are both present<commit_after>from django.core.management import call_command
from django.test import TestCase

from advisers import models, tasks


class SeedTest(TestCase):
    def test_seed_loads_models_with_organisation_type(self):
        tasks.clear_db()
        call_command("seed")
        # OrganisationType model loaded from initial_categories.json
        self.assertGreater(models.OrganisationType.objects.count(), 0)
        # Location model loaded from initial_advisers.json
        self.assertGreater(models.Location.objects.count(), 0)
3e339aca69a9db2ff2cc2a05d6c7f7dbc1b2bb83
bin/get_barcode_hathi_record_number.py
bin/get_barcode_hathi_record_number.py
#!/usr/bin/env python3
# Copyright (c) 2017 The Regents of the University of Michigan.
# All Rights Reserved. Licensed according to the terms of the Revised
# BSD License. See LICENSE.txt for details.

from argparse import ArgumentParser
from urllib.request import urlopen

from falcom.api.uri import URI, APIQuerier
from falcom.api.marc import get_marc_data_from_xml

AlephURI = URI("http://mirlyn-aleph.lib.umich.edu/cgi-bin/bc2meta")
aleph_api = APIQuerier(AlephURI, url_opener=urlopen)

parser = ArgumentParser(description="Get info for barcodes")
parser.add_argument("barcodes", nargs="+")
args = parser.parse_args()

for barcode in args.barcodes:
    marc = get_marc_data_from_xml(aleph_api.get(
            id=barcode, type="bc", schema="marcxml"))

    if not marc:
        marc = get_marc_data_from_xml(aleph_api.get(
                id="mdp."+barcode, schema="marcxml"))

    if marc.bib:
        print("\t".join((barcode, marc.bib)))
Add binary script for getting HT record number
Add binary script for getting HT record number
Python
bsd-3-clause
mlibrary/image-conversion-and-validation,mlibrary/image-conversion-and-validation
Add binary script for getting HT record number
#!/usr/bin/env python3
# Copyright (c) 2017 The Regents of the University of Michigan.
# All Rights Reserved. Licensed according to the terms of the Revised
# BSD License. See LICENSE.txt for details.

from argparse import ArgumentParser
from urllib.request import urlopen

from falcom.api.uri import URI, APIQuerier
from falcom.api.marc import get_marc_data_from_xml

AlephURI = URI("http://mirlyn-aleph.lib.umich.edu/cgi-bin/bc2meta")
aleph_api = APIQuerier(AlephURI, url_opener=urlopen)

parser = ArgumentParser(description="Get info for barcodes")
parser.add_argument("barcodes", nargs="+")
args = parser.parse_args()

for barcode in args.barcodes:
    marc = get_marc_data_from_xml(aleph_api.get(
            id=barcode, type="bc", schema="marcxml"))

    if not marc:
        marc = get_marc_data_from_xml(aleph_api.get(
                id="mdp."+barcode, schema="marcxml"))

    if marc.bib:
        print("\t".join((barcode, marc.bib)))
<commit_before><commit_msg>Add binary script for getting HT record number<commit_after>
#!/usr/bin/env python3
# Copyright (c) 2017 The Regents of the University of Michigan.
# All Rights Reserved. Licensed according to the terms of the Revised
# BSD License. See LICENSE.txt for details.

from argparse import ArgumentParser
from urllib.request import urlopen

from falcom.api.uri import URI, APIQuerier
from falcom.api.marc import get_marc_data_from_xml

AlephURI = URI("http://mirlyn-aleph.lib.umich.edu/cgi-bin/bc2meta")
aleph_api = APIQuerier(AlephURI, url_opener=urlopen)

parser = ArgumentParser(description="Get info for barcodes")
parser.add_argument("barcodes", nargs="+")
args = parser.parse_args()

for barcode in args.barcodes:
    marc = get_marc_data_from_xml(aleph_api.get(
            id=barcode, type="bc", schema="marcxml"))

    if not marc:
        marc = get_marc_data_from_xml(aleph_api.get(
                id="mdp."+barcode, schema="marcxml"))

    if marc.bib:
        print("\t".join((barcode, marc.bib)))
Add binary script for getting HT record number#!/usr/bin/env python3
# Copyright (c) 2017 The Regents of the University of Michigan.
# All Rights Reserved. Licensed according to the terms of the Revised
# BSD License. See LICENSE.txt for details.

from argparse import ArgumentParser
from urllib.request import urlopen

from falcom.api.uri import URI, APIQuerier
from falcom.api.marc import get_marc_data_from_xml

AlephURI = URI("http://mirlyn-aleph.lib.umich.edu/cgi-bin/bc2meta")
aleph_api = APIQuerier(AlephURI, url_opener=urlopen)

parser = ArgumentParser(description="Get info for barcodes")
parser.add_argument("barcodes", nargs="+")
args = parser.parse_args()

for barcode in args.barcodes:
    marc = get_marc_data_from_xml(aleph_api.get(
            id=barcode, type="bc", schema="marcxml"))

    if not marc:
        marc = get_marc_data_from_xml(aleph_api.get(
                id="mdp."+barcode, schema="marcxml"))

    if marc.bib:
        print("\t".join((barcode, marc.bib)))
<commit_before><commit_msg>Add binary script for getting HT record number<commit_after>#!/usr/bin/env python3
# Copyright (c) 2017 The Regents of the University of Michigan.
# All Rights Reserved. Licensed according to the terms of the Revised
# BSD License. See LICENSE.txt for details.

from argparse import ArgumentParser
from urllib.request import urlopen

from falcom.api.uri import URI, APIQuerier
from falcom.api.marc import get_marc_data_from_xml

AlephURI = URI("http://mirlyn-aleph.lib.umich.edu/cgi-bin/bc2meta")
aleph_api = APIQuerier(AlephURI, url_opener=urlopen)

parser = ArgumentParser(description="Get info for barcodes")
parser.add_argument("barcodes", nargs="+")
args = parser.parse_args()

for barcode in args.barcodes:
    marc = get_marc_data_from_xml(aleph_api.get(
            id=barcode, type="bc", schema="marcxml"))

    if not marc:
        marc = get_marc_data_from_xml(aleph_api.get(
                id="mdp."+barcode, schema="marcxml"))

    if marc.bib:
        print("\t".join((barcode, marc.bib)))
7a04457e8003a638f0356e18d3c424f688e40a8f
tests/cli/test_rasa_show.py
tests/cli/test_rasa_show.py
def test_show_stories_help(run):
    help = run("show", "stories", "--help")

    help_text = """usage: rasa show stories [-h] [-v] [-vv] [--quiet] [-d DOMAIN] [-s STORIES]
                         [-c CONFIG] [--output OUTPUT]
                         [--max-history MAX_HISTORY] [-nlu NLU_DATA]"""

    lines = help_text.split("\n")

    for i, line in enumerate(lines):
        assert help.outlines[i] == line
Test rasa show stories help
Test rasa show stories help
Python
apache-2.0
RasaHQ/rasa_nlu,RasaHQ/rasa_nlu,RasaHQ/rasa_nlu
Test rasa show stories help
def test_show_stories_help(run):
    help = run("show", "stories", "--help")

    help_text = """usage: rasa show stories [-h] [-v] [-vv] [--quiet] [-d DOMAIN] [-s STORIES]
                         [-c CONFIG] [--output OUTPUT]
                         [--max-history MAX_HISTORY] [-nlu NLU_DATA]"""

    lines = help_text.split("\n")

    for i, line in enumerate(lines):
        assert help.outlines[i] == line
<commit_before><commit_msg>Test rasa show stories help<commit_after>
def test_show_stories_help(run):
    help = run("show", "stories", "--help")

    help_text = """usage: rasa show stories [-h] [-v] [-vv] [--quiet] [-d DOMAIN] [-s STORIES]
                         [-c CONFIG] [--output OUTPUT]
                         [--max-history MAX_HISTORY] [-nlu NLU_DATA]"""

    lines = help_text.split("\n")

    for i, line in enumerate(lines):
        assert help.outlines[i] == line
Test rasa show stories helpdef test_show_stories_help(run):
    help = run("show", "stories", "--help")

    help_text = """usage: rasa show stories [-h] [-v] [-vv] [--quiet] [-d DOMAIN] [-s STORIES]
                         [-c CONFIG] [--output OUTPUT]
                         [--max-history MAX_HISTORY] [-nlu NLU_DATA]"""

    lines = help_text.split("\n")

    for i, line in enumerate(lines):
        assert help.outlines[i] == line
<commit_before><commit_msg>Test rasa show stories help<commit_after>def test_show_stories_help(run):
    help = run("show", "stories", "--help")

    help_text = """usage: rasa show stories [-h] [-v] [-vv] [--quiet] [-d DOMAIN] [-s STORIES]
                         [-c CONFIG] [--output OUTPUT]
                         [--max-history MAX_HISTORY] [-nlu NLU_DATA]"""

    lines = help_text.split("\n")

    for i, line in enumerate(lines):
        assert help.outlines[i] == line
9d29b4c569362dcd0af9519587aab576b895e159
txt2ml.py
txt2ml.py
"""Script to convert text file to input for embem classifier. The script tokenizes the text and writes it to a new file containing: <sentence id>\t<sentence (tokens separated by space)>\tNone\n Usage: python txt2ml.py <dir in> <dir out> """ import argparse import nltk.data from nltk.tokenize import word_tokenize import codecs import os if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('dir_in', help='the name of the list containing the ' 'selected ceneton titles (xlsx)') parser.add_argument('dir_out', help='the name of the output directory') args = parser.parse_args() tokenizer = nltk.data.load('tokenizers/punkt/dutch.pickle') text_files = [t for t in os.listdir(args.dir_in) if t.endswith('.txt')] for text_file in text_files: print text_file text = '' fname = os.path.join(args.dir_in, text_file) with codecs.open(fname, 'rb', 'utf8') as f: text = f.read() text = text.replace('\n', ' ') sentences = tokenizer.tokenize(text) fname = os.path.join(args.dir_out, text_file) with codecs.open(fname, 'wb', 'utf8') as f: for i, s in enumerate(sentences): words = word_tokenize(s) words_str = unicode(' '.join(words)) s_id = '{}_s_{}'.format(text_file.replace('.txt', ''), i) f.write(u'{}\t{}\tNone\n'.format(s_id, words_str))
Add script that converts text files to input for the classifier
Add script that converts text files to input for the classifier

The script tokenizes the text and writes sentences in the correct format to file. Used for the Ceneton texts.
Python
apache-2.0
NLeSC/embodied-emotions-scripts,NLeSC/embodied-emotions-scripts
Add script that converts text files to input for the classifier

The script tokenizes the text and writes sentences in the correct format to file. Used for the Ceneton texts.
"""Script to convert text file to input for embem classifier. The script tokenizes the text and writes it to a new file containing: <sentence id>\t<sentence (tokens separated by space)>\tNone\n Usage: python txt2ml.py <dir in> <dir out> """ import argparse import nltk.data from nltk.tokenize import word_tokenize import codecs import os if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('dir_in', help='the name of the list containing the ' 'selected ceneton titles (xlsx)') parser.add_argument('dir_out', help='the name of the output directory') args = parser.parse_args() tokenizer = nltk.data.load('tokenizers/punkt/dutch.pickle') text_files = [t for t in os.listdir(args.dir_in) if t.endswith('.txt')] for text_file in text_files: print text_file text = '' fname = os.path.join(args.dir_in, text_file) with codecs.open(fname, 'rb', 'utf8') as f: text = f.read() text = text.replace('\n', ' ') sentences = tokenizer.tokenize(text) fname = os.path.join(args.dir_out, text_file) with codecs.open(fname, 'wb', 'utf8') as f: for i, s in enumerate(sentences): words = word_tokenize(s) words_str = unicode(' '.join(words)) s_id = '{}_s_{}'.format(text_file.replace('.txt', ''), i) f.write(u'{}\t{}\tNone\n'.format(s_id, words_str))
<commit_before><commit_msg>Add script that converts text files to input for the classifier

The script tokenizes the text and writes sentences in the correct format to file. Used for the Ceneton texts.<commit_after>
"""Script to convert text file to input for embem classifier. The script tokenizes the text and writes it to a new file containing: <sentence id>\t<sentence (tokens separated by space)>\tNone\n Usage: python txt2ml.py <dir in> <dir out> """ import argparse import nltk.data from nltk.tokenize import word_tokenize import codecs import os if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('dir_in', help='the name of the list containing the ' 'selected ceneton titles (xlsx)') parser.add_argument('dir_out', help='the name of the output directory') args = parser.parse_args() tokenizer = nltk.data.load('tokenizers/punkt/dutch.pickle') text_files = [t for t in os.listdir(args.dir_in) if t.endswith('.txt')] for text_file in text_files: print text_file text = '' fname = os.path.join(args.dir_in, text_file) with codecs.open(fname, 'rb', 'utf8') as f: text = f.read() text = text.replace('\n', ' ') sentences = tokenizer.tokenize(text) fname = os.path.join(args.dir_out, text_file) with codecs.open(fname, 'wb', 'utf8') as f: for i, s in enumerate(sentences): words = word_tokenize(s) words_str = unicode(' '.join(words)) s_id = '{}_s_{}'.format(text_file.replace('.txt', ''), i) f.write(u'{}\t{}\tNone\n'.format(s_id, words_str))
Add script that converts text files to input for the classifier

The script tokenizes the text and writes sentences in the correct format to file. Used for the Ceneton texts."""Script to convert text file to input for embem classifier.

The script tokenizes the text and writes it to a new file containing:
<sentence id>\t<sentence (tokens separated by space)>\tNone\n

Usage: python txt2ml.py <dir in> <dir out>
"""
import argparse
import nltk.data
from nltk.tokenize import word_tokenize
import codecs
import os


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('dir_in', help='the name of the list containing the '
                                       'selected ceneton titles (xlsx)')
    parser.add_argument('dir_out', help='the name of the output directory')
    args = parser.parse_args()

    tokenizer = nltk.data.load('tokenizers/punkt/dutch.pickle')

    text_files = [t for t in os.listdir(args.dir_in) if t.endswith('.txt')]

    for text_file in text_files:
        print text_file
        text = ''
        fname = os.path.join(args.dir_in, text_file)
        with codecs.open(fname, 'rb', 'utf8') as f:
            text = f.read()
        text = text.replace('\n', ' ')

        sentences = tokenizer.tokenize(text)

        fname = os.path.join(args.dir_out, text_file)
        with codecs.open(fname, 'wb', 'utf8') as f:
            for i, s in enumerate(sentences):
                words = word_tokenize(s)
                words_str = unicode(' '.join(words))
                s_id = '{}_s_{}'.format(text_file.replace('.txt', ''), i)
                f.write(u'{}\t{}\tNone\n'.format(s_id, words_str))
<commit_before><commit_msg>Add script that converts text files to input for the classifier

The script tokenizes the text and writes sentences in the correct format to file. Used for the Ceneton texts.<commit_after>"""Script to convert text file to input for embem classifier.

The script tokenizes the text and writes it to a new file containing:
<sentence id>\t<sentence (tokens separated by space)>\tNone\n

Usage: python txt2ml.py <dir in> <dir out>
"""
import argparse
import nltk.data
from nltk.tokenize import word_tokenize
import codecs
import os


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('dir_in', help='the name of the list containing the '
                                       'selected ceneton titles (xlsx)')
    parser.add_argument('dir_out', help='the name of the output directory')
    args = parser.parse_args()

    tokenizer = nltk.data.load('tokenizers/punkt/dutch.pickle')

    text_files = [t for t in os.listdir(args.dir_in) if t.endswith('.txt')]

    for text_file in text_files:
        print text_file
        text = ''
        fname = os.path.join(args.dir_in, text_file)
        with codecs.open(fname, 'rb', 'utf8') as f:
            text = f.read()
        text = text.replace('\n', ' ')

        sentences = tokenizer.tokenize(text)

        fname = os.path.join(args.dir_out, text_file)
        with codecs.open(fname, 'wb', 'utf8') as f:
            for i, s in enumerate(sentences):
                words = word_tokenize(s)
                words_str = unicode(' '.join(words))
                s_id = '{}_s_{}'.format(text_file.replace('.txt', ''), i)
                f.write(u'{}\t{}\tNone\n'.format(s_id, words_str))
e9207faa45368eeb341555e94bd4fb41e39067df
tests/acceptance/test_running.py
tests/acceptance/test_running.py
import pytest

from tests.acceptance import (
    run_gitlabform,
)


class TestRunning:
    # noinspection PyPep8Naming
    def test__ALL(self, gitlab, group, project, other_project):
        config = f"""
        projects_and_groups:
          '*':
            project_settings:
              request_access_enabled: true
        """

        run_gitlabform(config, "ALL")

        project = gitlab.get_project(f"{group}/{project}")
        assert project["request_access_enabled"] is True

        other_project = gitlab.get_project(f"{group}/{other_project}")
        assert other_project["request_access_enabled"] is True

    # noinspection PyPep8Naming
    def test__ALL_DEFINED(self, gitlab, group, project, other_project):
        group_and_project = f"{group}/{project}"
        config = f"""
        projects_and_groups:
          {group_and_project}:
            project_settings:
              suggestion_commit_message: 'foobar'
        """

        run_gitlabform(config, "ALL_DEFINED")

        project = gitlab.get_project(group_and_project)
        assert project["suggestion_commit_message"] == "foobar"

        group_and_other_project = f"{group}/{other_project}"
        project = gitlab.get_project(group_and_other_project)
        assert project["suggestion_commit_message"] != "foobar"

        config = f"""
        projects_and_groups:
          non/existent_project:
            project_settings:
              suggestion_commit_message: 'foobar'
        """

        with pytest.raises(SystemExit):
            run_gitlabform(config, "ALL_DEFINED")

        config = f"""
        projects_and_groups:
          non_existent_group/*:
            project_settings:
              suggestion_commit_message: 'foobar'
        """

        with pytest.raises(SystemExit):
            run_gitlabform(config, "ALL_DEFINED")
Test running with ALL and ALL_DEFINED
Test running with ALL and ALL_DEFINED
Python
mit
egnyte/gitlabform,egnyte/gitlabform
Test running with ALL and ALL_DEFINED
import pytest

from tests.acceptance import (
    run_gitlabform,
)


class TestRunning:
    # noinspection PyPep8Naming
    def test__ALL(self, gitlab, group, project, other_project):
        config = f"""
        projects_and_groups:
          '*':
            project_settings:
              request_access_enabled: true
        """

        run_gitlabform(config, "ALL")

        project = gitlab.get_project(f"{group}/{project}")
        assert project["request_access_enabled"] is True

        other_project = gitlab.get_project(f"{group}/{other_project}")
        assert other_project["request_access_enabled"] is True

    # noinspection PyPep8Naming
    def test__ALL_DEFINED(self, gitlab, group, project, other_project):
        group_and_project = f"{group}/{project}"
        config = f"""
        projects_and_groups:
          {group_and_project}:
            project_settings:
              suggestion_commit_message: 'foobar'
        """

        run_gitlabform(config, "ALL_DEFINED")

        project = gitlab.get_project(group_and_project)
        assert project["suggestion_commit_message"] == "foobar"

        group_and_other_project = f"{group}/{other_project}"
        project = gitlab.get_project(group_and_other_project)
        assert project["suggestion_commit_message"] != "foobar"

        config = f"""
        projects_and_groups:
          non/existent_project:
            project_settings:
              suggestion_commit_message: 'foobar'
        """

        with pytest.raises(SystemExit):
            run_gitlabform(config, "ALL_DEFINED")

        config = f"""
        projects_and_groups:
          non_existent_group/*:
            project_settings:
              suggestion_commit_message: 'foobar'
        """

        with pytest.raises(SystemExit):
            run_gitlabform(config, "ALL_DEFINED")
<commit_before><commit_msg>Test running with ALL and ALL_DEFINED<commit_after>
import pytest

from tests.acceptance import (
    run_gitlabform,
)


class TestRunning:
    # noinspection PyPep8Naming
    def test__ALL(self, gitlab, group, project, other_project):
        config = f"""
        projects_and_groups:
          '*':
            project_settings:
              request_access_enabled: true
        """

        run_gitlabform(config, "ALL")

        project = gitlab.get_project(f"{group}/{project}")
        assert project["request_access_enabled"] is True

        other_project = gitlab.get_project(f"{group}/{other_project}")
        assert other_project["request_access_enabled"] is True

    # noinspection PyPep8Naming
    def test__ALL_DEFINED(self, gitlab, group, project, other_project):
        group_and_project = f"{group}/{project}"
        config = f"""
        projects_and_groups:
          {group_and_project}:
            project_settings:
              suggestion_commit_message: 'foobar'
        """

        run_gitlabform(config, "ALL_DEFINED")

        project = gitlab.get_project(group_and_project)
        assert project["suggestion_commit_message"] == "foobar"

        group_and_other_project = f"{group}/{other_project}"
        project = gitlab.get_project(group_and_other_project)
        assert project["suggestion_commit_message"] != "foobar"

        config = f"""
        projects_and_groups:
          non/existent_project:
            project_settings:
              suggestion_commit_message: 'foobar'
        """

        with pytest.raises(SystemExit):
            run_gitlabform(config, "ALL_DEFINED")

        config = f"""
        projects_and_groups:
          non_existent_group/*:
            project_settings:
              suggestion_commit_message: 'foobar'
        """

        with pytest.raises(SystemExit):
            run_gitlabform(config, "ALL_DEFINED")
Test running with ALL and ALL_DEFINEDimport pytest from tests.acceptance import ( run_gitlabform, ) class TestRunning: # noinspection PyPep8Naming def test__ALL(self, gitlab, group, project, other_project): config = f""" projects_and_groups: '*': project_settings: request_access_enabled: true """ run_gitlabform(config, "ALL") project = gitlab.get_project(f"{group}/{project}") assert project["request_access_enabled"] is True other_project = gitlab.get_project(f"{group}/{other_project}") assert other_project["request_access_enabled"] is True # noinspection PyPep8Naming def test__ALL_DEFINED(self, gitlab, group, project, other_project): group_and_project = f"{group}/{project}" config = f""" projects_and_groups: {group_and_project}: project_settings: suggestion_commit_message: 'foobar' """ run_gitlabform(config, "ALL_DEFINED") project = gitlab.get_project(group_and_project) assert project["suggestion_commit_message"] == "foobar" group_and_other_project = f"{group}/{other_project}" project = gitlab.get_project(group_and_other_project) assert project["suggestion_commit_message"] != "foobar" config = f""" projects_and_groups: non/existent_project: project_settings: suggestion_commit_message: 'foobar' """ with pytest.raises(SystemExit): run_gitlabform(config, "ALL_DEFINED") config = f""" projects_and_groups: non_existent_group/*: project_settings: suggestion_commit_message: 'foobar' """ with pytest.raises(SystemExit): run_gitlabform(config, "ALL_DEFINED")
<commit_before><commit_msg>Test running with ALL and ALL_DEFINED<commit_after>import pytest from tests.acceptance import ( run_gitlabform, ) class TestRunning: # noinspection PyPep8Naming def test__ALL(self, gitlab, group, project, other_project): config = f""" projects_and_groups: '*': project_settings: request_access_enabled: true """ run_gitlabform(config, "ALL") project = gitlab.get_project(f"{group}/{project}") assert project["request_access_enabled"] is True other_project = gitlab.get_project(f"{group}/{other_project}") assert other_project["request_access_enabled"] is True # noinspection PyPep8Naming def test__ALL_DEFINED(self, gitlab, group, project, other_project): group_and_project = f"{group}/{project}" config = f""" projects_and_groups: {group_and_project}: project_settings: suggestion_commit_message: 'foobar' """ run_gitlabform(config, "ALL_DEFINED") project = gitlab.get_project(group_and_project) assert project["suggestion_commit_message"] == "foobar" group_and_other_project = f"{group}/{other_project}" project = gitlab.get_project(group_and_other_project) assert project["suggestion_commit_message"] != "foobar" config = f""" projects_and_groups: non/existent_project: project_settings: suggestion_commit_message: 'foobar' """ with pytest.raises(SystemExit): run_gitlabform(config, "ALL_DEFINED") config = f""" projects_and_groups: non_existent_group/*: project_settings: suggestion_commit_message: 'foobar' """ with pytest.raises(SystemExit): run_gitlabform(config, "ALL_DEFINED")
f86cca3b06c48f7f3270648147878de8a389cebb
thermof/simulation/initialize.py
thermof/simulation/initialize.py
# Date: September 2017 # Author: Kutay B. Sezginel """ Initialize Lammps simulation using lammps_interface """ import os from lammps_interface.lammps_main import LammpsSimulation from lammps_interface.structure_data import from_CIF def write_lammps_files(parameters): """ Write Lammps files using lammps_interface. Args: - parameters (Parameters): Lammps simulation parameters Returns: - None: Writes Lammps simulation files to simulation directory """ sim = LammpsSimulation(parameters) cell, graph = from_CIF(parameters.cif_file) sim.set_cell(cell) sim.set_graph(graph) sim.split_graph() sim.assign_force_fields() sim.compute_simulation_size() sim.merge_graphs() sim.write_lammps_files(parameters.sim_dir)
Add function to write Lammps files using lammps_interface.
Add function to write Lammps files using lammps_interface.
Python
mit
kbsezginel/tee_mof,kbsezginel/tee_mof
Add function to write Lammps files using lammps_interface.
# Date: September 2017 # Author: Kutay B. Sezginel """ Initialize Lammps simulation using lammps_interface """ import os from lammps_interface.lammps_main import LammpsSimulation from lammps_interface.structure_data import from_CIF def write_lammps_files(parameters): """ Write Lammps files using lammps_interface. Args: - parameters (Parameters): Lammps simulation parameters Returns: - None: Writes Lammps simulation files to simulation directory """ sim = LammpsSimulation(parameters) cell, graph = from_CIF(parameters.cif_file) sim.set_cell(cell) sim.set_graph(graph) sim.split_graph() sim.assign_force_fields() sim.compute_simulation_size() sim.merge_graphs() sim.write_lammps_files(parameters.sim_dir)
<commit_before><commit_msg>Add function to write Lammps files using lammps_interface.<commit_after>
# Date: September 2017 # Author: Kutay B. Sezginel """ Initialize Lammps simulation using lammps_interface """ import os from lammps_interface.lammps_main import LammpsSimulation from lammps_interface.structure_data import from_CIF def write_lammps_files(parameters): """ Write Lammps files using lammps_interface. Args: - parameters (Parameters): Lammps simulation parameters Returns: - None: Writes Lammps simulation files to simulation directory """ sim = LammpsSimulation(parameters) cell, graph = from_CIF(parameters.cif_file) sim.set_cell(cell) sim.set_graph(graph) sim.split_graph() sim.assign_force_fields() sim.compute_simulation_size() sim.merge_graphs() sim.write_lammps_files(parameters.sim_dir)
Add function to write Lammps files using lammps_interface.# Date: September 2017 # Author: Kutay B. Sezginel """ Initialize Lammps simulation using lammps_interface """ import os from lammps_interface.lammps_main import LammpsSimulation from lammps_interface.structure_data import from_CIF def write_lammps_files(parameters): """ Write Lammps files using lammps_interface. Args: - parameters (Parameters): Lammps simulation parameters Returns: - None: Writes Lammps simulation files to simulation directory """ sim = LammpsSimulation(parameters) cell, graph = from_CIF(parameters.cif_file) sim.set_cell(cell) sim.set_graph(graph) sim.split_graph() sim.assign_force_fields() sim.compute_simulation_size() sim.merge_graphs() sim.write_lammps_files(parameters.sim_dir)
<commit_before><commit_msg>Add function to write Lammps files using lammps_interface.<commit_after># Date: September 2017 # Author: Kutay B. Sezginel """ Initialize Lammps simulation using lammps_interface """ import os from lammps_interface.lammps_main import LammpsSimulation from lammps_interface.structure_data import from_CIF def write_lammps_files(parameters): """ Write Lammps files using lammps_interface. Args: - parameters (Parameters): Lammps simulation parameters Returns: - None: Writes Lammps simulation files to simulation directory """ sim = LammpsSimulation(parameters) cell, graph = from_CIF(parameters.cif_file) sim.set_cell(cell) sim.set_graph(graph) sim.split_graph() sim.assign_force_fields() sim.compute_simulation_size() sim.merge_graphs() sim.write_lammps_files(parameters.sim_dir)
ab094c32b1bcea2978796d6118bf43365571f2b7
stacker_blueprints/security_rules.py
stacker_blueprints/security_rules.py
from troposphere.ec2 import SecurityGroupIngress, SecurityGroupEgress

from stacker.blueprints.base import Blueprint


CLASS_MAP = {
    "IngressRules": SecurityGroupIngress,
    "EgressRules": SecurityGroupEgress,
}


class SecurityGroupRules(Blueprint):
    """Used to add Ingress/Egress rules to existing security groups.

    This blueprint uses two variables:

    IngressRules:
        A dict with keys of the virtual titles for each rule, and with the
        value being a dict of the parameters taken directly from:
        http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-security-group-ingress.html

    EgressRules:
        A dict with keys of the virtual titles for each rule, and with the
        value being a dict of the parameters taken directly from:
        http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-security-group-egress.html

    An example:

    name: mySecurityRules
    class_path: stacker_blueprints.security_rules.SecurityGroupRules
    variables:
      IngressRules:
        All80ToWebserverGroup:
          CidrIp: 0.0.0.0/0
          FromPort: 80
          ToPort: 80
          GroupId: ${output WebserverStack::SecurityGroup}
          IpProtocol: tcp
    """

    VARIABLES = {
        "IngressRules": {
            "type": dict,
            "description": "A dict of ingress rules where the key is the "
                           "name of the rule to create, and the value is "
                           "a dictionary of keys/values based on the "
                           "attributes of the "
                           "troposphere.ec2.SecurityGroupIngress class.",
        },
        "EgressRules": {
            "type": dict,
            "description": "A dict of egress rules where the key is the "
                           "name of the rule to create, and the value is "
                           "a dictionary of keys/values based on the "
                           "attributes of the "
                           "troposphere.ec2.SecurityGroupEgress class.",
        }
    }

    def create_template(self):
        t = self.template
        variables = self.get_variables()
        for rule_type, rule_class in CLASS_MAP.items():
            for rule_title, rule_attrs in variables[rule_type].items():
                t.add_resource(rule_class.from_dict(rule_title, rule_attrs))
Add new, low-level security group rule blueprint
Add new, low-level security group rule blueprint This should make it super simple to have a stack that maintains security group rules on security groups built in other stacks. Mostly useful for rules that glue two stacks together (like adding a rule to allow instances in your webserver stack to talk to your RDS database on the right port)
Python
bsd-2-clause
remind101/stacker_blueprints,remind101/stacker_blueprints
Add new, low-level security group rule blueprint This should make it super simple to have a stack that maintains security group rules on security groups built in other stacks. Mostly useful for rules that glue two stacks together (like adding a rule to allow instances in your webserver stack to talk to your RDS database on the right port)
from troposphere.ec2 import SecurityGroupIngress, SecurityGroupEgress

from stacker.blueprints.base import Blueprint


CLASS_MAP = {
    "IngressRules": SecurityGroupIngress,
    "EgressRules": SecurityGroupEgress,
}


class SecurityGroupRules(Blueprint):
    """Used to add Ingress/Egress rules to existing security groups.

    This blueprint uses two variables:

    IngressRules:
        A dict with keys of the virtual titles for each rule, and with the
        value being a dict of the parameters taken directly from:
        http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-security-group-ingress.html

    EgressRules:
        A dict with keys of the virtual titles for each rule, and with the
        value being a dict of the parameters taken directly from:
        http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-security-group-egress.html

    An example:

    name: mySecurityRules
    class_path: stacker_blueprints.security_rules.SecurityGroupRules
    variables:
      IngressRules:
        All80ToWebserverGroup:
          CidrIp: 0.0.0.0/0
          FromPort: 80
          ToPort: 80
          GroupId: ${output WebserverStack::SecurityGroup}
          IpProtocol: tcp
    """

    VARIABLES = {
        "IngressRules": {
            "type": dict,
            "description": "A dict of ingress rules where the key is the "
                           "name of the rule to create, and the value is "
                           "a dictionary of keys/values based on the "
                           "attributes of the "
                           "troposphere.ec2.SecurityGroupIngress class.",
        },
        "EgressRules": {
            "type": dict,
            "description": "A dict of egress rules where the key is the "
                           "name of the rule to create, and the value is "
                           "a dictionary of keys/values based on the "
                           "attributes of the "
                           "troposphere.ec2.SecurityGroupEgress class.",
        }
    }

    def create_template(self):
        t = self.template
        variables = self.get_variables()
        for rule_type, rule_class in CLASS_MAP.items():
            for rule_title, rule_attrs in variables[rule_type].items():
                t.add_resource(rule_class.from_dict(rule_title, rule_attrs))
<commit_before><commit_msg>Add new, low-level security group rule blueprint This should make it super simple to have a stack that maintains security group rules on security groups built in other stacks. Mostly useful for rules that glue two stacks together (like adding a rule to allow instances in your webserver stack to talk to your RDS database on the right port)<commit_after>
from troposphere.ec2 import SecurityGroupIngress, SecurityGroupEgress

from stacker.blueprints.base import Blueprint


CLASS_MAP = {
    "IngressRules": SecurityGroupIngress,
    "EgressRules": SecurityGroupEgress,
}


class SecurityGroupRules(Blueprint):
    """Used to add Ingress/Egress rules to existing security groups.

    This blueprint uses two variables:

    IngressRules:
        A dict with keys of the virtual titles for each rule, and with the
        value being a dict of the parameters taken directly from:
        http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-security-group-ingress.html

    EgressRules:
        A dict with keys of the virtual titles for each rule, and with the
        value being a dict of the parameters taken directly from:
        http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-security-group-egress.html

    An example:

    name: mySecurityRules
    class_path: stacker_blueprints.security_rules.SecurityGroupRules
    variables:
      IngressRules:
        All80ToWebserverGroup:
          CidrIp: 0.0.0.0/0
          FromPort: 80
          ToPort: 80
          GroupId: ${output WebserverStack::SecurityGroup}
          IpProtocol: tcp
    """

    VARIABLES = {
        "IngressRules": {
            "type": dict,
            "description": "A dict of ingress rules where the key is the "
                           "name of the rule to create, and the value is "
                           "a dictionary of keys/values based on the "
                           "attributes of the "
                           "troposphere.ec2.SecurityGroupIngress class.",
        },
        "EgressRules": {
            "type": dict,
            "description": "A dict of egress rules where the key is the "
                           "name of the rule to create, and the value is "
                           "a dictionary of keys/values based on the "
                           "attributes of the "
                           "troposphere.ec2.SecurityGroupEgress class.",
        }
    }

    def create_template(self):
        t = self.template
        variables = self.get_variables()
        for rule_type, rule_class in CLASS_MAP.items():
            for rule_title, rule_attrs in variables[rule_type].items():
                t.add_resource(rule_class.from_dict(rule_title, rule_attrs))
Add new, low-level security group rule blueprint This should make it super simple to have a stack that maintains security group rules on security groups built in other stacks. Mostly useful for rules that glue two stacks together (like adding a rule to allow instances in your webserver stack to talk to your RDS database on the right port)from troposphere.ec2 import SecurityGroupIngress, SecurityGroupEgress

from stacker.blueprints.base import Blueprint


CLASS_MAP = {
    "IngressRules": SecurityGroupIngress,
    "EgressRules": SecurityGroupEgress,
}


class SecurityGroupRules(Blueprint):
    """Used to add Ingress/Egress rules to existing security groups.

    This blueprint uses two variables:

    IngressRules:
        A dict with keys of the virtual titles for each rule, and with the
        value being a dict of the parameters taken directly from:
        http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-security-group-ingress.html

    EgressRules:
        A dict with keys of the virtual titles for each rule, and with the
        value being a dict of the parameters taken directly from:
        http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-security-group-egress.html

    An example:

    name: mySecurityRules
    class_path: stacker_blueprints.security_rules.SecurityGroupRules
    variables:
      IngressRules:
        All80ToWebserverGroup:
          CidrIp: 0.0.0.0/0
          FromPort: 80
          ToPort: 80
          GroupId: ${output WebserverStack::SecurityGroup}
          IpProtocol: tcp
    """

    VARIABLES = {
        "IngressRules": {
            "type": dict,
            "description": "A dict of ingress rules where the key is the "
                           "name of the rule to create, and the value is "
                           "a dictionary of keys/values based on the "
                           "attributes of the "
                           "troposphere.ec2.SecurityGroupIngress class.",
        },
        "EgressRules": {
            "type": dict,
            "description": "A dict of egress rules where the key is the "
                           "name of the rule to create, and the value is "
                           "a dictionary of keys/values based on the "
                           "attributes of the "
                           "troposphere.ec2.SecurityGroupEgress class.",
        }
    }

    def create_template(self):
        t = self.template
        variables = self.get_variables()
        for rule_type, rule_class in CLASS_MAP.items():
            for rule_title, rule_attrs in variables[rule_type].items():
                t.add_resource(rule_class.from_dict(rule_title, rule_attrs))
<commit_before><commit_msg>Add new, low-level security group rule blueprint This should make it super simple to have a stack that maintains security group rules on security groups built in other stacks. Mostly useful for rules that glue two stacks together (like adding a rule to allow instances in your webserver stack to talk to your RDS database on the right port)<commit_after>from troposphere.ec2 import SecurityGroupIngress, SecurityGroupEgress

from stacker.blueprints.base import Blueprint


CLASS_MAP = {
    "IngressRules": SecurityGroupIngress,
    "EgressRules": SecurityGroupEgress,
}


class SecurityGroupRules(Blueprint):
    """Used to add Ingress/Egress rules to existing security groups.

    This blueprint uses two variables:

    IngressRules:
        A dict with keys of the virtual titles for each rule, and with the
        value being a dict of the parameters taken directly from:
        http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-security-group-ingress.html

    EgressRules:
        A dict with keys of the virtual titles for each rule, and with the
        value being a dict of the parameters taken directly from:
        http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-security-group-egress.html

    An example:

    name: mySecurityRules
    class_path: stacker_blueprints.security_rules.SecurityGroupRules
    variables:
      IngressRules:
        All80ToWebserverGroup:
          CidrIp: 0.0.0.0/0
          FromPort: 80
          ToPort: 80
          GroupId: ${output WebserverStack::SecurityGroup}
          IpProtocol: tcp
    """

    VARIABLES = {
        "IngressRules": {
            "type": dict,
            "description": "A dict of ingress rules where the key is the "
                           "name of the rule to create, and the value is "
                           "a dictionary of keys/values based on the "
                           "attributes of the "
                           "troposphere.ec2.SecurityGroupIngress class.",
        },
        "EgressRules": {
            "type": dict,
            "description": "A dict of egress rules where the key is the "
                           "name of the rule to create, and the value is "
                           "a dictionary of keys/values based on the "
                           "attributes of the "
                           "troposphere.ec2.SecurityGroupEgress class.",
        }
    }

    def create_template(self):
        t = self.template
        variables = self.get_variables()
        for rule_type, rule_class in CLASS_MAP.items():
            for rule_title, rule_attrs in variables[rule_type].items():
                t.add_resource(rule_class.from_dict(rule_title, rule_attrs))
dbe5a653785042c8a6051b331675235d1c7c60a2
contrib_bots/bots/help/test_help.py
contrib_bots/bots/help/test_help.py
#!/usr/bin/env python from __future__ import absolute_import from __future__ import print_function import os import sys our_dir = os.path.dirname(os.path.abspath(__file__)) # For dev setups, we can find the API in the repo itself. if os.path.exists(os.path.join(our_dir, '..')): sys.path.insert(0, '..') from bots_test_lib import BotTestCase class TestHelpBot(BotTestCase): bot_name = "help" def test_bot(self): self.assert_bot_output( {'content': "help", 'type': "private", 'sender_email': "foo"}, "Info on Zulip can be found here:\nhttps://github.com/zulip/zulip" ) self.assert_bot_output( {'content': "Hi, my name is abc", 'type': "stream", 'display_recipient': "foo", 'subject': "foo"}, "Info on Zulip can be found here:\nhttps://github.com/zulip/zulip" ) self.assert_bot_output( {'content': "", 'type': "stream", 'display_recipient': "foo", 'subject': "foo"}, "Info on Zulip can be found here:\nhttps://github.com/zulip/zulip" )
Add tests for help bot in contrib_bots.
testsuite: Add tests for help bot in contrib_bots. Add test file 'test_help.py'.
Python
apache-2.0
dhcrzf/zulip,Galexrt/zulip,punchagan/zulip,Galexrt/zulip,andersk/zulip,rht/zulip,andersk/zulip,brockwhittaker/zulip,mahim97/zulip,shubhamdhama/zulip,mahim97/zulip,shubhamdhama/zulip,amanharitsh123/zulip,hackerkid/zulip,hackerkid/zulip,verma-varsha/zulip,timabbott/zulip,zulip/zulip,vabs22/zulip,kou/zulip,jrowan/zulip,kou/zulip,hackerkid/zulip,dhcrzf/zulip,verma-varsha/zulip,timabbott/zulip,dhcrzf/zulip,kou/zulip,jackrzhang/zulip,timabbott/zulip,showell/zulip,rishig/zulip,brainwane/zulip,Galexrt/zulip,vaidap/zulip,punchagan/zulip,dhcrzf/zulip,eeshangarg/zulip,brockwhittaker/zulip,jackrzhang/zulip,shubhamdhama/zulip,jrowan/zulip,hackerkid/zulip,brainwane/zulip,jrowan/zulip,andersk/zulip,tommyip/zulip,rishig/zulip,andersk/zulip,hackerkid/zulip,Galexrt/zulip,jrowan/zulip,amanharitsh123/zulip,vabs22/zulip,showell/zulip,eeshangarg/zulip,tommyip/zulip,brainwane/zulip,mahim97/zulip,amanharitsh123/zulip,dhcrzf/zulip,verma-varsha/zulip,synicalsyntax/zulip,synicalsyntax/zulip,rht/zulip,vabs22/zulip,zulip/zulip,eeshangarg/zulip,showell/zulip,punchagan/zulip,kou/zulip,rishig/zulip,mahim97/zulip,jackrzhang/zulip,rht/zulip,hackerkid/zulip,synicalsyntax/zulip,eeshangarg/zulip,brockwhittaker/zulip,tommyip/zulip,andersk/zulip,amanharitsh123/zulip,kou/zulip,vaidap/zulip,andersk/zulip,brockwhittaker/zulip,jackrzhang/zulip,shubhamdhama/zulip,brainwane/zulip,zulip/zulip,rishig/zulip,dhcrzf/zulip,zulip/zulip,vaidap/zulip,showell/zulip,rht/zulip,andersk/zulip,rishig/zulip,brainwane/zulip,rht/zulip,rishig/zulip,jackrzhang/zulip,rht/zulip,punchagan/zulip,timabbott/zulip,timabbott/zulip,eeshangarg/zulip,brockwhittaker/zulip,brockwhittaker/zulip,zulip/zulip,vaidap/zulip,showell/zulip,showell/zulip,kou/zulip,verma-varsha/zulip,vaidap/zulip,tommyip/zulip,synicalsyntax/zulip,Galexrt/zulip,zulip/zulip,tommyip/zulip,kou/zulip,mahim97/zulip,Galexrt/zulip,punchagan/zulip,tommyip/zulip,eeshangarg/zulip,rishig/zulip,tommyip/zulip,hackerkid/zulip,punchagan/zulip,jrowan/zulip,amanharitsh123/zulip,synicalsyntax/zulip,verma-varsha/zulip,punchagan/zulip,jackrzhang/zulip,vabs22/zulip,jackrzhang/zulip,synicalsyntax/zulip,Galexrt/zulip,synicalsyntax/zulip,vabs22/zulip,dhcrzf/zulip,brainwane/zulip,zulip/zulip,shubhamdhama/zulip,timabbott/zulip,showell/zulip,shubhamdhama/zulip,mahim97/zulip,timabbott/zulip,vabs22/zulip,vaidap/zulip,shubhamdhama/zulip,amanharitsh123/zulip,eeshangarg/zulip,jrowan/zulip,brainwane/zulip,rht/zulip,verma-varsha/zulip
testsuite: Add tests for help bot in contrib_bots. Add test file 'test_help.py'.
#!/usr/bin/env python from __future__ import absolute_import from __future__ import print_function import os import sys our_dir = os.path.dirname(os.path.abspath(__file__)) # For dev setups, we can find the API in the repo itself. if os.path.exists(os.path.join(our_dir, '..')): sys.path.insert(0, '..') from bots_test_lib import BotTestCase class TestHelpBot(BotTestCase): bot_name = "help" def test_bot(self): self.assert_bot_output( {'content': "help", 'type': "private", 'sender_email': "foo"}, "Info on Zulip can be found here:\nhttps://github.com/zulip/zulip" ) self.assert_bot_output( {'content': "Hi, my name is abc", 'type': "stream", 'display_recipient': "foo", 'subject': "foo"}, "Info on Zulip can be found here:\nhttps://github.com/zulip/zulip" ) self.assert_bot_output( {'content': "", 'type': "stream", 'display_recipient': "foo", 'subject': "foo"}, "Info on Zulip can be found here:\nhttps://github.com/zulip/zulip" )
<commit_before><commit_msg>testsuite: Add tests for help bot in contrib_bots. Add test file 'test_help.py'.<commit_after>
#!/usr/bin/env python from __future__ import absolute_import from __future__ import print_function import os import sys our_dir = os.path.dirname(os.path.abspath(__file__)) # For dev setups, we can find the API in the repo itself. if os.path.exists(os.path.join(our_dir, '..')): sys.path.insert(0, '..') from bots_test_lib import BotTestCase class TestHelpBot(BotTestCase): bot_name = "help" def test_bot(self): self.assert_bot_output( {'content': "help", 'type': "private", 'sender_email': "foo"}, "Info on Zulip can be found here:\nhttps://github.com/zulip/zulip" ) self.assert_bot_output( {'content': "Hi, my name is abc", 'type': "stream", 'display_recipient': "foo", 'subject': "foo"}, "Info on Zulip can be found here:\nhttps://github.com/zulip/zulip" ) self.assert_bot_output( {'content': "", 'type': "stream", 'display_recipient': "foo", 'subject': "foo"}, "Info on Zulip can be found here:\nhttps://github.com/zulip/zulip" )
testsuite: Add tests for help bot in contrib_bots. Add test file 'test_help.py'.#!/usr/bin/env python from __future__ import absolute_import from __future__ import print_function import os import sys our_dir = os.path.dirname(os.path.abspath(__file__)) # For dev setups, we can find the API in the repo itself. if os.path.exists(os.path.join(our_dir, '..')): sys.path.insert(0, '..') from bots_test_lib import BotTestCase class TestHelpBot(BotTestCase): bot_name = "help" def test_bot(self): self.assert_bot_output( {'content': "help", 'type': "private", 'sender_email': "foo"}, "Info on Zulip can be found here:\nhttps://github.com/zulip/zulip" ) self.assert_bot_output( {'content': "Hi, my name is abc", 'type': "stream", 'display_recipient': "foo", 'subject': "foo"}, "Info on Zulip can be found here:\nhttps://github.com/zulip/zulip" ) self.assert_bot_output( {'content': "", 'type': "stream", 'display_recipient': "foo", 'subject': "foo"}, "Info on Zulip can be found here:\nhttps://github.com/zulip/zulip" )
<commit_before><commit_msg>testsuite: Add tests for help bot in contrib_bots. Add test file 'test_help.py'.<commit_after>#!/usr/bin/env python from __future__ import absolute_import from __future__ import print_function import os import sys our_dir = os.path.dirname(os.path.abspath(__file__)) # For dev setups, we can find the API in the repo itself. if os.path.exists(os.path.join(our_dir, '..')): sys.path.insert(0, '..') from bots_test_lib import BotTestCase class TestHelpBot(BotTestCase): bot_name = "help" def test_bot(self): self.assert_bot_output( {'content': "help", 'type': "private", 'sender_email': "foo"}, "Info on Zulip can be found here:\nhttps://github.com/zulip/zulip" ) self.assert_bot_output( {'content': "Hi, my name is abc", 'type': "stream", 'display_recipient': "foo", 'subject': "foo"}, "Info on Zulip can be found here:\nhttps://github.com/zulip/zulip" ) self.assert_bot_output( {'content': "", 'type': "stream", 'display_recipient': "foo", 'subject': "foo"}, "Info on Zulip can be found here:\nhttps://github.com/zulip/zulip" )
cd088415d0fd652c3e485fcd3b6e16032f09d707
test/client/local_recognizer_test.py
test/client/local_recognizer_test.py
import unittest import os from speech_recognition import WavFile from mycroft.client.speech.listener import RecognizerLoop __author__ = 'seanfitz' DATA_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), "data") class LocalRecognizerTest(unittest.TestCase): def setUp(self): self.recognizer = RecognizerLoop.create_mycroft_recognizer(16000, "en-us") def testRecognizerWrapper(self): source = WavFile(os.path.join(DATA_DIR, "hey_mycroft.wav")) with source as audio: hyp = self.recognizer.transcribe(audio.stream.read()) assert self.recognizer.key_phrase in hyp.hypstr.lower() source = WavFile(os.path.join(DATA_DIR, "mycroft.wav")) with source as audio: hyp = self.recognizer.transcribe(audio.stream.read()) assert self.recognizer.key_phrase in hyp.hypstr.lower() def testRecognitionInLongerUtterance(self): source = WavFile(os.path.join(DATA_DIR, "weather_mycroft.wav")) with source as audio: hyp = self.recognizer.transcribe(audio.stream.read()) assert self.recognizer.key_phrase in hyp.hypstr.lower()
import unittest import os from speech_recognition import WavFile from mycroft.client.speech.listener import RecognizerLoop __author__ = 'seanfitz' DATA_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), "data") class LocalRecognizerTest(unittest.TestCase): def setUp(self): rl = RecognizerLoop() self.recognizer = RecognizerLoop.create_mycroft_recognizer(rl, 16000, "en-us") def testRecognizerWrapper(self): source = WavFile(os.path.join(DATA_DIR, "hey_mycroft.wav")) with source as audio: hyp = self.recognizer.transcribe(audio.stream.read()) assert self.recognizer.key_phrase in hyp.hypstr.lower() source = WavFile(os.path.join(DATA_DIR, "mycroft.wav")) with source as audio: hyp = self.recognizer.transcribe(audio.stream.read()) assert self.recognizer.key_phrase in hyp.hypstr.lower() def testRecognitionInLongerUtterance(self): source = WavFile(os.path.join(DATA_DIR, "weather_mycroft.wav")) with source as audio: hyp = self.recognizer.transcribe(audio.stream.read()) assert self.recognizer.key_phrase in hyp.hypstr.lower()
Fix init of local recognizer
Fix init of local recognizer
Python
apache-2.0
linuxipho/mycroft-core,Dark5ide/mycroft-core,forslund/mycroft-core,linuxipho/mycroft-core,forslund/mycroft-core,aatchison/mycroft-core,aatchison/mycroft-core,Dark5ide/mycroft-core,MycroftAI/mycroft-core,MycroftAI/mycroft-core
import unittest import os from speech_recognition import WavFile from mycroft.client.speech.listener import RecognizerLoop __author__ = 'seanfitz' DATA_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), "data") class LocalRecognizerTest(unittest.TestCase): def setUp(self): self.recognizer = RecognizerLoop.create_mycroft_recognizer(16000, "en-us") def testRecognizerWrapper(self): source = WavFile(os.path.join(DATA_DIR, "hey_mycroft.wav")) with source as audio: hyp = self.recognizer.transcribe(audio.stream.read()) assert self.recognizer.key_phrase in hyp.hypstr.lower() source = WavFile(os.path.join(DATA_DIR, "mycroft.wav")) with source as audio: hyp = self.recognizer.transcribe(audio.stream.read()) assert self.recognizer.key_phrase in hyp.hypstr.lower() def testRecognitionInLongerUtterance(self): source = WavFile(os.path.join(DATA_DIR, "weather_mycroft.wav")) with source as audio: hyp = self.recognizer.transcribe(audio.stream.read()) assert self.recognizer.key_phrase in hyp.hypstr.lower() Fix init of local recognizer
import unittest import os from speech_recognition import WavFile from mycroft.client.speech.listener import RecognizerLoop __author__ = 'seanfitz' DATA_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), "data") class LocalRecognizerTest(unittest.TestCase): def setUp(self): rl = RecognizerLoop() self.recognizer = RecognizerLoop.create_mycroft_recognizer(rl, 16000, "en-us") def testRecognizerWrapper(self): source = WavFile(os.path.join(DATA_DIR, "hey_mycroft.wav")) with source as audio: hyp = self.recognizer.transcribe(audio.stream.read()) assert self.recognizer.key_phrase in hyp.hypstr.lower() source = WavFile(os.path.join(DATA_DIR, "mycroft.wav")) with source as audio: hyp = self.recognizer.transcribe(audio.stream.read()) assert self.recognizer.key_phrase in hyp.hypstr.lower() def testRecognitionInLongerUtterance(self): source = WavFile(os.path.join(DATA_DIR, "weather_mycroft.wav")) with source as audio: hyp = self.recognizer.transcribe(audio.stream.read()) assert self.recognizer.key_phrase in hyp.hypstr.lower()
<commit_before>import unittest import os from speech_recognition import WavFile from mycroft.client.speech.listener import RecognizerLoop __author__ = 'seanfitz' DATA_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), "data") class LocalRecognizerTest(unittest.TestCase): def setUp(self): self.recognizer = RecognizerLoop.create_mycroft_recognizer(16000, "en-us") def testRecognizerWrapper(self): source = WavFile(os.path.join(DATA_DIR, "hey_mycroft.wav")) with source as audio: hyp = self.recognizer.transcribe(audio.stream.read()) assert self.recognizer.key_phrase in hyp.hypstr.lower() source = WavFile(os.path.join(DATA_DIR, "mycroft.wav")) with source as audio: hyp = self.recognizer.transcribe(audio.stream.read()) assert self.recognizer.key_phrase in hyp.hypstr.lower() def testRecognitionInLongerUtterance(self): source = WavFile(os.path.join(DATA_DIR, "weather_mycroft.wav")) with source as audio: hyp = self.recognizer.transcribe(audio.stream.read()) assert self.recognizer.key_phrase in hyp.hypstr.lower() <commit_msg>Fix init of local recognizer<commit_after>
import unittest import os from speech_recognition import WavFile from mycroft.client.speech.listener import RecognizerLoop __author__ = 'seanfitz' DATA_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), "data") class LocalRecognizerTest(unittest.TestCase): def setUp(self): rl = RecognizerLoop() self.recognizer = RecognizerLoop.create_mycroft_recognizer(rl, 16000, "en-us") def testRecognizerWrapper(self): source = WavFile(os.path.join(DATA_DIR, "hey_mycroft.wav")) with source as audio: hyp = self.recognizer.transcribe(audio.stream.read()) assert self.recognizer.key_phrase in hyp.hypstr.lower() source = WavFile(os.path.join(DATA_DIR, "mycroft.wav")) with source as audio: hyp = self.recognizer.transcribe(audio.stream.read()) assert self.recognizer.key_phrase in hyp.hypstr.lower() def testRecognitionInLongerUtterance(self): source = WavFile(os.path.join(DATA_DIR, "weather_mycroft.wav")) with source as audio: hyp = self.recognizer.transcribe(audio.stream.read()) assert self.recognizer.key_phrase in hyp.hypstr.lower()
import unittest import os from speech_recognition import WavFile from mycroft.client.speech.listener import RecognizerLoop __author__ = 'seanfitz' DATA_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), "data") class LocalRecognizerTest(unittest.TestCase): def setUp(self): self.recognizer = RecognizerLoop.create_mycroft_recognizer(16000, "en-us") def testRecognizerWrapper(self): source = WavFile(os.path.join(DATA_DIR, "hey_mycroft.wav")) with source as audio: hyp = self.recognizer.transcribe(audio.stream.read()) assert self.recognizer.key_phrase in hyp.hypstr.lower() source = WavFile(os.path.join(DATA_DIR, "mycroft.wav")) with source as audio: hyp = self.recognizer.transcribe(audio.stream.read()) assert self.recognizer.key_phrase in hyp.hypstr.lower() def testRecognitionInLongerUtterance(self): source = WavFile(os.path.join(DATA_DIR, "weather_mycroft.wav")) with source as audio: hyp = self.recognizer.transcribe(audio.stream.read()) assert self.recognizer.key_phrase in hyp.hypstr.lower() Fix init of local recognizerimport unittest import os from speech_recognition import WavFile from mycroft.client.speech.listener import RecognizerLoop __author__ = 'seanfitz' DATA_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), "data") class LocalRecognizerTest(unittest.TestCase): def setUp(self): rl = RecognizerLoop() self.recognizer = RecognizerLoop.create_mycroft_recognizer(rl, 16000, "en-us") def testRecognizerWrapper(self): source = WavFile(os.path.join(DATA_DIR, "hey_mycroft.wav")) with source as audio: hyp = self.recognizer.transcribe(audio.stream.read()) assert self.recognizer.key_phrase in hyp.hypstr.lower() source = WavFile(os.path.join(DATA_DIR, "mycroft.wav")) with source as audio: hyp = self.recognizer.transcribe(audio.stream.read()) assert self.recognizer.key_phrase in hyp.hypstr.lower() def testRecognitionInLongerUtterance(self): source = WavFile(os.path.join(DATA_DIR, "weather_mycroft.wav")) with source as audio: hyp = self.recognizer.transcribe(audio.stream.read()) assert self.recognizer.key_phrase in hyp.hypstr.lower()
<commit_before>import unittest import os from speech_recognition import WavFile from mycroft.client.speech.listener import RecognizerLoop __author__ = 'seanfitz' DATA_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), "data") class LocalRecognizerTest(unittest.TestCase): def setUp(self): self.recognizer = RecognizerLoop.create_mycroft_recognizer(16000, "en-us") def testRecognizerWrapper(self): source = WavFile(os.path.join(DATA_DIR, "hey_mycroft.wav")) with source as audio: hyp = self.recognizer.transcribe(audio.stream.read()) assert self.recognizer.key_phrase in hyp.hypstr.lower() source = WavFile(os.path.join(DATA_DIR, "mycroft.wav")) with source as audio: hyp = self.recognizer.transcribe(audio.stream.read()) assert self.recognizer.key_phrase in hyp.hypstr.lower() def testRecognitionInLongerUtterance(self): source = WavFile(os.path.join(DATA_DIR, "weather_mycroft.wav")) with source as audio: hyp = self.recognizer.transcribe(audio.stream.read()) assert self.recognizer.key_phrase in hyp.hypstr.lower() <commit_msg>Fix init of local recognizer<commit_after>import unittest import os from speech_recognition import WavFile from mycroft.client.speech.listener import RecognizerLoop __author__ = 'seanfitz' DATA_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), "data") class LocalRecognizerTest(unittest.TestCase): def setUp(self): rl = RecognizerLoop() self.recognizer = RecognizerLoop.create_mycroft_recognizer(rl, 16000, "en-us") def testRecognizerWrapper(self): source = WavFile(os.path.join(DATA_DIR, "hey_mycroft.wav")) with source as audio: hyp = self.recognizer.transcribe(audio.stream.read()) assert self.recognizer.key_phrase in hyp.hypstr.lower() source = WavFile(os.path.join(DATA_DIR, "mycroft.wav")) with source as audio: hyp = self.recognizer.transcribe(audio.stream.read()) assert self.recognizer.key_phrase in hyp.hypstr.lower() def testRecognitionInLongerUtterance(self): source = WavFile(os.path.join(DATA_DIR, "weather_mycroft.wav")) with source as audio: hyp = self.recognizer.transcribe(audio.stream.read()) assert self.recognizer.key_phrase in hyp.hypstr.lower()
90def43806ca31e449ab70b1daa09fa354b7fd74
bin/generate_test_queries.py
bin/generate_test_queries.py
#!/usr/bin/env python import os import sys import time try: import wikipedia except ImportError: print('Queries generation requires wikipedia package. \n' 'You can install it by running "pip install wikipedia"') exit() # If user in bin directory set correct path to queries if os.getcwd().endswith('bin'): save_folder = os.getcwd().rstrip('bin') + 'test/queries' else: save_folder = os.getcwd() + '/test/queries' if len(sys.argv) != 3: print('Please run script by format: python bin/generate_test_queries.py en 100') exit() language = sys.argv[1] queries_num = int(sys.argv[2]) wikipedia.set_lang(language) queries = wikipedia.random(queries_num) with open('{}/{}_{}.txt'.format(save_folder, language, int(time.time())), 'w') as file: for query in queries: if sys.version[0] == '3': file.write('%s\n' % query) else: file.write('%s\n' % query.encode('utf-8')) print('Done.')
Create script for generation test queries
Create script for generation test queries
Python
lgpl-2.1
daminisatya/loklak_server,PiotrKowalski/loklak_server,singhpratyush/loklak_server,smokingwheels/loklak_server_frontend_hdd,arashahmadi/sensemi_ai,singhpratyush/loklak_server,smsunarto/loklak_server,loklak/loklak_server,sudheesh001/loklak_server,singhpratyush/loklak_server,smsunarto/loklak_server,singhpratyush/loklak_server,DravitLochan/susi_server,YagoGG/loklak_server,fazeem84/susi_server,singhpratyush/loklak_server,singhpratyush/loklak_server,daminisatya/loklak_server,fazeem84/susi_server,DravitLochan/susi_server,sudheesh001/loklak_server,daminisatya/loklak_server,loklak/loklak_server,kavithaenair/apps.loklak.org,sudheesh001/loklak_server,PiotrKowalski/loklak_server,smsunarto/loklak_server,sudheesh001/loklak_server,PiotrKowalski/loklak_server,smsunarto/loklak_server,loklak/loklak_server,smokingwheels/loklak_server_frontend_hdd,PiotrKowalski/loklak_server,sudheesh001/loklak_server,shivenmian/loklak_server,smsunarto/loklak_server,djmgit/apps.loklak.org,arashahmadi/sensemi_ai,YagoGG/loklak_server,smokingwheels/loklak_server_frontend_hdd,fazeem84/susi_server,sudheesh001/loklak_server,shivenmian/loklak_server,smokingwheels/loklak_server_frontend_hdd,shivenmian/loklak_server,smokingwheels/loklak_server_frontend_hdd,djmgit/apps.loklak.org,loklak/loklak_server,kavithaenair/apps.loklak.org,singhpratyush/loklak_server,loklak/loklak_server,shivenmian/loklak_server,DravitLochan/susi_server,arashahmadi/sensemi_ai,sudheesh001/loklak_server,YagoGG/loklak_server,smokingwheels/loklak_server_frontend_hdd,loklak/loklak_server,daminisatya/loklak_server,kavithaenair/apps.loklak.org,smsunarto/loklak_server,smokingwheels/loklak_server_frontend_hdd,PiotrKowalski/loklak_server,daminisatya/loklak_server,loklak/loklak_server,smsunarto/loklak_server,djmgit/apps.loklak.org,YagoGG/loklak_server,YagoGG/loklak_server,PiotrKowalski/loklak_server,fazeem84/susi_server,YagoGG/loklak_server,daminisatya/loklak_server,daminisatya/loklak_server,shivenmian/loklak_server,kavithaenair/apps.loklak.org,arashahmadi/sensemi_ai,djmgit/apps.loklak.org,DravitLochan/susi_server,shivenmian/loklak_server,PiotrKowalski/loklak_server,shivenmian/loklak_server,YagoGG/loklak_server
Create script for generation test queries
#!/usr/bin/env python import os import sys import time try: import wikipedia except ImportError: print('Queries generation requires wikipedia package. \n' 'You can install it by running "pip install wikipedia"') exit() # If user in bin directory set correct path to queries if os.getcwd().endswith('bin'): save_folder = os.getcwd().rstrip('bin') + 'test/queries' else: save_folder = os.getcwd() + '/test/queries' if len(sys.argv) != 3: print('Please run script by format: python bin/generate_test_queries.py en 100') exit() language = sys.argv[1] queries_num = int(sys.argv[2]) wikipedia.set_lang(language) queries = wikipedia.random(queries_num) with open('{}/{}_{}.txt'.format(save_folder, language, int(time.time())), 'w') as file: for query in queries: if sys.version[0] == '3': file.write('%s\n' % query) else: file.write('%s\n' % query.encode('utf-8')) print('Done.')
<commit_before><commit_msg>Create script for generation test queries<commit_after>
#!/usr/bin/env python import os import sys import time try: import wikipedia except ImportError: print('Queries generation requires wikipedia package. \n' 'You can install it by running "pip install wikipedia"') exit() # If user in bin directory set correct path to queries if os.getcwd().endswith('bin'): save_folder = os.getcwd().rstrip('bin') + 'test/queries' else: save_folder = os.getcwd() + '/test/queries' if len(sys.argv) != 3: print('Please run script by format: python bin/generate_test_queries.py en 100') exit() language = sys.argv[1] queries_num = int(sys.argv[2]) wikipedia.set_lang(language) queries = wikipedia.random(queries_num) with open('{}/{}_{}.txt'.format(save_folder, language, int(time.time())), 'w') as file: for query in queries: if sys.version[0] == '3': file.write('%s\n' % query) else: file.write('%s\n' % query.encode('utf-8')) print('Done.')
Create script for generation test queries#!/usr/bin/env python import os import sys import time try: import wikipedia except ImportError: print('Queries generation requires wikipedia package. \n' 'You can install it by running "pip install wikipedia"') exit() # If user in bin directory set correct path to queries if os.getcwd().endswith('bin'): save_folder = os.getcwd().rstrip('bin') + 'test/queries' else: save_folder = os.getcwd() + '/test/queries' if len(sys.argv) != 3: print('Please run script by format: python bin/generate_test_queries.py en 100') exit() language = sys.argv[1] queries_num = int(sys.argv[2]) wikipedia.set_lang(language) queries = wikipedia.random(queries_num) with open('{}/{}_{}.txt'.format(save_folder, language, int(time.time())), 'w') as file: for query in queries: if sys.version[0] == '3': file.write('%s\n' % query) else: file.write('%s\n' % query.encode('utf-8')) print('Done.')
<commit_before><commit_msg>Create script for generation test queries<commit_after>#!/usr/bin/env python import os import sys import time try: import wikipedia except ImportError: print('Queries generation requires wikipedia package. \n' 'You can install it by running "pip install wikipedia"') exit() # If user in bin directory set correct path to queries if os.getcwd().endswith('bin'): save_folder = os.getcwd().rstrip('bin') + 'test/queries' else: save_folder = os.getcwd() + '/test/queries' if len(sys.argv) != 3: print('Please run script by format: python bin/generate_test_queries.py en 100') exit() language = sys.argv[1] queries_num = int(sys.argv[2]) wikipedia.set_lang(language) queries = wikipedia.random(queries_num) with open('{}/{}_{}.txt'.format(save_folder, language, int(time.time())), 'w') as file: for query in queries: if sys.version[0] == '3': file.write('%s\n' % query) else: file.write('%s\n' % query.encode('utf-8')) print('Done.')
d3c5c5612c23c2ffcc103402d1f4d2acf7b81009
photutils/utils/tests/test_moments.py
photutils/utils/tests/test_moments.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import numpy as np from numpy.testing import assert_equal, assert_allclose import pytest from .._moments import _moments, _moments_central def test_moments(): data = np.array([[0, 1], [0, 1]]) moments = _moments(data, order=2) result = np.array([[2, 2, 2], [1, 1, 1], [1, 1, 1]]) assert_equal(moments, result) assert_allclose(moments[0, 1] / moments[0, 0], 1.0) assert_allclose(moments[1, 0] / moments[0, 0], 0.5) def test_moments_central(): data = np.array([[0, 1], [0, 1]]) moments = _moments_central(data, order=2) result = np.array([[2., 0., 0.], [0., 0., 0.], [0.5, 0., 0.]]) assert_allclose(moments, result) def test_moments_central_nonsquare(): data = np.array([[0, 1], [0, 1], [0, 1]]) moments = _moments_central(data, order=2) result = np.array([[3., 0., 0.], [0., 0., 0.], [2., 0., 0.]]) assert_allclose(moments, result) def test_moments_central_invalid_dim(): data = np.arange(27).reshape(3, 3, 3) with pytest.raises(ValueError): _moments_central(data, order=3)
Add tests for new moment functions
Add tests for new moment functions
Python
bsd-3-clause
astropy/photutils,larrybradley/photutils
Add tests for new moment functions
# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import numpy as np from numpy.testing import assert_equal, assert_allclose import pytest from .._moments import _moments, _moments_central def test_moments(): data = np.array([[0, 1], [0, 1]]) moments = _moments(data, order=2) result = np.array([[2, 2, 2], [1, 1, 1], [1, 1, 1]]) assert_equal(moments, result) assert_allclose(moments[0, 1] / moments[0, 0], 1.0) assert_allclose(moments[1, 0] / moments[0, 0], 0.5) def test_moments_central(): data = np.array([[0, 1], [0, 1]]) moments = _moments_central(data, order=2) result = np.array([[2., 0., 0.], [0., 0., 0.], [0.5, 0., 0.]]) assert_allclose(moments, result) def test_moments_central_nonsquare(): data = np.array([[0, 1], [0, 1], [0, 1]]) moments = _moments_central(data, order=2) result = np.array([[3., 0., 0.], [0., 0., 0.], [2., 0., 0.]]) assert_allclose(moments, result) def test_moments_central_invalid_dim(): data = np.arange(27).reshape(3, 3, 3) with pytest.raises(ValueError): _moments_central(data, order=3)
<commit_before><commit_msg>Add tests for new moment functions<commit_after>
# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import numpy as np from numpy.testing import assert_equal, assert_allclose import pytest from .._moments import _moments, _moments_central def test_moments(): data = np.array([[0, 1], [0, 1]]) moments = _moments(data, order=2) result = np.array([[2, 2, 2], [1, 1, 1], [1, 1, 1]]) assert_equal(moments, result) assert_allclose(moments[0, 1] / moments[0, 0], 1.0) assert_allclose(moments[1, 0] / moments[0, 0], 0.5) def test_moments_central(): data = np.array([[0, 1], [0, 1]]) moments = _moments_central(data, order=2) result = np.array([[2., 0., 0.], [0., 0., 0.], [0.5, 0., 0.]]) assert_allclose(moments, result) def test_moments_central_nonsquare(): data = np.array([[0, 1], [0, 1], [0, 1]]) moments = _moments_central(data, order=2) result = np.array([[3., 0., 0.], [0., 0., 0.], [2., 0., 0.]]) assert_allclose(moments, result) def test_moments_central_invalid_dim(): data = np.arange(27).reshape(3, 3, 3) with pytest.raises(ValueError): _moments_central(data, order=3)
Add tests for new moment functions# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import numpy as np from numpy.testing import assert_equal, assert_allclose import pytest from .._moments import _moments, _moments_central def test_moments(): data = np.array([[0, 1], [0, 1]]) moments = _moments(data, order=2) result = np.array([[2, 2, 2], [1, 1, 1], [1, 1, 1]]) assert_equal(moments, result) assert_allclose(moments[0, 1] / moments[0, 0], 1.0) assert_allclose(moments[1, 0] / moments[0, 0], 0.5) def test_moments_central(): data = np.array([[0, 1], [0, 1]]) moments = _moments_central(data, order=2) result = np.array([[2., 0., 0.], [0., 0., 0.], [0.5, 0., 0.]]) assert_allclose(moments, result) def test_moments_central_nonsquare(): data = np.array([[0, 1], [0, 1], [0, 1]]) moments = _moments_central(data, order=2) result = np.array([[3., 0., 0.], [0., 0., 0.], [2., 0., 0.]]) assert_allclose(moments, result) def test_moments_central_invalid_dim(): data = np.arange(27).reshape(3, 3, 3) with pytest.raises(ValueError): _moments_central(data, order=3)
<commit_before><commit_msg>Add tests for new moment functions<commit_after># Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import numpy as np from numpy.testing import assert_equal, assert_allclose import pytest from .._moments import _moments, _moments_central def test_moments(): data = np.array([[0, 1], [0, 1]]) moments = _moments(data, order=2) result = np.array([[2, 2, 2], [1, 1, 1], [1, 1, 1]]) assert_equal(moments, result) assert_allclose(moments[0, 1] / moments[0, 0], 1.0) assert_allclose(moments[1, 0] / moments[0, 0], 0.5) def test_moments_central(): data = np.array([[0, 1], [0, 1]]) moments = _moments_central(data, order=2) result = np.array([[2., 0., 0.], [0., 0., 0.], [0.5, 0., 0.]]) assert_allclose(moments, result) def test_moments_central_nonsquare(): data = np.array([[0, 1], [0, 1], [0, 1]]) moments = _moments_central(data, order=2) result = np.array([[3., 0., 0.], [0., 0., 0.], [2., 0., 0.]]) assert_allclose(moments, result) def test_moments_central_invalid_dim(): data = np.arange(27).reshape(3, 3, 3) with pytest.raises(ValueError): _moments_central(data, order=3)
f217c8c6def69d8a7428c655c5d350b0c1d8deb2
dynd/tests/test_ndobject_cast.py
dynd/tests/test_ndobject_cast.py
import sys import unittest from datetime import date from dynd import nd, ndt class TestNDObjectCast(unittest.TestCase): def test_broadcast_cast(self): a = nd.ndobject(10) b = a.cast('3, int32') self.assertRaises(RuntimeError, b.eval) def test_strided_to_fixed(self): a = nd.ndobject([5,1,2]) b = a.cast('3, int32').eval() self.assertEqual(nd.as_py(b), [5,1,2]) self.assertEqual(b.dtype, nd.dtype('3, int32')) if __name__ == '__main__': unittest.main()
Test that uses the convert_dtype's get_shape
Test that uses the convert_dtype's get_shape
Python
bsd-2-clause
insertinterestingnamehere/dynd-python,cpcloud/dynd-python,pombredanne/dynd-python,ContinuumIO/dynd-python,insertinterestingnamehere/dynd-python,insertinterestingnamehere/dynd-python,ContinuumIO/dynd-python,izaid/dynd-python,aterrel/dynd-python,aterrel/dynd-python,cpcloud/dynd-python,ContinuumIO/dynd-python,michaelpacer/dynd-python,aterrel/dynd-python,michaelpacer/dynd-python,aterrel/dynd-python,michaelpacer/dynd-python,mwiebe/dynd-python,izaid/dynd-python,michaelpacer/dynd-python,cpcloud/dynd-python,insertinterestingnamehere/dynd-python,pombredanne/dynd-python,mwiebe/dynd-python,mwiebe/dynd-python,izaid/dynd-python,izaid/dynd-python,mwiebe/dynd-python,pombredanne/dynd-python,pombredanne/dynd-python,cpcloud/dynd-python,ContinuumIO/dynd-python
Test that uses the convert_dtype's get_shape
import sys import unittest from datetime import date from dynd import nd, ndt class TestNDObjectCast(unittest.TestCase): def test_broadcast_cast(self): a = nd.ndobject(10) b = a.cast('3, int32') self.assertRaises(RuntimeError, b.eval) def test_strided_to_fixed(self): a = nd.ndobject([5,1,2]) b = a.cast('3, int32').eval() self.assertEqual(nd.as_py(b), [5,1,2]) self.assertEqual(b.dtype, nd.dtype('3, int32')) if __name__ == '__main__': unittest.main()
<commit_before><commit_msg>Test that uses the convert_dtype's get_shape<commit_after>
import sys import unittest from datetime import date from dynd import nd, ndt class TestNDObjectCast(unittest.TestCase): def test_broadcast_cast(self): a = nd.ndobject(10) b = a.cast('3, int32') self.assertRaises(RuntimeError, b.eval) def test_strided_to_fixed(self): a = nd.ndobject([5,1,2]) b = a.cast('3, int32').eval() self.assertEqual(nd.as_py(b), [5,1,2]) self.assertEqual(b.dtype, nd.dtype('3, int32')) if __name__ == '__main__': unittest.main()
Test that uses the convert_dtype's get_shapeimport sys import unittest from datetime import date from dynd import nd, ndt class TestNDObjectCast(unittest.TestCase): def test_broadcast_cast(self): a = nd.ndobject(10) b = a.cast('3, int32') self.assertRaises(RuntimeError, b.eval) def test_strided_to_fixed(self): a = nd.ndobject([5,1,2]) b = a.cast('3, int32').eval() self.assertEqual(nd.as_py(b), [5,1,2]) self.assertEqual(b.dtype, nd.dtype('3, int32')) if __name__ == '__main__': unittest.main()
<commit_before><commit_msg>Test that uses the convert_dtype's get_shape<commit_after>import sys import unittest from datetime import date from dynd import nd, ndt class TestNDObjectCast(unittest.TestCase): def test_broadcast_cast(self): a = nd.ndobject(10) b = a.cast('3, int32') self.assertRaises(RuntimeError, b.eval) def test_strided_to_fixed(self): a = nd.ndobject([5,1,2]) b = a.cast('3, int32').eval() self.assertEqual(nd.as_py(b), [5,1,2]) self.assertEqual(b.dtype, nd.dtype('3, int32')) if __name__ == '__main__': unittest.main()
9416de2b1aae97639b9a77bfb0e0f50a1c09ea6d
sierra_adapter/sierra_window_generator/src/sierra_window_generator.py
sierra_adapter/sierra_window_generator/src/sierra_window_generator.py
# -*- encoding: utf-8 -*- """ Publish a new Sierra update window to SNS. """ import os import boto3 import maya from wellcome_aws_utils.sns_utils import publish_sns_message def main(event, _): print(f'event = {event!r}') topic_arn = os.environ['TOPIC_ARN'] window_start = os.environ['WINDOW_START'] start = maya.when(window_start) now = maya.now() message = { 'start': start.iso8601(), 'end': now.iso8601(), } client = boto3.client('sns') publish_sns_message(client=client, topic_arn=topic_arn, message=message)
Create the first Sierra window generator
Create the first Sierra window generator
Python
mit
wellcometrust/platform-api,wellcometrust/platform-api,wellcometrust/platform-api,wellcometrust/platform-api
Create the first Sierra window generator
# -*- encoding: utf-8 -*- """ Publish a new Sierra update window to SNS. """ import os import boto3 import maya from wellcome_aws_utils.sns_utils import publish_sns_message def main(event, _): print(f'event = {event!r}') topic_arn = os.environ['TOPIC_ARN'] window_start = os.environ['WINDOW_START'] start = maya.when(window_start) now = maya.now() message = { 'start': start.iso8601(), 'end': now.iso8601(), } client = boto3.client('sns') publish_sns_message(client=client, topic_arn=topic_arn, message=message)
<commit_before><commit_msg>Create the first Sierra window generator<commit_after>
# -*- encoding: utf-8 -*- """ Publish a new Sierra update window to SNS. """ import os import boto3 import maya from wellcome_aws_utils.sns_utils import publish_sns_message def main(event, _): print(f'event = {event!r}') topic_arn = os.environ['TOPIC_ARN'] window_start = os.environ['WINDOW_START'] start = maya.when(window_start) now = maya.now() message = { 'start': start.iso8601(), 'end': now.iso8601(), } client = boto3.client('sns') publish_sns_message(client=client, topic_arn=topic_arn, message=message)
Create the first Sierra window generator# -*- encoding: utf-8 -*- """ Publish a new Sierra update window to SNS. """ import os import boto3 import maya from wellcome_aws_utils.sns_utils import publish_sns_message def main(event, _): print(f'event = {event!r}') topic_arn = os.environ['TOPIC_ARN'] window_start = os.environ['WINDOW_START'] start = maya.when(window_start) now = maya.now() message = { 'start': start.iso8601(), 'end': now.iso8601(), } client = boto3.client('sns') publish_sns_message(client=client, topic_arn=topic_arn, message=message)
<commit_before><commit_msg>Create the first Sierra window generator<commit_after># -*- encoding: utf-8 -*- """ Publish a new Sierra update window to SNS. """ import os import boto3 import maya from wellcome_aws_utils.sns_utils import publish_sns_message def main(event, _): print(f'event = {event!r}') topic_arn = os.environ['TOPIC_ARN'] window_start = os.environ['WINDOW_START'] start = maya.when(window_start) now = maya.now() message = { 'start': start.iso8601(), 'end': now.iso8601(), } client = boto3.client('sns') publish_sns_message(client=client, topic_arn=topic_arn, message=message)
fa85cd0d992b5153f9a48f3cf9d504a2b33cca1d
tempest/tests/lib/services/volume/v2/test_availability_zone_client.py
tempest/tests/lib/services/volume/v2/test_availability_zone_client.py
# Copyright 2017 FiberHome Telecommunication Technologies CO.,LTD # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest.lib.services.volume.v2 import availability_zone_client from tempest.tests.lib import fake_auth_provider from tempest.tests.lib.services import base class TestAvailabilityZoneClient(base.BaseServiceTest): FAKE_AZ_LIST = { "availabilityZoneInfo": [ { "zoneState": { "available": True }, "zoneName": "nova" } ] } def setUp(self): super(TestAvailabilityZoneClient, self).setUp() fake_auth = fake_auth_provider.FakeAuthProvider() self.client = availability_zone_client.AvailabilityZoneClient( fake_auth, 'volume', 'regionOne') def _test_list_availability_zones(self, bytes_body=False): self.check_service_client_function( self.client.list_availability_zones, 'tempest.lib.common.rest_client.RestClient.get', self.FAKE_AZ_LIST, bytes_body) def test_list_availability_zones_with_str_body(self): self._test_list_availability_zones() def test_list_availability_zones_with_bytes_body(self): self._test_list_availability_zones(bytes_body=True)
Add unit test for volume availability zone client
Add unit test for volume availability zone client This patch adds unit test for volume v2 availability zone client. Partially Implements: blueprint tempest-lib-missing-test-coverage Change-Id: I94f758307255de06fbaf8c6744912b46e15a6cb2
Python
apache-2.0
openstack/tempest,Juniper/tempest,openstack/tempest,masayukig/tempest,masayukig/tempest,Juniper/tempest,cisco-openstack/tempest,cisco-openstack/tempest
Add unit test for volume availability zone client This patch adds unit test for volume v2 availability zone client. Partially Implements: blueprint tempest-lib-missing-test-coverage Change-Id: I94f758307255de06fbaf8c6744912b46e15a6cb2
# Copyright 2017 FiberHome Telecommunication Technologies CO.,LTD # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest.lib.services.volume.v2 import availability_zone_client from tempest.tests.lib import fake_auth_provider from tempest.tests.lib.services import base class TestAvailabilityZoneClient(base.BaseServiceTest): FAKE_AZ_LIST = { "availabilityZoneInfo": [ { "zoneState": { "available": True }, "zoneName": "nova" } ] } def setUp(self): super(TestAvailabilityZoneClient, self).setUp() fake_auth = fake_auth_provider.FakeAuthProvider() self.client = availability_zone_client.AvailabilityZoneClient( fake_auth, 'volume', 'regionOne') def _test_list_availability_zones(self, bytes_body=False): self.check_service_client_function( self.client.list_availability_zones, 'tempest.lib.common.rest_client.RestClient.get', self.FAKE_AZ_LIST, bytes_body) def test_list_availability_zones_with_str_body(self): self._test_list_availability_zones() def test_list_availability_zones_with_bytes_body(self): self._test_list_availability_zones(bytes_body=True)
<commit_before><commit_msg>Add unit test for volume availability zone client This patch adds unit test for volume v2 availability zone client. Partially Implements: blueprint tempest-lib-missing-test-coverage Change-Id: I94f758307255de06fbaf8c6744912b46e15a6cb2<commit_after>
# Copyright 2017 FiberHome Telecommunication Technologies CO.,LTD # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest.lib.services.volume.v2 import availability_zone_client from tempest.tests.lib import fake_auth_provider from tempest.tests.lib.services import base class TestAvailabilityZoneClient(base.BaseServiceTest): FAKE_AZ_LIST = { "availabilityZoneInfo": [ { "zoneState": { "available": True }, "zoneName": "nova" } ] } def setUp(self): super(TestAvailabilityZoneClient, self).setUp() fake_auth = fake_auth_provider.FakeAuthProvider() self.client = availability_zone_client.AvailabilityZoneClient( fake_auth, 'volume', 'regionOne') def _test_list_availability_zones(self, bytes_body=False): self.check_service_client_function( self.client.list_availability_zones, 'tempest.lib.common.rest_client.RestClient.get', self.FAKE_AZ_LIST, bytes_body) def test_list_availability_zones_with_str_body(self): self._test_list_availability_zones() def test_list_availability_zones_with_bytes_body(self): self._test_list_availability_zones(bytes_body=True)
Add unit test for volume availability zone client This patch adds unit test for volume v2 availability zone client. Partially Implements: blueprint tempest-lib-missing-test-coverage Change-Id: I94f758307255de06fbaf8c6744912b46e15a6cb2# Copyright 2017 FiberHome Telecommunication Technologies CO.,LTD # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest.lib.services.volume.v2 import availability_zone_client from tempest.tests.lib import fake_auth_provider from tempest.tests.lib.services import base class TestAvailabilityZoneClient(base.BaseServiceTest): FAKE_AZ_LIST = { "availabilityZoneInfo": [ { "zoneState": { "available": True }, "zoneName": "nova" } ] } def setUp(self): super(TestAvailabilityZoneClient, self).setUp() fake_auth = fake_auth_provider.FakeAuthProvider() self.client = availability_zone_client.AvailabilityZoneClient( fake_auth, 'volume', 'regionOne') def _test_list_availability_zones(self, bytes_body=False): self.check_service_client_function( self.client.list_availability_zones, 'tempest.lib.common.rest_client.RestClient.get', self.FAKE_AZ_LIST, bytes_body) def test_list_availability_zones_with_str_body(self): self._test_list_availability_zones() def test_list_availability_zones_with_bytes_body(self): self._test_list_availability_zones(bytes_body=True)
<commit_before><commit_msg>Add unit test for volume availability zone client This patch adds unit test for volume v2 availability zone client. Partially Implements: blueprint tempest-lib-missing-test-coverage Change-Id: I94f758307255de06fbaf8c6744912b46e15a6cb2<commit_after># Copyright 2017 FiberHome Telecommunication Technologies CO.,LTD # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest.lib.services.volume.v2 import availability_zone_client from tempest.tests.lib import fake_auth_provider from tempest.tests.lib.services import base class TestAvailabilityZoneClient(base.BaseServiceTest): FAKE_AZ_LIST = { "availabilityZoneInfo": [ { "zoneState": { "available": True }, "zoneName": "nova" } ] } def setUp(self): super(TestAvailabilityZoneClient, self).setUp() fake_auth = fake_auth_provider.FakeAuthProvider() self.client = availability_zone_client.AvailabilityZoneClient( fake_auth, 'volume', 'regionOne') def _test_list_availability_zones(self, bytes_body=False): self.check_service_client_function( self.client.list_availability_zones, 'tempest.lib.common.rest_client.RestClient.get', self.FAKE_AZ_LIST, bytes_body) def test_list_availability_zones_with_str_body(self): self._test_list_availability_zones() def test_list_availability_zones_with_bytes_body(self): self._test_list_availability_zones(bytes_body=True)
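The paired str/bytes tests above boil down to the same JSON body arriving either as text or as raw bytes; a tiny standalone illustration using the fake payload from the test, runnable without tempest.

import json

payload = {"availabilityZoneInfo": [{"zoneState": {"available": True},
                                     "zoneName": "nova"}]}
as_str = json.dumps(payload)
as_bytes = as_str.encode('utf-8')
assert json.loads(as_str) == json.loads(as_bytes.decode('utf-8')) == payload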
6b6f6fb8a0266b19d1edc8272d0b3f1aea8d0c1b
analysis/patch-to-timeout.py
analysis/patch-to-timeout.py
#!/usr/bin/env python # vim: set sw=2 ts=2 softtabstop=2 expandtab: """ This script is designed to take a set of results and set them all to timeouts. """ import argparse import os import logging import sys import yaml from br_util import FinalResultType, classifyResult try: # Try to use libyaml which is faster from yaml import CLoader as Loader, CDumper as Dumper except ImportError: # fall back on python implementation from yaml import Loader, Dumper def main(args): parser = argparse.ArgumentParser() parser.add_argument("-l","--log-level",type=str, default="info", dest="log_level", choices=['debug','info','warning','error']) parser.add_argument('input_yml', type=argparse.FileType('r')) parser.add_argument('output_yml', type=str) pargs = parser.parse_args(args) logLevel = getattr(logging, pargs.log_level.upper(),None) logging.basicConfig(level=logLevel) if os.path.exists(pargs.output_yml): logging.error('Refusing to overwrite "{}"'.format(pargs.output_yml)) return 1 results = yaml.load(pargs.input_yml, Loader=Loader) assert isinstance(results, list) if len(results) == 0: logging.error('Result list is empty') return 1 for r in results: r['failed'] = False r['bound_hit'] = False r['bug_found'] = False r['timeout_hit'] = True r['exit_code'] = None if 'original_results' in r: r.pop('original_results') r['total_time'] = 900.0 r['out_of_memory'] = False r['total_time_stddev'] = 0.0 if 'sbx_dir' in r: r.pop('sbx_dir') r['sbx_dir'] = '/not/real/result' r['log_file'] = '/not/real/result' if 'instructions_executed' in r: r.pop('instructions_executed') assert classifyResult(r) == FinalResultType.TIMED_OUT # Write result out with open(pargs.output_yml, 'w') as f: yamlText = yaml.dump(results, default_flow_style=False, Dumper=Dumper) f.write(yamlText) return 0 if __name__ == '__main__': sys.exit(main(sys.argv[1:]))
Add script to take a result set and turn them all into timeouts.
Add script to take a result set and turn them all into timeouts.
Python
bsd-3-clause
symbooglix/boogie-runner,symbooglix/boogie-runner
Add script to take a result set and turn them all into timeouts.
#!/usr/bin/env python # vim: set sw=2 ts=2 softtabstop=2 expandtab: """ This script is designed to take a set of results and set them all to timeouts. """ import argparse import os import logging import sys import yaml from br_util import FinalResultType, classifyResult try: # Try to use libyaml which is faster from yaml import CLoader as Loader, CDumper as Dumper except ImportError: # fall back on python implementation from yaml import Loader, Dumper def main(args): parser = argparse.ArgumentParser() parser.add_argument("-l","--log-level",type=str, default="info", dest="log_level", choices=['debug','info','warning','error']) parser.add_argument('input_yml', type=argparse.FileType('r')) parser.add_argument('output_yml', type=str) pargs = parser.parse_args(args) logLevel = getattr(logging, pargs.log_level.upper(),None) logging.basicConfig(level=logLevel) if os.path.exists(pargs.output_yml): logging.error('Refusing to overwrite "{}"'.format(pargs.output_yml)) return 1 results = yaml.load(pargs.input_yml, Loader=Loader) assert isinstance(results, list) if len(results) == 0: logging.error('Result list is empty') return 1 for r in results: r['failed'] = False r['bound_hit'] = False r['bug_found'] = False r['timeout_hit'] = True r['exit_code'] = None if 'original_results' in r: r.pop('original_results') r['total_time'] = 900.0 r['out_of_memory'] = False r['total_time_stddev'] = 0.0 if 'sbx_dir' in r: r.pop('sbx_dir') r['sbx_dir'] = '/not/real/result' r['log_file'] = '/not/real/result' if 'instructions_executed' in r: r.pop('instructions_executed') assert classifyResult(r) == FinalResultType.TIMED_OUT # Write result out with open(pargs.output_yml, 'w') as f: yamlText = yaml.dump(results, default_flow_style=False, Dumper=Dumper) f.write(yamlText) return 0 if __name__ == '__main__': sys.exit(main(sys.argv[1:]))
<commit_before><commit_msg>Add script to take a result set and turn them all into timeouts.<commit_after>
#!/usr/bin/env python # vim: set sw=2 ts=2 softtabstop=2 expandtab: """ This script is designed to take a set of results and set them all to timeouts. """ import argparse import os import logging import sys import yaml from br_util import FinalResultType, classifyResult try: # Try to use libyaml which is faster from yaml import CLoader as Loader, CDumper as Dumper except ImportError: # fall back on python implementation from yaml import Loader, Dumper def main(args): parser = argparse.ArgumentParser() parser.add_argument("-l","--log-level",type=str, default="info", dest="log_level", choices=['debug','info','warning','error']) parser.add_argument('input_yml', type=argparse.FileType('r')) parser.add_argument('output_yml', type=str) pargs = parser.parse_args(args) logLevel = getattr(logging, pargs.log_level.upper(),None) logging.basicConfig(level=logLevel) if os.path.exists(pargs.output_yml): logging.error('Refusing to overwrite "{}"'.format(pargs.output_yml)) return 1 results = yaml.load(pargs.input_yml, Loader=Loader) assert isinstance(results, list) if len(results) == 0: logging.error('Result list is empty') return 1 for r in results: r['failed'] = False r['bound_hit'] = False r['bug_found'] = False r['timeout_hit'] = True r['exit_code'] = None if 'original_results' in r: r.pop('original_results') r['total_time'] = 900.0 r['out_of_memory'] = False r['total_time_stddev'] = 0.0 if 'sbx_dir' in r: r.pop('sbx_dir') r['sbx_dir'] = '/not/real/result' r['log_file'] = '/not/real/result' if 'instructions_executed' in r: r.pop('instructions_executed') assert classifyResult(r) == FinalResultType.TIMED_OUT # Write result out with open(pargs.output_yml, 'w') as f: yamlText = yaml.dump(results, default_flow_style=False, Dumper=Dumper) f.write(yamlText) return 0 if __name__ == '__main__': sys.exit(main(sys.argv[1:]))
Add script to take a result set and turn them all into timeouts.#!/usr/bin/env python # vim: set sw=2 ts=2 softtabstop=2 expandtab: """ This script is designed to take a set of results and set them all to timeouts. """ import argparse import os import logging import sys import yaml from br_util import FinalResultType, classifyResult try: # Try to use libyaml which is faster from yaml import CLoader as Loader, CDumper as Dumper except ImportError: # fall back on python implementation from yaml import Loader, Dumper def main(args): parser = argparse.ArgumentParser() parser.add_argument("-l","--log-level",type=str, default="info", dest="log_level", choices=['debug','info','warning','error']) parser.add_argument('input_yml', type=argparse.FileType('r')) parser.add_argument('output_yml', type=str) pargs = parser.parse_args(args) logLevel = getattr(logging, pargs.log_level.upper(),None) logging.basicConfig(level=logLevel) if os.path.exists(pargs.output_yml): logging.error('Refusing to overwrite "{}"'.format(pargs.output_yml)) return 1 results = yaml.load(pargs.input_yml, Loader=Loader) assert isinstance(results, list) if len(results) == 0: logging.error('Result list is empty') return 1 for r in results: r['failed'] = False r['bound_hit'] = False r['bug_found'] = False r['timeout_hit'] = True r['exit_code'] = None if 'original_results' in r: r.pop('original_results') r['total_time'] = 900.0 r['out_of_memory'] = False r['total_time_stddev'] = 0.0 if 'sbx_dir' in r: r.pop('sbx_dir') r['sbx_dir'] = '/not/real/result' r['log_file'] = '/not/real/result' if 'instructions_executed' in r: r.pop('instructions_executed') assert classifyResult(r) == FinalResultType.TIMED_OUT # Write result out with open(pargs.output_yml, 'w') as f: yamlText = yaml.dump(results, default_flow_style=False, Dumper=Dumper) f.write(yamlText) return 0 if __name__ == '__main__': sys.exit(main(sys.argv[1:]))
<commit_before><commit_msg>Add script to take a result set and turn them all into timeouts.<commit_after>#!/usr/bin/env python # vim: set sw=2 ts=2 softtabstop=2 expandtab: """ This script is designed to take a set of results and set them all to timeouts. """ import argparse import os import logging import sys import yaml from br_util import FinalResultType, classifyResult try: # Try to use libyaml which is faster from yaml import CLoader as Loader, CDumper as Dumper except ImportError: # fall back on python implementation from yaml import Loader, Dumper def main(args): parser = argparse.ArgumentParser() parser.add_argument("-l","--log-level",type=str, default="info", dest="log_level", choices=['debug','info','warning','error']) parser.add_argument('input_yml', type=argparse.FileType('r')) parser.add_argument('output_yml', type=str) pargs = parser.parse_args(args) logLevel = getattr(logging, pargs.log_level.upper(),None) logging.basicConfig(level=logLevel) if os.path.exists(pargs.output_yml): logging.error('Refusing to overwrite "{}"'.format(pargs.output_yml)) return 1 results = yaml.load(pargs.input_yml, Loader=Loader) assert isinstance(results, list) if len(results) == 0: logging.error('Result list is empty') return 1 for r in results: r['failed'] = False r['bound_hit'] = False r['bug_found'] = False r['timeout_hit'] = True r['exit_code'] = None if 'original_results' in r: r.pop('original_results') r['total_time'] = 900.0 r['out_of_memory'] = False r['total_time_stddev'] = 0.0 if 'sbx_dir' in r: r.pop('sbx_dir') r['sbx_dir'] = '/not/real/result' r['log_file'] = '/not/real/result' if 'instructions_executed' in r: r.pop('instructions_executed') assert classifyResult(r) == FinalResultType.TIMED_OUT # Write result out with open(pargs.output_yml, 'w') as f: yamlText = yaml.dump(results, default_flow_style=False, Dumper=Dumper) f.write(yamlText) return 0 if __name__ == '__main__': sys.exit(main(sys.argv[1:]))
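A hypothetical invocation of the script above; both YAML file names are placeholders, and the output file must not already exist because the script refuses to overwrite it.

# Equivalent to: python analysis/patch-to-timeout.py results.yml timeouts.yml
import sys
sys.exit(main(['--log-level', 'info', 'results.yml', 'timeouts.yml']))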
9382b3e6cf2dacb5380d482acd6a2bd97a6dd0ce
tests/benchmarks/benchmark_transform.py
tests/benchmarks/benchmark_transform.py
from warnings import WarningMessage import pytest import subprocess import os import h5py import numpy as np import time from desc.__main__ import main from desc.grid import Grid, LinearGrid, ConcentricGrid from desc.basis import ( PowerSeries, FourierSeries, DoubleFourierSeries, ZernikePolynomial, FourierZernikeBasis, ) from desc.transform import Transform @pytest.mark.benchmark( min_rounds=1, max_time=50, disable_gc=False, warmup=True, warmup_iterations=50 ) def test_build_transform_fft_lowres(benchmark): """Tests how long it takes to build a transform (after it has already been compiled) for lowres""" def build(): L = 5 M = 5 N = 5 grid = ConcentricGrid(L=L, M=M, N=N) basis = FourierZernikeBasis(L=L, M=M, N=N) transf = Transform(grid, basis, method="fft", build=False) transf.build() benchmark(build) @pytest.mark.benchmark(min_rounds=1, max_time=100, disable_gc=False, warmup=True) def test_build_transform_fft_midres(benchmark): """Tests how long it takes to build a transform (after it has already been compiled) for midres""" def build(): L = 15 M = 15 N = 15 grid = ConcentricGrid(L=L, M=M, N=N) basis = FourierZernikeBasis(L=L, M=M, N=N) transf = Transform(grid, basis, method="fft", build=False) transf.build() benchmark.pedantic(build, iterations=1, warmup_rounds=1, rounds=50) @pytest.mark.benchmark(min_rounds=1, max_time=100, disable_gc=False, warmup=True) def test_build_transform_fft_highres(benchmark): """Tests how long it takes to build a transform (after it has already been compiled) for highres""" def build(): L = 25 M = 25 N = 25 grid = ConcentricGrid(L=L, M=M, N=N) basis = FourierZernikeBasis(L=L, M=M, N=N) transf = Transform(grid, basis, method="fft", build=False) transf.build() benchmark.pedantic(build, iterations=1, warmup_rounds=1, rounds=25)
Add benchmark for building transforms (not including compilation time)
Add benchmark for building transforms (not including compilation time)
Python
mit
PlasmaControl/DESC,PlasmaControl/DESC
Add benchmark for building transforms (not including compilation time)
from warnings import WarningMessage import pytest import subprocess import os import h5py import numpy as np import time from desc.__main__ import main from desc.grid import Grid, LinearGrid, ConcentricGrid from desc.basis import ( PowerSeries, FourierSeries, DoubleFourierSeries, ZernikePolynomial, FourierZernikeBasis, ) from desc.transform import Transform @pytest.mark.benchmark( min_rounds=1, max_time=50, disable_gc=False, warmup=True, warmup_iterations=50 ) def test_build_transform_fft_lowres(benchmark): """Tests how long it takes to build a transform (after it has already been compiled) for lowres""" def build(): L = 5 M = 5 N = 5 grid = ConcentricGrid(L=L, M=M, N=N) basis = FourierZernikeBasis(L=L, M=M, N=N) transf = Transform(grid, basis, method="fft", build=False) transf.build() benchmark(build) @pytest.mark.benchmark(min_rounds=1, max_time=100, disable_gc=False, warmup=True) def test_build_transform_fft_midres(benchmark): """Tests how long it takes to build a transform (after it has already been compiled) for midres""" def build(): L = 15 M = 15 N = 15 grid = ConcentricGrid(L=L, M=M, N=N) basis = FourierZernikeBasis(L=L, M=M, N=N) transf = Transform(grid, basis, method="fft", build=False) transf.build() benchmark.pedantic(build, iterations=1, warmup_rounds=1, rounds=50) @pytest.mark.benchmark(min_rounds=1, max_time=100, disable_gc=False, warmup=True) def test_build_transform_fft_highres(benchmark): """Tests how long it takes to build a transform (after it has already been compiled) for highres""" def build(): L = 25 M = 25 N = 25 grid = ConcentricGrid(L=L, M=M, N=N) basis = FourierZernikeBasis(L=L, M=M, N=N) transf = Transform(grid, basis, method="fft", build=False) transf.build() benchmark.pedantic(build, iterations=1, warmup_rounds=1, rounds=25)
<commit_before><commit_msg>Add benchmark for building transforms (not including compilation time)<commit_after>
from warnings import WarningMessage import pytest import subprocess import os import h5py import numpy as np import time from desc.__main__ import main from desc.grid import Grid, LinearGrid, ConcentricGrid from desc.basis import ( PowerSeries, FourierSeries, DoubleFourierSeries, ZernikePolynomial, FourierZernikeBasis, ) from desc.transform import Transform @pytest.mark.benchmark( min_rounds=1, max_time=50, disable_gc=False, warmup=True, warmup_iterations=50 ) def test_build_transform_fft_lowres(benchmark): """Tests how long it takes to build a transform (after it has already been compiled) for lowres""" def build(): L = 5 M = 5 N = 5 grid = ConcentricGrid(L=L, M=M, N=N) basis = FourierZernikeBasis(L=L, M=M, N=N) transf = Transform(grid, basis, method="fft", build=False) transf.build() benchmark(build) @pytest.mark.benchmark(min_rounds=1, max_time=100, disable_gc=False, warmup=True) def test_build_transform_fft_midres(benchmark): """Tests how long it takes to build a transform (after it has already been compiled) for midres""" def build(): L = 15 M = 15 N = 15 grid = ConcentricGrid(L=L, M=M, N=N) basis = FourierZernikeBasis(L=L, M=M, N=N) transf = Transform(grid, basis, method="fft", build=False) transf.build() benchmark.pedantic(build, iterations=1, warmup_rounds=1, rounds=50) @pytest.mark.benchmark(min_rounds=1, max_time=100, disable_gc=False, warmup=True) def test_build_transform_fft_highres(benchmark): """Tests how long it takes to build a transform (after it has already been compiled) for highres""" def build(): L = 25 M = 25 N = 25 grid = ConcentricGrid(L=L, M=M, N=N) basis = FourierZernikeBasis(L=L, M=M, N=N) transf = Transform(grid, basis, method="fft", build=False) transf.build() benchmark.pedantic(build, iterations=1, warmup_rounds=1, rounds=25)
Add benchmark for building transforms (not including compilation time)from warnings import WarningMessage import pytest import subprocess import os import h5py import numpy as np import time from desc.__main__ import main from desc.grid import Grid, LinearGrid, ConcentricGrid from desc.basis import ( PowerSeries, FourierSeries, DoubleFourierSeries, ZernikePolynomial, FourierZernikeBasis, ) from desc.transform import Transform @pytest.mark.benchmark( min_rounds=1, max_time=50, disable_gc=False, warmup=True, warmup_iterations=50 ) def test_build_transform_fft_lowres(benchmark): """Tests how long it takes to build a transform (after it has already been compiled) for lowres""" def build(): L = 5 M = 5 N = 5 grid = ConcentricGrid(L=L, M=M, N=N) basis = FourierZernikeBasis(L=L, M=M, N=N) transf = Transform(grid, basis, method="fft", build=False) transf.build() benchmark(build) @pytest.mark.benchmark(min_rounds=1, max_time=100, disable_gc=False, warmup=True) def test_build_transform_fft_midres(benchmark): """Tests how long it takes to build a transform (after it has already been compiled) for midres""" def build(): L = 15 M = 15 N = 15 grid = ConcentricGrid(L=L, M=M, N=N) basis = FourierZernikeBasis(L=L, M=M, N=N) transf = Transform(grid, basis, method="fft", build=False) transf.build() benchmark.pedantic(build, iterations=1, warmup_rounds=1, rounds=50) @pytest.mark.benchmark(min_rounds=1, max_time=100, disable_gc=False, warmup=True) def test_build_transform_fft_highres(benchmark): """Tests how long it takes to build a transform (after it has already been compiled) for highres""" def build(): L = 25 M = 25 N = 25 grid = ConcentricGrid(L=L, M=M, N=N) basis = FourierZernikeBasis(L=L, M=M, N=N) transf = Transform(grid, basis, method="fft", build=False) transf.build() benchmark.pedantic(build, iterations=1, warmup_rounds=1, rounds=25)
<commit_before><commit_msg>Add benchmark for building transforms (not including compilation time)<commit_after>from warnings import WarningMessage import pytest import subprocess import os import h5py import numpy as np import time from desc.__main__ import main from desc.grid import Grid, LinearGrid, ConcentricGrid from desc.basis import ( PowerSeries, FourierSeries, DoubleFourierSeries, ZernikePolynomial, FourierZernikeBasis, ) from desc.transform import Transform @pytest.mark.benchmark( min_rounds=1, max_time=50, disable_gc=False, warmup=True, warmup_iterations=50 ) def test_build_transform_fft_lowres(benchmark): """Tests how long it takes to build a transform (after it has already been compiled) for lowres""" def build(): L = 5 M = 5 N = 5 grid = ConcentricGrid(L=L, M=M, N=N) basis = FourierZernikeBasis(L=L, M=M, N=N) transf = Transform(grid, basis, method="fft", build=False) transf.build() benchmark(build) @pytest.mark.benchmark(min_rounds=1, max_time=100, disable_gc=False, warmup=True) def test_build_transform_fft_midres(benchmark): """Tests how long it takes to build a transform (after it has already been compiled) for midres""" def build(): L = 15 M = 15 N = 15 grid = ConcentricGrid(L=L, M=M, N=N) basis = FourierZernikeBasis(L=L, M=M, N=N) transf = Transform(grid, basis, method="fft", build=False) transf.build() benchmark.pedantic(build, iterations=1, warmup_rounds=1, rounds=50) @pytest.mark.benchmark(min_rounds=1, max_time=100, disable_gc=False, warmup=True) def test_build_transform_fft_highres(benchmark): """Tests how long it takes to build a transform (after it has already been compiled) for highres""" def build(): L = 25 M = 25 N = 25 grid = ConcentricGrid(L=L, M=M, N=N) basis = FourierZernikeBasis(L=L, M=M, N=N) transf = Transform(grid, basis, method="fft", build=False) transf.build() benchmark.pedantic(build, iterations=1, warmup_rounds=1, rounds=25)
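One way to run just these benchmarks programmatically, assuming pytest-benchmark is installed; the --benchmark-only flag skips any non-benchmark tests in the collection.

import pytest

pytest.main(['tests/benchmarks/benchmark_transform.py', '--benchmark-only'])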
bf40adfe7042518c4cefc1aaf4a0929c9ce8b2a4
mta.py
mta.py
from datetime import datetime import urllib2 import xml.etree.ElementTree as ET MTA_SERVICE_STATUS_URL = 'http://web.mta.info/status/serviceStatus.txt' def service_status(): service_status_xml = urllib2.urlopen(MTA_SERVICE_STATUS_URL).read() tree = ET.fromstring(service_status_xml) return MTASubwayStatus(tree) class MTASubwayStatus(object): ''' Represents the MTA subway service status at a certain point in time. ''' def __init__(self, response_xml): self.raw_xml = response_xml self.retrieved_at = self._parse_time() self.delays = self._parse_delays() def _parse_time(self): timestamp = self.raw_xml.find('timestamp').text format_string = '%m/%d/%Y %I:%M:%S %p' return datetime.strptime(timestamp, format_string) def _parse_delays(self): delays = [] for line in self.raw_xml.find('subway'): if self._line_delayed(line): delays.append(MTASubwayDelay(line)) return delays def _line_delayed(self, line_node): return line_node.find('status').text == 'DELAYS' def has_delays(self): return bool(self.delays) class MTASubwayDelay(object): def __init__(self, line_node): self.line = line_node.find('name').text self.info = line_node.find('text').text self.date = datetime.strptime(line_node.find('Date').text, '%m/%d/%Y') self.time = datetime.strptime(line_node.find('Time').text, '%I:%M%p')
Add basic functionality for using the MTA service status feed
Add basic functionality for using the MTA service status feed
Python
isc
softdev-projects/mta-smart-alerts
Add basic functionality for using the MTA service status feed
from datetime import datetime import urllib2 import xml.etree.ElementTree as ET MTA_SERVICE_STATUS_URL = 'http://web.mta.info/status/serviceStatus.txt' def service_status(): service_status_xml = urllib2.urlopen(MTA_SERVICE_STATUS_URL).read() tree = ET.fromstring(service_status_xml) return MTASubwayStatus(tree) class MTASubwayStatus(object): ''' Represents the MTA subway service status at a certain point in time. ''' def __init__(self, response_xml): self.raw_xml = response_xml self.retrieved_at = self._parse_time() self.delays = self._parse_delays() def _parse_time(self): timestamp = self.raw_xml.find('timestamp').text format_string = '%m/%d/%Y %I:%M:%S %p' return datetime.strptime(timestamp, format_string) def _parse_delays(self): delays = [] for line in self.raw_xml.find('subway'): if self._line_delayed(line): delays.append(MTASubwayDelay(line)) return delays def _line_delayed(self, line_node): return line_node.find('status').text == 'DELAYS' def has_delays(self): return bool(self.delays) class MTASubwayDelay(object): def __init__(self, line_node): self.line = line_node.find('name').text self.info = line_node.find('text').text self.date = datetime.strptime(line_node.find('Date').text, '%m/%d/%Y') self.time = datetime.strptime(line_node.find('Time').text, '%I:%M%p')
<commit_before><commit_msg>Add basic functionality for using the MTA service status feed<commit_after>
from datetime import datetime import urllib2 import xml.etree.ElementTree as ET MTA_SERVICE_STATUS_URL = 'http://web.mta.info/status/serviceStatus.txt' def service_status(): service_status_xml = urllib2.urlopen(MTA_SERVICE_STATUS_URL).read() tree = ET.fromstring(service_status_xml) return MTASubwayStatus(tree) class MTASubwayStatus(object): ''' Represents the MTA subway service status at a certain point in time. ''' def __init__(self, response_xml): self.raw_xml = response_xml self.retrieved_at = self._parse_time() self.delays = self._parse_delays() def _parse_time(self): timestamp = self.raw_xml.find('timestamp').text format_string = '%m/%d/%Y %I:%M:%S %p' return datetime.strptime(timestamp, format_string) def _parse_delays(self): delays = [] for line in self.raw_xml.find('subway'): if self._line_delayed(line): delays.append(MTASubwayDelay(line)) return delays def _line_delayed(self, line_node): return line_node.find('status').text == 'DELAYS' def has_delays(self): return bool(self.delays) class MTASubwayDelay(object): def __init__(self, line_node): self.line = line_node.find('name').text self.info = line_node.find('text').text self.date = datetime.strptime(line_node.find('Date').text, '%m/%d/%Y') self.time = datetime.strptime(line_node.find('Time').text, '%I:%M%p')
Add basic functionality for using the MTA service status feedfrom datetime import datetime import urllib2 import xml.etree.ElementTree as ET MTA_SERVICE_STATUS_URL = 'http://web.mta.info/status/serviceStatus.txt' def service_status(): service_status_xml = urllib2.urlopen(MTA_SERVICE_STATUS_URL).read() tree = ET.fromstring(service_status_xml) return MTASubwayStatus(tree) class MTASubwayStatus(object): ''' Represents the MTA subway service status at a certain point in time. ''' def __init__(self, response_xml): self.raw_xml = response_xml self.retrieved_at = self._parse_time() self.delays = self._parse_delays() def _parse_time(self): timestamp = self.raw_xml.find('timestamp').text format_string = '%m/%d/%Y %I:%M:%S %p' return datetime.strptime(timestamp, format_string) def _parse_delays(self): delays = [] for line in self.raw_xml.find('subway'): if self._line_delayed(line): delays.append(MTASubwayDelay(line)) return delays def _line_delayed(self, line_node): return line_node.find('status').text == 'DELAYS' def has_delays(self): return bool(self.delays) class MTASubwayDelay(object): def __init__(self, line_node): self.line = line_node.find('name').text self.info = line_node.find('text').text self.date = datetime.strptime(line_node.find('Date').text, '%m/%d/%Y') self.time = datetime.strptime(line_node.find('Time').text, '%I:%M%p')
<commit_before><commit_msg>Add basic functionality for using the MTA service status feed<commit_after>from datetime import datetime import urllib2 import xml.etree.ElementTree as ET MTA_SERVICE_STATUS_URL = 'http://web.mta.info/status/serviceStatus.txt' def service_status(): service_status_xml = urllib2.urlopen(MTA_SERVICE_STATUS_URL).read() tree = ET.fromstring(service_status_xml) return MTASubwayStatus(tree) class MTASubwayStatus(object): ''' Represents the MTA subway service status at a certain point in time. ''' def __init__(self, response_xml): self.raw_xml = response_xml self.retrieved_at = self._parse_time() self.delays = self._parse_delays() def _parse_time(self): timestamp = self.raw_xml.find('timestamp').text format_string = '%m/%d/%Y %I:%M:%S %p' return datetime.strptime(timestamp, format_string) def _parse_delays(self): delays = [] for line in self.raw_xml.find('subway'): if self._line_delayed(line): delays.append(MTASubwayDelay(line)) return delays def _line_delayed(self, line_node): return line_node.find('status').text == 'DELAYS' def has_delays(self): return bool(self.delays) class MTASubwayDelay(object): def __init__(self, line_node): self.line = line_node.find('name').text self.info = line_node.find('text').text self.date = datetime.strptime(line_node.find('Date').text, '%m/%d/%Y') self.time = datetime.strptime(line_node.find('Time').text, '%I:%M%p')
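A short usage sketch for the module above, written for Python 2 to match its urllib2 import; it fetches the live MTA feed, so network access is assumed.

status = service_status()
print 'retrieved at %s' % status.retrieved_at
if status.has_delays():
    for delay in status.delays:
        print '%s line: %s' % (delay.line, delay.info)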
ffab86b081357fbd51e0c9676f03f4c39b65658b
emails/models.py
emails/models.py
from django.db import models from datetime import datetime import settings class Email(models.Model): ''' Monitor emails sent ''' to = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='emails') subject = models.CharField(max_length=150) body = models.TextField() at = models.DateTimeField(default=datetime.now) prefetch = ['to'] def __str__(self): return 'TO: %s, %s' % (self.to, self.subject) @models.permalink def get_absolute_url(self): if self.body: return 'email', [self.pk] return '' class Meta: db_table = 'emails' class UserSubscription(models.Model): ''' Abstract subscription model to subclass. Add boolean fields to your subclass to make your own subscriptions named recieve_x; e.g.: receive_newsletter, receive_alerts etc. This will allow users to subscribe to different types of non-transactional emails. ''' user = models.OneToOneField(settings.AUTH_USER_MODEL, primary_key=True) receive_email = models.BooleanField('E-mail', default=True) def __str__(self): return str(self.pk) class Meta: abstract = True
Add a django model to save emails and specify subscriptions.
Add a django model to save emails and specify subscriptions.
Python
bsd-3-clause
fmalina/emails,fmalina/emails
Add a django model to save emails and specify subscriptions.
from django.db import models from datetime import datetime import settings class Email(models.Model): ''' Monitor emails sent ''' to = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='emails') subject = models.CharField(max_length=150) body = models.TextField() at = models.DateTimeField(default=datetime.now) prefetch = ['to'] def __str__(self): return 'TO: %s, %s' % (self.to, self.subject) @models.permalink def get_absolute_url(self): if self.body: return 'email', [self.pk] return '' class Meta: db_table = 'emails' class UserSubscription(models.Model): ''' Abstract subscription model to subclass. Add boolean fields to your subclass to make your own subscriptions named recieve_x; e.g.: receive_newsletter, receive_alerts etc. This will allow users to subscribe to different types of non-transactional emails. ''' user = models.OneToOneField(settings.AUTH_USER_MODEL, primary_key=True) receive_email = models.BooleanField('E-mail', default=True) def __str__(self): return str(self.pk) class Meta: abstract = True
<commit_before><commit_msg>Add a django model to save emails and specify subscriptions.<commit_after>
from django.db import models from datetime import datetime import settings class Email(models.Model): ''' Monitor emails sent ''' to = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='emails') subject = models.CharField(max_length=150) body = models.TextField() at = models.DateTimeField(default=datetime.now) prefetch = ['to'] def __str__(self): return 'TO: %s, %s' % (self.to, self.subject) @models.permalink def get_absolute_url(self): if self.body: return 'email', [self.pk] return '' class Meta: db_table = 'emails' class UserSubscription(models.Model): ''' Abstract subscription model to subclass. Add boolean fields to your subclass to make your own subscriptions named recieve_x; e.g.: receive_newsletter, receive_alerts etc. This will allow users to subscribe to different types of non-transactional emails. ''' user = models.OneToOneField(settings.AUTH_USER_MODEL, primary_key=True) receive_email = models.BooleanField('E-mail', default=True) def __str__(self): return str(self.pk) class Meta: abstract = True
Add a django model to save emails and specify subscriptions.from django.db import models from datetime import datetime import settings class Email(models.Model): ''' Monitor emails sent ''' to = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='emails') subject = models.CharField(max_length=150) body = models.TextField() at = models.DateTimeField(default=datetime.now) prefetch = ['to'] def __str__(self): return 'TO: %s, %s' % (self.to, self.subject) @models.permalink def get_absolute_url(self): if self.body: return 'email', [self.pk] return '' class Meta: db_table = 'emails' class UserSubscription(models.Model): ''' Abstract subscription model to subclass. Add boolean fields to your subclass to make your own subscriptions named recieve_x; e.g.: receive_newsletter, receive_alerts etc. This will allow users to subscribe to different types of non-transactional emails. ''' user = models.OneToOneField(settings.AUTH_USER_MODEL, primary_key=True) receive_email = models.BooleanField('E-mail', default=True) def __str__(self): return str(self.pk) class Meta: abstract = True
<commit_before><commit_msg>Add a django model to save emails and specify subscriptions.<commit_after>from django.db import models from datetime import datetime import settings class Email(models.Model): ''' Monitor emails sent ''' to = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='emails') subject = models.CharField(max_length=150) body = models.TextField() at = models.DateTimeField(default=datetime.now) prefetch = ['to'] def __str__(self): return 'TO: %s, %s' % (self.to, self.subject) @models.permalink def get_absolute_url(self): if self.body: return 'email', [self.pk] return '' class Meta: db_table = 'emails' class UserSubscription(models.Model): ''' Abstract subscription model to subclass. Add boolean fields to your subclass to make your own subscriptions named recieve_x; e.g.: receive_newsletter, receive_alerts etc. This will allow users to subscribe to different types of non-transactional emails. ''' user = models.OneToOneField(settings.AUTH_USER_MODEL, primary_key=True) receive_email = models.BooleanField('E-mail', default=True) def __str__(self): return str(self.pk) class Meta: abstract = True
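A minimal concrete subclass following the pattern the UserSubscription docstring describes; the field names and table name here are illustrative only.

class NewsletterSubscription(UserSubscription):
    receive_newsletter = models.BooleanField('Newsletter', default=True)
    receive_alerts = models.BooleanField('Alerts', default=False)

    class Meta:
        db_table = 'user_subscriptions'  # hypothetical table name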
4325bf0fc91011a263f9b27065659aca09ca3d49
tools/droplets/cleanup.py
tools/droplets/cleanup.py
import argparse import configparser import os import digitalocean def get_config() -> configparser.ConfigParser: config = configparser.ConfigParser() config.read(os.path.join(os.path.dirname(os.path.abspath(__file__)), "conf.ini")) return config parser = argparse.ArgumentParser(description="Clean up old A / AAAA records in zulipdev.org") parser.add_argument("--force", action="store_true") if __name__ == "__main__": args = parser.parse_args() config = get_config() api_token = config["digitalocean"]["api_token"] seen_ips = set() if not args.force: print("WOULD DELETE:") manager = digitalocean.Manager(token=api_token) my_droplets = manager.get_all_droplets() for droplet in my_droplets: seen_ips.add(droplet.ip_address) if droplet.ipv6: seen_ips.update(net["ip_address"] for net in droplet.networks["v6"]) domain = digitalocean.Domain(token=api_token, name="zulipdev.org") domain.load() records = domain.get_records() for record in sorted(records, key=lambda e: ".".join(reversed(e.name.split(".")))): if record.type not in ("AAAA", "A"): continue elif record.data in seen_ips: continue else: print(f"{record.type} {record.name} = {record.data}") if args.force: record.destroy() if not args.force: print("Re-run with --force to delete")
Add a tool to clean up old hostnames in DNS.
droplets: Add a tool to clean up old hostnames in DNS.
Python
apache-2.0
andersk/zulip,rht/zulip,andersk/zulip,andersk/zulip,zulip/zulip,zulip/zulip,zulip/zulip,zulip/zulip,andersk/zulip,andersk/zulip,andersk/zulip,rht/zulip,rht/zulip,rht/zulip,rht/zulip,zulip/zulip,rht/zulip,andersk/zulip,zulip/zulip,zulip/zulip,rht/zulip
droplets: Add a tool to clean up old hostnames in DNS.
import argparse import configparser import os import digitalocean def get_config() -> configparser.ConfigParser: config = configparser.ConfigParser() config.read(os.path.join(os.path.dirname(os.path.abspath(__file__)), "conf.ini")) return config parser = argparse.ArgumentParser(description="Clean up old A / AAAA records in zulipdev.org") parser.add_argument("--force", action="store_true") if __name__ == "__main__": args = parser.parse_args() config = get_config() api_token = config["digitalocean"]["api_token"] seen_ips = set() if not args.force: print("WOULD DELETE:") manager = digitalocean.Manager(token=api_token) my_droplets = manager.get_all_droplets() for droplet in my_droplets: seen_ips.add(droplet.ip_address) if droplet.ipv6: seen_ips.update(net["ip_address"] for net in droplet.networks["v6"]) domain = digitalocean.Domain(token=api_token, name="zulipdev.org") domain.load() records = domain.get_records() for record in sorted(records, key=lambda e: ".".join(reversed(e.name.split(".")))): if record.type not in ("AAAA", "A"): continue elif record.data in seen_ips: continue else: print(f"{record.type} {record.name} = {record.data}") if args.force: record.destroy() if not args.force: print("Re-run with --force to delete")
<commit_before><commit_msg>droplets: Add a tool to clean up old hostnames in DNS.<commit_after>
import argparse import configparser import os import digitalocean def get_config() -> configparser.ConfigParser: config = configparser.ConfigParser() config.read(os.path.join(os.path.dirname(os.path.abspath(__file__)), "conf.ini")) return config parser = argparse.ArgumentParser(description="Clean up old A / AAAA records in zulipdev.org") parser.add_argument("--force", action="store_true") if __name__ == "__main__": args = parser.parse_args() config = get_config() api_token = config["digitalocean"]["api_token"] seen_ips = set() if not args.force: print("WOULD DELETE:") manager = digitalocean.Manager(token=api_token) my_droplets = manager.get_all_droplets() for droplet in my_droplets: seen_ips.add(droplet.ip_address) if droplet.ipv6: seen_ips.update(net["ip_address"] for net in droplet.networks["v6"]) domain = digitalocean.Domain(token=api_token, name="zulipdev.org") domain.load() records = domain.get_records() for record in sorted(records, key=lambda e: ".".join(reversed(e.name.split(".")))): if record.type not in ("AAAA", "A"): continue elif record.data in seen_ips: continue else: print(f"{record.type} {record.name} = {record.data}") if args.force: record.destroy() if not args.force: print("Re-run with --force to delete")
droplets: Add a tool to clean up old hostnames in DNS.import argparse import configparser import os import digitalocean def get_config() -> configparser.ConfigParser: config = configparser.ConfigParser() config.read(os.path.join(os.path.dirname(os.path.abspath(__file__)), "conf.ini")) return config parser = argparse.ArgumentParser(description="Clean up old A / AAAA records in zulipdev.org") parser.add_argument("--force", action="store_true") if __name__ == "__main__": args = parser.parse_args() config = get_config() api_token = config["digitalocean"]["api_token"] seen_ips = set() if not args.force: print("WOULD DELETE:") manager = digitalocean.Manager(token=api_token) my_droplets = manager.get_all_droplets() for droplet in my_droplets: seen_ips.add(droplet.ip_address) if droplet.ipv6: seen_ips.update(net["ip_address"] for net in droplet.networks["v6"]) domain = digitalocean.Domain(token=api_token, name="zulipdev.org") domain.load() records = domain.get_records() for record in sorted(records, key=lambda e: ".".join(reversed(e.name.split(".")))): if record.type not in ("AAAA", "A"): continue elif record.data in seen_ips: continue else: print(f"{record.type} {record.name} = {record.data}") if args.force: record.destroy() if not args.force: print("Re-run with --force to delete")
<commit_before><commit_msg>droplets: Add a tool to clean up old hostnames in DNS.<commit_after>import argparse import configparser import os import digitalocean def get_config() -> configparser.ConfigParser: config = configparser.ConfigParser() config.read(os.path.join(os.path.dirname(os.path.abspath(__file__)), "conf.ini")) return config parser = argparse.ArgumentParser(description="Clean up old A / AAAA records in zulipdev.org") parser.add_argument("--force", action="store_true") if __name__ == "__main__": args = parser.parse_args() config = get_config() api_token = config["digitalocean"]["api_token"] seen_ips = set() if not args.force: print("WOULD DELETE:") manager = digitalocean.Manager(token=api_token) my_droplets = manager.get_all_droplets() for droplet in my_droplets: seen_ips.add(droplet.ip_address) if droplet.ipv6: seen_ips.update(net["ip_address"] for net in droplet.networks["v6"]) domain = digitalocean.Domain(token=api_token, name="zulipdev.org") domain.load() records = domain.get_records() for record in sorted(records, key=lambda e: ".".join(reversed(e.name.split(".")))): if record.type not in ("AAAA", "A"): continue elif record.data in seen_ips: continue else: print(f"{record.type} {record.name} = {record.data}") if args.force: record.destroy() if not args.force: print("Re-run with --force to delete")
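The reversed-label sort key in the script above orders records so that hostnames group by DNS suffix; a tiny standalone illustration of that ordering.

names = ['b.zulipdev.org', 'a.zulipdev.org', 'a.test.zulipdev.org']
by_reversed_labels = lambda name: '.'.join(reversed(name.split('.')))
print(sorted(names, key=by_reversed_labels))
# -> ['a.zulipdev.org', 'b.zulipdev.org', 'a.test.zulipdev.org']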
5fdb5e0537049d602a1bc4bfc6401e396076e333
py/ransom-note.py
py/ransom-note.py
from collections import Counter class Solution(object): def canConstruct(self, ransomNote, magazine): """ :type ransomNote: str :type magazine: str :rtype: bool """ cr = Counter(ransomNote) cm = Counter(magazine) for c, v in cr.iteritems(): if v > cm[c]: return False return True
Add py solution for 383. Ransom Note
Add py solution for 383. Ransom Note 383. Ransom Note: https://leetcode.com/problems/ransom-note/
Python
apache-2.0
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
Add py solution for 383. Ransom Note 383. Ransom Note: https://leetcode.com/problems/ransom-note/
from collections import Counter class Solution(object): def canConstruct(self, ransomNote, magazine): """ :type ransomNote: str :type magazine: str :rtype: bool """ cr = Counter(ransomNote) cm = Counter(magazine) for c, v in cr.iteritems(): if v > cm[c]: return False return True
<commit_before><commit_msg>Add py solution for 383. Ransom Note 383. Ransom Note: https://leetcode.com/problems/ransom-note/<commit_after>
from collections import Counter class Solution(object): def canConstruct(self, ransomNote, magazine): """ :type ransomNote: str :type magazine: str :rtype: bool """ cr = Counter(ransomNote) cm = Counter(magazine) for c, v in cr.iteritems(): if v > cm[c]: return False return True
Add py solution for 383. Ransom Note 383. Ransom Note: https://leetcode.com/problems/ransom-note/from collections import Counter class Solution(object): def canConstruct(self, ransomNote, magazine): """ :type ransomNote: str :type magazine: str :rtype: bool """ cr = Counter(ransomNote) cm = Counter(magazine) for c, v in cr.iteritems(): if v > cm[c]: return False return True
<commit_before><commit_msg>Add py solution for 383. Ransom Note 383. Ransom Note: https://leetcode.com/problems/ransom-note/<commit_after>from collections import Counter class Solution(object): def canConstruct(self, ransomNote, magazine): """ :type ransomNote: str :type magazine: str :rtype: bool """ cr = Counter(ransomNote) cm = Counter(magazine) for c, v in cr.iteritems(): if v > cm[c]: return False return True
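A quick check of the Counter-based solution above, in Python 2 to match its use of iteritems; the last line shows an equivalent idiom, since Counter subtraction drops non-positive counts.

s = Solution()
print s.canConstruct('aa', 'aab')   # True
print s.canConstruct('aab', 'ab')   # False

from collections import Counter
print not (Counter('aa') - Counter('aab'))   # True: empty difference means constructible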
5cbd8ce4ef321b3c302ca731999b09abbb99db7f
hub/tests/test_globalsettings.py
hub/tests/test_globalsettings.py
# coding: utf-8 import constance from constance.test import override_config from django.urls import reverse from django.test import TestCase class GlobalSettingsTestCase(TestCase): fixtures = ['test_data'] @override_config(MFA_ENABLED=True) def test_mfa_enabled(self): self.client.login(username='someuser', password='someuser') self.assertTrue(constance.config.MFA_ENABLED) response = self.client.get(reverse('kpi-root')) lines = [line.strip() for line in response.content.decode().split('\n')] self.assertTrue("window.MFAEnabled = true;" in lines) @override_config(MFA_ENABLED=False) def test_mfa_disabled(self): self.client.login(username='someuser', password='someuser') self.assertFalse(constance.config.MFA_ENABLED) response = self.client.get(reverse('kpi-root')) lines = [line.strip() for line in response.content.decode().split('\n')] self.assertTrue("window.MFAEnabled = false;" in lines)
Add test to validate MFA status in JS
Add test to validate MFA status in JS
Python
agpl-3.0
kobotoolbox/kpi,kobotoolbox/kpi,kobotoolbox/kpi,kobotoolbox/kpi,kobotoolbox/kpi
Add test to validate MFA status in JS
# coding: utf-8 import constance from constance.test import override_config from django.urls import reverse from django.test import TestCase class GlobalSettingsTestCase(TestCase): fixtures = ['test_data'] @override_config(MFA_ENABLED=True) def test_mfa_enabled(self): self.client.login(username='someuser', password='someuser') self.assertTrue(constance.config.MFA_ENABLED) response = self.client.get(reverse('kpi-root')) lines = [line.strip() for line in response.content.decode().split('\n')] self.assertTrue("window.MFAEnabled = true;" in lines) @override_config(MFA_ENABLED=False) def test_mfa_disabled(self): self.client.login(username='someuser', password='someuser') self.assertFalse(constance.config.MFA_ENABLED) response = self.client.get(reverse('kpi-root')) lines = [line.strip() for line in response.content.decode().split('\n')] self.assertTrue("window.MFAEnabled = false;" in lines)
<commit_before><commit_msg>Add test to validate MFA status in JS<commit_after>
# coding: utf-8 import constance from constance.test import override_config from django.urls import reverse from django.test import TestCase class GlobalSettingsTestCase(TestCase): fixtures = ['test_data'] @override_config(MFA_ENABLED=True) def test_mfa_enabled(self): self.client.login(username='someuser', password='someuser') self.assertTrue(constance.config.MFA_ENABLED) response = self.client.get(reverse('kpi-root')) lines = [line.strip() for line in response.content.decode().split('\n')] self.assertTrue("window.MFAEnabled = true;" in lines) @override_config(MFA_ENABLED=False) def test_mfa_disabled(self): self.client.login(username='someuser', password='someuser') self.assertFalse(constance.config.MFA_ENABLED) response = self.client.get(reverse('kpi-root')) lines = [line.strip() for line in response.content.decode().split('\n')] self.assertTrue("window.MFAEnabled = false;" in lines)
Add test to validate MFA status in JS# coding: utf-8
import constance
from constance.test import override_config

from django.urls import reverse
from django.test import TestCase


class GlobalSettingsTestCase(TestCase):

    fixtures = ['test_data']

    @override_config(MFA_ENABLED=True)
    def test_mfa_enabled(self):
        self.client.login(username='someuser', password='someuser')
        self.assertTrue(constance.config.MFA_ENABLED)
        response = self.client.get(reverse('kpi-root'))
        lines = [line.strip() for line in response.content.decode().split('\n')]
        self.assertTrue("window.MFAEnabled = true;" in lines)

    @override_config(MFA_ENABLED=False)
    def test_mfa_disabled(self):
        self.client.login(username='someuser', password='someuser')
        self.assertFalse(constance.config.MFA_ENABLED)
        response = self.client.get(reverse('kpi-root'))
        lines = [line.strip() for line in response.content.decode().split('\n')]
        self.assertTrue("window.MFAEnabled = false;" in lines)
<commit_before><commit_msg>Add test to validate MFA status in JS<commit_after># coding: utf-8
import constance
from constance.test import override_config

from django.urls import reverse
from django.test import TestCase


class GlobalSettingsTestCase(TestCase):

    fixtures = ['test_data']

    @override_config(MFA_ENABLED=True)
    def test_mfa_enabled(self):
        self.client.login(username='someuser', password='someuser')
        self.assertTrue(constance.config.MFA_ENABLED)
        response = self.client.get(reverse('kpi-root'))
        lines = [line.strip() for line in response.content.decode().split('\n')]
        self.assertTrue("window.MFAEnabled = true;" in lines)

    @override_config(MFA_ENABLED=False)
    def test_mfa_disabled(self):
        self.client.login(username='someuser', password='someuser')
        self.assertFalse(constance.config.MFA_ENABLED)
        response = self.client.get(reverse('kpi-root'))
        lines = [line.strip() for line in response.content.decode().split('\n')]
        self.assertTrue("window.MFAEnabled = false;" in lines)
8214d8b542e2da890bc6b34372de2016e98e7767
tests/services/shop/order/test_ordered_articles_service.py
tests/services/shop/order/test_ordered_articles_service.py
""" :Copyright: 2006-2017 Jochen Kupperschmidt :License: Modified BSD, see LICENSE for details. """ from itertools import count from byceps.services.shop.order import ordered_articles_service from byceps.services.shop.order.models.order import PaymentState from testfixtures.party import create_party from testfixtures.shop_article import create_article from testfixtures.shop_order import create_order, create_order_item from testfixtures.user import create_user_with_detail from tests.base import AbstractAppTestCase class OrderedArticlesServiceTestCase(AbstractAppTestCase): def setUp(self): super().setUp() self.user = self.create_user(1) self.article = self.create_article() def test_count_ordered_articles(self): expected = { PaymentState.open: 12, PaymentState.canceled: 7, PaymentState.paid: 3, } order_number_sequence = count(1) for article_quantity, payment_state in [ (4, PaymentState.open), (1, PaymentState.open), (5, PaymentState.canceled), (3, PaymentState.paid), (2, PaymentState.canceled), (7, PaymentState.open), ]: order_number = 'XY-01-B{:05d}'.format(next(order_number_sequence)) self.create_order(order_number, article_quantity, payment_state) totals = ordered_articles_service.count_ordered_articles(self.article) self.assertDictEqual(totals, expected) # -------------------------------------------------------------------- # # helpers def create_party(self, party_id, title): party = create_party(id=party_id, title=title, brand=self.brand) self.db.session.add(party) self.db.session.commit() return party def create_user(self, number): user = create_user_with_detail(number) self.db.session.add(user) self.db.session.commit() return user def create_article(self): article = create_article(party=self.party) self.db.session.add(article) self.db.session.commit() return article def create_order(self, order_number, article_quantity, payment_state): order = create_order(self.party.id, self.user, order_number=order_number) order.payment_state = payment_state self.db.session.add(order) order_item = create_order_item(order, self.article, article_quantity) self.db.session.add(order_item) self.db.session.commit() return order.to_tuple()
Add test for service function that counts ordered articles by payment state
Add test for service function that counts ordered articles by payment state
Python
bsd-3-clause
m-ober/byceps,homeworkprod/byceps,m-ober/byceps,m-ober/byceps,homeworkprod/byceps,homeworkprod/byceps
Add test for service function that counts ordered articles by payment state
""" :Copyright: 2006-2017 Jochen Kupperschmidt :License: Modified BSD, see LICENSE for details. """ from itertools import count from byceps.services.shop.order import ordered_articles_service from byceps.services.shop.order.models.order import PaymentState from testfixtures.party import create_party from testfixtures.shop_article import create_article from testfixtures.shop_order import create_order, create_order_item from testfixtures.user import create_user_with_detail from tests.base import AbstractAppTestCase class OrderedArticlesServiceTestCase(AbstractAppTestCase): def setUp(self): super().setUp() self.user = self.create_user(1) self.article = self.create_article() def test_count_ordered_articles(self): expected = { PaymentState.open: 12, PaymentState.canceled: 7, PaymentState.paid: 3, } order_number_sequence = count(1) for article_quantity, payment_state in [ (4, PaymentState.open), (1, PaymentState.open), (5, PaymentState.canceled), (3, PaymentState.paid), (2, PaymentState.canceled), (7, PaymentState.open), ]: order_number = 'XY-01-B{:05d}'.format(next(order_number_sequence)) self.create_order(order_number, article_quantity, payment_state) totals = ordered_articles_service.count_ordered_articles(self.article) self.assertDictEqual(totals, expected) # -------------------------------------------------------------------- # # helpers def create_party(self, party_id, title): party = create_party(id=party_id, title=title, brand=self.brand) self.db.session.add(party) self.db.session.commit() return party def create_user(self, number): user = create_user_with_detail(number) self.db.session.add(user) self.db.session.commit() return user def create_article(self): article = create_article(party=self.party) self.db.session.add(article) self.db.session.commit() return article def create_order(self, order_number, article_quantity, payment_state): order = create_order(self.party.id, self.user, order_number=order_number) order.payment_state = payment_state self.db.session.add(order) order_item = create_order_item(order, self.article, article_quantity) self.db.session.add(order_item) self.db.session.commit() return order.to_tuple()
<commit_before><commit_msg>Add test for service function that counts ordered articles by payment state<commit_after>
""" :Copyright: 2006-2017 Jochen Kupperschmidt :License: Modified BSD, see LICENSE for details. """ from itertools import count from byceps.services.shop.order import ordered_articles_service from byceps.services.shop.order.models.order import PaymentState from testfixtures.party import create_party from testfixtures.shop_article import create_article from testfixtures.shop_order import create_order, create_order_item from testfixtures.user import create_user_with_detail from tests.base import AbstractAppTestCase class OrderedArticlesServiceTestCase(AbstractAppTestCase): def setUp(self): super().setUp() self.user = self.create_user(1) self.article = self.create_article() def test_count_ordered_articles(self): expected = { PaymentState.open: 12, PaymentState.canceled: 7, PaymentState.paid: 3, } order_number_sequence = count(1) for article_quantity, payment_state in [ (4, PaymentState.open), (1, PaymentState.open), (5, PaymentState.canceled), (3, PaymentState.paid), (2, PaymentState.canceled), (7, PaymentState.open), ]: order_number = 'XY-01-B{:05d}'.format(next(order_number_sequence)) self.create_order(order_number, article_quantity, payment_state) totals = ordered_articles_service.count_ordered_articles(self.article) self.assertDictEqual(totals, expected) # -------------------------------------------------------------------- # # helpers def create_party(self, party_id, title): party = create_party(id=party_id, title=title, brand=self.brand) self.db.session.add(party) self.db.session.commit() return party def create_user(self, number): user = create_user_with_detail(number) self.db.session.add(user) self.db.session.commit() return user def create_article(self): article = create_article(party=self.party) self.db.session.add(article) self.db.session.commit() return article def create_order(self, order_number, article_quantity, payment_state): order = create_order(self.party.id, self.user, order_number=order_number) order.payment_state = payment_state self.db.session.add(order) order_item = create_order_item(order, self.article, article_quantity) self.db.session.add(order_item) self.db.session.commit() return order.to_tuple()
Add test for service function that counts ordered articles by payment state""" :Copyright: 2006-2017 Jochen Kupperschmidt :License: Modified BSD, see LICENSE for details. """ from itertools import count from byceps.services.shop.order import ordered_articles_service from byceps.services.shop.order.models.order import PaymentState from testfixtures.party import create_party from testfixtures.shop_article import create_article from testfixtures.shop_order import create_order, create_order_item from testfixtures.user import create_user_with_detail from tests.base import AbstractAppTestCase class OrderedArticlesServiceTestCase(AbstractAppTestCase): def setUp(self): super().setUp() self.user = self.create_user(1) self.article = self.create_article() def test_count_ordered_articles(self): expected = { PaymentState.open: 12, PaymentState.canceled: 7, PaymentState.paid: 3, } order_number_sequence = count(1) for article_quantity, payment_state in [ (4, PaymentState.open), (1, PaymentState.open), (5, PaymentState.canceled), (3, PaymentState.paid), (2, PaymentState.canceled), (7, PaymentState.open), ]: order_number = 'XY-01-B{:05d}'.format(next(order_number_sequence)) self.create_order(order_number, article_quantity, payment_state) totals = ordered_articles_service.count_ordered_articles(self.article) self.assertDictEqual(totals, expected) # -------------------------------------------------------------------- # # helpers def create_party(self, party_id, title): party = create_party(id=party_id, title=title, brand=self.brand) self.db.session.add(party) self.db.session.commit() return party def create_user(self, number): user = create_user_with_detail(number) self.db.session.add(user) self.db.session.commit() return user def create_article(self): article = create_article(party=self.party) self.db.session.add(article) self.db.session.commit() return article def create_order(self, order_number, article_quantity, payment_state): order = create_order(self.party.id, self.user, order_number=order_number) order.payment_state = payment_state self.db.session.add(order) order_item = create_order_item(order, self.article, article_quantity) self.db.session.add(order_item) self.db.session.commit() return order.to_tuple()
<commit_before><commit_msg>Add test for service function that counts ordered articles by payment state<commit_after>""" :Copyright: 2006-2017 Jochen Kupperschmidt :License: Modified BSD, see LICENSE for details. """ from itertools import count from byceps.services.shop.order import ordered_articles_service from byceps.services.shop.order.models.order import PaymentState from testfixtures.party import create_party from testfixtures.shop_article import create_article from testfixtures.shop_order import create_order, create_order_item from testfixtures.user import create_user_with_detail from tests.base import AbstractAppTestCase class OrderedArticlesServiceTestCase(AbstractAppTestCase): def setUp(self): super().setUp() self.user = self.create_user(1) self.article = self.create_article() def test_count_ordered_articles(self): expected = { PaymentState.open: 12, PaymentState.canceled: 7, PaymentState.paid: 3, } order_number_sequence = count(1) for article_quantity, payment_state in [ (4, PaymentState.open), (1, PaymentState.open), (5, PaymentState.canceled), (3, PaymentState.paid), (2, PaymentState.canceled), (7, PaymentState.open), ]: order_number = 'XY-01-B{:05d}'.format(next(order_number_sequence)) self.create_order(order_number, article_quantity, payment_state) totals = ordered_articles_service.count_ordered_articles(self.article) self.assertDictEqual(totals, expected) # -------------------------------------------------------------------- # # helpers def create_party(self, party_id, title): party = create_party(id=party_id, title=title, brand=self.brand) self.db.session.add(party) self.db.session.commit() return party def create_user(self, number): user = create_user_with_detail(number) self.db.session.add(user) self.db.session.commit() return user def create_article(self): article = create_article(party=self.party) self.db.session.add(article) self.db.session.commit() return article def create_order(self, order_number, article_quantity, payment_state): order = create_order(self.party.id, self.user, order_number=order_number) order.payment_state = payment_state self.db.session.add(order) order_item = create_order_item(order, self.article, article_quantity) self.db.session.add(order_item) self.db.session.commit() return order.to_tuple()
c5faa45462ff217f4ce2ea4004de9b8c29ed3c87
bin/analyze.py
bin/analyze.py
#!/usr/bin/env python3 import math import numpy as np import sqlite3 def read(path='tests/fixtures/google.sqlite3'): connection = sqlite3.connect(path) cursor = connection.cursor() cursor.execute('SELECT time FROM arrivals ORDER BY time') data = np.diff(np.array([row[0] for row in cursor])) connection.close() return data data = read() mean, variance = np.mean(data), np.var(data) print('Samples: %d' % len(data)) print('Mean: %.4f ± %.4f' % (mean, math.sqrt(variance)))
Add an auxiliary Python script
Add an auxiliary Python script
Python
mit
learning-on-chip/google-cluster-prediction
Add an auxiliary Python script
#!/usr/bin/env python3 import math import numpy as np import sqlite3 def read(path='tests/fixtures/google.sqlite3'): connection = sqlite3.connect(path) cursor = connection.cursor() cursor.execute('SELECT time FROM arrivals ORDER BY time') data = np.diff(np.array([row[0] for row in cursor])) connection.close() return data data = read() mean, variance = np.mean(data), np.var(data) print('Samples: %d' % len(data)) print('Mean: %.4f ± %.4f' % (mean, math.sqrt(variance)))
<commit_before><commit_msg>Add an auxiliary Python script<commit_after>
#!/usr/bin/env python3 import math import numpy as np import sqlite3 def read(path='tests/fixtures/google.sqlite3'): connection = sqlite3.connect(path) cursor = connection.cursor() cursor.execute('SELECT time FROM arrivals ORDER BY time') data = np.diff(np.array([row[0] for row in cursor])) connection.close() return data data = read() mean, variance = np.mean(data), np.var(data) print('Samples: %d' % len(data)) print('Mean: %.4f ± %.4f' % (mean, math.sqrt(variance)))
Add an auxiliary Python script#!/usr/bin/env python3 import math import numpy as np import sqlite3 def read(path='tests/fixtures/google.sqlite3'): connection = sqlite3.connect(path) cursor = connection.cursor() cursor.execute('SELECT time FROM arrivals ORDER BY time') data = np.diff(np.array([row[0] for row in cursor])) connection.close() return data data = read() mean, variance = np.mean(data), np.var(data) print('Samples: %d' % len(data)) print('Mean: %.4f ± %.4f' % (mean, math.sqrt(variance)))
<commit_before><commit_msg>Add an auxiliary Python script<commit_after>#!/usr/bin/env python3 import math import numpy as np import sqlite3 def read(path='tests/fixtures/google.sqlite3'): connection = sqlite3.connect(path) cursor = connection.cursor() cursor.execute('SELECT time FROM arrivals ORDER BY time') data = np.diff(np.array([row[0] for row in cursor])) connection.close() return data data = read() mean, variance = np.mean(data), np.var(data) print('Samples: %d' % len(data)) print('Mean: %.4f ± %.4f' % (mean, math.sqrt(variance)))
78fc07d59820db3f9992d0285bf18b27c2078ab7
choosealicense/test/test_info.py
choosealicense/test/test_info.py
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Tests for the `license info` function """ from click.testing import CliRunner from choosealicense.main import info def test_show_license_info(): all_the_licenses = { 'agpl-3.0': ('GNU Affero General Public License v3.0', 11), 'apache-2.0': ('Apache License 2.0', 10), 'artistic-2.0': ('Artistic License 2.0', 9), 'bsd-2-clause': ('BSD 2-clause "Simplified" License', 7), 'bsd-3-clause': ('BSD 3-clause "New" or "Revised" License', 8), 'cc0-1.0': ('Creative Commons Zero v1.0 Universal', 5), 'epl-1.0': ('Eclipse Public License 1.0', 9), 'gpl-2.0': ('GNU General Public License v2.0', 10), 'gpl-3.0': ('GNU General Public License v3.0', 10), 'isc': ('ISC license', 7), 'lgpl-2.1': ('GNU Lesser General Public License v2.1', 10), 'lgpl-3.0': ('GNU Lesser General Public License v3.0', 10), 'mit': ('MIT License', 7), 'mpl-2.0': ('Mozilla Public License 2.0', 10), 'unlicense': ('The Unlicense', 6) } runner = CliRunner() for short_name, fullname_and_rules_number in all_the_licenses.items(): result = runner.invoke(info, [short_name]) output, exit_code = result.output, result.exit_code rules = output.split('Forbidden\n')[1].split('\n') flat_rules = sum([item.split() for item in rules], []) fullname, rules_number = fullname_and_rules_number assert exit_code == 0 assert fullname in output assert '{0:<25}{1:<25}{2}'.format('Required', 'Permitted', 'Forbidden') in output assert len(flat_rules) == rules_number
Add test for `license info` function
Add test for `license info` function
Python
mit
lord63/choosealicense-cli
Add test for `license info` function
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Tests for the `license info` function """ from click.testing import CliRunner from choosealicense.main import info def test_show_license_info(): all_the_licenses = { 'agpl-3.0': ('GNU Affero General Public License v3.0', 11), 'apache-2.0': ('Apache License 2.0', 10), 'artistic-2.0': ('Artistic License 2.0', 9), 'bsd-2-clause': ('BSD 2-clause "Simplified" License', 7), 'bsd-3-clause': ('BSD 3-clause "New" or "Revised" License', 8), 'cc0-1.0': ('Creative Commons Zero v1.0 Universal', 5), 'epl-1.0': ('Eclipse Public License 1.0', 9), 'gpl-2.0': ('GNU General Public License v2.0', 10), 'gpl-3.0': ('GNU General Public License v3.0', 10), 'isc': ('ISC license', 7), 'lgpl-2.1': ('GNU Lesser General Public License v2.1', 10), 'lgpl-3.0': ('GNU Lesser General Public License v3.0', 10), 'mit': ('MIT License', 7), 'mpl-2.0': ('Mozilla Public License 2.0', 10), 'unlicense': ('The Unlicense', 6) } runner = CliRunner() for short_name, fullname_and_rules_number in all_the_licenses.items(): result = runner.invoke(info, [short_name]) output, exit_code = result.output, result.exit_code rules = output.split('Forbidden\n')[1].split('\n') flat_rules = sum([item.split() for item in rules], []) fullname, rules_number = fullname_and_rules_number assert exit_code == 0 assert fullname in output assert '{0:<25}{1:<25}{2}'.format('Required', 'Permitted', 'Forbidden') in output assert len(flat_rules) == rules_number
<commit_before><commit_msg>Add test for `license info` function<commit_after>
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Tests for the `license info` function """ from click.testing import CliRunner from choosealicense.main import info def test_show_license_info(): all_the_licenses = { 'agpl-3.0': ('GNU Affero General Public License v3.0', 11), 'apache-2.0': ('Apache License 2.0', 10), 'artistic-2.0': ('Artistic License 2.0', 9), 'bsd-2-clause': ('BSD 2-clause "Simplified" License', 7), 'bsd-3-clause': ('BSD 3-clause "New" or "Revised" License', 8), 'cc0-1.0': ('Creative Commons Zero v1.0 Universal', 5), 'epl-1.0': ('Eclipse Public License 1.0', 9), 'gpl-2.0': ('GNU General Public License v2.0', 10), 'gpl-3.0': ('GNU General Public License v3.0', 10), 'isc': ('ISC license', 7), 'lgpl-2.1': ('GNU Lesser General Public License v2.1', 10), 'lgpl-3.0': ('GNU Lesser General Public License v3.0', 10), 'mit': ('MIT License', 7), 'mpl-2.0': ('Mozilla Public License 2.0', 10), 'unlicense': ('The Unlicense', 6) } runner = CliRunner() for short_name, fullname_and_rules_number in all_the_licenses.items(): result = runner.invoke(info, [short_name]) output, exit_code = result.output, result.exit_code rules = output.split('Forbidden\n')[1].split('\n') flat_rules = sum([item.split() for item in rules], []) fullname, rules_number = fullname_and_rules_number assert exit_code == 0 assert fullname in output assert '{0:<25}{1:<25}{2}'.format('Required', 'Permitted', 'Forbidden') in output assert len(flat_rules) == rules_number
Add test for `license info` function#!/usr/bin/env python # -*- coding: utf-8 -*- """ Tests for the `license info` function """ from click.testing import CliRunner from choosealicense.main import info def test_show_license_info(): all_the_licenses = { 'agpl-3.0': ('GNU Affero General Public License v3.0', 11), 'apache-2.0': ('Apache License 2.0', 10), 'artistic-2.0': ('Artistic License 2.0', 9), 'bsd-2-clause': ('BSD 2-clause "Simplified" License', 7), 'bsd-3-clause': ('BSD 3-clause "New" or "Revised" License', 8), 'cc0-1.0': ('Creative Commons Zero v1.0 Universal', 5), 'epl-1.0': ('Eclipse Public License 1.0', 9), 'gpl-2.0': ('GNU General Public License v2.0', 10), 'gpl-3.0': ('GNU General Public License v3.0', 10), 'isc': ('ISC license', 7), 'lgpl-2.1': ('GNU Lesser General Public License v2.1', 10), 'lgpl-3.0': ('GNU Lesser General Public License v3.0', 10), 'mit': ('MIT License', 7), 'mpl-2.0': ('Mozilla Public License 2.0', 10), 'unlicense': ('The Unlicense', 6) } runner = CliRunner() for short_name, fullname_and_rules_number in all_the_licenses.items(): result = runner.invoke(info, [short_name]) output, exit_code = result.output, result.exit_code rules = output.split('Forbidden\n')[1].split('\n') flat_rules = sum([item.split() for item in rules], []) fullname, rules_number = fullname_and_rules_number assert exit_code == 0 assert fullname in output assert '{0:<25}{1:<25}{2}'.format('Required', 'Permitted', 'Forbidden') in output assert len(flat_rules) == rules_number
<commit_before><commit_msg>Add test for `license info` function<commit_after>#!/usr/bin/env python # -*- coding: utf-8 -*- """ Tests for the `license info` function """ from click.testing import CliRunner from choosealicense.main import info def test_show_license_info(): all_the_licenses = { 'agpl-3.0': ('GNU Affero General Public License v3.0', 11), 'apache-2.0': ('Apache License 2.0', 10), 'artistic-2.0': ('Artistic License 2.0', 9), 'bsd-2-clause': ('BSD 2-clause "Simplified" License', 7), 'bsd-3-clause': ('BSD 3-clause "New" or "Revised" License', 8), 'cc0-1.0': ('Creative Commons Zero v1.0 Universal', 5), 'epl-1.0': ('Eclipse Public License 1.0', 9), 'gpl-2.0': ('GNU General Public License v2.0', 10), 'gpl-3.0': ('GNU General Public License v3.0', 10), 'isc': ('ISC license', 7), 'lgpl-2.1': ('GNU Lesser General Public License v2.1', 10), 'lgpl-3.0': ('GNU Lesser General Public License v3.0', 10), 'mit': ('MIT License', 7), 'mpl-2.0': ('Mozilla Public License 2.0', 10), 'unlicense': ('The Unlicense', 6) } runner = CliRunner() for short_name, fullname_and_rules_number in all_the_licenses.items(): result = runner.invoke(info, [short_name]) output, exit_code = result.output, result.exit_code rules = output.split('Forbidden\n')[1].split('\n') flat_rules = sum([item.split() for item in rules], []) fullname, rules_number = fullname_and_rules_number assert exit_code == 0 assert fullname in output assert '{0:<25}{1:<25}{2}'.format('Required', 'Permitted', 'Forbidden') in output assert len(flat_rules) == rules_number
6ba394fba3786ecac9eec8896b8fa40e3ddc0612
bmi_ilamb/tests/test_package_directories.py
bmi_ilamb/tests/test_package_directories.py
"""Tests directories set in the package definition file.""" import os from nose.tools import assert_true from .. import package_dir, data_dir def test_package_dir_is_set(): assert(package_dir is not None) def test_data_dir_is_set(): assert(data_dir is not None) def test_package_dir_exists(): assert_true(os.path.isdir(package_dir)) def test_data_dir_exists(): assert_true(os.path.isdir(data_dir))
Add unit tests for package directories
Add unit tests for package directories
Python
mit
permamodel/bmi-ilamb
Add unit tests for package directories
"""Tests directories set in the package definition file.""" import os from nose.tools import assert_true from .. import package_dir, data_dir def test_package_dir_is_set(): assert(package_dir is not None) def test_data_dir_is_set(): assert(data_dir is not None) def test_package_dir_exists(): assert_true(os.path.isdir(package_dir)) def test_data_dir_exists(): assert_true(os.path.isdir(data_dir))
<commit_before><commit_msg>Add unit tests for package directories<commit_after>
"""Tests directories set in the package definition file.""" import os from nose.tools import assert_true from .. import package_dir, data_dir def test_package_dir_is_set(): assert(package_dir is not None) def test_data_dir_is_set(): assert(data_dir is not None) def test_package_dir_exists(): assert_true(os.path.isdir(package_dir)) def test_data_dir_exists(): assert_true(os.path.isdir(data_dir))
Add unit tests for package directories"""Tests directories set in the package definition file.""" import os from nose.tools import assert_true from .. import package_dir, data_dir def test_package_dir_is_set(): assert(package_dir is not None) def test_data_dir_is_set(): assert(data_dir is not None) def test_package_dir_exists(): assert_true(os.path.isdir(package_dir)) def test_data_dir_exists(): assert_true(os.path.isdir(data_dir))
<commit_before><commit_msg>Add unit tests for package directories<commit_after>"""Tests directories set in the package definition file.""" import os from nose.tools import assert_true from .. import package_dir, data_dir def test_package_dir_is_set(): assert(package_dir is not None) def test_data_dir_is_set(): assert(data_dir is not None) def test_package_dir_exists(): assert_true(os.path.isdir(package_dir)) def test_data_dir_exists(): assert_true(os.path.isdir(data_dir))
8268b050ae98180a55b9c2e5285de1af5b8ca3e5
pages/tests.py
pages/tests.py
from django.test import TestCase from pages.models import * from django.test.client import Client class PagesTestCase(TestCase): fixtures = ['tests.json'] def test_01_add_page(self): """ Test that the add admin page could be displayed correctly """ c = Client() c.login(username= 'batiste', password='b') response = c.get('/admin/pages/page/add/') assert(response.status_code == 200)
from django.test import TestCase from pages.models import * from django.test.client import Client class PagesTestCase(TestCase): fixtures = ['tests.json'] def test_01_add_page(self): """ Test that the add admin page could be displayed via the admin """ c = Client() c.login(username= 'batiste', password='b') response = c.get('/admin/pages/page/add/') assert(response.status_code == 200) def test_02_create_page(self): """ Test that a page can be created via the admin """ c = Client() c.login(username= 'batiste', password='b') page_data = {'title':'test page', 'slug':'test-page', 'language':'en', 'sites':[1], 'status':1} response = c.post('/admin/pages/page/add/', page_data) self.assertRedirects(response, '/admin/pages/page/')
Add a create page test
Add a create page test
Python
bsd-3-clause
PiRSquared17/django-page-cms,google-code-export/django-page-cms,google-code-export/django-page-cms,odyaka341/django-page-cms,PiRSquared17/django-page-cms,odyaka341/django-page-cms,odyaka341/django-page-cms,google-code-export/django-page-cms,Alwnikrotikz/django-page-cms,odyaka341/django-page-cms,PiRSquared17/django-page-cms,Alwnikrotikz/django-page-cms,pombreda/django-page-cms,Alwnikrotikz/django-page-cms,PiRSquared17/django-page-cms,google-code-export/django-page-cms,pombreda/django-page-cms,Alwnikrotikz/django-page-cms,pombreda/django-page-cms,pombreda/django-page-cms
from django.test import TestCase from pages.models import * from django.test.client import Client class PagesTestCase(TestCase): fixtures = ['tests.json'] def test_01_add_page(self): """ Test that the add admin page could be displayed correctly """ c = Client() c.login(username= 'batiste', password='b') response = c.get('/admin/pages/page/add/') assert(response.status_code == 200) Add a create page test
from django.test import TestCase from pages.models import * from django.test.client import Client class PagesTestCase(TestCase): fixtures = ['tests.json'] def test_01_add_page(self): """ Test that the add admin page could be displayed via the admin """ c = Client() c.login(username= 'batiste', password='b') response = c.get('/admin/pages/page/add/') assert(response.status_code == 200) def test_02_create_page(self): """ Test that a page can be created via the admin """ c = Client() c.login(username= 'batiste', password='b') page_data = {'title':'test page', 'slug':'test-page', 'language':'en', 'sites':[1], 'status':1} response = c.post('/admin/pages/page/add/', page_data) self.assertRedirects(response, '/admin/pages/page/')
<commit_before>from django.test import TestCase from pages.models import * from django.test.client import Client class PagesTestCase(TestCase): fixtures = ['tests.json'] def test_01_add_page(self): """ Test that the add admin page could be displayed correctly """ c = Client() c.login(username= 'batiste', password='b') response = c.get('/admin/pages/page/add/') assert(response.status_code == 200) <commit_msg>Add a create page test<commit_after>
from django.test import TestCase from pages.models import * from django.test.client import Client class PagesTestCase(TestCase): fixtures = ['tests.json'] def test_01_add_page(self): """ Test that the add admin page could be displayed via the admin """ c = Client() c.login(username= 'batiste', password='b') response = c.get('/admin/pages/page/add/') assert(response.status_code == 200) def test_02_create_page(self): """ Test that a page can be created via the admin """ c = Client() c.login(username= 'batiste', password='b') page_data = {'title':'test page', 'slug':'test-page', 'language':'en', 'sites':[1], 'status':1} response = c.post('/admin/pages/page/add/', page_data) self.assertRedirects(response, '/admin/pages/page/')
from django.test import TestCase from pages.models import * from django.test.client import Client class PagesTestCase(TestCase): fixtures = ['tests.json'] def test_01_add_page(self): """ Test that the add admin page could be displayed correctly """ c = Client() c.login(username= 'batiste', password='b') response = c.get('/admin/pages/page/add/') assert(response.status_code == 200) Add a create page testfrom django.test import TestCase from pages.models import * from django.test.client import Client class PagesTestCase(TestCase): fixtures = ['tests.json'] def test_01_add_page(self): """ Test that the add admin page could be displayed via the admin """ c = Client() c.login(username= 'batiste', password='b') response = c.get('/admin/pages/page/add/') assert(response.status_code == 200) def test_02_create_page(self): """ Test that a page can be created via the admin """ c = Client() c.login(username= 'batiste', password='b') page_data = {'title':'test page', 'slug':'test-page', 'language':'en', 'sites':[1], 'status':1} response = c.post('/admin/pages/page/add/', page_data) self.assertRedirects(response, '/admin/pages/page/')
<commit_before>from django.test import TestCase from pages.models import * from django.test.client import Client class PagesTestCase(TestCase): fixtures = ['tests.json'] def test_01_add_page(self): """ Test that the add admin page could be displayed correctly """ c = Client() c.login(username= 'batiste', password='b') response = c.get('/admin/pages/page/add/') assert(response.status_code == 200) <commit_msg>Add a create page test<commit_after>from django.test import TestCase from pages.models import * from django.test.client import Client class PagesTestCase(TestCase): fixtures = ['tests.json'] def test_01_add_page(self): """ Test that the add admin page could be displayed via the admin """ c = Client() c.login(username= 'batiste', password='b') response = c.get('/admin/pages/page/add/') assert(response.status_code == 200) def test_02_create_page(self): """ Test that a page can be created via the admin """ c = Client() c.login(username= 'batiste', password='b') page_data = {'title':'test page', 'slug':'test-page', 'language':'en', 'sites':[1], 'status':1} response = c.post('/admin/pages/page/add/', page_data) self.assertRedirects(response, '/admin/pages/page/')
eb1cd979ed232690f819def5808a91431132079c
IPython/utils/py3compat.py
IPython/utils/py3compat.py
import sys def no_code(x, encoding=None): return x def decode(s, encoding=None): encoding = encoding or sys.stdin.encoding or sys.getdefaultencoding() return s.decode(encoding, "replace") def encode(u, encoding=None): encoding = encoding or sys.stdin.encoding or sys.getdefaultencoding() return u.encode(encoding, "replace") if sys.version_info[0] >= 3: PY3 = True input = input builtin_mod_name = "builtins" str_to_unicode = no_code unicode_to_str = no_code str_to_bytes = encode bytes_to_str = decode else: PY3 = False input = raw_input builtin_mod_name = "__builtin__" str_to_unicode = decode unicode_to_str = encode str_to_bytes = no_code bytes_to_str = no_code def execfile(fname, glob, loc): exec compile(open(fname).read(), fname, 'exec') in glob, loc
Add module for Python 3 compatibility layer.
Add module for Python 3 compatibility layer.
Python
bsd-3-clause
ipython/ipython,ipython/ipython
Add module for Python 3 compatibility layer.
import sys def no_code(x, encoding=None): return x def decode(s, encoding=None): encoding = encoding or sys.stdin.encoding or sys.getdefaultencoding() return s.decode(encoding, "replace") def encode(u, encoding=None): encoding = encoding or sys.stdin.encoding or sys.getdefaultencoding() return u.encode(encoding, "replace") if sys.version_info[0] >= 3: PY3 = True input = input builtin_mod_name = "builtins" str_to_unicode = no_code unicode_to_str = no_code str_to_bytes = encode bytes_to_str = decode else: PY3 = False input = raw_input builtin_mod_name = "__builtin__" str_to_unicode = decode unicode_to_str = encode str_to_bytes = no_code bytes_to_str = no_code def execfile(fname, glob, loc): exec compile(open(fname).read(), fname, 'exec') in glob, loc
<commit_before><commit_msg>Add module for Python 3 compatibility layer.<commit_after>
import sys def no_code(x, encoding=None): return x def decode(s, encoding=None): encoding = encoding or sys.stdin.encoding or sys.getdefaultencoding() return s.decode(encoding, "replace") def encode(u, encoding=None): encoding = encoding or sys.stdin.encoding or sys.getdefaultencoding() return u.encode(encoding, "replace") if sys.version_info[0] >= 3: PY3 = True input = input builtin_mod_name = "builtins" str_to_unicode = no_code unicode_to_str = no_code str_to_bytes = encode bytes_to_str = decode else: PY3 = False input = raw_input builtin_mod_name = "__builtin__" str_to_unicode = decode unicode_to_str = encode str_to_bytes = no_code bytes_to_str = no_code def execfile(fname, glob, loc): exec compile(open(fname).read(), fname, 'exec') in glob, loc
Add module for Python 3 compatibility layer.import sys def no_code(x, encoding=None): return x def decode(s, encoding=None): encoding = encoding or sys.stdin.encoding or sys.getdefaultencoding() return s.decode(encoding, "replace") def encode(u, encoding=None): encoding = encoding or sys.stdin.encoding or sys.getdefaultencoding() return u.encode(encoding, "replace") if sys.version_info[0] >= 3: PY3 = True input = input builtin_mod_name = "builtins" str_to_unicode = no_code unicode_to_str = no_code str_to_bytes = encode bytes_to_str = decode else: PY3 = False input = raw_input builtin_mod_name = "__builtin__" str_to_unicode = decode unicode_to_str = encode str_to_bytes = no_code bytes_to_str = no_code def execfile(fname, glob, loc): exec compile(open(fname).read(), fname, 'exec') in glob, loc
<commit_before><commit_msg>Add module for Python 3 compatibility layer.<commit_after>import sys def no_code(x, encoding=None): return x def decode(s, encoding=None): encoding = encoding or sys.stdin.encoding or sys.getdefaultencoding() return s.decode(encoding, "replace") def encode(u, encoding=None): encoding = encoding or sys.stdin.encoding or sys.getdefaultencoding() return u.encode(encoding, "replace") if sys.version_info[0] >= 3: PY3 = True input = input builtin_mod_name = "builtins" str_to_unicode = no_code unicode_to_str = no_code str_to_bytes = encode bytes_to_str = decode else: PY3 = False input = raw_input builtin_mod_name = "__builtin__" str_to_unicode = decode unicode_to_str = encode str_to_bytes = no_code bytes_to_str = no_code def execfile(fname, glob, loc): exec compile(open(fname).read(), fname, 'exec') in glob, loc
720e398ac6d21c2e394dda08d47d5110b3a38c40
src/config/settings/secrets_sample.py
src/config/settings/secrets_sample.py
# SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '' # Database # https://docs.djangoproject.com/en/1.7/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': '', 'HOST': '127.0.0.1', 'PORT': 5432, 'USER': '', 'PASSWORD': '' } }
Add sample file with secrets (db credentials and secret_key).
Add sample file with secrets (db credentials and secret_key).
Python
mit
Alexx-G/django-project-template,Alexx-G/django-project-template
Add sample file with secrets (db credentials and secret_key).
# SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '' # Database # https://docs.djangoproject.com/en/1.7/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': '', 'HOST': '127.0.0.1', 'PORT': 5432, 'USER': '', 'PASSWORD': '' } }
<commit_before><commit_msg>Add sample file with secrets (db credentials and secret_key).<commit_after>
# SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '' # Database # https://docs.djangoproject.com/en/1.7/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': '', 'HOST': '127.0.0.1', 'PORT': 5432, 'USER': '', 'PASSWORD': '' } }
Add sample file with secrets (db credentials and secret_key).# SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '' # Database # https://docs.djangoproject.com/en/1.7/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': '', 'HOST': '127.0.0.1', 'PORT': 5432, 'USER': '', 'PASSWORD': '' } }
<commit_before><commit_msg>Add sample file with secrets (db credentials and secret_key).<commit_after># SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '' # Database # https://docs.djangoproject.com/en/1.7/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': '', 'HOST': '127.0.0.1', 'PORT': 5432, 'USER': '', 'PASSWORD': '' } }
8908727e9c288c607572e7352abb6016d2f5a640
kboard/board/migrations/0022_auto_20161111_1044.py
kboard/board/migrations/0022_auto_20161111_1044.py
# -*- coding: utf-8 -*- # Generated by Django 1.10.1 on 2016-11-11 01:44 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('board', '0021_auto_20161110_2019'), ] operations = [ migrations.AlterField( model_name='post', name='file', field=models.FileField(blank=True, null=True, upload_to=''), ), ]
Add migration that adds blank on file field
Add migration that adds blank on file field
Python
mit
kboard/kboard,kboard/kboard,guswnsxodlf/k-board,darjeeling/k-board,kboard/kboard,guswnsxodlf/k-board,cjh5414/kboard,cjh5414/kboard,hyesun03/k-board,cjh5414/kboard,hyesun03/k-board,guswnsxodlf/k-board,hyesun03/k-board
Add migration that adds blank on file field
# -*- coding: utf-8 -*- # Generated by Django 1.10.1 on 2016-11-11 01:44 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('board', '0021_auto_20161110_2019'), ] operations = [ migrations.AlterField( model_name='post', name='file', field=models.FileField(blank=True, null=True, upload_to=''), ), ]
<commit_before><commit_msg>Add migration that adds blank on file field<commit_after>
# -*- coding: utf-8 -*- # Generated by Django 1.10.1 on 2016-11-11 01:44 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('board', '0021_auto_20161110_2019'), ] operations = [ migrations.AlterField( model_name='post', name='file', field=models.FileField(blank=True, null=True, upload_to=''), ), ]
Add migration that adds blank on file field# -*- coding: utf-8 -*- # Generated by Django 1.10.1 on 2016-11-11 01:44 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('board', '0021_auto_20161110_2019'), ] operations = [ migrations.AlterField( model_name='post', name='file', field=models.FileField(blank=True, null=True, upload_to=''), ), ]
<commit_before><commit_msg>Add migration that adds blank on file field<commit_after># -*- coding: utf-8 -*- # Generated by Django 1.10.1 on 2016-11-11 01:44 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('board', '0021_auto_20161110_2019'), ] operations = [ migrations.AlterField( model_name='post', name='file', field=models.FileField(blank=True, null=True, upload_to=''), ), ]
4df8d31e638bfbab5893ded2da9350fdb9a630ba
src/main/python/scripts/train_test.py
src/main/python/scripts/train_test.py
# Copyright 2017 Rice University # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function # Use this (interactive) script to split a data file into training and testing data. import json import random import argparse class message: def __init__(self, s): self.str = s def __enter__(self): print(self.str + '...', end='', flush=True) def __exit__(self, exc_type, exc_val, exc_tb): print('done') def split(clargs): with message('Loading data. This might take a while'), open(clargs.input_file[0]) as f: js = json.load(f) programs = js['programs'] total = len(programs) print('There are {} programs in total'.format(total)) randomize = input('Randomize them (y/n)? ') if randomize == 'y': with message('Randomizing'): random.shuffle(programs) n = int(input('How many programs in training data (rest will be in testing)? ')) training = programs[:n] testing = programs[n:] with message('Dumping training data into DATA-training.json'), open('DATA-training.json', 'w') as f: json.dump({'programs': training}, fp=f, indent=2) with message('Dumping testing data into DATA-testing.json'), open('DATA-testing.json', 'w') as f: json.dump({'programs': testing}, fp=f, indent=2) if __name__ == '__main__': parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('input_file', type=str, nargs=1, help='input JSON file') clargs = parser.parse_args() split(clargs)
Add script to split data into training and testing
Add script to split data into training and testing
Python
apache-2.0
capergroup/bayou,capergroup/bayou,capergroup/bayou
Add script to split data into training and testing
# Copyright 2017 Rice University # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function # Use this (interactive) script to split a data file into training and testing data. import json import random import argparse class message: def __init__(self, s): self.str = s def __enter__(self): print(self.str + '...', end='', flush=True) def __exit__(self, exc_type, exc_val, exc_tb): print('done') def split(clargs): with message('Loading data. This might take a while'), open(clargs.input_file[0]) as f: js = json.load(f) programs = js['programs'] total = len(programs) print('There are {} programs in total'.format(total)) randomize = input('Randomize them (y/n)? ') if randomize == 'y': with message('Randomizing'): random.shuffle(programs) n = int(input('How many programs in training data (rest will be in testing)? ')) training = programs[:n] testing = programs[n:] with message('Dumping training data into DATA-training.json'), open('DATA-training.json', 'w') as f: json.dump({'programs': training}, fp=f, indent=2) with message('Dumping testing data into DATA-testing.json'), open('DATA-testing.json', 'w') as f: json.dump({'programs': testing}, fp=f, indent=2) if __name__ == '__main__': parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('input_file', type=str, nargs=1, help='input JSON file') clargs = parser.parse_args() split(clargs)
<commit_before><commit_msg>Add script to split data into training and testing<commit_after>
# Copyright 2017 Rice University # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function # Use this (interactive) script to split a data file into training and testing data. import json import random import argparse class message: def __init__(self, s): self.str = s def __enter__(self): print(self.str + '...', end='', flush=True) def __exit__(self, exc_type, exc_val, exc_tb): print('done') def split(clargs): with message('Loading data. This might take a while'), open(clargs.input_file[0]) as f: js = json.load(f) programs = js['programs'] total = len(programs) print('There are {} programs in total'.format(total)) randomize = input('Randomize them (y/n)? ') if randomize == 'y': with message('Randomizing'): random.shuffle(programs) n = int(input('How many programs in training data (rest will be in testing)? ')) training = programs[:n] testing = programs[n:] with message('Dumping training data into DATA-training.json'), open('DATA-training.json', 'w') as f: json.dump({'programs': training}, fp=f, indent=2) with message('Dumping testing data into DATA-testing.json'), open('DATA-testing.json', 'w') as f: json.dump({'programs': testing}, fp=f, indent=2) if __name__ == '__main__': parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('input_file', type=str, nargs=1, help='input JSON file') clargs = parser.parse_args() split(clargs)
Add script to split data into training and testing# Copyright 2017 Rice University # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function # Use this (interactive) script to split a data file into training and testing data. import json import random import argparse class message: def __init__(self, s): self.str = s def __enter__(self): print(self.str + '...', end='', flush=True) def __exit__(self, exc_type, exc_val, exc_tb): print('done') def split(clargs): with message('Loading data. This might take a while'), open(clargs.input_file[0]) as f: js = json.load(f) programs = js['programs'] total = len(programs) print('There are {} programs in total'.format(total)) randomize = input('Randomize them (y/n)? ') if randomize == 'y': with message('Randomizing'): random.shuffle(programs) n = int(input('How many programs in training data (rest will be in testing)? ')) training = programs[:n] testing = programs[n:] with message('Dumping training data into DATA-training.json'), open('DATA-training.json', 'w') as f: json.dump({'programs': training}, fp=f, indent=2) with message('Dumping testing data into DATA-testing.json'), open('DATA-testing.json', 'w') as f: json.dump({'programs': testing}, fp=f, indent=2) if __name__ == '__main__': parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('input_file', type=str, nargs=1, help='input JSON file') clargs = parser.parse_args() split(clargs)
<commit_before><commit_msg>Add script to split data into training and testing<commit_after># Copyright 2017 Rice University # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function # Use this (interactive) script to split a data file into training and testing data. import json import random import argparse class message: def __init__(self, s): self.str = s def __enter__(self): print(self.str + '...', end='', flush=True) def __exit__(self, exc_type, exc_val, exc_tb): print('done') def split(clargs): with message('Loading data. This might take a while'), open(clargs.input_file[0]) as f: js = json.load(f) programs = js['programs'] total = len(programs) print('There are {} programs in total'.format(total)) randomize = input('Randomize them (y/n)? ') if randomize == 'y': with message('Randomizing'): random.shuffle(programs) n = int(input('How many programs in training data (rest will be in testing)? ')) training = programs[:n] testing = programs[n:] with message('Dumping training data into DATA-training.json'), open('DATA-training.json', 'w') as f: json.dump({'programs': training}, fp=f, indent=2) with message('Dumping testing data into DATA-testing.json'), open('DATA-testing.json', 'w') as f: json.dump({'programs': testing}, fp=f, indent=2) if __name__ == '__main__': parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('input_file', type=str, nargs=1, help='input JSON file') clargs = parser.parse_args() split(clargs)
aaae5af0570ded8cab9cb330c9c992dac62778c3
vcapp/geometry.py
vcapp/geometry.py
import math def line_magnitude (x1, y1, x2, y2): return math.sqrt(math.pow((x2 - x1), 2) + math.pow((y2 - y1), 2)) #Calc minimum distance from a point and a line segment (i.e. consecutive vertices in a polyline). def distance_point_line (px, py, x1, y1, x2, y2): #http://local.wasp.uwa.edu.au/~pbourke/geometry/pointline/source.vba LineMag = line_magnitude(x1, y1, x2, y2) if LineMag < 0.00000001: dpl = 9999 return dpl u1 = (((px - x1) * (x2 - x1)) + ((py - y1) * (y2 - y1))) u = u1 / (LineMag * LineMag) if (u < 0.00001) or (u > 1): #// closest point does not fall within the line segment, take the shorter distance #// to an endpoint ix = line_magnitude(px, py, x1, y1) iy = line_magnitude(px, py, x2, y2) if ix > iy: dpl = iy else: dpl = ix else: # Intersecting point is on the line, use the formula ix = x1 + u * (x2 - x1) iy = y1 + u * (y2 - y1) dpl = line_magnitude(px, py, ix, iy) return dpl
Move geometric functions into a separate file
Move geometric functions into a separate file
Python
cc0-1.0
edwinsteele/visual-commute,edwinsteele/visual-commute
Move geometric functions into a separate file
import math def line_magnitude (x1, y1, x2, y2): return math.sqrt(math.pow((x2 - x1), 2) + math.pow((y2 - y1), 2)) #Calc minimum distance from a point and a line segment (i.e. consecutive vertices in a polyline). def distance_point_line (px, py, x1, y1, x2, y2): #http://local.wasp.uwa.edu.au/~pbourke/geometry/pointline/source.vba LineMag = line_magnitude(x1, y1, x2, y2) if LineMag < 0.00000001: dpl = 9999 return dpl u1 = (((px - x1) * (x2 - x1)) + ((py - y1) * (y2 - y1))) u = u1 / (LineMag * LineMag) if (u < 0.00001) or (u > 1): #// closest point does not fall within the line segment, take the shorter distance #// to an endpoint ix = line_magnitude(px, py, x1, y1) iy = line_magnitude(px, py, x2, y2) if ix > iy: dpl = iy else: dpl = ix else: # Intersecting point is on the line, use the formula ix = x1 + u * (x2 - x1) iy = y1 + u * (y2 - y1) dpl = line_magnitude(px, py, ix, iy) return dpl
<commit_before><commit_msg>Move geometric functions into a separate file<commit_after>
import math def line_magnitude (x1, y1, x2, y2): return math.sqrt(math.pow((x2 - x1), 2) + math.pow((y2 - y1), 2)) #Calc minimum distance from a point and a line segment (i.e. consecutive vertices in a polyline). def distance_point_line (px, py, x1, y1, x2, y2): #http://local.wasp.uwa.edu.au/~pbourke/geometry/pointline/source.vba LineMag = line_magnitude(x1, y1, x2, y2) if LineMag < 0.00000001: dpl = 9999 return dpl u1 = (((px - x1) * (x2 - x1)) + ((py - y1) * (y2 - y1))) u = u1 / (LineMag * LineMag) if (u < 0.00001) or (u > 1): #// closest point does not fall within the line segment, take the shorter distance #// to an endpoint ix = line_magnitude(px, py, x1, y1) iy = line_magnitude(px, py, x2, y2) if ix > iy: dpl = iy else: dpl = ix else: # Intersecting point is on the line, use the formula ix = x1 + u * (x2 - x1) iy = y1 + u * (y2 - y1) dpl = line_magnitude(px, py, ix, iy) return dpl
Move geometric functions into a separate fileimport math def line_magnitude (x1, y1, x2, y2): return math.sqrt(math.pow((x2 - x1), 2) + math.pow((y2 - y1), 2)) #Calc minimum distance from a point and a line segment (i.e. consecutive vertices in a polyline). def distance_point_line (px, py, x1, y1, x2, y2): #http://local.wasp.uwa.edu.au/~pbourke/geometry/pointline/source.vba LineMag = line_magnitude(x1, y1, x2, y2) if LineMag < 0.00000001: dpl = 9999 return dpl u1 = (((px - x1) * (x2 - x1)) + ((py - y1) * (y2 - y1))) u = u1 / (LineMag * LineMag) if (u < 0.00001) or (u > 1): #// closest point does not fall within the line segment, take the shorter distance #// to an endpoint ix = line_magnitude(px, py, x1, y1) iy = line_magnitude(px, py, x2, y2) if ix > iy: dpl = iy else: dpl = ix else: # Intersecting point is on the line, use the formula ix = x1 + u * (x2 - x1) iy = y1 + u * (y2 - y1) dpl = line_magnitude(px, py, ix, iy) return dpl
<commit_before><commit_msg>Move geometric functions into a separate file<commit_after>import math def line_magnitude (x1, y1, x2, y2): return math.sqrt(math.pow((x2 - x1), 2) + math.pow((y2 - y1), 2)) #Calc minimum distance from a point and a line segment (i.e. consecutive vertices in a polyline). def distance_point_line (px, py, x1, y1, x2, y2): #http://local.wasp.uwa.edu.au/~pbourke/geometry/pointline/source.vba LineMag = line_magnitude(x1, y1, x2, y2) if LineMag < 0.00000001: dpl = 9999 return dpl u1 = (((px - x1) * (x2 - x1)) + ((py - y1) * (y2 - y1))) u = u1 / (LineMag * LineMag) if (u < 0.00001) or (u > 1): #// closest point does not fall within the line segment, take the shorter distance #// to an endpoint ix = line_magnitude(px, py, x1, y1) iy = line_magnitude(px, py, x2, y2) if ix > iy: dpl = iy else: dpl = ix else: # Intersecting point is on the line, use the formula ix = x1 + u * (x2 - x1) iy = y1 + u * (y2 - y1) dpl = line_magnitude(px, py, ix, iy) return dpl
56455e27cd210f98c2cbad924bc7fdf2fc55eff0
girder_worker/docker/__init__.py
girder_worker/docker/__init__.py
from girder_worker import GirderWorkerPluginABC class DockerPlugin(GirderWorkerPluginABC): def __init__(self, app, *args, **kwargs): self.app = app def task_imports(self): return ['girder_worker.docker.tasks']
Add plugin for docker tasks
Add plugin for docker tasks
Python
apache-2.0
girder/girder_worker,girder/girder_worker,girder/girder_worker
Add plugin for docker tasks
from girder_worker import GirderWorkerPluginABC class DockerPlugin(GirderWorkerPluginABC): def __init__(self, app, *args, **kwargs): self.app = app def task_imports(self): return ['girder_worker.docker.tasks']
<commit_before><commit_msg>Add plugin for docker tasks<commit_after>
from girder_worker import GirderWorkerPluginABC class DockerPlugin(GirderWorkerPluginABC): def __init__(self, app, *args, **kwargs): self.app = app def task_imports(self): return ['girder_worker.docker.tasks']
Add plugin for docker tasksfrom girder_worker import GirderWorkerPluginABC class DockerPlugin(GirderWorkerPluginABC): def __init__(self, app, *args, **kwargs): self.app = app def task_imports(self): return ['girder_worker.docker.tasks']
<commit_before><commit_msg>Add plugin for docker tasks<commit_after>from girder_worker import GirderWorkerPluginABC class DockerPlugin(GirderWorkerPluginABC): def __init__(self, app, *args, **kwargs): self.app = app def task_imports(self): return ['girder_worker.docker.tasks']
c4520ff4d7b26eaa18ec7a30a12bba03859081d2
salt/states/status.py
salt/states/status.py
# -*- coding: utf-8 -*- ''' Minion status monitoring Maps to the `status` execution module. ''' __monitor__ = [ 'loadavg', ] def loadavg(name, maximum=None, minimum=None): ''' Return the current load average for the specified minion. Available values for name are `1-min`, `5-min` and `15-min`. `minimum` and `maximum` values should be passed in as strings. ''' # Monitoring state, no changes will be made so no test interface needed ret = {'name': name, 'result': False, 'comment': '', 'changes': {}, 'data': {}} # Data field for monitoring state data = __salt__['status.loadavg']() if name not in data: ret['result'] = False ret['comment'] += 'Requested load average {0} not available '.format( name ) return ret if minimum and maximum and minimum >= maximum: ret['comment'] += 'Min must be less than max' if ret['comment']: return ret cap = float(data[name]) ret['data'] = data[name] if minimum: if cap < float(minimum): ret['comment'] = 'Load avg is below minimum of {0} at {1}'.format( minimum, cap) return ret if maximum: if cap > float(maximum): ret['comment'] = 'Load avg above maximum of {0} at {1}'.format( maximum, cap) return ret ret['comment'] = 'Load avg in acceptable range' ret['result'] = True return ret
Add monitoring state for load average
Add monitoring state for load average
Python
apache-2.0
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
Add monitoring state for load average
# -*- coding: utf-8 -*- ''' Minion status monitoring Maps to the `status` execution module. ''' __monitor__ = [ 'loadavg', ] def loadavg(name, maximum=None, minimum=None): ''' Return the current load average for the specified minion. Available values for name are `1-min`, `5-min` and `15-min`. `minimum` and `maximum` values should be passed in as strings. ''' # Monitoring state, no changes will be made so no test interface needed ret = {'name': name, 'result': False, 'comment': '', 'changes': {}, 'data': {}} # Data field for monitoring state data = __salt__['status.loadavg']() if name not in data: ret['result'] = False ret['comment'] += 'Requested load average {0} not available '.format( name ) return ret if minimum and maximum and minimum >= maximum: ret['comment'] += 'Min must be less than max' if ret['comment']: return ret cap = float(data[name]) ret['data'] = data[name] if minimum: if cap < float(minimum): ret['comment'] = 'Load avg is below minimum of {0} at {1}'.format( minimum, cap) return ret if maximum: if cap > float(maximum): ret['comment'] = 'Load avg above maximum of {0} at {1}'.format( maximum, cap) return ret ret['comment'] = 'Load avg in acceptable range' ret['result'] = True return ret
<commit_before><commit_msg>Add monitoring state for load average<commit_after>
# -*- coding: utf-8 -*-
'''
Minion status monitoring

Maps to the `status` execution module.
'''
__monitor__ = [
    'loadavg',
]


def loadavg(name, maximum=None, minimum=None):
    '''
    Return the current load average for the specified minion.
    Available values for name are `1-min`, `5-min` and `15-min`.
    `minimum` and `maximum` values should be passed in as strings.
    '''
    # Monitoring state, no changes will be made so no test interface needed
    ret = {'name': name,
           'result': False,
           'comment': '',
           'changes': {},
           'data': {}}  # Data field for monitoring state

    data = __salt__['status.loadavg']()
    if name not in data:
        ret['result'] = False
        ret['comment'] += 'Requested load average {0} not available '.format(
            name
        )
        return ret
    # The thresholds are passed in as strings, so compare them numerically
    if minimum and maximum and float(minimum) >= float(maximum):
        ret['comment'] += 'Min must be less than max'
    if ret['comment']:
        return ret
    cap = float(data[name])
    ret['data'] = data[name]
    if minimum:
        if cap < float(minimum):
            ret['comment'] = 'Load avg is below minimum of {0} at {1}'.format(
                minimum, cap)
            return ret
    if maximum:
        if cap > float(maximum):
            ret['comment'] = 'Load avg is above maximum of {0} at {1}'.format(
                maximum, cap)
            return ret
    ret['comment'] = 'Load avg in acceptable range'
    ret['result'] = True
    return ret
Add monitoring state for load average# -*- coding: utf-8 -*-
'''
Minion status monitoring

Maps to the `status` execution module.
'''
__monitor__ = [
    'loadavg',
]


def loadavg(name, maximum=None, minimum=None):
    '''
    Return the current load average for the specified minion.
    Available values for name are `1-min`, `5-min` and `15-min`.
    `minimum` and `maximum` values should be passed in as strings.
    '''
    # Monitoring state, no changes will be made so no test interface needed
    ret = {'name': name,
           'result': False,
           'comment': '',
           'changes': {},
           'data': {}}  # Data field for monitoring state

    data = __salt__['status.loadavg']()
    if name not in data:
        ret['result'] = False
        ret['comment'] += 'Requested load average {0} not available '.format(
            name
        )
        return ret
    # The thresholds are passed in as strings, so compare them numerically
    if minimum and maximum and float(minimum) >= float(maximum):
        ret['comment'] += 'Min must be less than max'
    if ret['comment']:
        return ret
    cap = float(data[name])
    ret['data'] = data[name]
    if minimum:
        if cap < float(minimum):
            ret['comment'] = 'Load avg is below minimum of {0} at {1}'.format(
                minimum, cap)
            return ret
    if maximum:
        if cap > float(maximum):
            ret['comment'] = 'Load avg is above maximum of {0} at {1}'.format(
                maximum, cap)
            return ret
    ret['comment'] = 'Load avg in acceptable range'
    ret['result'] = True
    return ret
<commit_before><commit_msg>Add monitoring state for load average<commit_after># -*- coding: utf-8 -*-
'''
Minion status monitoring

Maps to the `status` execution module.
'''
__monitor__ = [
    'loadavg',
]


def loadavg(name, maximum=None, minimum=None):
    '''
    Return the current load average for the specified minion.
    Available values for name are `1-min`, `5-min` and `15-min`.
    `minimum` and `maximum` values should be passed in as strings.
    '''
    # Monitoring state, no changes will be made so no test interface needed
    ret = {'name': name,
           'result': False,
           'comment': '',
           'changes': {},
           'data': {}}  # Data field for monitoring state

    data = __salt__['status.loadavg']()
    if name not in data:
        ret['result'] = False
        ret['comment'] += 'Requested load average {0} not available '.format(
            name
        )
        return ret
    # The thresholds are passed in as strings, so compare them numerically
    if minimum and maximum and float(minimum) >= float(maximum):
        ret['comment'] += 'Min must be less than max'
    if ret['comment']:
        return ret
    cap = float(data[name])
    ret['data'] = data[name]
    if minimum:
        if cap < float(minimum):
            ret['comment'] = 'Load avg is below minimum of {0} at {1}'.format(
                minimum, cap)
            return ret
    if maximum:
        if cap > float(maximum):
            ret['comment'] = 'Load avg is above maximum of {0} at {1}'.format(
                maximum, cap)
            return ret
    ret['comment'] = 'Load avg in acceptable range'
    ret['result'] = True
    return ret
b8a477d19c1ce5f06c8f3f0a17a8a5bb90ed9861
find_dups.py
find_dups.py
import sys, os, urllib, StringIO, traceback, cgi, binascii, getopt
import store, config

try:
    config = config.Config(sys.argv[1], 'webui')
except IndexError:
    print "Usage: find_dups.py config.ini"
    raise SystemExit
store = store.Store(config)
store.open()

def owner_email(p):
    result = set()
    for r, u in store.get_package_roles(p):
        if r == 'Owner':
            result.add(store.get_user(u)['email'])
    return result

def mail_dup(email, package1, package2):
    # Hard-coded override of the recipient address; all mail goes to the
    # maintainer instead of the package owners.
    email = "martin@v.loewis.de"
    f = os.popen("/usr/lib/sendmail "+email, "w")
    f.write("To: %s\n" % email)
    f.write("From: martin@v.loewis.de\n")
    f.write("Subject: Please clean up PyPI package names\n\n")
    f.write("Dear Package Owner,\n")
    f.write("You have currently registered the following two packages,\n")
    f.write("which differ only in case:\n\n%s\n%s\n\n" % (package1, package2))
    f.write("Due to a recent policy change, we are now rejecting this kind of\n")
    f.write("setup. Please remove one of the packages.\n\n")
    f.write("If you need assistance, please let me know.\n\n")
    f.write("Kind regards,\nMartin v. Loewis\n")
    f.close()

lower = {}
for name, version in store.get_packages():
    lname = name.lower()
    if lname in lower:
        owner1 = owner_email(name)
        owner2 = owner_email(lower[lname])
        owners = owner1.intersection(owner2)
        if owners:
            mail_dup(owners.pop(), name, lower[lname])
        else:
            print "Distinct dup", name, lower[lname], owner1, owner2
    lower[lname] = name
Add script to email users of name-conflicting packages.
Add script to email users of name-conflicting packages.
Python
bsd-3-clause
techtonik/pydotorg.pypi,techtonik/pydotorg.pypi
Add script to email users of name-conflicting packages.
import sys, os, urllib, StringIO, traceback, cgi, binascii, getopt
import store, config

try:
    config = config.Config(sys.argv[1], 'webui')
except IndexError:
    print "Usage: find_dups.py config.ini"
    raise SystemExit
store = store.Store(config)
store.open()

def owner_email(p):
    result = set()
    for r, u in store.get_package_roles(p):
        if r == 'Owner':
            result.add(store.get_user(u)['email'])
    return result

def mail_dup(email, package1, package2):
    # Hard-coded override of the recipient address; all mail goes to the
    # maintainer instead of the package owners.
    email = "martin@v.loewis.de"
    f = os.popen("/usr/lib/sendmail "+email, "w")
    f.write("To: %s\n" % email)
    f.write("From: martin@v.loewis.de\n")
    f.write("Subject: Please clean up PyPI package names\n\n")
    f.write("Dear Package Owner,\n")
    f.write("You have currently registered the following two packages,\n")
    f.write("which differ only in case:\n\n%s\n%s\n\n" % (package1, package2))
    f.write("Due to a recent policy change, we are now rejecting this kind of\n")
    f.write("setup. Please remove one of the packages.\n\n")
    f.write("If you need assistance, please let me know.\n\n")
    f.write("Kind regards,\nMartin v. Loewis\n")
    f.close()

lower = {}
for name, version in store.get_packages():
    lname = name.lower()
    if lname in lower:
        owner1 = owner_email(name)
        owner2 = owner_email(lower[lname])
        owners = owner1.intersection(owner2)
        if owners:
            mail_dup(owners.pop(), name, lower[lname])
        else:
            print "Distinct dup", name, lower[lname], owner1, owner2
    lower[lname] = name
<commit_before><commit_msg>Add script to email users of name-conflicting packages.<commit_after>
import sys, os, urllib, StringIO, traceback, cgi, binascii, getopt
import store, config

try:
    config = config.Config(sys.argv[1], 'webui')
except IndexError:
    print "Usage: find_dups.py config.ini"
    raise SystemExit
store = store.Store(config)
store.open()

def owner_email(p):
    result = set()
    for r, u in store.get_package_roles(p):
        if r == 'Owner':
            result.add(store.get_user(u)['email'])
    return result

def mail_dup(email, package1, package2):
    # Hard-coded override of the recipient address; all mail goes to the
    # maintainer instead of the package owners.
    email = "martin@v.loewis.de"
    f = os.popen("/usr/lib/sendmail "+email, "w")
    f.write("To: %s\n" % email)
    f.write("From: martin@v.loewis.de\n")
    f.write("Subject: Please clean up PyPI package names\n\n")
    f.write("Dear Package Owner,\n")
    f.write("You have currently registered the following two packages,\n")
    f.write("which differ only in case:\n\n%s\n%s\n\n" % (package1, package2))
    f.write("Due to a recent policy change, we are now rejecting this kind of\n")
    f.write("setup. Please remove one of the packages.\n\n")
    f.write("If you need assistance, please let me know.\n\n")
    f.write("Kind regards,\nMartin v. Loewis\n")
    f.close()

lower = {}
for name, version in store.get_packages():
    lname = name.lower()
    if lname in lower:
        owner1 = owner_email(name)
        owner2 = owner_email(lower[lname])
        owners = owner1.intersection(owner2)
        if owners:
            mail_dup(owners.pop(), name, lower[lname])
        else:
            print "Distinct dup", name, lower[lname], owner1, owner2
    lower[lname] = name
Add script to email users of name-conflicting packages.import sys, os, urllib, StringIO, traceback, cgi, binascii, getopt
import store, config

try:
    config = config.Config(sys.argv[1], 'webui')
except IndexError:
    print "Usage: find_dups.py config.ini"
    raise SystemExit
store = store.Store(config)
store.open()

def owner_email(p):
    result = set()
    for r, u in store.get_package_roles(p):
        if r == 'Owner':
            result.add(store.get_user(u)['email'])
    return result

def mail_dup(email, package1, package2):
    # Hard-coded override of the recipient address; all mail goes to the
    # maintainer instead of the package owners.
    email = "martin@v.loewis.de"
    f = os.popen("/usr/lib/sendmail "+email, "w")
    f.write("To: %s\n" % email)
    f.write("From: martin@v.loewis.de\n")
    f.write("Subject: Please clean up PyPI package names\n\n")
    f.write("Dear Package Owner,\n")
    f.write("You have currently registered the following two packages,\n")
    f.write("which differ only in case:\n\n%s\n%s\n\n" % (package1, package2))
    f.write("Due to a recent policy change, we are now rejecting this kind of\n")
    f.write("setup. Please remove one of the packages.\n\n")
    f.write("If you need assistance, please let me know.\n\n")
    f.write("Kind regards,\nMartin v. Loewis\n")
    f.close()

lower = {}
for name, version in store.get_packages():
    lname = name.lower()
    if lname in lower:
        owner1 = owner_email(name)
        owner2 = owner_email(lower[lname])
        owners = owner1.intersection(owner2)
        if owners:
            mail_dup(owners.pop(), name, lower[lname])
        else:
            print "Distinct dup", name, lower[lname], owner1, owner2
    lower[lname] = name
<commit_before><commit_msg>Add script to email users of name-conflicting packages.<commit_after>import sys, os, urllib, StringIO, traceback, cgi, binascii, getopt
import store, config

try:
    config = config.Config(sys.argv[1], 'webui')
except IndexError:
    print "Usage: find_dups.py config.ini"
    raise SystemExit
store = store.Store(config)
store.open()

def owner_email(p):
    result = set()
    for r, u in store.get_package_roles(p):
        if r == 'Owner':
            result.add(store.get_user(u)['email'])
    return result

def mail_dup(email, package1, package2):
    # Hard-coded override of the recipient address; all mail goes to the
    # maintainer instead of the package owners.
    email = "martin@v.loewis.de"
    f = os.popen("/usr/lib/sendmail "+email, "w")
    f.write("To: %s\n" % email)
    f.write("From: martin@v.loewis.de\n")
    f.write("Subject: Please clean up PyPI package names\n\n")
    f.write("Dear Package Owner,\n")
    f.write("You have currently registered the following two packages,\n")
    f.write("which differ only in case:\n\n%s\n%s\n\n" % (package1, package2))
    f.write("Due to a recent policy change, we are now rejecting this kind of\n")
    f.write("setup. Please remove one of the packages.\n\n")
    f.write("If you need assistance, please let me know.\n\n")
    f.write("Kind regards,\nMartin v. Loewis\n")
    f.close()

lower = {}
for name, version in store.get_packages():
    lname = name.lower()
    if lname in lower:
        owner1 = owner_email(name)
        owner2 = owner_email(lower[lname])
        owners = owner1.intersection(owner2)
        if owners:
            mail_dup(owners.pop(), name, lower[lname])
        else:
            print "Distinct dup", name, lower[lname], owner1, owner2
    lower[lname] = name
b5100e8ed538a2ffc9933efe1ef8f19e5e9f6fa8
regulations/views/notice_home.py
regulations/views/notice_home.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from operator import itemgetter
import logging

from django.http import Http404
from django.template.response import TemplateResponse
from django.views.generic.base import View

from regulations.generator.api_reader import ApiReader
from regulations.views.preamble import (
    notice_data, CommentState)

logger = logging.getLogger(__name__)


class NoticeHomeView(View):
    """
    Basic view that provides a list of regulations and notices to the context.
    """
    template_name = None  # We should probably have a default notice template.

    def get(self, request, *args, **kwargs):
        notices = ApiReader().notices().get("results", [])
        context = {}
        notices_meta = []
        for notice in notices:
            try:
                if notice.get("document_number"):
                    _, meta, _ = notice_data(notice["document_number"])
                    notices_meta.append(meta)
            except Http404:
                pass
        notices_meta = sorted(notices_meta, key=itemgetter("publication_date"),
                              reverse=True)
        context["notices"] = notices_meta
        # Django templates won't show contents of CommentState as an Enum, so:
        context["comment_state"] = {state.name: state.value
                                    for state in CommentState}

        assert self.template_name
        template = self.template_name
        return TemplateResponse(request=request, template=template,
                                context=context)
Add view for notice and comment homepage.
Add view for notice and comment homepage.
Python
cc0-1.0
18F/regulations-site,eregs/regulations-site,eregs/regulations-site,eregs/regulations-site,tadhg-ohiggins/regulations-site,tadhg-ohiggins/regulations-site,18F/regulations-site,tadhg-ohiggins/regulations-site,eregs/regulations-site,18F/regulations-site,tadhg-ohiggins/regulations-site,18F/regulations-site
Add view for notice and comment homepage.
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from operator import itemgetter
import logging

from django.http import Http404
from django.template.response import TemplateResponse
from django.views.generic.base import View

from regulations.generator.api_reader import ApiReader
from regulations.views.preamble import (
    notice_data, CommentState)

logger = logging.getLogger(__name__)


class NoticeHomeView(View):
    """
    Basic view that provides a list of regulations and notices to the context.
    """
    template_name = None  # We should probably have a default notice template.

    def get(self, request, *args, **kwargs):
        notices = ApiReader().notices().get("results", [])
        context = {}
        notices_meta = []
        for notice in notices:
            try:
                if notice.get("document_number"):
                    _, meta, _ = notice_data(notice["document_number"])
                    notices_meta.append(meta)
            except Http404:
                pass
        notices_meta = sorted(notices_meta, key=itemgetter("publication_date"),
                              reverse=True)
        context["notices"] = notices_meta
        # Django templates won't show contents of CommentState as an Enum, so:
        context["comment_state"] = {state.name: state.value
                                    for state in CommentState}

        assert self.template_name
        template = self.template_name
        return TemplateResponse(request=request, template=template,
                                context=context)
<commit_before><commit_msg>Add view for notice and comment homepage.<commit_after>
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from operator import itemgetter
import logging

from django.http import Http404
from django.template.response import TemplateResponse
from django.views.generic.base import View

from regulations.generator.api_reader import ApiReader
from regulations.views.preamble import (
    notice_data, CommentState)

logger = logging.getLogger(__name__)


class NoticeHomeView(View):
    """
    Basic view that provides a list of regulations and notices to the context.
    """
    template_name = None  # We should probably have a default notice template.

    def get(self, request, *args, **kwargs):
        notices = ApiReader().notices().get("results", [])
        context = {}
        notices_meta = []
        for notice in notices:
            try:
                if notice.get("document_number"):
                    _, meta, _ = notice_data(notice["document_number"])
                    notices_meta.append(meta)
            except Http404:
                pass
        notices_meta = sorted(notices_meta, key=itemgetter("publication_date"),
                              reverse=True)
        context["notices"] = notices_meta
        # Django templates won't show contents of CommentState as an Enum, so:
        context["comment_state"] = {state.name: state.value
                                    for state in CommentState}

        assert self.template_name
        template = self.template_name
        return TemplateResponse(request=request, template=template,
                                context=context)
Add view for notice and comment homepage.# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from operator import itemgetter
import logging

from django.http import Http404
from django.template.response import TemplateResponse
from django.views.generic.base import View

from regulations.generator.api_reader import ApiReader
from regulations.views.preamble import (
    notice_data, CommentState)

logger = logging.getLogger(__name__)


class NoticeHomeView(View):
    """
    Basic view that provides a list of regulations and notices to the context.
    """
    template_name = None  # We should probably have a default notice template.

    def get(self, request, *args, **kwargs):
        notices = ApiReader().notices().get("results", [])
        context = {}
        notices_meta = []
        for notice in notices:
            try:
                if notice.get("document_number"):
                    _, meta, _ = notice_data(notice["document_number"])
                    notices_meta.append(meta)
            except Http404:
                pass
        notices_meta = sorted(notices_meta, key=itemgetter("publication_date"),
                              reverse=True)
        context["notices"] = notices_meta
        # Django templates won't show contents of CommentState as an Enum, so:
        context["comment_state"] = {state.name: state.value
                                    for state in CommentState}

        assert self.template_name
        template = self.template_name
        return TemplateResponse(request=request, template=template,
                                context=context)
<commit_before><commit_msg>Add view for notice and comment homepage.<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals

from operator import itemgetter
import logging

from django.http import Http404
from django.template.response import TemplateResponse
from django.views.generic.base import View

from regulations.generator.api_reader import ApiReader
from regulations.views.preamble import (
    notice_data, CommentState)

logger = logging.getLogger(__name__)


class NoticeHomeView(View):
    """
    Basic view that provides a list of regulations and notices to the context.
    """
    template_name = None  # We should probably have a default notice template.

    def get(self, request, *args, **kwargs):
        notices = ApiReader().notices().get("results", [])
        context = {}
        notices_meta = []
        for notice in notices:
            try:
                if notice.get("document_number"):
                    _, meta, _ = notice_data(notice["document_number"])
                    notices_meta.append(meta)
            except Http404:
                pass
        notices_meta = sorted(notices_meta, key=itemgetter("publication_date"),
                              reverse=True)
        context["notices"] = notices_meta
        # Django templates won't show contents of CommentState as an Enum, so:
        context["comment_state"] = {state.name: state.value
                                    for state in CommentState}

        assert self.template_name
        template = self.template_name
        return TemplateResponse(request=request, template=template,
                                context=context)
36746f17e138efedb615eedfd672b7efb81888e2
karabo_data/tests/test_writer.py
karabo_data/tests/test_writer.py
import os.path as osp
from tempfile import TemporaryDirectory

from testpath import assert_isfile

from karabo_data import RunDirectory, H5File


def test_write_selected(mock_fxe_run):
    with TemporaryDirectory() as td:
        new_file = osp.join(td, 'test.h5')

        with RunDirectory(mock_fxe_run) as run:
            run.select('SPB_XTD9_XGM/*').write(new_file)

        assert_isfile(new_file)
        with H5File(new_file) as f:
            assert f.control_sources == {'SPB_XTD9_XGM/DOOCS/MAIN'}
            assert f.instrument_sources == {'SPB_XTD9_XGM/DOOCS/MAIN:output'}

            s = f.get_series('SPB_XTD9_XGM/DOOCS/MAIN',
                             'beamPosition.ixPos.value')
            # This should have concatenated the two sequence files (400 + 80)
            assert len(s) == 480

            a = f.get_array('SPB_XTD9_XGM/DOOCS/MAIN:output',
                            'data.intensityTD')
            assert a.shape == (480, 1000)
Add test of writing a file
Add test of writing a file
Python
bsd-3-clause
European-XFEL/h5tools-py
Add test of writing a file
import os.path as osp
from tempfile import TemporaryDirectory

from testpath import assert_isfile

from karabo_data import RunDirectory, H5File


def test_write_selected(mock_fxe_run):
    with TemporaryDirectory() as td:
        new_file = osp.join(td, 'test.h5')

        with RunDirectory(mock_fxe_run) as run:
            run.select('SPB_XTD9_XGM/*').write(new_file)

        assert_isfile(new_file)
        with H5File(new_file) as f:
            assert f.control_sources == {'SPB_XTD9_XGM/DOOCS/MAIN'}
            assert f.instrument_sources == {'SPB_XTD9_XGM/DOOCS/MAIN:output'}

            s = f.get_series('SPB_XTD9_XGM/DOOCS/MAIN',
                             'beamPosition.ixPos.value')
            # This should have concatenated the two sequence files (400 + 80)
            assert len(s) == 480

            a = f.get_array('SPB_XTD9_XGM/DOOCS/MAIN:output',
                            'data.intensityTD')
            assert a.shape == (480, 1000)
<commit_before><commit_msg>Add test of writing a file<commit_after>
import os.path as osp
from tempfile import TemporaryDirectory

from testpath import assert_isfile

from karabo_data import RunDirectory, H5File


def test_write_selected(mock_fxe_run):
    with TemporaryDirectory() as td:
        new_file = osp.join(td, 'test.h5')

        with RunDirectory(mock_fxe_run) as run:
            run.select('SPB_XTD9_XGM/*').write(new_file)

        assert_isfile(new_file)
        with H5File(new_file) as f:
            assert f.control_sources == {'SPB_XTD9_XGM/DOOCS/MAIN'}
            assert f.instrument_sources == {'SPB_XTD9_XGM/DOOCS/MAIN:output'}

            s = f.get_series('SPB_XTD9_XGM/DOOCS/MAIN',
                             'beamPosition.ixPos.value')
            # This should have concatenated the two sequence files (400 + 80)
            assert len(s) == 480

            a = f.get_array('SPB_XTD9_XGM/DOOCS/MAIN:output',
                            'data.intensityTD')
            assert a.shape == (480, 1000)
Add test of writing a fileimport os.path as osp
from tempfile import TemporaryDirectory

from testpath import assert_isfile

from karabo_data import RunDirectory, H5File


def test_write_selected(mock_fxe_run):
    with TemporaryDirectory() as td:
        new_file = osp.join(td, 'test.h5')

        with RunDirectory(mock_fxe_run) as run:
            run.select('SPB_XTD9_XGM/*').write(new_file)

        assert_isfile(new_file)
        with H5File(new_file) as f:
            assert f.control_sources == {'SPB_XTD9_XGM/DOOCS/MAIN'}
            assert f.instrument_sources == {'SPB_XTD9_XGM/DOOCS/MAIN:output'}

            s = f.get_series('SPB_XTD9_XGM/DOOCS/MAIN',
                             'beamPosition.ixPos.value')
            # This should have concatenated the two sequence files (400 + 80)
            assert len(s) == 480

            a = f.get_array('SPB_XTD9_XGM/DOOCS/MAIN:output',
                            'data.intensityTD')
            assert a.shape == (480, 1000)
<commit_before><commit_msg>Add test of writing a file<commit_after>import os.path as osp
from tempfile import TemporaryDirectory

from testpath import assert_isfile

from karabo_data import RunDirectory, H5File


def test_write_selected(mock_fxe_run):
    with TemporaryDirectory() as td:
        new_file = osp.join(td, 'test.h5')

        with RunDirectory(mock_fxe_run) as run:
            run.select('SPB_XTD9_XGM/*').write(new_file)

        assert_isfile(new_file)
        with H5File(new_file) as f:
            assert f.control_sources == {'SPB_XTD9_XGM/DOOCS/MAIN'}
            assert f.instrument_sources == {'SPB_XTD9_XGM/DOOCS/MAIN:output'}

            s = f.get_series('SPB_XTD9_XGM/DOOCS/MAIN',
                             'beamPosition.ixPos.value')
            # This should have concatenated the two sequence files (400 + 80)
            assert len(s) == 480

            a = f.get_array('SPB_XTD9_XGM/DOOCS/MAIN:output',
                            'data.intensityTD')
            assert a.shape == (480, 1000)
c7d61482938dd175e6aa6d607e9275eefd02bc2b
liwc_statistics.py
liwc_statistics.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to generate statistics on LIWC entities.

The script calculates how many LIWC words are found. This script can be used
to compare the differences in numbers of words found for the modern and
historic versions of LIWC.
"""
from bs4 import BeautifulSoup
from lxml import etree
from collections import Counter
import argparse
import string

from emotools.bs4_helpers import note, word
from emotools.liwc_helpers import load_liwc


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('file_name', help='the name of the FoLiA XML file to '
                        'be processed')
    parser.add_argument('dic', help='the liwc dictionary to be used')
    args = parser.parse_args()

    if args.dic == 'LIWC_Dutch_dictionary.dic':
        encoding = 'latin1'
    else:
        encoding = 'utf8'
    liwc_dict, liwc_categories = load_liwc(args.dic, encoding)

    # Only words inside events are counted (this means titles (i.e. words
    # inside heading tags) are not counted). This is also what is stored in
    # Elasticsearch. There are small differences between the total number of
    # words in ES and the total number of words as counted by this script
    # (~tens of words difference). It is not clear why this is the case. It
    # probably has to do with the tokenizer (analyzer) used by ES vs. the
    # tokenizer used to generate the folia documents.
    act_tag = '{http://ilk.uvt.nl/folia}div'
    event_tag = '{http://ilk.uvt.nl/folia}event'
    sentence_tag = '{http://ilk.uvt.nl/folia}s'
    word_tag = '{http://ilk.uvt.nl/folia}w'
    text_content_tag = '{http://ilk.uvt.nl/folia}t'

    num_words = 0
    liwc_count = Counter()

    context = etree.iterparse(args.file_name, events=('end',), tag=event_tag,
                              huge_tree=True)
    for event, elem in context:
        # ignore subevents
        if not elem.getparent().tag == event_tag:
            event_xml = BeautifulSoup(etree.tostring(elem), 'xml')
            for element in event_xml.descendants:
                if word(element) and not note(element.parent.parent):
                    w = element.t.string.lower()
                    if w not in string.punctuation:
                        num_words += 1
                        if w in liwc_dict:
                            for cat in liwc_dict[w]:
                                liwc_count[liwc_categories[cat]] += 1

    print 'Total # words\t{}\n'.format(num_words)
    print 'Category\tPercentage\tFrequency'
    cats = liwc_categories.values()
    cats.sort()
    for cat in cats:
        freq = liwc_count[cat]
        percentage = (freq/(num_words+0.0))*100
        print '{}\t{:.2f}\t{}'.format(cat, percentage, freq)
Add script to generate statistics on liwc entities in texts
Add script to generate statistics on liwc entities in texts

The script counts the frequencies of liwc words given a liwc dictionary
(modern or historic).
Python
apache-2.0
NLeSC/embodied-emotions-scripts,NLeSC/embodied-emotions-scripts
Add script to generate statistics on liwc entities in texts

The script counts the frequencies of liwc words given a liwc dictionary
(modern or historic).
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to generate statistics on LIWC entities.

The script calculates how many LIWC words are found. This script can be used
to compare the differences in numbers of words found for the modern and
historic versions of LIWC.
"""
from bs4 import BeautifulSoup
from lxml import etree
from collections import Counter
import argparse
import string

from emotools.bs4_helpers import note, word
from emotools.liwc_helpers import load_liwc


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('file_name', help='the name of the FoLiA XML file to '
                        'be processed')
    parser.add_argument('dic', help='the liwc dictionary to be used')
    args = parser.parse_args()

    if args.dic == 'LIWC_Dutch_dictionary.dic':
        encoding = 'latin1'
    else:
        encoding = 'utf8'
    liwc_dict, liwc_categories = load_liwc(args.dic, encoding)

    # Only words inside events are counted (this means titles (i.e. words
    # inside heading tags) are not counted). This is also what is stored in
    # Elasticsearch. There are small differences between the total number of
    # words in ES and the total number of words as counted by this script
    # (~tens of words difference). It is not clear why this is the case. It
    # probably has to do with the tokenizer (analyzer) used by ES vs. the
    # tokenizer used to generate the folia documents.
    act_tag = '{http://ilk.uvt.nl/folia}div'
    event_tag = '{http://ilk.uvt.nl/folia}event'
    sentence_tag = '{http://ilk.uvt.nl/folia}s'
    word_tag = '{http://ilk.uvt.nl/folia}w'
    text_content_tag = '{http://ilk.uvt.nl/folia}t'

    num_words = 0
    liwc_count = Counter()

    context = etree.iterparse(args.file_name, events=('end',), tag=event_tag,
                              huge_tree=True)
    for event, elem in context:
        # ignore subevents
        if not elem.getparent().tag == event_tag:
            event_xml = BeautifulSoup(etree.tostring(elem), 'xml')
            for element in event_xml.descendants:
                if word(element) and not note(element.parent.parent):
                    w = element.t.string.lower()
                    if w not in string.punctuation:
                        num_words += 1
                        if w in liwc_dict:
                            for cat in liwc_dict[w]:
                                liwc_count[liwc_categories[cat]] += 1

    print 'Total # words\t{}\n'.format(num_words)
    print 'Category\tPercentage\tFrequency'
    cats = liwc_categories.values()
    cats.sort()
    for cat in cats:
        freq = liwc_count[cat]
        percentage = (freq/(num_words+0.0))*100
        print '{}\t{:.2f}\t{}'.format(cat, percentage, freq)
<commit_before><commit_msg>Add script to generate statistics on liwc entities in texts

The script counts the frequencies of liwc words given a liwc dictionary
(modern or historic).<commit_after>
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to generate statistics on LIWC entities.

The script calculates how many LIWC words are found. This script can be used
to compare the differences in numbers of words found for the modern and
historic versions of LIWC.
"""
from bs4 import BeautifulSoup
from lxml import etree
from collections import Counter
import argparse
import string

from emotools.bs4_helpers import note, word
from emotools.liwc_helpers import load_liwc


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('file_name', help='the name of the FoLiA XML file to '
                        'be processed')
    parser.add_argument('dic', help='the liwc dictionary to be used')
    args = parser.parse_args()

    if args.dic == 'LIWC_Dutch_dictionary.dic':
        encoding = 'latin1'
    else:
        encoding = 'utf8'
    liwc_dict, liwc_categories = load_liwc(args.dic, encoding)

    # Only words inside events are counted (this means titles (i.e. words
    # inside heading tags) are not counted). This is also what is stored in
    # Elasticsearch. There are small differences between the total number of
    # words in ES and the total number of words as counted by this script
    # (~tens of words difference). It is not clear why this is the case. It
    # probably has to do with the tokenizer (analyzer) used by ES vs. the
    # tokenizer used to generate the folia documents.
    act_tag = '{http://ilk.uvt.nl/folia}div'
    event_tag = '{http://ilk.uvt.nl/folia}event'
    sentence_tag = '{http://ilk.uvt.nl/folia}s'
    word_tag = '{http://ilk.uvt.nl/folia}w'
    text_content_tag = '{http://ilk.uvt.nl/folia}t'

    num_words = 0
    liwc_count = Counter()

    context = etree.iterparse(args.file_name, events=('end',), tag=event_tag,
                              huge_tree=True)
    for event, elem in context:
        # ignore subevents
        if not elem.getparent().tag == event_tag:
            event_xml = BeautifulSoup(etree.tostring(elem), 'xml')
            for element in event_xml.descendants:
                if word(element) and not note(element.parent.parent):
                    w = element.t.string.lower()
                    if w not in string.punctuation:
                        num_words += 1
                        if w in liwc_dict:
                            for cat in liwc_dict[w]:
                                liwc_count[liwc_categories[cat]] += 1

    print 'Total # words\t{}\n'.format(num_words)
    print 'Category\tPercentage\tFrequency'
    cats = liwc_categories.values()
    cats.sort()
    for cat in cats:
        freq = liwc_count[cat]
        percentage = (freq/(num_words+0.0))*100
        print '{}\t{:.2f}\t{}'.format(cat, percentage, freq)
Add script to generate statistics on liwc entities in texts

The script counts the frequencies of liwc words given a liwc dictionary
(modern or historic).#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to generate statistics on LIWC entities.

The script calculates how many LIWC words are found. This script can be used
to compare the differences in numbers of words found for the modern and
historic versions of LIWC.
"""
from bs4 import BeautifulSoup
from lxml import etree
from collections import Counter
import argparse
import string

from emotools.bs4_helpers import note, word
from emotools.liwc_helpers import load_liwc


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('file_name', help='the name of the FoLiA XML file to '
                        'be processed')
    parser.add_argument('dic', help='the liwc dictionary to be used')
    args = parser.parse_args()

    if args.dic == 'LIWC_Dutch_dictionary.dic':
        encoding = 'latin1'
    else:
        encoding = 'utf8'
    liwc_dict, liwc_categories = load_liwc(args.dic, encoding)

    # Only words inside events are counted (this means titles (i.e. words
    # inside heading tags) are not counted). This is also what is stored in
    # Elasticsearch. There are small differences between the total number of
    # words in ES and the total number of words as counted by this script
    # (~tens of words difference). It is not clear why this is the case. It
    # probably has to do with the tokenizer (analyzer) used by ES vs. the
    # tokenizer used to generate the folia documents.
    act_tag = '{http://ilk.uvt.nl/folia}div'
    event_tag = '{http://ilk.uvt.nl/folia}event'
    sentence_tag = '{http://ilk.uvt.nl/folia}s'
    word_tag = '{http://ilk.uvt.nl/folia}w'
    text_content_tag = '{http://ilk.uvt.nl/folia}t'

    num_words = 0
    liwc_count = Counter()

    context = etree.iterparse(args.file_name, events=('end',), tag=event_tag,
                              huge_tree=True)
    for event, elem in context:
        # ignore subevents
        if not elem.getparent().tag == event_tag:
            event_xml = BeautifulSoup(etree.tostring(elem), 'xml')
            for element in event_xml.descendants:
                if word(element) and not note(element.parent.parent):
                    w = element.t.string.lower()
                    if w not in string.punctuation:
                        num_words += 1
                        if w in liwc_dict:
                            for cat in liwc_dict[w]:
                                liwc_count[liwc_categories[cat]] += 1

    print 'Total # words\t{}\n'.format(num_words)
    print 'Category\tPercentage\tFrequency'
    cats = liwc_categories.values()
    cats.sort()
    for cat in cats:
        freq = liwc_count[cat]
        percentage = (freq/(num_words+0.0))*100
        print '{}\t{:.2f}\t{}'.format(cat, percentage, freq)
<commit_before><commit_msg>Add script to generate statistics on liwc entities in texts

The script counts the frequencies of liwc words given a liwc dictionary
(modern or historic).<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to generate statistics on LIWC entities.

The script calculates how many LIWC words are found. This script can be used
to compare the differences in numbers of words found for the modern and
historic versions of LIWC.
"""
from bs4 import BeautifulSoup
from lxml import etree
from collections import Counter
import argparse
import string

from emotools.bs4_helpers import note, word
from emotools.liwc_helpers import load_liwc


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('file_name', help='the name of the FoLiA XML file to '
                        'be processed')
    parser.add_argument('dic', help='the liwc dictionary to be used')
    args = parser.parse_args()

    if args.dic == 'LIWC_Dutch_dictionary.dic':
        encoding = 'latin1'
    else:
        encoding = 'utf8'
    liwc_dict, liwc_categories = load_liwc(args.dic, encoding)

    # Only words inside events are counted (this means titles (i.e. words
    # inside heading tags) are not counted). This is also what is stored in
    # Elasticsearch. There are small differences between the total number of
    # words in ES and the total number of words as counted by this script
    # (~tens of words difference). It is not clear why this is the case. It
    # probably has to do with the tokenizer (analyzer) used by ES vs. the
    # tokenizer used to generate the folia documents.
    act_tag = '{http://ilk.uvt.nl/folia}div'
    event_tag = '{http://ilk.uvt.nl/folia}event'
    sentence_tag = '{http://ilk.uvt.nl/folia}s'
    word_tag = '{http://ilk.uvt.nl/folia}w'
    text_content_tag = '{http://ilk.uvt.nl/folia}t'

    num_words = 0
    liwc_count = Counter()

    context = etree.iterparse(args.file_name, events=('end',), tag=event_tag,
                              huge_tree=True)
    for event, elem in context:
        # ignore subevents
        if not elem.getparent().tag == event_tag:
            event_xml = BeautifulSoup(etree.tostring(elem), 'xml')
            for element in event_xml.descendants:
                if word(element) and not note(element.parent.parent):
                    w = element.t.string.lower()
                    if w not in string.punctuation:
                        num_words += 1
                        if w in liwc_dict:
                            for cat in liwc_dict[w]:
                                liwc_count[liwc_categories[cat]] += 1

    print 'Total # words\t{}\n'.format(num_words)
    print 'Category\tPercentage\tFrequency'
    cats = liwc_categories.values()
    cats.sort()
    for cat in cats:
        freq = liwc_count[cat]
        percentage = (freq/(num_words+0.0))*100
        print '{}\t{:.2f}\t{}'.format(cat, percentage, freq)
d67d6d70cb7504912c95c1b1c7e4bc078d9a746d
examples/shadow_root_test.py
examples/shadow_root_test.py
from seleniumbase import BaseCase


class ShadowRootTest(BaseCase):
    def test_shadow_root(self):
        self.open("https://react-shadow.herokuapp.com/Patagonia")
        self.click("section.weather::shadow div::shadow button")
        self.assert_element('section.weather::shadow img[alt="Patagonia"]')
        weather = self.get_text("section.weather::shadow h1")
        self.post_message(weather)
        self.click('section.weather::shadow a[href="/Kyoto"]')
        self.assert_element('section.weather::shadow img[alt="Kyoto"]')
        weather = self.get_text("section.weather::shadow h1")
        self.post_message(weather)
Add a simple Shadow DOM / Shadow Root test
Add a simple Shadow DOM / Shadow Root test
Python
mit
seleniumbase/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase
Add a simple Shadow DOM / Shadow Root test
from seleniumbase import BaseCase


class ShadowRootTest(BaseCase):
    def test_shadow_root(self):
        self.open("https://react-shadow.herokuapp.com/Patagonia")
        self.click("section.weather::shadow div::shadow button")
        self.assert_element('section.weather::shadow img[alt="Patagonia"]')
        weather = self.get_text("section.weather::shadow h1")
        self.post_message(weather)
        self.click('section.weather::shadow a[href="/Kyoto"]')
        self.assert_element('section.weather::shadow img[alt="Kyoto"]')
        weather = self.get_text("section.weather::shadow h1")
        self.post_message(weather)
<commit_before><commit_msg>Add a simple Shadow DOM / Shadow Root test<commit_after>
from seleniumbase import BaseCase


class ShadowRootTest(BaseCase):
    def test_shadow_root(self):
        self.open("https://react-shadow.herokuapp.com/Patagonia")
        self.click("section.weather::shadow div::shadow button")
        self.assert_element('section.weather::shadow img[alt="Patagonia"]')
        weather = self.get_text("section.weather::shadow h1")
        self.post_message(weather)
        self.click('section.weather::shadow a[href="/Kyoto"]')
        self.assert_element('section.weather::shadow img[alt="Kyoto"]')
        weather = self.get_text("section.weather::shadow h1")
        self.post_message(weather)
Add a simple Shadow DOM / Shadow Root testfrom seleniumbase import BaseCase


class ShadowRootTest(BaseCase):
    def test_shadow_root(self):
        self.open("https://react-shadow.herokuapp.com/Patagonia")
        self.click("section.weather::shadow div::shadow button")
        self.assert_element('section.weather::shadow img[alt="Patagonia"]')
        weather = self.get_text("section.weather::shadow h1")
        self.post_message(weather)
        self.click('section.weather::shadow a[href="/Kyoto"]')
        self.assert_element('section.weather::shadow img[alt="Kyoto"]')
        weather = self.get_text("section.weather::shadow h1")
        self.post_message(weather)
<commit_before><commit_msg>Add a simple Shadow DOM / Shadow Root test<commit_after>from seleniumbase import BaseCase


class ShadowRootTest(BaseCase):
    def test_shadow_root(self):
        self.open("https://react-shadow.herokuapp.com/Patagonia")
        self.click("section.weather::shadow div::shadow button")
        self.assert_element('section.weather::shadow img[alt="Patagonia"]')
        weather = self.get_text("section.weather::shadow h1")
        self.post_message(weather)
        self.click('section.weather::shadow a[href="/Kyoto"]')
        self.assert_element('section.weather::shadow img[alt="Kyoto"]')
        weather = self.get_text("section.weather::shadow h1")
        self.post_message(weather)
13c567621bd380c77ac2bb08fd736f3362996fec
designate/storage/impl_sqlalchemy/migrate_repo/versions/046_add_indices.py
designate/storage/impl_sqlalchemy/migrate_repo/versions/046_add_indices.py
# Copyright (c) 2014 Rackspace Inc.
#
# Author: Tim Simmons <tim.simmons@rackspace.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Index, MetaData, Table

meta = MetaData()


def index_exists(index):
    table = index[1]._get_table()
    cols = sorted([str(x).split('.')[1] for x in index[1:]])
    for idx in table.indexes:
        if sorted(idx.columns.keys()) == cols:
            return True
    return False


def upgrade(migrate_engine):
    meta.bind = migrate_engine

    zones_table = Table('domains', meta, autoload=True)
    recordsets_table = Table('recordsets', meta, autoload=True)
    records_table = Table('records', meta, autoload=True)

    indices = [
        ['zone_deleted', zones_table.c.deleted],
        ['zone_tenant_deleted', zones_table.c.tenant_id,
         zones_table.c.deleted],
        ['rrset_type_domainid', recordsets_table.c.type,
         recordsets_table.c.domain_id],
        ['recordset_type_name', recordsets_table.c.type,
         recordsets_table.c.name],
        ['records_tenant', records_table.c.tenant_id]
    ]

    for ind in indices:
        if not index_exists(ind):
            index = Index(*ind)
            index.create(migrate_engine)


def downgrade(migrate_engine):
    meta.bind = migrate_engine

    zones_table = Table('domains', meta, autoload=True)
    recordsets_table = Table('recordsets', meta, autoload=True)
    records_table = Table('records', meta, autoload=True)

    indices = [
        ['zone_deleted', zones_table.c.deleted],
        ['zone_tenant_deleted', zones_table.c.tenant_id,
         zones_table.c.deleted],
        ['rrset_type_domainid', recordsets_table.c.type,
         recordsets_table.c.domain_id],
        ['recordset_type_name', recordsets_table.c.type,
         recordsets_table.c.name],
        ['records_tenant', records_table.c.tenant_id]
    ]

    for ind in indices:
        if index_exists(ind):
            index = Index(*ind)
            index.drop(migrate_engine)
Add some helpful SQL indices
Add some helpful SQL indices

Adds a few SQL indices that improve performance significantly as the
Designate database grows in size.

Change-Id: If68b3f7883487e5379e7ab45f51a27ef7ecb2f58
Blueprint: add-indices
Python
apache-2.0
ionrock/designate,ionrock/designate,kiall/designate-py3,ramsateesh/designate,kiall/designate-py3,ramsateesh/designate,openstack/designate,ramsateesh/designate,tonyli71/designate,cneill/designate-testing,muraliselva10/designate,cneill/designate,grahamhayes/designate,muraliselva10/designate,grahamhayes/designate,cneill/designate,muraliselva10/designate,openstack/designate,tonyli71/designate,kiall/designate-py3,cneill/designate-testing,cneill/designate,ionrock/designate,kiall/designate-py3,cneill/designate,openstack/designate,kiall/designate-py3,cneill/designate,cneill/designate-testing,grahamhayes/designate,tonyli71/designate
Add some helpful SQL indices

Adds a few SQL indices that improve performance significantly as the
Designate database grows in size.

Change-Id: If68b3f7883487e5379e7ab45f51a27ef7ecb2f58
Blueprint: add-indices
# Copyright (c) 2014 Rackspace Inc.
#
# Author: Tim Simmons <tim.simmons@rackspace.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Index, MetaData, Table

meta = MetaData()


def index_exists(index):
    table = index[1]._get_table()
    cols = sorted([str(x).split('.')[1] for x in index[1:]])
    for idx in table.indexes:
        if sorted(idx.columns.keys()) == cols:
            return True
    return False


def upgrade(migrate_engine):
    meta.bind = migrate_engine

    zones_table = Table('domains', meta, autoload=True)
    recordsets_table = Table('recordsets', meta, autoload=True)
    records_table = Table('records', meta, autoload=True)

    indices = [
        ['zone_deleted', zones_table.c.deleted],
        ['zone_tenant_deleted', zones_table.c.tenant_id,
         zones_table.c.deleted],
        ['rrset_type_domainid', recordsets_table.c.type,
         recordsets_table.c.domain_id],
        ['recordset_type_name', recordsets_table.c.type,
         recordsets_table.c.name],
        ['records_tenant', records_table.c.tenant_id]
    ]

    for ind in indices:
        if not index_exists(ind):
            index = Index(*ind)
            index.create(migrate_engine)


def downgrade(migrate_engine):
    meta.bind = migrate_engine

    zones_table = Table('domains', meta, autoload=True)
    recordsets_table = Table('recordsets', meta, autoload=True)
    records_table = Table('records', meta, autoload=True)

    indices = [
        ['zone_deleted', zones_table.c.deleted],
        ['zone_tenant_deleted', zones_table.c.tenant_id,
         zones_table.c.deleted],
        ['rrset_type_domainid', recordsets_table.c.type,
         recordsets_table.c.domain_id],
        ['recordset_type_name', recordsets_table.c.type,
         recordsets_table.c.name],
        ['records_tenant', records_table.c.tenant_id]
    ]

    for ind in indices:
        if index_exists(ind):
            index = Index(*ind)
            index.drop(migrate_engine)
<commit_before><commit_msg>Add some helpful SQL indices

Adds a few SQL indices that improve performance significantly as the
Designate database grows in size.

Change-Id: If68b3f7883487e5379e7ab45f51a27ef7ecb2f58
Blueprint: add-indices<commit_after>
# Copyright (c) 2014 Rackspace Inc.
#
# Author: Tim Simmons <tim.simmons@rackspace.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Index, MetaData, Table

meta = MetaData()


def index_exists(index):
    table = index[1]._get_table()
    cols = sorted([str(x).split('.')[1] for x in index[1:]])
    for idx in table.indexes:
        if sorted(idx.columns.keys()) == cols:
            return True
    return False


def upgrade(migrate_engine):
    meta.bind = migrate_engine

    zones_table = Table('domains', meta, autoload=True)
    recordsets_table = Table('recordsets', meta, autoload=True)
    records_table = Table('records', meta, autoload=True)

    indices = [
        ['zone_deleted', zones_table.c.deleted],
        ['zone_tenant_deleted', zones_table.c.tenant_id,
         zones_table.c.deleted],
        ['rrset_type_domainid', recordsets_table.c.type,
         recordsets_table.c.domain_id],
        ['recordset_type_name', recordsets_table.c.type,
         recordsets_table.c.name],
        ['records_tenant', records_table.c.tenant_id]
    ]

    for ind in indices:
        if not index_exists(ind):
            index = Index(*ind)
            index.create(migrate_engine)


def downgrade(migrate_engine):
    meta.bind = migrate_engine

    zones_table = Table('domains', meta, autoload=True)
    recordsets_table = Table('recordsets', meta, autoload=True)
    records_table = Table('records', meta, autoload=True)

    indices = [
        ['zone_deleted', zones_table.c.deleted],
        ['zone_tenant_deleted', zones_table.c.tenant_id,
         zones_table.c.deleted],
        ['rrset_type_domainid', recordsets_table.c.type,
         recordsets_table.c.domain_id],
        ['recordset_type_name', recordsets_table.c.type,
         recordsets_table.c.name],
        ['records_tenant', records_table.c.tenant_id]
    ]

    for ind in indices:
        if index_exists(ind):
            index = Index(*ind)
            index.drop(migrate_engine)
Add some helpful SQL indices

Adds a few SQL indices that improve performance significantly as the
Designate database grows in size.

Change-Id: If68b3f7883487e5379e7ab45f51a27ef7ecb2f58
Blueprint: add-indices# Copyright (c) 2014 Rackspace Inc.
#
# Author: Tim Simmons <tim.simmons@rackspace.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Index, MetaData, Table

meta = MetaData()


def index_exists(index):
    table = index[1]._get_table()
    cols = sorted([str(x).split('.')[1] for x in index[1:]])
    for idx in table.indexes:
        if sorted(idx.columns.keys()) == cols:
            return True
    return False


def upgrade(migrate_engine):
    meta.bind = migrate_engine

    zones_table = Table('domains', meta, autoload=True)
    recordsets_table = Table('recordsets', meta, autoload=True)
    records_table = Table('records', meta, autoload=True)

    indices = [
        ['zone_deleted', zones_table.c.deleted],
        ['zone_tenant_deleted', zones_table.c.tenant_id,
         zones_table.c.deleted],
        ['rrset_type_domainid', recordsets_table.c.type,
         recordsets_table.c.domain_id],
        ['recordset_type_name', recordsets_table.c.type,
         recordsets_table.c.name],
        ['records_tenant', records_table.c.tenant_id]
    ]

    for ind in indices:
        if not index_exists(ind):
            index = Index(*ind)
            index.create(migrate_engine)


def downgrade(migrate_engine):
    meta.bind = migrate_engine

    zones_table = Table('domains', meta, autoload=True)
    recordsets_table = Table('recordsets', meta, autoload=True)
    records_table = Table('records', meta, autoload=True)

    indices = [
        ['zone_deleted', zones_table.c.deleted],
        ['zone_tenant_deleted', zones_table.c.tenant_id,
         zones_table.c.deleted],
        ['rrset_type_domainid', recordsets_table.c.type,
         recordsets_table.c.domain_id],
        ['recordset_type_name', recordsets_table.c.type,
         recordsets_table.c.name],
        ['records_tenant', records_table.c.tenant_id]
    ]

    for ind in indices:
        if index_exists(ind):
            index = Index(*ind)
            index.drop(migrate_engine)
<commit_before><commit_msg>Add some helpful SQL indices

Adds a few SQL indices that improve performance significantly as the
Designate database grows in size.

Change-Id: If68b3f7883487e5379e7ab45f51a27ef7ecb2f58
Blueprint: add-indices<commit_after># Copyright (c) 2014 Rackspace Inc.
#
# Author: Tim Simmons <tim.simmons@rackspace.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Index, MetaData, Table

meta = MetaData()


def index_exists(index):
    table = index[1]._get_table()
    cols = sorted([str(x).split('.')[1] for x in index[1:]])
    for idx in table.indexes:
        if sorted(idx.columns.keys()) == cols:
            return True
    return False


def upgrade(migrate_engine):
    meta.bind = migrate_engine

    zones_table = Table('domains', meta, autoload=True)
    recordsets_table = Table('recordsets', meta, autoload=True)
    records_table = Table('records', meta, autoload=True)

    indices = [
        ['zone_deleted', zones_table.c.deleted],
        ['zone_tenant_deleted', zones_table.c.tenant_id,
         zones_table.c.deleted],
        ['rrset_type_domainid', recordsets_table.c.type,
         recordsets_table.c.domain_id],
        ['recordset_type_name', recordsets_table.c.type,
         recordsets_table.c.name],
        ['records_tenant', records_table.c.tenant_id]
    ]

    for ind in indices:
        if not index_exists(ind):
            index = Index(*ind)
            index.create(migrate_engine)


def downgrade(migrate_engine):
    meta.bind = migrate_engine

    zones_table = Table('domains', meta, autoload=True)
    recordsets_table = Table('recordsets', meta, autoload=True)
    records_table = Table('records', meta, autoload=True)

    indices = [
        ['zone_deleted', zones_table.c.deleted],
        ['zone_tenant_deleted', zones_table.c.tenant_id,
         zones_table.c.deleted],
        ['rrset_type_domainid', recordsets_table.c.type,
         recordsets_table.c.domain_id],
        ['recordset_type_name', recordsets_table.c.type,
         recordsets_table.c.name],
        ['records_tenant', records_table.c.tenant_id]
    ]

    for ind in indices:
        if index_exists(ind):
            index = Index(*ind)
            index.drop(migrate_engine)
fd0685960b2df7632bd384634a8ddcd1cfbde606
gmn/src/d1_gmn/app/model_util.py
gmn/src/d1_gmn/app/model_util.py
# -*- coding: utf-8 -*-

# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2016 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Database model utilities

These are in a separate module because model classes can only be referenced
in an active Django context. More general utilities can be used without an
active context.

Importing this module outside of Django context raises
django.core.exceptions.AppRegistryNotReady: Apps aren't loaded yet.
"""

import logging

import d1_gmn.app
import d1_gmn.app.models


def get_sci_model(pid):
    return d1_gmn.app.models.ScienceObject.objects.get(pid__did=pid)


def get_pids_for_all_locally_stored_objects():
    return d1_gmn.app.models.ScienceObject.objects.all().values_list(
        'pid__did', flat=True
    )


def delete_unused_subjects():
    """Delete any unused subjects from the database.

    This is not strictly required as any unused subjects will automatically
    be reused if needed in the future.
    """
    # This causes Django to create a single join (check with query.query)
    query = d1_gmn.app.models.Subject.objects.all()
    query = query.filter(scienceobject_submitter__isnull=True)
    query = query.filter(scienceobject_rights_holder__isnull=True)
    query = query.filter(eventlog__isnull=True)
    query = query.filter(permission__isnull=True)
    query = query.filter(whitelistforcreateupdatedelete__isnull=True)

    logging.debug('Deleting {} unused subjects:'.format(query.count()))
    for s in query.all():
        logging.debug(' {}'.format(s.subject))

    query.delete()
Add utilities for working with Django models
Add utilities for working with Django models
Python
apache-2.0
DataONEorg/d1_python,DataONEorg/d1_python,DataONEorg/d1_python,DataONEorg/d1_python
Add utilities for working with Django models
# -*- coding: utf-8 -*-

# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2016 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Database model utilities

These are in a separate module because model classes can only be referenced
in an active Django context. More general utilities can be used without an
active context.

Importing this module outside of Django context raises
django.core.exceptions.AppRegistryNotReady: Apps aren't loaded yet.
"""

import logging

import d1_gmn.app
import d1_gmn.app.models


def get_sci_model(pid):
    return d1_gmn.app.models.ScienceObject.objects.get(pid__did=pid)


def get_pids_for_all_locally_stored_objects():
    return d1_gmn.app.models.ScienceObject.objects.all().values_list(
        'pid__did', flat=True
    )


def delete_unused_subjects():
    """Delete any unused subjects from the database.

    This is not strictly required as any unused subjects will automatically
    be reused if needed in the future.
    """
    # This causes Django to create a single join (check with query.query)
    query = d1_gmn.app.models.Subject.objects.all()
    query = query.filter(scienceobject_submitter__isnull=True)
    query = query.filter(scienceobject_rights_holder__isnull=True)
    query = query.filter(eventlog__isnull=True)
    query = query.filter(permission__isnull=True)
    query = query.filter(whitelistforcreateupdatedelete__isnull=True)

    logging.debug('Deleting {} unused subjects:'.format(query.count()))
    for s in query.all():
        logging.debug(' {}'.format(s.subject))

    query.delete()
<commit_before><commit_msg>Add utilities for working with Django models<commit_after>
# -*- coding: utf-8 -*-

# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2016 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Database model utilities

These are in a separate module because model classes can only be referenced
in an active Django context. More general utilities can be used without an
active context.

Importing this module outside of Django context raises
django.core.exceptions.AppRegistryNotReady: Apps aren't loaded yet.
"""

import logging

import d1_gmn.app
import d1_gmn.app.models


def get_sci_model(pid):
    return d1_gmn.app.models.ScienceObject.objects.get(pid__did=pid)


def get_pids_for_all_locally_stored_objects():
    return d1_gmn.app.models.ScienceObject.objects.all().values_list(
        'pid__did', flat=True
    )


def delete_unused_subjects():
    """Delete any unused subjects from the database.

    This is not strictly required as any unused subjects will automatically
    be reused if needed in the future.
    """
    # This causes Django to create a single join (check with query.query)
    query = d1_gmn.app.models.Subject.objects.all()
    query = query.filter(scienceobject_submitter__isnull=True)
    query = query.filter(scienceobject_rights_holder__isnull=True)
    query = query.filter(eventlog__isnull=True)
    query = query.filter(permission__isnull=True)
    query = query.filter(whitelistforcreateupdatedelete__isnull=True)

    logging.debug('Deleting {} unused subjects:'.format(query.count()))
    for s in query.all():
        logging.debug(' {}'.format(s.subject))

    query.delete()
Add utilities for working with Django models# -*- coding: utf-8 -*- # This work was created by participants in the DataONE project, and is # jointly copyrighted by participating institutions in DataONE. For # more information on DataONE, see our web site at http://dataone.org. # # Copyright 2009-2016 DataONE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Database model utilities These are in a separate module because module classes can only be referenced in an active Django context. More general utilities can be used without an active context. Importing this module outside of Django context raises django.core.exceptions.AppRegistryNotReady: Apps aren't loaded yet. """ import logging import d1_gmn.app import d1_gmn.app.models def get_sci_model(pid): return d1_gmn.app.models.ScienceObject.objects.get(pid__did=pid) def get_pids_for_all_locally_stored_objects(): return d1_gmn.app.models.ScienceObject.objects.all().values_list( 'pid__did', flat=True ) def delete_unused_subjects(): """Delete any unused subjects from the database. This is not strictly required as any unused subjects will automatically be reused if needed in the future. """ # This causes Django to create a single join (check with query.query) query = d1_gmn.app.models.Subject.objects.all() query = query.filter(scienceobject_submitter__isnull=True) query = query.filter(scienceobject_rights_holder__isnull=True) query = query.filter(eventlog__isnull=True) query = query.filter(permission__isnull=True) query = query.filter(whitelistforcreateupdatedelete__isnull=True) logging.debug('Deleting {} unused subjects:'.format(query.count())) for s in query.all(): logging.debug(' {}'.format(s.subject)) query.delete()
<commit_before><commit_msg>Add utilities for working with Django models<commit_after># -*- coding: utf-8 -*- # This work was created by participants in the DataONE project, and is # jointly copyrighted by participating institutions in DataONE. For # more information on DataONE, see our web site at http://dataone.org. # # Copyright 2009-2016 DataONE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Database model utilities These are in a separate module because module classes can only be referenced in an active Django context. More general utilities can be used without an active context. Importing this module outside of Django context raises django.core.exceptions.AppRegistryNotReady: Apps aren't loaded yet. """ import logging import d1_gmn.app import d1_gmn.app.models def get_sci_model(pid): return d1_gmn.app.models.ScienceObject.objects.get(pid__did=pid) def get_pids_for_all_locally_stored_objects(): return d1_gmn.app.models.ScienceObject.objects.all().values_list( 'pid__did', flat=True ) def delete_unused_subjects(): """Delete any unused subjects from the database. This is not strictly required as any unused subjects will automatically be reused if needed in the future. """ # This causes Django to create a single join (check with query.query) query = d1_gmn.app.models.Subject.objects.all() query = query.filter(scienceobject_submitter__isnull=True) query = query.filter(scienceobject_rights_holder__isnull=True) query = query.filter(eventlog__isnull=True) query = query.filter(permission__isnull=True) query = query.filter(whitelistforcreateupdatedelete__isnull=True) logging.debug('Deleting {} unused subjects:'.format(query.count())) for s in query.all(): logging.debug(' {}'.format(s.subject)) query.delete()
e55f72c62cefbbda7e0e213779dedb8ec51b9a5a
bottleneck/tests/memory_test.py
bottleneck/tests/memory_test.py
import numpy as np import sys import bottleneck as bn import pytest @pytest.mark.skipif( sys.platform.startswith("win"), reason="resource module not available on windows" ) def test_memory_leak(): import resource arr = np.arange(1).reshape((1, 1)) starting = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss for i in range(1000): for axis in [None, 0, 1]: bn.nansum(arr, axis=axis) bn.nanargmax(arr, axis=axis) bn.nanargmin(arr, axis=axis) bn.nanmedian(arr, axis=axis) bn.nansum(arr, axis=axis) bn.nanmean(arr, axis=axis) bn.nanmin(arr, axis=axis) bn.nanmax(arr, axis=axis) bn.nanvar(arr, axis=axis) ending = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss diff = ending - starting diff_bytes = diff * resource.getpagesize() print(diff_bytes) # For 1.3.0 release, this had value of ~100kB assert diff_bytes == 0
Add memory leak regression test
TST: Add memory leak regression test
Python
bsd-2-clause
pydata/bottleneck,pydata/bottleneck,kwgoodman/bottleneck,kwgoodman/bottleneck,pydata/bottleneck,kwgoodman/bottleneck
TST: Add memory leak regression test
import numpy as np import sys import bottleneck as bn import pytest @pytest.mark.skipif( sys.platform.startswith("win"), reason="resource module not available on windows" ) def test_memory_leak(): import resource arr = np.arange(1).reshape((1, 1)) starting = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss for i in range(1000): for axis in [None, 0, 1]: bn.nansum(arr, axis=axis) bn.nanargmax(arr, axis=axis) bn.nanargmin(arr, axis=axis) bn.nanmedian(arr, axis=axis) bn.nansum(arr, axis=axis) bn.nanmean(arr, axis=axis) bn.nanmin(arr, axis=axis) bn.nanmax(arr, axis=axis) bn.nanvar(arr, axis=axis) ending = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss diff = ending - starting diff_bytes = diff * resource.getpagesize() print(diff_bytes) # For 1.3.0 release, this had value of ~100kB assert diff_bytes == 0
<commit_before><commit_msg>TST: Add memory leak regression test<commit_after>
import numpy as np import sys import bottleneck as bn import pytest @pytest.mark.skipif( sys.platform.startswith("win"), reason="resource module not available on windows" ) def test_memory_leak(): import resource arr = np.arange(1).reshape((1, 1)) starting = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss for i in range(1000): for axis in [None, 0, 1]: bn.nansum(arr, axis=axis) bn.nanargmax(arr, axis=axis) bn.nanargmin(arr, axis=axis) bn.nanmedian(arr, axis=axis) bn.nansum(arr, axis=axis) bn.nanmean(arr, axis=axis) bn.nanmin(arr, axis=axis) bn.nanmax(arr, axis=axis) bn.nanvar(arr, axis=axis) ending = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss diff = ending - starting diff_bytes = diff * resource.getpagesize() print(diff_bytes) # For 1.3.0 release, this had value of ~100kB assert diff_bytes == 0
TST: Add memory leak regression testimport numpy as np import sys import bottleneck as bn import pytest @pytest.mark.skipif( sys.platform.startswith("win"), reason="resource module not available on windows" ) def test_memory_leak(): import resource arr = np.arange(1).reshape((1, 1)) starting = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss for i in range(1000): for axis in [None, 0, 1]: bn.nansum(arr, axis=axis) bn.nanargmax(arr, axis=axis) bn.nanargmin(arr, axis=axis) bn.nanmedian(arr, axis=axis) bn.nansum(arr, axis=axis) bn.nanmean(arr, axis=axis) bn.nanmin(arr, axis=axis) bn.nanmax(arr, axis=axis) bn.nanvar(arr, axis=axis) ending = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss diff = ending - starting diff_bytes = diff * resource.getpagesize() print(diff_bytes) # For 1.3.0 release, this had value of ~100kB assert diff_bytes == 0
<commit_before><commit_msg>TST: Add memory leak regression test<commit_after>import numpy as np import sys import bottleneck as bn import pytest @pytest.mark.skipif( sys.platform.startswith("win"), reason="resource module not available on windows" ) def test_memory_leak(): import resource arr = np.arange(1).reshape((1, 1)) starting = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss for i in range(1000): for axis in [None, 0, 1]: bn.nansum(arr, axis=axis) bn.nanargmax(arr, axis=axis) bn.nanargmin(arr, axis=axis) bn.nanmedian(arr, axis=axis) bn.nansum(arr, axis=axis) bn.nanmean(arr, axis=axis) bn.nanmin(arr, axis=axis) bn.nanmax(arr, axis=axis) bn.nanvar(arr, axis=axis) ending = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss diff = ending - starting diff_bytes = diff * resource.getpagesize() print(diff_bytes) # For 1.3.0 release, this had value of ~100kB assert diff_bytes == 0
635b8b61be67a2c0f38ec95b30d9848dcd30de58
test_primes.py
test_primes.py
#!/usr/bin/env python2 if __name__ == '__main__': from find_primes import find_primes assert find_primes(50) == [ 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47] print 'All tests passed!'
Add a simple test of the trial division algorithm.
Add a simple test of the trial division algorithm.
Python
mit
ipqb/bootcamp-primes-activity
Add a simple test of the trial division algorithm.
#!/usr/bin/env python2 if __name__ == '__main__': from find_primes import find_primes assert find_primes(50) == [ 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47] print 'All tests passed!'
<commit_before><commit_msg>Add a simple test of the trial division algorithm.<commit_after>
#!/usr/bin/env python2 if __name__ == '__main__': from find_primes import find_primes assert find_primes(50) == [ 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47] print 'All tests passed!'
Add a simple test of the trial division algorithm.#!/usr/bin/env python2 if __name__ == '__main__': from find_primes import find_primes assert find_primes(50) == [ 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47] print 'All tests passed!'
<commit_before><commit_msg>Add a simple test of the trial division algorithm.<commit_after>#!/usr/bin/env python2 if __name__ == '__main__': from find_primes import find_primes assert find_primes(50) == [ 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47] print 'All tests passed!'
dbf436758f4b55010dcea6cea4416741ce21df8a
tests/conftest.py
tests/conftest.py
# Copyright (c) 2022. The Pycroft Authors. See the AUTHORS file.
# This file is part of the Pycroft project and licensed under the terms of
# the Apache License, Version 2.0. See the LICENSE file for details
import os
from typing import cast

import pytest
from sqlalchemy import event
from sqlalchemy.ext.declarative import DeferredReflection
from sqlalchemy.future import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.pool import SingletonThreadPool

from pycroft.model import drop_db_model, create_db_model
from pycroft.model.session import set_scoped_session, Session


@pytest.fixture(scope='session')
def engine():
    try:
        uri = os.environ['PYCROFT_DB_URI']
    except KeyError:
        raise RuntimeError("Environment variable PYCROFT_DB_URI must be "
                           "set to an SQLalchemy connection string.")

    return create_engine(uri, poolclass=SingletonThreadPool, future=True)


@pytest.fixture(scope='session')
def clean_engine(engine):
    connection = engine.connect()
    drop_db_model(connection)
    create_db_model(connection)
    connection.commit()
    return engine


@pytest.fixture
def connection(clean_engine):
    engine = clean_engine
    connection = engine.connect()
    # this turns our connection into „transactional state“
    # henceforth, every session binding to this connection will participate in this transaction.
    # see https://docs.sqlalchemy.org/en/14/orm/session_transaction.html#joining-a-session-into-an-external-transaction-such-as-for-test-suites
    transaction = connection.begin()  # outer, non-ORM transaction
    DeferredReflection.prepare(engine)

    yield connection

    transaction.rollback()
    connection.close()


@pytest.fixture()
def session(connection):
    """Provides a session to a created database.

    Rolled back after use
    """
    nested = connection.begin_nested()
    s = scoped_session(sessionmaker(bind=connection, future=True))
    set_scoped_session(s)
    session = cast(Session, s())

    # see the comment above
    @event.listens_for(session, "after_transaction_end")
    def end_savepoint(session, transaction):
        nonlocal nested
        if not nested.is_active:
            nested = connection.begin_nested()

    yield session

    # close_all_sessions()
    session.rollback()
    Session.remove()
    # if the transaction is still associated, this means it has e.g. pending trigger events.
    transaction_associated = nested.connection._transaction == nested
    if transaction_associated:
        nested.rollback()
Add `session` and `connection` fixtures
Add `session` and `connection` fixtures
Python
apache-2.0
agdsn/pycroft,agdsn/pycroft,agdsn/pycroft,agdsn/pycroft,agdsn/pycroft
Add `session` and `connection` fixtures
# Copyright (c) 2022. The Pycroft Authors. See the AUTHORS file.
# This file is part of the Pycroft project and licensed under the terms of
# the Apache License, Version 2.0. See the LICENSE file for details
import os
from typing import cast

import pytest
from sqlalchemy import event
from sqlalchemy.ext.declarative import DeferredReflection
from sqlalchemy.future import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.pool import SingletonThreadPool

from pycroft.model import drop_db_model, create_db_model
from pycroft.model.session import set_scoped_session, Session


@pytest.fixture(scope='session')
def engine():
    try:
        uri = os.environ['PYCROFT_DB_URI']
    except KeyError:
        raise RuntimeError("Environment variable PYCROFT_DB_URI must be "
                           "set to an SQLalchemy connection string.")

    return create_engine(uri, poolclass=SingletonThreadPool, future=True)


@pytest.fixture(scope='session')
def clean_engine(engine):
    connection = engine.connect()
    drop_db_model(connection)
    create_db_model(connection)
    connection.commit()
    return engine


@pytest.fixture
def connection(clean_engine):
    engine = clean_engine
    connection = engine.connect()
    # this turns our connection into „transactional state“
    # henceforth, every session binding to this connection will participate in this transaction.
    # see https://docs.sqlalchemy.org/en/14/orm/session_transaction.html#joining-a-session-into-an-external-transaction-such-as-for-test-suites
    transaction = connection.begin()  # outer, non-ORM transaction
    DeferredReflection.prepare(engine)

    yield connection

    transaction.rollback()
    connection.close()


@pytest.fixture()
def session(connection):
    """Provides a session to a created database.

    Rolled back after use
    """
    nested = connection.begin_nested()
    s = scoped_session(sessionmaker(bind=connection, future=True))
    set_scoped_session(s)
    session = cast(Session, s())

    # see the comment above
    @event.listens_for(session, "after_transaction_end")
    def end_savepoint(session, transaction):
        nonlocal nested
        if not nested.is_active:
            nested = connection.begin_nested()

    yield session

    # close_all_sessions()
    session.rollback()
    Session.remove()
    # if the transaction is still associated, this means it has e.g. pending trigger events.
    transaction_associated = nested.connection._transaction == nested
    if transaction_associated:
        nested.rollback()
<commit_before><commit_msg>Add `session` and `connection` fixtures<commit_after>
# Copyright (c) 2022. The Pycroft Authors. See the AUTHORS file.
# This file is part of the Pycroft project and licensed under the terms of
# the Apache License, Version 2.0. See the LICENSE file for details
import os
from typing import cast

import pytest
from sqlalchemy import event
from sqlalchemy.ext.declarative import DeferredReflection
from sqlalchemy.future import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.pool import SingletonThreadPool

from pycroft.model import drop_db_model, create_db_model
from pycroft.model.session import set_scoped_session, Session


@pytest.fixture(scope='session')
def engine():
    try:
        uri = os.environ['PYCROFT_DB_URI']
    except KeyError:
        raise RuntimeError("Environment variable PYCROFT_DB_URI must be "
                           "set to an SQLalchemy connection string.")

    return create_engine(uri, poolclass=SingletonThreadPool, future=True)


@pytest.fixture(scope='session')
def clean_engine(engine):
    connection = engine.connect()
    drop_db_model(connection)
    create_db_model(connection)
    connection.commit()
    return engine


@pytest.fixture
def connection(clean_engine):
    engine = clean_engine
    connection = engine.connect()
    # this turns our connection into „transactional state“
    # henceforth, every session binding to this connection will participate in this transaction.
    # see https://docs.sqlalchemy.org/en/14/orm/session_transaction.html#joining-a-session-into-an-external-transaction-such-as-for-test-suites
    transaction = connection.begin()  # outer, non-ORM transaction
    DeferredReflection.prepare(engine)

    yield connection

    transaction.rollback()
    connection.close()


@pytest.fixture()
def session(connection):
    """Provides a session to a created database.

    Rolled back after use
    """
    nested = connection.begin_nested()
    s = scoped_session(sessionmaker(bind=connection, future=True))
    set_scoped_session(s)
    session = cast(Session, s())

    # see the comment above
    @event.listens_for(session, "after_transaction_end")
    def end_savepoint(session, transaction):
        nonlocal nested
        if not nested.is_active:
            nested = connection.begin_nested()

    yield session

    # close_all_sessions()
    session.rollback()
    Session.remove()
    # if the transaction is still associated, this means it has e.g. pending trigger events.
    transaction_associated = nested.connection._transaction == nested
    if transaction_associated:
        nested.rollback()
Add `session` and `connection` fixtures# Copyright (c) 2022. The Pycroft Authors. See the AUTHORS file.
# This file is part of the Pycroft project and licensed under the terms of
# the Apache License, Version 2.0. See the LICENSE file for details
import os
from typing import cast

import pytest
from sqlalchemy import event
from sqlalchemy.ext.declarative import DeferredReflection
from sqlalchemy.future import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.pool import SingletonThreadPool

from pycroft.model import drop_db_model, create_db_model
from pycroft.model.session import set_scoped_session, Session


@pytest.fixture(scope='session')
def engine():
    try:
        uri = os.environ['PYCROFT_DB_URI']
    except KeyError:
        raise RuntimeError("Environment variable PYCROFT_DB_URI must be "
                           "set to an SQLalchemy connection string.")

    return create_engine(uri, poolclass=SingletonThreadPool, future=True)


@pytest.fixture(scope='session')
def clean_engine(engine):
    connection = engine.connect()
    drop_db_model(connection)
    create_db_model(connection)
    connection.commit()
    return engine


@pytest.fixture
def connection(clean_engine):
    engine = clean_engine
    connection = engine.connect()
    # this turns our connection into „transactional state“
    # henceforth, every session binding to this connection will participate in this transaction.
    # see https://docs.sqlalchemy.org/en/14/orm/session_transaction.html#joining-a-session-into-an-external-transaction-such-as-for-test-suites
    transaction = connection.begin()  # outer, non-ORM transaction
    DeferredReflection.prepare(engine)

    yield connection

    transaction.rollback()
    connection.close()


@pytest.fixture()
def session(connection):
    """Provides a session to a created database.

    Rolled back after use
    """
    nested = connection.begin_nested()
    s = scoped_session(sessionmaker(bind=connection, future=True))
    set_scoped_session(s)
    session = cast(Session, s())

    # see the comment above
    @event.listens_for(session, "after_transaction_end")
    def end_savepoint(session, transaction):
        nonlocal nested
        if not nested.is_active:
            nested = connection.begin_nested()

    yield session

    # close_all_sessions()
    session.rollback()
    Session.remove()
    # if the transaction is still associated, this means it has e.g. pending trigger events.
    transaction_associated = nested.connection._transaction == nested
    if transaction_associated:
        nested.rollback()
<commit_before><commit_msg>Add `session` and `connection` fixtures<commit_after># Copyright (c) 2022. The Pycroft Authors. See the AUTHORS file.
# This file is part of the Pycroft project and licensed under the terms of
# the Apache License, Version 2.0. See the LICENSE file for details
import os
from typing import cast

import pytest
from sqlalchemy import event
from sqlalchemy.ext.declarative import DeferredReflection
from sqlalchemy.future import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.pool import SingletonThreadPool

from pycroft.model import drop_db_model, create_db_model
from pycroft.model.session import set_scoped_session, Session


@pytest.fixture(scope='session')
def engine():
    try:
        uri = os.environ['PYCROFT_DB_URI']
    except KeyError:
        raise RuntimeError("Environment variable PYCROFT_DB_URI must be "
                           "set to an SQLalchemy connection string.")

    return create_engine(uri, poolclass=SingletonThreadPool, future=True)


@pytest.fixture(scope='session')
def clean_engine(engine):
    connection = engine.connect()
    drop_db_model(connection)
    create_db_model(connection)
    connection.commit()
    return engine


@pytest.fixture
def connection(clean_engine):
    engine = clean_engine
    connection = engine.connect()
    # this turns our connection into „transactional state“
    # henceforth, every session binding to this connection will participate in this transaction.
    # see https://docs.sqlalchemy.org/en/14/orm/session_transaction.html#joining-a-session-into-an-external-transaction-such-as-for-test-suites
    transaction = connection.begin()  # outer, non-ORM transaction
    DeferredReflection.prepare(engine)

    yield connection

    transaction.rollback()
    connection.close()


@pytest.fixture()
def session(connection):
    """Provides a session to a created database.

    Rolled back after use
    """
    nested = connection.begin_nested()
    s = scoped_session(sessionmaker(bind=connection, future=True))
    set_scoped_session(s)
    session = cast(Session, s())

    # see the comment above
    @event.listens_for(session, "after_transaction_end")
    def end_savepoint(session, transaction):
        nonlocal nested
        if not nested.is_active:
            nested = connection.begin_nested()

    yield session

    # close_all_sessions()
    session.rollback()
    Session.remove()
    # if the transaction is still associated, this means it has e.g. pending trigger events.
    transaction_associated = nested.connection._transaction == nested
    if transaction_associated:
        nested.rollback()
087abbfa1f45f76cc86e4fc658d231af8a949264
tests/test_env.py
tests/test_env.py
import sys from botbot import env, envchecks def test_EnvironmentChecker_constructor(): assert env.EnvironmentChecker(sys.stdout) def test_env_check_constructor_register(): ec = env.EnvironmentChecker(sys.stdout, envchecks.ALLENVCHECKS) assert ec.checks def test_env_check_register_multiple_args(): ec = env.EnvironmentChecker(sys.stdout) ec.register(*envchecks.ALLENVCHECKS) assert len(ec.checks) == 2 def test_env_check_single_iter_arg(): ec = env.EnvironmentChecker(sys.stdout) ec.register(envchecks.ALLENVCHECKS) assert len(ec.checks) == 2
Add tests for EnvironmentChecker object
Add tests for EnvironmentChecker object
Python
mit
jackstanek/BotBot,jackstanek/BotBot
Add tests for EnvironmentChecker object
import sys from botbot import env, envchecks def test_EnvironmentChecker_constructor(): assert env.EnvironmentChecker(sys.stdout) def test_env_check_constructor_register(): ec = env.EnvironmentChecker(sys.stdout, envchecks.ALLENVCHECKS) assert ec.checks def test_env_check_register_multiple_args(): ec = env.EnvironmentChecker(sys.stdout) ec.register(*envchecks.ALLENVCHECKS) assert len(ec.checks) == 2 def test_env_check_single_iter_arg(): ec = env.EnvironmentChecker(sys.stdout) ec.register(envchecks.ALLENVCHECKS) assert len(ec.checks) == 2
<commit_before><commit_msg>Add tests for EnvironmentChecker object<commit_after>
import sys from botbot import env, envchecks def test_EnvironmentChecker_constructor(): assert env.EnvironmentChecker(sys.stdout) def test_env_check_constructor_register(): ec = env.EnvironmentChecker(sys.stdout, envchecks.ALLENVCHECKS) assert ec.checks def test_env_check_register_multiple_args(): ec = env.EnvironmentChecker(sys.stdout) ec.register(*envchecks.ALLENVCHECKS) assert len(ec.checks) == 2 def test_env_check_single_iter_arg(): ec = env.EnvironmentChecker(sys.stdout) ec.register(envchecks.ALLENVCHECKS) assert len(ec.checks) == 2
Add tests for EnvironmentChecker objectimport sys from botbot import env, envchecks def test_EnvironmentChecker_constructor(): assert env.EnvironmentChecker(sys.stdout) def test_env_check_constructor_register(): ec = env.EnvironmentChecker(sys.stdout, envchecks.ALLENVCHECKS) assert ec.checks def test_env_check_register_multiple_args(): ec = env.EnvironmentChecker(sys.stdout) ec.register(*envchecks.ALLENVCHECKS) assert len(ec.checks) == 2 def test_env_check_single_iter_arg(): ec = env.EnvironmentChecker(sys.stdout) ec.register(envchecks.ALLENVCHECKS) assert len(ec.checks) == 2
<commit_before><commit_msg>Add tests for EnvironmentChecker object<commit_after>import sys from botbot import env, envchecks def test_EnvironmentChecker_constructor(): assert env.EnvironmentChecker(sys.stdout) def test_env_check_constructor_register(): ec = env.EnvironmentChecker(sys.stdout, envchecks.ALLENVCHECKS) assert ec.checks def test_env_check_register_multiple_args(): ec = env.EnvironmentChecker(sys.stdout) ec.register(*envchecks.ALLENVCHECKS) assert len(ec.checks) == 2 def test_env_check_single_iter_arg(): ec = env.EnvironmentChecker(sys.stdout) ec.register(envchecks.ALLENVCHECKS) assert len(ec.checks) == 2
77cb10ac2d524c43f43387d6c62f757c3e37fbbe
py/maximum-swap.py
py/maximum-swap.py
class Solution(object): def maximumSwap(self, num): """ :type num: int :rtype: int """ counter = [0] * 10 snum = str(num) lnum = len(snum) for d in map(int, snum): counter[d] += 1 for i, d in enumerate(map(int, snum)): for di in xrange(9, d, -1): if counter[di] > 0: for j in xrange(lnum - 1, i, -1): if int(snum[j]) == di: return int(snum[:i] + snum[j] + snum[i + 1:j] + snum[i] + snum[j + 1:]) counter[d] -= 1 return num
Add py solution for 670. Maximum Swap
Add py solution for 670. Maximum Swap 670. Maximum Swap: https://leetcode.com/problems/maximum-swap/
Python
apache-2.0
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
Add py solution for 670. Maximum Swap 670. Maximum Swap: https://leetcode.com/problems/maximum-swap/
class Solution(object): def maximumSwap(self, num): """ :type num: int :rtype: int """ counter = [0] * 10 snum = str(num) lnum = len(snum) for d in map(int, snum): counter[d] += 1 for i, d in enumerate(map(int, snum)): for di in xrange(9, d, -1): if counter[di] > 0: for j in xrange(lnum - 1, i, -1): if int(snum[j]) == di: return int(snum[:i] + snum[j] + snum[i + 1:j] + snum[i] + snum[j + 1:]) counter[d] -= 1 return num
<commit_before><commit_msg>Add py solution for 670. Maximum Swap 670. Maximum Swap: https://leetcode.com/problems/maximum-swap/<commit_after>
class Solution(object): def maximumSwap(self, num): """ :type num: int :rtype: int """ counter = [0] * 10 snum = str(num) lnum = len(snum) for d in map(int, snum): counter[d] += 1 for i, d in enumerate(map(int, snum)): for di in xrange(9, d, -1): if counter[di] > 0: for j in xrange(lnum - 1, i, -1): if int(snum[j]) == di: return int(snum[:i] + snum[j] + snum[i + 1:j] + snum[i] + snum[j + 1:]) counter[d] -= 1 return num
Add py solution for 670. Maximum Swap 670. Maximum Swap: https://leetcode.com/problems/maximum-swap/class Solution(object): def maximumSwap(self, num): """ :type num: int :rtype: int """ counter = [0] * 10 snum = str(num) lnum = len(snum) for d in map(int, snum): counter[d] += 1 for i, d in enumerate(map(int, snum)): for di in xrange(9, d, -1): if counter[di] > 0: for j in xrange(lnum - 1, i, -1): if int(snum[j]) == di: return int(snum[:i] + snum[j] + snum[i + 1:j] + snum[i] + snum[j + 1:]) counter[d] -= 1 return num
<commit_before><commit_msg>Add py solution for 670. Maximum Swap 670. Maximum Swap: https://leetcode.com/problems/maximum-swap/<commit_after>class Solution(object): def maximumSwap(self, num): """ :type num: int :rtype: int """ counter = [0] * 10 snum = str(num) lnum = len(snum) for d in map(int, snum): counter[d] += 1 for i, d in enumerate(map(int, snum)): for di in xrange(9, d, -1): if counter[di] > 0: for j in xrange(lnum - 1, i, -1): if int(snum[j]) == di: return int(snum[:i] + snum[j] + snum[i + 1:j] + snum[i] + snum[j + 1:]) counter[d] -= 1 return num
54749556c7c689bfd2bee85f60a69676cc74035b
capomastro_wsgi.py
capomastro_wsgi.py
""" WSGI config for capomastro project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/ """ import os app_root = os.path.dirname(os.path.realpath(__file__)) activate_this = os.path.join(app_root, 'bin', 'activate_this.py') execfile(activate_this, dict(__file__=activate_this)) os.environ.setdefault("DJANGO_SETTINGS_MODULE", "capomastro.settings") from django.core.wsgi import get_wsgi_application application = get_wsgi_application()
Add temporary bootstrapping to get the service working with how the charm presently expects to run the app.
Add temporary bootstrapping to get the service working with how the charm presently expects to run the app.
Python
mit
timrchavez/capomastro,timrchavez/capomastro
Add temporary bootstrapping to get the service working with how the charm presently expects to run the app.
""" WSGI config for capomastro project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/ """ import os app_root = os.path.dirname(os.path.realpath(__file__)) activate_this = os.path.join(app_root, 'bin', 'activate_this.py') execfile(activate_this, dict(__file__=activate_this)) os.environ.setdefault("DJANGO_SETTINGS_MODULE", "capomastro.settings") from django.core.wsgi import get_wsgi_application application = get_wsgi_application()
<commit_before><commit_msg>Add temporary bootstrapping to get the service working with how the charm presently expects to run the app.<commit_after>
""" WSGI config for capomastro project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/ """ import os app_root = os.path.dirname(os.path.realpath(__file__)) activate_this = os.path.join(app_root, 'bin', 'activate_this.py') execfile(activate_this, dict(__file__=activate_this)) os.environ.setdefault("DJANGO_SETTINGS_MODULE", "capomastro.settings") from django.core.wsgi import get_wsgi_application application = get_wsgi_application()
Add temporary bootstrapping to get the service working with how the charm presently expects to run the app.""" WSGI config for capomastro project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/ """ import os app_root = os.path.dirname(os.path.realpath(__file__)) activate_this = os.path.join(app_root, 'bin', 'activate_this.py') execfile(activate_this, dict(__file__=activate_this)) os.environ.setdefault("DJANGO_SETTINGS_MODULE", "capomastro.settings") from django.core.wsgi import get_wsgi_application application = get_wsgi_application()
<commit_before><commit_msg>Add temporary bootstrapping to get the service working with how the charm presently expects to run the app.<commit_after>""" WSGI config for capomastro project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/ """ import os app_root = os.path.dirname(os.path.realpath(__file__)) activate_this = os.path.join(app_root, 'bin', 'activate_this.py') execfile(activate_this, dict(__file__=activate_this)) os.environ.setdefault("DJANGO_SETTINGS_MODULE", "capomastro.settings") from django.core.wsgi import get_wsgi_application application = get_wsgi_application()
186e8424a16188825210b4c05656ae3b8fcc28c2
provpy/provpyexample_Elements.py
provpy/provpyexample_Elements.py
import json
import datetime

from provpy import *

# Define your namespaces (see provpyexample_PROVURIRef_PROVNamespace.py)
FOAF = PROVNamespace("http://xmlns.com/foaf/0.1/")
ex = PROVNamespace("http://www.example.com/")
dcterms = PROVNamespace("http://purl.org/dc/terms/")
xsd = PROVNamespace('http://www.w3.org/2001/XMLSchema-datatypes#')
prov = PROVNamespace("http://www.w3.org/ns/prov-dm/")

# create a provenance container
examplegraph = PROVContainer()

# Set the default namespace name
examplegraph.set_default_namespace("http://www.example.com/")

#add the other namespaces into the container
examplegraph.add_namespace("dcterms","http://purl.org/dc/terms/")
examplegraph.add_namespace("foaf","http://xmlns.com/foaf/0.1/")

# add entities, first define the attributes in a dictionary
attrdict = {"type": "File",
            ex["path"]: "/shared/crime.txt",
            ex["creator"]: "Alice"}
# then create the entity
# If you give the id as a string, it will be treated as a localname
# under the default namespace
e0 = Entity(id="e0",attributes=attrdict)
# you can then add the entity into the provenance container
examplegraph.add(e0)

# define the attributes for the next entity
lit0 = PROVLiteral("2011-11-16T16:06:00",xsd["dateTime"])
attrdict ={prov["type"]: ex["File"],
           ex["path"]: "/shared/crime.txt",
           dcterms["creator"]: FOAF['Alice'],
           ex["content"]: "",
           dcterms["create"]: lit0}
# create the entity, note this time we give the id as a PROVURIRef
e1 = Entity(FOAF['Foo'],attributes=attrdict)
examplegraph.add(e1)

# add activities
# You can give the attributes during the creation if there are not many
a0 = Activity(id=None,starttime=datetime.datetime(2008, 7, 6, 5, 4, 3),attributes={prov["recipeLink"]: ex["create-file"]})
examplegraph.add(a0)

# You can have the JSON of the container with the to_provJSON() function
# as container is also a provenance record
print json.dumps(examplegraph.to_provJSON(),indent=4)
Add an example file demonstrating defining, adding and serializing prov elements in a prov container
Add an example file demonstrating defining, adding and serializing prov elements in a prov container
Python
mit
satra/prov,krischer/prov,trungdong/prov,krischer/prov
Add an example file demonstrating defining, adding and serializing prov elements in a prov container
import json
import datetime

from provpy import *

# Define your namespaces (see provpyexample_PROVURIRef_PROVNamespace.py)
FOAF = PROVNamespace("http://xmlns.com/foaf/0.1/")
ex = PROVNamespace("http://www.example.com/")
dcterms = PROVNamespace("http://purl.org/dc/terms/")
xsd = PROVNamespace('http://www.w3.org/2001/XMLSchema-datatypes#')
prov = PROVNamespace("http://www.w3.org/ns/prov-dm/")

# create a provenance container
examplegraph = PROVContainer()

# Set the default namespace name
examplegraph.set_default_namespace("http://www.example.com/")

#add the other namespaces into the container
examplegraph.add_namespace("dcterms","http://purl.org/dc/terms/")
examplegraph.add_namespace("foaf","http://xmlns.com/foaf/0.1/")

# add entities, first define the attributes in a dictionary
attrdict = {"type": "File",
            ex["path"]: "/shared/crime.txt",
            ex["creator"]: "Alice"}
# then create the entity
# If you give the id as a string, it will be treated as a localname
# under the default namespace
e0 = Entity(id="e0",attributes=attrdict)
# you can then add the entity into the provenance container
examplegraph.add(e0)

# define the attributes for the next entity
lit0 = PROVLiteral("2011-11-16T16:06:00",xsd["dateTime"])
attrdict ={prov["type"]: ex["File"],
           ex["path"]: "/shared/crime.txt",
           dcterms["creator"]: FOAF['Alice'],
           ex["content"]: "",
           dcterms["create"]: lit0}
# create the entity, note this time we give the id as a PROVURIRef
e1 = Entity(FOAF['Foo'],attributes=attrdict)
examplegraph.add(e1)

# add activities
# You can give the attributes during the creation if there are not many
a0 = Activity(id=None,starttime=datetime.datetime(2008, 7, 6, 5, 4, 3),attributes={prov["recipeLink"]: ex["create-file"]})
examplegraph.add(a0)

# You can have the JSON of the container with the to_provJSON() function
# as container is also a provenance record
print json.dumps(examplegraph.to_provJSON(),indent=4)
<commit_before><commit_msg>Add an example file demonstrating defining, adding and serializing prov elements in a prov container<commit_after>
import json
import datetime

from provpy import *

# Define your namespaces (see provpyexample_PROVURIRef_PROVNamespace.py)
FOAF = PROVNamespace("http://xmlns.com/foaf/0.1/")
ex = PROVNamespace("http://www.example.com/")
dcterms = PROVNamespace("http://purl.org/dc/terms/")
xsd = PROVNamespace('http://www.w3.org/2001/XMLSchema-datatypes#')
prov = PROVNamespace("http://www.w3.org/ns/prov-dm/")

# create a provenance container
examplegraph = PROVContainer()

# Set the default namespace name
examplegraph.set_default_namespace("http://www.example.com/")

#add the other namespaces into the container
examplegraph.add_namespace("dcterms","http://purl.org/dc/terms/")
examplegraph.add_namespace("foaf","http://xmlns.com/foaf/0.1/")

# add entities, first define the attributes in a dictionary
attrdict = {"type": "File",
            ex["path"]: "/shared/crime.txt",
            ex["creator"]: "Alice"}
# then create the entity
# If you give the id as a string, it will be treated as a localname
# under the default namespace
e0 = Entity(id="e0",attributes=attrdict)
# you can then add the entity into the provenance container
examplegraph.add(e0)

# define the attributes for the next entity
lit0 = PROVLiteral("2011-11-16T16:06:00",xsd["dateTime"])
attrdict ={prov["type"]: ex["File"],
           ex["path"]: "/shared/crime.txt",
           dcterms["creator"]: FOAF['Alice'],
           ex["content"]: "",
           dcterms["create"]: lit0}
# create the entity, note this time we give the id as a PROVURIRef
e1 = Entity(FOAF['Foo'],attributes=attrdict)
examplegraph.add(e1)

# add activities
# You can give the attributes during the creation if there are not many
a0 = Activity(id=None,starttime=datetime.datetime(2008, 7, 6, 5, 4, 3),attributes={prov["recipeLink"]: ex["create-file"]})
examplegraph.add(a0)

# You can have the JSON of the container with the to_provJSON() function
# as container is also a provenance record
print json.dumps(examplegraph.to_provJSON(),indent=4)
Add an example file demonstrating defining, adding and serializing prov elements in a prov containerimport json
import datetime

from provpy import *

# Define your namespaces (see provpyexample_PROVURIRef_PROVNamespace.py)
FOAF = PROVNamespace("http://xmlns.com/foaf/0.1/")
ex = PROVNamespace("http://www.example.com/")
dcterms = PROVNamespace("http://purl.org/dc/terms/")
xsd = PROVNamespace('http://www.w3.org/2001/XMLSchema-datatypes#')
prov = PROVNamespace("http://www.w3.org/ns/prov-dm/")

# create a provenance container
examplegraph = PROVContainer()

# Set the default namespace name
examplegraph.set_default_namespace("http://www.example.com/")

#add the other namespaces into the container
examplegraph.add_namespace("dcterms","http://purl.org/dc/terms/")
examplegraph.add_namespace("foaf","http://xmlns.com/foaf/0.1/")

# add entities, first define the attributes in a dictionary
attrdict = {"type": "File",
            ex["path"]: "/shared/crime.txt",
            ex["creator"]: "Alice"}
# then create the entity
# If you give the id as a string, it will be treated as a localname
# under the default namespace
e0 = Entity(id="e0",attributes=attrdict)
# you can then add the entity into the provenance container
examplegraph.add(e0)

# define the attributes for the next entity
lit0 = PROVLiteral("2011-11-16T16:06:00",xsd["dateTime"])
attrdict ={prov["type"]: ex["File"],
           ex["path"]: "/shared/crime.txt",
           dcterms["creator"]: FOAF['Alice'],
           ex["content"]: "",
           dcterms["create"]: lit0}
# create the entity, note this time we give the id as a PROVURIRef
e1 = Entity(FOAF['Foo'],attributes=attrdict)
examplegraph.add(e1)

# add activities
# You can give the attributes during the creation if there are not many
a0 = Activity(id=None,starttime=datetime.datetime(2008, 7, 6, 5, 4, 3),attributes={prov["recipeLink"]: ex["create-file"]})
examplegraph.add(a0)

# You can have the JSON of the container with the to_provJSON() function
# as container is also a provenance record
print json.dumps(examplegraph.to_provJSON(),indent=4)
<commit_before><commit_msg>Add an example file demonstrating defining, adding and serializing prov elements in a prov container<commit_after>import json
import datetime

from provpy import *

# Define your namespaces (see provpyexample_PROVURIRef_PROVNamespace.py)
FOAF = PROVNamespace("http://xmlns.com/foaf/0.1/")
ex = PROVNamespace("http://www.example.com/")
dcterms = PROVNamespace("http://purl.org/dc/terms/")
xsd = PROVNamespace('http://www.w3.org/2001/XMLSchema-datatypes#')
prov = PROVNamespace("http://www.w3.org/ns/prov-dm/")

# create a provenance container
examplegraph = PROVContainer()

# Set the default namespace name
examplegraph.set_default_namespace("http://www.example.com/")

#add the other namespaces into the container
examplegraph.add_namespace("dcterms","http://purl.org/dc/terms/")
examplegraph.add_namespace("foaf","http://xmlns.com/foaf/0.1/")

# add entities, first define the attributes in a dictionary
attrdict = {"type": "File",
            ex["path"]: "/shared/crime.txt",
            ex["creator"]: "Alice"}
# then create the entity
# If you give the id as a string, it will be treated as a localname
# under the default namespace
e0 = Entity(id="e0",attributes=attrdict)
# you can then add the entity into the provenance container
examplegraph.add(e0)

# define the attributes for the next entity
lit0 = PROVLiteral("2011-11-16T16:06:00",xsd["dateTime"])
attrdict ={prov["type"]: ex["File"],
           ex["path"]: "/shared/crime.txt",
           dcterms["creator"]: FOAF['Alice'],
           ex["content"]: "",
           dcterms["create"]: lit0}
# create the entity, note this time we give the id as a PROVURIRef
e1 = Entity(FOAF['Foo'],attributes=attrdict)
examplegraph.add(e1)

# add activities
# You can give the attributes during the creation if there are not many
a0 = Activity(id=None,starttime=datetime.datetime(2008, 7, 6, 5, 4, 3),attributes={prov["recipeLink"]: ex["create-file"]})
examplegraph.add(a0)

# You can have the JSON of the container with the to_provJSON() function
# as container is also a provenance record
print json.dumps(examplegraph.to_provJSON(),indent=4)
c44c26949b896840fe22e4f947decee1ac7b2382
examples/customizable_hotkey.py
examples/customizable_hotkey.py
import keyboard print('Press and release your desired shortcut: ') shortcut = keyboard.read_shortcut() print('Shortcut selected:', shortcut) def on_triggered(): print("Triggered!") keyboard.add_hotkey(shortcut, on_triggered) print("Press ESC to stop.") keyboard.wait('esc')
Add example for customizable hotkeys
Add example for customizable hotkeys
Python
mit
glitchassassin/keyboard,boppreh/keyboard
Add example for customizable hotkeys
import keyboard print('Press and release your desired shortcut: ') shortcut = keyboard.read_shortcut() print('Shortcut selected:', shortcut) def on_triggered(): print("Triggered!") keyboard.add_hotkey(shortcut, on_triggered) print("Press ESC to stop.") keyboard.wait('esc')
<commit_before><commit_msg>Add example for customizable hotkeys<commit_after>
import keyboard print('Press and release your desired shortcut: ') shortcut = keyboard.read_shortcut() print('Shortcut selected:', shortcut) def on_triggered(): print("Triggered!") keyboard.add_hotkey(shortcut, on_triggered) print("Press ESC to stop.") keyboard.wait('esc')
Add example for customizable hotkeysimport keyboard print('Press and release your desired shortcut: ') shortcut = keyboard.read_shortcut() print('Shortcut selected:', shortcut) def on_triggered(): print("Triggered!") keyboard.add_hotkey(shortcut, on_triggered) print("Press ESC to stop.") keyboard.wait('esc')
<commit_before><commit_msg>Add example for customizable hotkeys<commit_after>import keyboard print('Press and release your desired shortcut: ') shortcut = keyboard.read_shortcut() print('Shortcut selected:', shortcut) def on_triggered(): print("Triggered!") keyboard.add_hotkey(shortcut, on_triggered) print("Press ESC to stop.") keyboard.wait('esc')
e68e9bc397d760a0d16acc695a1815141ad3658c
elmo/moon_tracker/utils.py
elmo/moon_tracker/utils.py
def user_can_view_scans(user, moon): return ( user_can_delete_scans(user, moon) or user.has_perm('eve_sde.can_view_scans', moon.planet.system) or user.has_perm('eve_sde.can_view_scans', moon.planet.system.constellation) or user.has_perm('eve_sde.can_view_scans', moon.planet.system.constellation.region) ) def user_can_add_scans(user, moon): return ( user_can_delete_scans(user, moon) or user.has_perm('eve_sde.can_add_scans', moon.planet.system) or user.has_perm('eve_sde.can_add_scans', moon.planet.system.constellation) or user.has_perm('eve_sde.can_add_scans', moon.planet.system.constellation.region) ) def user_can_delete_scans(user, moon): return ( user.has_perm('eve_sde.can_delete_scans', moon.planet.system) or user.has_perm('eve_sde.can_delete_scans', moon.planet.system.constellation) or user.has_perm('eve_sde.can_delete_scans', moon.planet.system.constellation.region) )
Add helper functions for permissions.
Add helper functions for permissions.
Python
mit
StephenSwat/eve_lunar_mining_organiser,StephenSwat/eve_lunar_mining_organiser
Add helper functions for permissions.
def user_can_view_scans(user, moon): return ( user_can_delete_scans(user, moon) or user.has_perm('eve_sde.can_view_scans', moon.planet.system) or user.has_perm('eve_sde.can_view_scans', moon.planet.system.constellation) or user.has_perm('eve_sde.can_view_scans', moon.planet.system.constellation.region) ) def user_can_add_scans(user, moon): return ( user_can_delete_scans(user, moon) or user.has_perm('eve_sde.can_add_scans', moon.planet.system) or user.has_perm('eve_sde.can_add_scans', moon.planet.system.constellation) or user.has_perm('eve_sde.can_add_scans', moon.planet.system.constellation.region) ) def user_can_delete_scans(user, moon): return ( user.has_perm('eve_sde.can_delete_scans', moon.planet.system) or user.has_perm('eve_sde.can_delete_scans', moon.planet.system.constellation) or user.has_perm('eve_sde.can_delete_scans', moon.planet.system.constellation.region) )
<commit_before><commit_msg>Add helper functions for permissions.<commit_after>
def user_can_view_scans(user, moon): return ( user_can_delete_scans(user, moon) or user.has_perm('eve_sde.can_view_scans', moon.planet.system) or user.has_perm('eve_sde.can_view_scans', moon.planet.system.constellation) or user.has_perm('eve_sde.can_view_scans', moon.planet.system.constellation.region) ) def user_can_add_scans(user, moon): return ( user_can_delete_scans(user, moon) or user.has_perm('eve_sde.can_add_scans', moon.planet.system) or user.has_perm('eve_sde.can_add_scans', moon.planet.system.constellation) or user.has_perm('eve_sde.can_add_scans', moon.planet.system.constellation.region) ) def user_can_delete_scans(user, moon): return ( user.has_perm('eve_sde.can_delete_scans', moon.planet.system) or user.has_perm('eve_sde.can_delete_scans', moon.planet.system.constellation) or user.has_perm('eve_sde.can_delete_scans', moon.planet.system.constellation.region) )
Add helper functions for permissions.def user_can_view_scans(user, moon): return ( user_can_delete_scans(user, moon) or user.has_perm('eve_sde.can_view_scans', moon.planet.system) or user.has_perm('eve_sde.can_view_scans', moon.planet.system.constellation) or user.has_perm('eve_sde.can_view_scans', moon.planet.system.constellation.region) ) def user_can_add_scans(user, moon): return ( user_can_delete_scans(user, moon) or user.has_perm('eve_sde.can_add_scans', moon.planet.system) or user.has_perm('eve_sde.can_add_scans', moon.planet.system.constellation) or user.has_perm('eve_sde.can_add_scans', moon.planet.system.constellation.region) ) def user_can_delete_scans(user, moon): return ( user.has_perm('eve_sde.can_delete_scans', moon.planet.system) or user.has_perm('eve_sde.can_delete_scans', moon.planet.system.constellation) or user.has_perm('eve_sde.can_delete_scans', moon.planet.system.constellation.region) )
<commit_before><commit_msg>Add helper functions for permissions.<commit_after>def user_can_view_scans(user, moon): return ( user_can_delete_scans(user, moon) or user.has_perm('eve_sde.can_view_scans', moon.planet.system) or user.has_perm('eve_sde.can_view_scans', moon.planet.system.constellation) or user.has_perm('eve_sde.can_view_scans', moon.planet.system.constellation.region) ) def user_can_add_scans(user, moon): return ( user_can_delete_scans(user, moon) or user.has_perm('eve_sde.can_add_scans', moon.planet.system) or user.has_perm('eve_sde.can_add_scans', moon.planet.system.constellation) or user.has_perm('eve_sde.can_add_scans', moon.planet.system.constellation.region) ) def user_can_delete_scans(user, moon): return ( user.has_perm('eve_sde.can_delete_scans', moon.planet.system) or user.has_perm('eve_sde.can_delete_scans', moon.planet.system.constellation) or user.has_perm('eve_sde.can_delete_scans', moon.planet.system.constellation.region) )
10f5e0585d471fdfb79181956baaa83d3dac5008
test/test_match.py
test/test_match.py
# encoding: utf-8 """ Tests for the udiskie.match module. These tests are intended to demonstrate and ensure the correct usage of the config file used by udiskie for custom device options. """ import unittest import tempfile import shutil import os.path import gc from udiskie.match import OptionFilter, FilterMatcher class TestDev(object): def __init__(self, object_path, id_type, id_uuid): self.object_path = object_path self.id_type = id_type self.id_uuid = id_uuid class TestFilterMatcher(unittest.TestCase): """ Tests for the udiskie.match.FilterMatcher class. """ def setUp(self): """Create a temporary config file.""" self.base = tempfile.mkdtemp() self.config_file = os.path.join(self.base, 'filters.conf') with open(self.config_file, 'wt') as f: f.write(''' [mount_options] uuid.ignored-device = __ignore__ uuid.device-with-options = noatime,nouser fstype.vfat = ro,nouser''') self.filter_matcher = FilterMatcher.from_config_file(self.config_file) def tearDown(self): """Remove the config file.""" gc.collect() shutil.rmtree(self.base) def test_ignored(self): """Test the FilterMatcher.is_ignored() method.""" self.assertTrue( self.filter_matcher.is_ignored( TestDev('/ignore', 'vfat', 'ignored-device'))) self.assertFalse( self.filter_matcher.is_ignored( TestDev('/options', 'vfat', 'device-with-options'))) self.assertFalse( self.filter_matcher.is_ignored( TestDev('/nomatch', 'vfat', 'no-matching-id'))) def test_options(self): """Test the FilterMatcher.get_mount_options() method.""" self.assertItemsEqual( ['noatime', 'ro', 'nouser'], self.filter_matcher.get_mount_options( TestDev('/options', 'vfat', 'device-with-options'))) self.assertItemsEqual( ['noatime', 'nouser'], self.filter_matcher.get_mount_options( TestDev('/optonly', 'ext', 'device-with-options'))) self.assertItemsEqual( ['ro', 'nouser'], self.filter_matcher.get_mount_options( TestDev('/fsonly', 'vfat', 'no-matching-id'))) self.assertItemsEqual( [], self.filter_matcher.get_mount_options( TestDev('/nomatch', 'ext', 'no-matching-id')))
Add unit tests for udiskie.match
Add unit tests for udiskie.match I hope this proves my modifications to be working.
Python
mit
coldfix/udiskie,coldfix/udiskie,khardix/udiskie,mathstuf/udiskie,pstray/udiskie,pstray/udiskie
Add unit tests for udiskie.match I hope this proves my modifications to be working.
# encoding: utf-8 """ Tests for the udiskie.match module. These tests are intended to demonstrate and ensure the correct usage of the config file used by udiskie for custom device options. """ import unittest import tempfile import shutil import os.path import gc from udiskie.match import OptionFilter, FilterMatcher class TestDev(object): def __init__(self, object_path, id_type, id_uuid): self.object_path = object_path self.id_type = id_type self.id_uuid = id_uuid class TestFilterMatcher(unittest.TestCase): """ Tests for the udiskie.match.FilterMatcher class. """ def setUp(self): """Create a temporary config file.""" self.base = tempfile.mkdtemp() self.config_file = os.path.join(self.base, 'filters.conf') with open(self.config_file, 'wt') as f: f.write(''' [mount_options] uuid.ignored-device = __ignore__ uuid.device-with-options = noatime,nouser fstype.vfat = ro,nouser''') self.filter_matcher = FilterMatcher.from_config_file(self.config_file) def tearDown(self): """Remove the config file.""" gc.collect() shutil.rmtree(self.base) def test_ignored(self): """Test the FilterMatcher.is_ignored() method.""" self.assertTrue( self.filter_matcher.is_ignored( TestDev('/ignore', 'vfat', 'ignored-device'))) self.assertFalse( self.filter_matcher.is_ignored( TestDev('/options', 'vfat', 'device-with-options'))) self.assertFalse( self.filter_matcher.is_ignored( TestDev('/nomatch', 'vfat', 'no-matching-id'))) def test_options(self): """Test the FilterMatcher.get_mount_options() method.""" self.assertItemsEqual( ['noatime', 'ro', 'nouser'], self.filter_matcher.get_mount_options( TestDev('/options', 'vfat', 'device-with-options'))) self.assertItemsEqual( ['noatime', 'nouser'], self.filter_matcher.get_mount_options( TestDev('/optonly', 'ext', 'device-with-options'))) self.assertItemsEqual( ['ro', 'nouser'], self.filter_matcher.get_mount_options( TestDev('/fsonly', 'vfat', 'no-matching-id'))) self.assertItemsEqual( [], self.filter_matcher.get_mount_options( TestDev('/nomatch', 'ext', 'no-matching-id')))
<commit_before><commit_msg>Add unit tests for udiskie.match I hope this proves my modifications to be working.<commit_after>
# encoding: utf-8 """ Tests for the udiskie.match module. These tests are intended to demonstrate and ensure the correct usage of the config file used by udiskie for custom device options. """ import unittest import tempfile import shutil import os.path import gc from udiskie.match import OptionFilter, FilterMatcher class TestDev(object): def __init__(self, object_path, id_type, id_uuid): self.object_path = object_path self.id_type = id_type self.id_uuid = id_uuid class TestFilterMatcher(unittest.TestCase): """ Tests for the udiskie.match.FilterMatcher class. """ def setUp(self): """Create a temporary config file.""" self.base = tempfile.mkdtemp() self.config_file = os.path.join(self.base, 'filters.conf') with open(self.config_file, 'wt') as f: f.write(''' [mount_options] uuid.ignored-device = __ignore__ uuid.device-with-options = noatime,nouser fstype.vfat = ro,nouser''') self.filter_matcher = FilterMatcher.from_config_file(self.config_file) def tearDown(self): """Remove the config file.""" gc.collect() shutil.rmtree(self.base) def test_ignored(self): """Test the FilterMatcher.is_ignored() method.""" self.assertTrue( self.filter_matcher.is_ignored( TestDev('/ignore', 'vfat', 'ignored-device'))) self.assertFalse( self.filter_matcher.is_ignored( TestDev('/options', 'vfat', 'device-with-options'))) self.assertFalse( self.filter_matcher.is_ignored( TestDev('/nomatch', 'vfat', 'no-matching-id'))) def test_options(self): """Test the FilterMatcher.get_mount_options() method.""" self.assertItemsEqual( ['noatime', 'ro', 'nouser'], self.filter_matcher.get_mount_options( TestDev('/options', 'vfat', 'device-with-options'))) self.assertItemsEqual( ['noatime', 'nouser'], self.filter_matcher.get_mount_options( TestDev('/optonly', 'ext', 'device-with-options'))) self.assertItemsEqual( ['ro', 'nouser'], self.filter_matcher.get_mount_options( TestDev('/fsonly', 'vfat', 'no-matching-id'))) self.assertItemsEqual( [], self.filter_matcher.get_mount_options( TestDev('/nomatch', 'ext', 'no-matching-id')))
Add unit tests for udiskie.match I hope this proves my modifications to be working.# encoding: utf-8 """ Tests for the udiskie.match module. These tests are intended to demonstrate and ensure the correct usage of the config file used by udiskie for custom device options. """ import unittest import tempfile import shutil import os.path import gc from udiskie.match import OptionFilter, FilterMatcher class TestDev(object): def __init__(self, object_path, id_type, id_uuid): self.object_path = object_path self.id_type = id_type self.id_uuid = id_uuid class TestFilterMatcher(unittest.TestCase): """ Tests for the udiskie.match.FilterMatcher class. """ def setUp(self): """Create a temporary config file.""" self.base = tempfile.mkdtemp() self.config_file = os.path.join(self.base, 'filters.conf') with open(self.config_file, 'wt') as f: f.write(''' [mount_options] uuid.ignored-device = __ignore__ uuid.device-with-options = noatime,nouser fstype.vfat = ro,nouser''') self.filter_matcher = FilterMatcher.from_config_file(self.config_file) def tearDown(self): """Remove the config file.""" gc.collect() shutil.rmtree(self.base) def test_ignored(self): """Test the FilterMatcher.is_ignored() method.""" self.assertTrue( self.filter_matcher.is_ignored( TestDev('/ignore', 'vfat', 'ignored-device'))) self.assertFalse( self.filter_matcher.is_ignored( TestDev('/options', 'vfat', 'device-with-options'))) self.assertFalse( self.filter_matcher.is_ignored( TestDev('/nomatch', 'vfat', 'no-matching-id'))) def test_options(self): """Test the FilterMatcher.get_mount_options() method.""" self.assertItemsEqual( ['noatime', 'ro', 'nouser'], self.filter_matcher.get_mount_options( TestDev('/options', 'vfat', 'device-with-options'))) self.assertItemsEqual( ['noatime', 'nouser'], self.filter_matcher.get_mount_options( TestDev('/optonly', 'ext', 'device-with-options'))) self.assertItemsEqual( ['ro', 'nouser'], self.filter_matcher.get_mount_options( TestDev('/fsonly', 'vfat', 'no-matching-id'))) self.assertItemsEqual( [], self.filter_matcher.get_mount_options( TestDev('/nomatch', 'ext', 'no-matching-id')))
<commit_before><commit_msg>Add unit tests for udiskie.match I hope this proves my modifications to be working.<commit_after># encoding: utf-8 """ Tests for the udiskie.match module. These tests are intended to demonstrate and ensure the correct usage of the config file used by udiskie for custom device options. """ import unittest import tempfile import shutil import os.path import gc from udiskie.match import OptionFilter, FilterMatcher class TestDev(object): def __init__(self, object_path, id_type, id_uuid): self.object_path = object_path self.id_type = id_type self.id_uuid = id_uuid class TestFilterMatcher(unittest.TestCase): """ Tests for the udiskie.match.FilterMatcher class. """ def setUp(self): """Create a temporary config file.""" self.base = tempfile.mkdtemp() self.config_file = os.path.join(self.base, 'filters.conf') with open(self.config_file, 'wt') as f: f.write(''' [mount_options] uuid.ignored-device = __ignore__ uuid.device-with-options = noatime,nouser fstype.vfat = ro,nouser''') self.filter_matcher = FilterMatcher.from_config_file(self.config_file) def tearDown(self): """Remove the config file.""" gc.collect() shutil.rmtree(self.base) def test_ignored(self): """Test the FilterMatcher.is_ignored() method.""" self.assertTrue( self.filter_matcher.is_ignored( TestDev('/ignore', 'vfat', 'ignored-device'))) self.assertFalse( self.filter_matcher.is_ignored( TestDev('/options', 'vfat', 'device-with-options'))) self.assertFalse( self.filter_matcher.is_ignored( TestDev('/nomatch', 'vfat', 'no-matching-id'))) def test_options(self): """Test the FilterMatcher.get_mount_options() method.""" self.assertItemsEqual( ['noatime', 'ro', 'nouser'], self.filter_matcher.get_mount_options( TestDev('/options', 'vfat', 'device-with-options'))) self.assertItemsEqual( ['noatime', 'nouser'], self.filter_matcher.get_mount_options( TestDev('/optonly', 'ext', 'device-with-options'))) self.assertItemsEqual( ['ro', 'nouser'], self.filter_matcher.get_mount_options( TestDev('/fsonly', 'vfat', 'no-matching-id'))) self.assertItemsEqual( [], self.filter_matcher.get_mount_options( TestDev('/nomatch', 'ext', 'no-matching-id')))
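The udiskie record above fixes FilterMatcher's public surface (from_config_file(), is_ignored(), get_mount_options()) and its config grammar: a [mount_options] section whose keys carry a uuid. or fstype. prefix, and whose special value __ignore__ marks a device to skip. A minimal sketch of a matcher honoring that contract follows; only the interface and the key grammar come from the test, so the ConfigParser-based parsing and the merge-options-as-a-set behavior are assumptions rather than udiskie's actual implementation.

try:
    from configparser import ConfigParser                       # Python 3
except ImportError:
    from ConfigParser import SafeConfigParser as ConfigParser   # Python 2

IGNORE = '__ignore__'

class SimpleFilterMatcher(object):
    """Toy stand-in for udiskie.match.FilterMatcher (sketch only)."""

    def __init__(self, rules):
        # rules: iterable of (kind, ident, options), kind is 'uuid' or 'fstype'
        self._rules = list(rules)

    @classmethod
    def from_config_file(cls, path):
        parser = ConfigParser()
        parser.read(path)
        rules = []
        for key, value in parser.items('mount_options'):
            kind, _, ident = key.partition('.')
            options = [opt.strip() for opt in value.split(',')]
            rules.append((kind, ident, options))
        return cls(rules)

    def _matching_options(self, device):
        for kind, ident, options in self._rules:
            if ((kind == 'uuid' and ident == device.id_uuid) or
                    (kind == 'fstype' and ident == device.id_type)):
                yield options

    def is_ignored(self, device):
        return any(IGNORE in opts for opts in self._matching_options(device))

    def get_mount_options(self, device):
        merged = set()
        for opts in self._matching_options(device):
            merged.update(opt for opt in opts if opt != IGNORE)
        return sorted(merged)

On a device with id_uuid 'device-with-options' and id_type 'vfat', the uuid rule contributes noatime,nouser and the fstype rule contributes ro,nouser, so the merged result is exactly the {noatime, nouser, ro} set that the first assertItemsEqual in the test expects.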
c672e12779061a7999ce5802730fd2502a68a8ce
l10n_it_ipa/model/partner.py
l10n_it_ipa/model/partner.py
# -*- coding: utf-8 -*- ############################################################################## # # Copyright (C) 2014 KTec S.r.l. # (<http://www.ktec.it>). # # Copyright (C) 2014 Associazione Odoo Italia # (<http://www.odoo-italia.org>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp import models from openerp import fields class res_partner(models.Model): _inherit = 'res.partner' ipa_code = fields.Char( 'IPA Code' )
# -*- coding: utf-8 -*- ############################################################################## # # Copyright (C) 2014 KTec S.r.l. # (<http://www.ktec.it>). # # Copyright (C) 2014 Associazione Odoo Italia # (<http://www.odoo-italia.org>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp import models from openerp import fields class ResPartner(models.Model): _inherit = 'res.partner' ipa_code = fields.Char(string='IPA Code')
Use PascaleCase notation in class
Use PascaleCase notation in class
Python
agpl-3.0
linkitspa/l10n-italy,hurrinico/l10n-italy,ApuliaSoftware/l10n-italy,linkitspa/l10n-italy,luca-vercelli/l10n-italy,linkitspa/l10n-italy,maxhome1/l10n-italy,abstract-open-solutions/l10n-italy,alessandrocamilli/l10n-italy,scigghia/l10n-italy,odoo-isa/l10n-italy
# -*- coding: utf-8 -*- ############################################################################## # # Copyright (C) 2014 KTec S.r.l. # (<http://www.ktec.it>). # # Copyright (C) 2014 Associazione Odoo Italia # (<http://www.odoo-italia.org>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp import models from openerp import fields class res_partner(models.Model): _inherit = 'res.partner' ipa_code = fields.Char( 'IPA Code' ) Use PascaleCase notation in class
# -*- coding: utf-8 -*- ############################################################################## # # Copyright (C) 2014 KTec S.r.l. # (<http://www.ktec.it>). # # Copyright (C) 2014 Associazione Odoo Italia # (<http://www.odoo-italia.org>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp import models from openerp import fields class ResPartner(models.Model): _inherit = 'res.partner' ipa_code = fields.Char(string='IPA Code')
<commit_before># -*- coding: utf-8 -*- ############################################################################## # # Copyright (C) 2014 KTec S.r.l. # (<http://www.ktec.it>). # # Copyright (C) 2014 Associazione Odoo Italia # (<http://www.odoo-italia.org>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp import models from openerp import fields class res_partner(models.Model): _inherit = 'res.partner' ipa_code = fields.Char( 'IPA Code' ) <commit_msg>Use PascaleCase notation in class<commit_after>
# -*- coding: utf-8 -*- ############################################################################## # # Copyright (C) 2014 KTec S.r.l. # (<http://www.ktec.it>). # # Copyright (C) 2014 Associazione Odoo Italia # (<http://www.odoo-italia.org>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp import models from openerp import fields class ResPartner(models.Model): _inherit = 'res.partner' ipa_code = fields.Char(string='IPA Code')
# -*- coding: utf-8 -*- ############################################################################## # # Copyright (C) 2014 KTec S.r.l. # (<http://www.ktec.it>). # # Copyright (C) 2014 Associazione Odoo Italia # (<http://www.odoo-italia.org>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp import models from openerp import fields class res_partner(models.Model): _inherit = 'res.partner' ipa_code = fields.Char( 'IPA Code' ) Use PascaleCase notation in class# -*- coding: utf-8 -*- ############################################################################## # # Copyright (C) 2014 KTec S.r.l. # (<http://www.ktec.it>). # # Copyright (C) 2014 Associazione Odoo Italia # (<http://www.odoo-italia.org>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp import models from openerp import fields class ResPartner(models.Model): _inherit = 'res.partner' ipa_code = fields.Char(string='IPA Code')
<commit_before># -*- coding: utf-8 -*- ############################################################################## # # Copyright (C) 2014 KTec S.r.l. # (<http://www.ktec.it>). # # Copyright (C) 2014 Associazione Odoo Italia # (<http://www.odoo-italia.org>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp import models from openerp import fields class res_partner(models.Model): _inherit = 'res.partner' ipa_code = fields.Char( 'IPA Code' ) <commit_msg>Use PascaleCase notation in class<commit_after># -*- coding: utf-8 -*- ############################################################################## # # Copyright (C) 2014 KTec S.r.l. # (<http://www.ktec.it>). # # Copyright (C) 2014 Associazione Odoo Italia # (<http://www.odoo-italia.org>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp import models from openerp import fields class ResPartner(models.Model): _inherit = 'res.partner' ipa_code = fields.Char(string='IPA Code')
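Besides the CapWords rename, the record illustrates the standard Odoo extension idiom: declaring _inherit = 'res.partner' with no _name of its own extends the existing model in place, so every res.partner record gains an ipa_code column, and fields.Char(string='IPA Code') should be equivalent to the old positional fields.Char('IPA Code'), since string is the first parameter of Char in the new API. A hypothetical transaction test (not part of the module; the partner data is made up) would exercise the field like this:

from openerp.tests.common import TransactionCase

class TestIpaCode(TransactionCase):
    # Hypothetical test, written only to illustrate the new field.
    def test_ipa_code_is_stored(self):
        partner = self.env['res.partner'].create({
            'name': 'Comune di Esempio',   # made-up public-administration partner
            'ipa_code': 'c_a123',          # made-up IPA code
        })
        self.assertEqual(partner.ipa_code, 'c_a123')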
be465be9b2224eadb93203f10ad4972d1d6e5dc4
python_apps/pypo/pure.py
python_apps/pypo/pure.py
import re def version_cmp(version1, version2): def normalize(v): return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")] return cmp(normalize(version1), normalize(version2))
Make sure Liquidsoap 1.1.1 is used
CC-5133: Make sure Liquidsoap 1.1.1 is used add missing file
Python
agpl-3.0
radiorabe/airtime,sourcefabric/Airtime,ReganDryke/airtime,radiorabe/airtime,comiconomenclaturist/libretime,radiorabe/airtime,sourcefabric/airtime,LibreTime/libretime,justvanbloom/airtime,comiconomenclaturist/libretime,thnkloud9/Airtime,Lapotor/libretime,justvanbloom/airtime,LibreTime/libretime,justvanbloom/airtime,ReganDryke/airtime,thnkloud9/Airtime,sourcefabric/airtime,ReganDryke/airtime,Lapotor/libretime,radiorabe/airtime,Ryex/airtime,ReganDryke/airtime,LibreTime/libretime,ReganDryke/airtime,justvanbloom/airtime,sourcefabric/Airtime,thnkloud9/Airtime,Lapotor/libretime,comiconomenclaturist/libretime,sourcefabric/Airtime,Lapotor/libretime,ReganDryke/airtime,Lapotor/libretime,Ryex/airtime,sourcefabric/Airtime,Ryex/airtime,radiorabe/airtime,Ryex/airtime,sourcefabric/Airtime,radiorabe/airtime,sourcefabric/airtime,LibreTime/libretime,sourcefabric/airtime,radiorabe/airtime,sourcefabric/airtime,thnkloud9/Airtime,sourcefabric/Airtime,Ryex/airtime,sourcefabric/airtime,justvanbloom/airtime,thnkloud9/Airtime,thnkloud9/Airtime,Lapotor/libretime,comiconomenclaturist/libretime,comiconomenclaturist/libretime,Ryex/airtime,justvanbloom/airtime,LibreTime/libretime,thnkloud9/Airtime,sourcefabric/Airtime,LibreTime/libretime,Ryex/airtime,justvanbloom/airtime,comiconomenclaturist/libretime,ReganDryke/airtime,sourcefabric/airtime,comiconomenclaturist/libretime
CC-5133: Make sure Liquidsoap 1.1.1 is used add missing file
import re def version_cmp(version1, version2): def normalize(v): return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")] return cmp(normalize(version1), normalize(version2))
<commit_before><commit_msg>CC-5133: Make sure Liquidsoap 1.1.1 is used add missing file<commit_after>
import re def version_cmp(version1, version2): def normalize(v): return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")] return cmp(normalize(version1), normalize(version2))
CC-5133: Make sure Liquidsoap 1.1.1 is used add missing fileimport re def version_cmp(version1, version2): def normalize(v): return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")] return cmp(normalize(version1), normalize(version2))
<commit_before><commit_msg>CC-5133: Make sure Liquidsoap 1.1.1 is used add missing file<commit_after>import re def version_cmp(version1, version2): def normalize(v): return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")] return cmp(normalize(version1), normalize(version2))
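The version_cmp helper above, presumably what pypo uses to enforce the Liquidsoap 1.1.1 requirement, compares versions numerically per segment after normalize() strips trailing '.0' segments, so '1.1' and '1.1.0' compare equal and '1.9' sorts before '1.10' where plain string comparison would get it wrong. It leans on Python 2's built-in cmp(), which Python 3 removed; a Python 3 port with a few illustrative checks (the version strings are made up):

import re

def version_cmp(version1, version2):
    def normalize(v):
        # '1.1.0' -> [1, 1]: trailing '.0' segments are stripped first
        return [int(x) for x in re.sub(r'(\.0+)*$', '', v).split('.')]
    n1, n2 = normalize(version1), normalize(version2)
    return (n1 > n2) - (n1 < n2)   # Python 3 stand-in for cmp()

assert version_cmp('1.1.1', '1.1.1') == 0
assert version_cmp('1.1.0', '1.1') == 0    # trailing zeros are insignificant
assert version_cmp('1.1.1', '1.0.9') > 0
assert version_cmp('1.9', '1.10') < 0      # numeric, not lexicographic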