| commit (stringlengths 40-40) | old_file (stringlengths 4-118) | new_file (stringlengths 4-118) | old_contents (stringlengths 0-2.94k) | new_contents (stringlengths 1-4.43k) | subject (stringlengths 15-444) | message (stringlengths 16-3.45k) | lang (stringclasses 1 value) | license (stringclasses 13 values) | repos (stringlengths 5-43.2k) | prompt (stringlengths 17-4.58k) | response (stringlengths 1-4.43k) | prompt_tagged (stringlengths 58-4.62k) | response_tagged (stringlengths 1-4.43k) | text (stringlengths 132-7.29k) | text_tagged (stringlengths 173-7.33k) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1ffc7f76e814d0395653989706a16d4b4797a44e
|
talks/management/commands/add_talks.py
|
talks/management/commands/add_talks.py
|
from django.core.management.base import BaseCommand
from events.models import Event
from cfp.models import PaperApplication
from talks.models import Talk
class Command(BaseCommand):
help = "Bulk add talks from application ids"
def add_arguments(self, parser):
parser.add_argument('event_id', type=int)
parser.add_argument('application_ids', type=int, nargs='+')
def handle(self, *args, **options):
event = Event.objects.get(pk=options['event_id'])
applications = PaperApplication.objects.filter(pk__in=options['application_ids'])
print("Event: {}".format(event))
print("\nApplications:")
for application in applications:
print("* {}".format(application))
print("\nAdd talks?")
input("Press any key to continue")
for application in applications:
talk, created = Talk.objects.get_or_create(event=event, application=application)
print("created" if created else "exists", talk)
|
Add command for bulk adding talks
|
Add command for bulk adding talks
|
Python
|
bsd-3-clause
|
WebCampZg/conference-web,WebCampZg/conference-web,WebCampZg/conference-web
|
Add command for bulk adding talks
|
from django.core.management.base import BaseCommand
from events.models import Event
from cfp.models import PaperApplication
from talks.models import Talk
class Command(BaseCommand):
help = "Bulk add talks from application ids"
def add_arguments(self, parser):
parser.add_argument('event_id', type=int)
parser.add_argument('application_ids', type=int, nargs='+')
def handle(self, *args, **options):
event = Event.objects.get(pk=options['event_id'])
applications = PaperApplication.objects.filter(pk__in=options['application_ids'])
print("Event: {}".format(event))
print("\nApplications:")
for application in applications:
print("* {}".format(application))
print("\nAdd talks?")
input("Press any key to continue")
for application in applications:
talk, created = Talk.objects.get_or_create(event=event, application=application)
print("created" if created else "exists", talk)
|
<commit_before><commit_msg>Add command for bulk adding talks<commit_after>
|
from django.core.management.base import BaseCommand
from events.models import Event
from cfp.models import PaperApplication
from talks.models import Talk
class Command(BaseCommand):
help = "Bulk add talks from application ids"
def add_arguments(self, parser):
parser.add_argument('event_id', type=int)
parser.add_argument('application_ids', type=int, nargs='+')
def handle(self, *args, **options):
event = Event.objects.get(pk=options['event_id'])
applications = PaperApplication.objects.filter(pk__in=options['application_ids'])
print("Event: {}".format(event))
print("\nApplications:")
for application in applications:
print("* {}".format(application))
print("\nAdd talks?")
input("Press any key to continue")
for application in applications:
talk, created = Talk.objects.get_or_create(event=event, application=application)
print("created" if created else "exists", talk)
|
Add command for bulk adding talksfrom django.core.management.base import BaseCommand
from events.models import Event
from cfp.models import PaperApplication
from talks.models import Talk
class Command(BaseCommand):
help = "Bulk add talks from application ids"
def add_arguments(self, parser):
parser.add_argument('event_id', type=int)
parser.add_argument('application_ids', type=int, nargs='+')
def handle(self, *args, **options):
event = Event.objects.get(pk=options['event_id'])
applications = PaperApplication.objects.filter(pk__in=options['application_ids'])
print("Event: {}".format(event))
print("\nApplications:")
for application in applications:
print("* {}".format(application))
print("\nAdd talks?")
input("Press any key to continue")
for application in applications:
talk, created = Talk.objects.get_or_create(event=event, application=application)
print("created" if created else "exists", talk)
|
<commit_before><commit_msg>Add command for bulk adding talks<commit_after>from django.core.management.base import BaseCommand
from events.models import Event
from cfp.models import PaperApplication
from talks.models import Talk
class Command(BaseCommand):
help = "Bulk add talks from application ids"
def add_arguments(self, parser):
parser.add_argument('event_id', type=int)
parser.add_argument('application_ids', type=int, nargs='+')
def handle(self, *args, **options):
event = Event.objects.get(pk=options['event_id'])
applications = PaperApplication.objects.filter(pk__in=options['application_ids'])
print("Event: {}".format(event))
print("\nApplications:")
for application in applications:
print("* {}".format(application))
print("\nAdd talks?")
input("Press any key to continue")
for application in applications:
talk, created = Talk.objects.get_or_create(event=event, application=application)
print("created" if created else "exists", talk)
|
|
f6d7bff264acd27984aaf2b60f76f6a9bec7ed34
|
RevitPyCVC/Test/externaleventexample.py
|
RevitPyCVC/Test/externaleventexample.py
|
from Autodesk.Revit.UI import IExternalEventHandler, IExternalApplication, Result, ExternalEvent, IExternalCommand, TaskDialog
import clr
clr.AddReference("System.Windows.Forms")  # the dialog below is a WinForms Form
from System.Windows.Forms import Form
class ExternalEventExample(IExternalEventHandler):
    def execute(self, app):
        TaskDialog.Show("External Event", "Click Close to close")
    def GetName(self):
        return "External Event Example"
class ExternalEventExampleApp(IExternalApplication):
    def __init__(self, app=None, my_form=None):
        self.app = app
        self.my_form = my_form
    def on_shutdown(self, application):
        if self.my_form is not None and self.my_form.Visible:
            self.my_form.Close()
        return Result.Succeeded
    def on_startup(self, application):
        self.my_form = None
        return Result.Succeeded
    def show_form(self, application):
        if self.my_form is None or self.my_form.Disposed:
            handler = ExternalEventExample()
            ex_event = ExternalEvent.Create(handler)
            self.my_form = ExternalEventExampleDialog(ex_event, handler)
            self.my_form.Show()
class Command(IExternalCommand):
    def execute(self, command_data, message, elements):
        try:
            ExternalEventExampleApp.show_form(command_data.Application)
            return Result.Succeeded
        except Exception:
            return Result.Failed
class ExternalEventExampleDialog(Form):
    def __init__(self, ex_event, handler):
        self.ex_event = ex_event
        self.handler = handler
    def OnFormClosed(self, e):
        self.ex_event.Dispose()
        self.ex_event = None
        self.handler = None
        super(ExternalEventExampleDialog, self).OnFormClosed(e)
    def close_button_click(self, sender, e):
        self.Close()
    def show_messagebutton_click(self, sender, e):
        self.ex_event.Raise()
|
Add GUI for 3D Rotation script
|
Add GUI for 3D Rotation script
|
Python
|
mit
|
Nahouhak/pythoncvc.net,Nahouhak/pythoncvc.net
|
Add GUI for 3D Rotation script
|
from Autodesk.Revit.UI import IExternalEventHandler, IExternalApplication, Result, ExternalEvent, IExternalCommand, TaskDialog
import clr
clr.AddReference("System.Windows.Forms")  # the dialog below is a WinForms Form
from System.Windows.Forms import Form
class ExternalEventExample(IExternalEventHandler):
    def execute(self, app):
        TaskDialog.Show("External Event", "Click Close to close")
    def GetName(self):
        return "External Event Example"
class ExternalEventExampleApp(IExternalApplication):
    def __init__(self, app=None, my_form=None):
        self.app = app
        self.my_form = my_form
    def on_shutdown(self, application):
        if self.my_form is not None and self.my_form.Visible:
            self.my_form.Close()
        return Result.Succeeded
    def on_startup(self, application):
        self.my_form = None
        return Result.Succeeded
    def show_form(self, application):
        if self.my_form is None or self.my_form.Disposed:
            handler = ExternalEventExample()
            ex_event = ExternalEvent.Create(handler)
            self.my_form = ExternalEventExampleDialog(ex_event, handler)
            self.my_form.Show()
class Command(IExternalCommand):
    def execute(self, command_data, message, elements):
        try:
            ExternalEventExampleApp.show_form(command_data.Application)
            return Result.Succeeded
        except Exception:
            return Result.Failed
class ExternalEventExampleDialog(Form):
    def __init__(self, ex_event, handler):
        self.ex_event = ex_event
        self.handler = handler
    def OnFormClosed(self, e):
        self.ex_event.Dispose()
        self.ex_event = None
        self.handler = None
        super(ExternalEventExampleDialog, self).OnFormClosed(e)
    def close_button_click(self, sender, e):
        self.Close()
    def show_messagebutton_click(self, sender, e):
        self.ex_event.Raise()
|
<commit_before><commit_msg>Add GUI for 3D Rotation script<commit_after>
|
from Autodesk.Revit.UI import IExternalEventHandler, IExternalApplication, Result, ExternalEvent, IExternalCommand, TaskDialog
import clr
clr.AddReference("System.Windows.Forms")  # the dialog below is a WinForms Form
from System.Windows.Forms import Form
class ExternalEventExample(IExternalEventHandler):
    def execute(self, app):
        TaskDialog.Show("External Event", "Click Close to close")
    def GetName(self):
        return "External Event Example"
class ExternalEventExampleApp(IExternalApplication):
    def __init__(self, app=None, my_form=None):
        self.app = app
        self.my_form = my_form
    def on_shutdown(self, application):
        if self.my_form is not None and self.my_form.Visible:
            self.my_form.Close()
        return Result.Succeeded
    def on_startup(self, application):
        self.my_form = None
        return Result.Succeeded
    def show_form(self, application):
        if self.my_form is None or self.my_form.Disposed:
            handler = ExternalEventExample()
            ex_event = ExternalEvent.Create(handler)
            self.my_form = ExternalEventExampleDialog(ex_event, handler)
            self.my_form.Show()
class Command(IExternalCommand):
    def execute(self, command_data, message, elements):
        try:
            ExternalEventExampleApp.show_form(command_data.Application)
            return Result.Succeeded
        except Exception:
            return Result.Failed
class ExternalEventExampleDialog(Form):
    def __init__(self, ex_event, handler):
        self.ex_event = ex_event
        self.handler = handler
    def OnFormClosed(self, e):
        self.ex_event.Dispose()
        self.ex_event = None
        self.handler = None
        super(ExternalEventExampleDialog, self).OnFormClosed(e)
    def close_button_click(self, sender, e):
        self.Close()
    def show_messagebutton_click(self, sender, e):
        self.ex_event.Raise()
|
Add GUI for 3D Rotation scriptfrom Autodesk.Revit.UI import IExternalEventHandler, IExternalApplication, Result, ExternalEvent, IExternalCommand, TaskDialog
import clr
clr.AddReference("System.Windows.Forms")  # the dialog below is a WinForms Form
from System.Windows.Forms import Form
class ExternalEventExample(IExternalEventHandler):
    def execute(self, app):
        TaskDialog.Show("External Event", "Click Close to close")
    def GetName(self):
        return "External Event Example"
class ExternalEventExampleApp(IExternalApplication):
    def __init__(self, app=None, my_form=None):
        self.app = app
        self.my_form = my_form
    def on_shutdown(self, application):
        if self.my_form is not None and self.my_form.Visible:
            self.my_form.Close()
        return Result.Succeeded
    def on_startup(self, application):
        self.my_form = None
        return Result.Succeeded
    def show_form(self, application):
        if self.my_form is None or self.my_form.Disposed:
            handler = ExternalEventExample()
            ex_event = ExternalEvent.Create(handler)
            self.my_form = ExternalEventExampleDialog(ex_event, handler)
            self.my_form.Show()
class Command(IExternalCommand):
    def execute(self, command_data, message, elements):
        try:
            ExternalEventExampleApp.show_form(command_data.Application)
            return Result.Succeeded
        except Exception:
            return Result.Failed
class ExternalEventExampleDialog(Form):
    def __init__(self, ex_event, handler):
        self.ex_event = ex_event
        self.handler = handler
    def OnFormClosed(self, e):
        self.ex_event.Dispose()
        self.ex_event = None
        self.handler = None
        super(ExternalEventExampleDialog, self).OnFormClosed(e)
    def close_button_click(self, sender, e):
        self.Close()
    def show_messagebutton_click(self, sender, e):
        self.ex_event.Raise()
|
<commit_before><commit_msg>Add GUI for 3D Rotation script<commit_after>from Autodesk.Revit.UI import IExternalEventHandler, IExternalApplication, Result, ExternalEvent, IExternalCommand, TaskDialog
import clr
clr.AddReference("System.Windows.Forms")  # the dialog below is a WinForms Form
from System.Windows.Forms import Form
class ExternalEventExample(IExternalEventHandler):
    def execute(self, app):
        TaskDialog.Show("External Event", "Click Close to close")
    def GetName(self):
        return "External Event Example"
class ExternalEventExampleApp(IExternalApplication):
    def __init__(self, app=None, my_form=None):
        self.app = app
        self.my_form = my_form
    def on_shutdown(self, application):
        if self.my_form is not None and self.my_form.Visible:
            self.my_form.Close()
        return Result.Succeeded
    def on_startup(self, application):
        self.my_form = None
        return Result.Succeeded
    def show_form(self, application):
        if self.my_form is None or self.my_form.Disposed:
            handler = ExternalEventExample()
            ex_event = ExternalEvent.Create(handler)
            self.my_form = ExternalEventExampleDialog(ex_event, handler)
            self.my_form.Show()
class Command(IExternalCommand):
    def execute(self, command_data, message, elements):
        try:
            ExternalEventExampleApp.show_form(command_data.Application)
            return Result.Succeeded
        except Exception:
            return Result.Failed
class ExternalEventExampleDialog(Form):
    def __init__(self, ex_event, handler):
        self.ex_event = ex_event
        self.handler = handler
    def OnFormClosed(self, e):
        self.ex_event.Dispose()
        self.ex_event = None
        self.handler = None
        super(ExternalEventExampleDialog, self).OnFormClosed(e)
    def close_button_click(self, sender, e):
        self.Close()
    def show_messagebutton_click(self, sender, e):
        self.ex_event.Raise()
|
|
8e66b89ac7a9003533afe34bf691ee17ec37d1f5
|
tests/test_publisher.py
|
tests/test_publisher.py
|
from lektor.publisher import Command
def test_Command_triggers_no_warnings(recwarn):
    # This exercises the issue where publishing via rsync resulted
# in ResourceWarnings about unclosed streams.
# This is essentially how RsyncPublisher runs rsync.
with Command(["echo"]) as client:
for _ in client:
pass
# Delete our reference so that the Command instance gets garbage
# collected here. Otherwise, gc will not happen until after the
# test completes and warnings emitted during gc will not be captured
# by the recwarn fixture.
del client
for warning in recwarn.list:
print(warning)
assert len(recwarn) == 0
|
Add test to exercise unclosed file warnings
|
Add test to exercise unclosed file warnings
|
Python
|
bsd-3-clause
|
lektor/lektor,lektor/lektor,lektor/lektor,lektor/lektor
|
Add test to exercise unclosed file warnings
|
from lektor.publisher import Command
def test_Command_triggers_no_warnings(recwarn):
    # This exercises the issue where publishing via rsync resulted
# in ResourceWarnings about unclosed streams.
# This is essentially how RsyncPublisher runs rsync.
with Command(["echo"]) as client:
for _ in client:
pass
# Delete our reference so that the Command instance gets garbage
# collected here. Otherwise, gc will not happen until after the
# test completes and warnings emitted during gc will not be captured
# by the recwarn fixture.
del client
for warning in recwarn.list:
print(warning)
assert len(recwarn) == 0
|
<commit_before><commit_msg>Add test to exercise unclosed file warnings<commit_after>
|
from lektor.publisher import Command
def test_Command_triggers_no_warnings(recwarn):
    # This exercises the issue where publishing via rsync resulted
# in ResourceWarnings about unclosed streams.
# This is essentially how RsyncPublisher runs rsync.
with Command(["echo"]) as client:
for _ in client:
pass
# Delete our reference so that the Command instance gets garbage
# collected here. Otherwise, gc will not happen until after the
# test completes and warnings emitted during gc will not be captured
# by the recwarn fixture.
del client
for warning in recwarn.list:
print(warning)
assert len(recwarn) == 0
|
Add test to exercise unclosed file warningsfrom lektor.publisher import Command
def test_Command_triggers_no_warnings(recwarn):
    # This exercises the issue where publishing via rsync resulted
# in ResourceWarnings about unclosed streams.
# This is essentially how RsyncPublisher runs rsync.
with Command(["echo"]) as client:
for _ in client:
pass
# Delete our reference so that the Command instance gets garbage
# collected here. Otherwise, gc will not happen until after the
# test completes and warnings emitted during gc will not be captured
# by the recwarn fixture.
del client
for warning in recwarn.list:
print(warning)
assert len(recwarn) == 0
|
<commit_before><commit_msg>Add test to exercise unclosed file warnings<commit_after>from lektor.publisher import Command
def test_Command_triggers_no_warnings(recwarn):
    # This exercises the issue where publishing via rsync resulted
# in ResourceWarnings about unclosed streams.
# This is essentially how RsyncPublisher runs rsync.
with Command(["echo"]) as client:
for _ in client:
pass
# Delete our reference so that the Command instance gets garbage
# collected here. Otherwise, gc will not happen until after the
# test completes and warnings emitted during gc will not be captured
# by the recwarn fixture.
del client
for warning in recwarn.list:
print(warning)
assert len(recwarn) == 0
|
|
6c61e0a477a03dcd81d90bf828ba6c036c86b355
|
pydocstring/docstring.py
|
pydocstring/docstring.py
|
class Docstring:
"""Class for storing docstring information.
Following headers are used by this class:
* 'summary' - first line of the docstring
* 'extended' - blocks of extended description concerning the functionality of the code
* 'parameters' - parameters of a method
* 'other parameters' - parameters that are not commonly used
* 'returns' - returned value of a method
* 'yields' - generated value of a generator
* 'raises' - errors raised
* 'see also' - other related code
* 'notes' - blocks of other information about the code (e.g. implementation details)
* 'references' - references used
* 'examples' - examples of code usage
* 'attributes' - attributes of a class
* 'methods' - methods of a class/module
Attributes
----------
info : dict
Dictionary of the headers to contents under header.
Methods
-------
__init__(**headers_contents)
Initialize.
parse_google(docstring)
Return instance of Docstring that corresponds to given google docstring.
parse_numpy(docstring)
Return instance of Docstring that corresponds to given numpy docstring.
parse_instance(instance)
Return instance of Docstring that corresponds to provided instance.
make_google()
Return corresponding google docstring
make_numpy()
Return corresponding numpy docstring
Example
-------
For example, this docstring should be equivalent to
info = {'Attributes': {'info': {'type': ['dict'],
'docs':['Dictionary of the headers to contents under header.']}}}
info = {'Attributes': {'info': <ParamDocstring object>}}
"""
pass
class ParamDocstring:
"""Class for storing docstring information on parameters.
Attributes
----------
name : str
Name of the parameter.
types : list of str
Type of the parameters allowed.
docs : list of str
        Documentation for the parameter.
Methods
-------
__init__(name, type=None, docs=None)
Initialize.
parse_google(docstring)
Return instance of Docstring that corresponds to given google docstring.
parse_numpy(docstring)
Return instance of Docstring that corresponds to given numpy docstring.
"""
pass
class MethodDocstring:
"""Class for storing docstring information on methods.
Attributes
----------
name : str
Name of the method.
call_signature : str
Call signature of the method.
docs : list of str
        Documentation for the method.
Methods
-------
__init__(name, call_signature, docs=None)
Initialize.
parse_google(docstring)
Return instance of Docstring that corresponds to given google docstring.
parse_numpy(docstring)
Return instance of Docstring that corresponds to given numpy docstring.
"""
pass
|
Add general framework for coding
|
Add general framework for coding
using documentation
|
Python
|
mit
|
kimt33/pydocstring
|
Add general framework for coding
using documentation
|
class Docstring:
"""Class for storing docstring information.
Following headers are used by this class:
* 'summary' - first line of the docstring
* 'extended' - blocks of extended description concerning the functionality of the code
* 'parameters' - parameters of a method
* 'other parameters' - parameters that are not commonly used
* 'returns' - returned value of a method
* 'yields' - generated value of a generator
* 'raises' - errors raised
* 'see also' - other related code
* 'notes' - blocks of other information about the code (e.g. implementation details)
* 'references' - references used
* 'examples' - examples of code usage
* 'attributes' - attributes of a class
* 'methods' - methods of a class/module
Attributes
----------
info : dict
Dictionary of the headers to contents under header.
Methods
-------
__init__(**headers_contents)
Initialize.
parse_google(docstring)
Return instance of Docstring that corresponds to given google docstring.
parse_numpy(docstring)
Return instance of Docstring that corresponds to given numpy docstring.
parse_instance(instance)
Return instance of Docstring that corresponds to provided instance.
make_google()
Return corresponding google docstring
make_numpy()
Return corresponding numpy docstring
Example
-------
For example, this docstring should be equivalent to
info = {'Attributes': {'info': {'type': ['dict'],
'docs':['Dictionary of the headers to contents under header.']}}}
info = {'Attributes': {'info': <ParamDocstring object>}}
"""
pass
class ParamDocstring:
"""Class for storing docstring information on parameters.
Attributes
----------
name : str
Name of the parameter.
types : list of str
Type of the parameters allowed.
docs : list of str
        Documentation for the parameter.
Methods
-------
__init__(name, type=None, docs=None)
Initialize.
parse_google(docstring)
Return instance of Docstring that corresponds to given google docstring.
parse_numpy(docstring)
Return instance of Docstring that corresponds to given numpy docstring.
"""
pass
class MethodDocstring:
"""Class for storing docstring information on methods.
Attributes
----------
name : str
Name of the method.
call_signature : str
Call signature of the method.
docs : list of str
        Documentation for the method.
Methods
-------
__init__(name, call_signature, docs=None)
Initialize.
parse_google(docstring)
Return instance of Docstring that corresponds to given google docstring.
parse_numpy(docstring)
Return instance of Docstring that corresponds to given numpy docstring.
"""
pass
|
<commit_before><commit_msg>Add general framework for coding
using documentation<commit_after>
|
class Docstring:
"""Class for storing docstring information.
Following headers are used by this class:
* 'summary' - first line of the docstring
* 'extended' - blocks of extended description concerning the functionality of the code
* 'parameters' - parameters of a method
* 'other parameters' - parameters that are not commonly used
* 'returns' - returned value of a method
* 'yields' - generated value of a generator
* 'raises' - errors raised
* 'see also' - other related code
* 'notes' - blocks of other information about the code (e.g. implementation details)
* 'references' - references used
* 'examples' - examples of code usage
* 'attributes' - attributes of a class
* 'methods' - methods of a class/module
Attributes
----------
info : dict
Dictionary of the headers to contents under header.
Methods
-------
__init__(**headers_contents)
Initialize.
parse_google(docstring)
Return instance of Docstring that corresponds to given google docstring.
parse_numpy(docstring)
Return instance of Docstring that corresponds to given numpy docstring.
parse_instance(instance)
Return instance of Docstring that corresponds to provided instance.
make_google()
Return corresponding google docstring
make_numpy()
Return corresponding numpy docstring
Example
-------
For example, this docstring should be equivalent to
info = {'Attributes': {'info': {'type': ['dict'],
'docs':['Dictionary of the headers to contents under header.']}}}
info = {'Attributes': {'info': <ParamDocstring object>}}
"""
pass
class ParamDocstring:
"""Class for storing docstring information on parameters.
Attributes
----------
name : str
Name of the parameter.
types : list of str
Type of the parameters allowed.
docs : list of str
        Documentation for the parameter.
Methods
-------
__init__(name, type=None, docs=None)
Initialize.
parse_google(docstring)
Return instance of Docstring that corresponds to given google docstring.
parse_numpy(docstring)
Return instance of Docstring that corresponds to given numpy docstring.
"""
pass
class MethodDocstring:
"""Class for storing docstring information on methods.
Attributes
----------
name : str
Name of the method.
call_signature : str
Call signature of the method.
docs : list of str
        Documentation for the method.
Methods
-------
__init__(name, call_signature, docs=None)
Initialize.
parse_google(docstring)
Return instance of Docstring that corresponds to given google docstring.
parse_numpy(docstring)
Return instance of Docstring that corresponds to given numpy docstring.
"""
pass
|
Add general framework for coding
using documentationclass Docstring:
"""Class for storing docstring information.
Following headers are used by this class:
* 'summary' - first line of the docstring
* 'extended' - blocks of extended description concerning the functionality of the code
* 'parameters' - parameters of a method
* 'other parameters' - parameters that are not commonly used
* 'returns' - returned value of a method
* 'yields' - generated value of a generator
* 'raises' - errors raised
* 'see also' - other related code
* 'notes' - blocks of other information about the code (e.g. implementation details)
* 'references' - references used
* 'examples' - examples of code usage
* 'attributes' - attributes of a class
* 'methods' - methods of a class/module
Attributes
----------
info : dict
Dictionary of the headers to contents under header.
Methods
-------
__init__(**headers_contents)
Initialize.
parse_google(docstring)
Return instance of Docstring that corresponds to given google docstring.
parse_numpy(docstring)
Return instance of Docstring that corresponds to given numpy docstring.
parse_instance(instance)
Return instance of Docstring that corresponds to provided instance.
make_google()
Return corresponding google docstring
make_numpy()
Return corresponding numpy docstring
Example
-------
For example, this docstring should be equivalent to
info = {'Attributes': {'info': {'type': ['dict'],
'docs':['Dictionary of the headers to contents under header.']}}}
info = {'Attributes': {'info': <ParamDocstring object>}}
"""
pass
class ParamDocstring:
"""Class for storing docstring information on parameters.
Attributes
----------
name : str
Name of the parameter.
types : list of str
Type of the parameters allowed.
docs : list of str
        Documentation for the parameter.
Methods
-------
__init__(name, type=None, docs=None)
Initialize.
parse_google(docstring)
Return instance of Docstring that corresponds to given google docstring.
parse_numpy(docstring)
Return instance of Docstring that corresponds to given numpy docstring.
"""
pass
class MethodDocstring:
"""Class for storing docstring information on methods.
Attributes
----------
name : str
Name of the method.
call_signature : str
Call signature of the method.
docs : list of str
        Documentation for the method.
Methods
-------
__init__(name, call_signature, docs=None)
Initialize.
parse_google(docstring)
Return instance of Docstring that corresponds to given google docstring.
parse_numpy(docstring)
Return instance of Docstring that corresponds to given numpy docstring.
"""
pass
|
<commit_before><commit_msg>Add general framework for coding
using documentation<commit_after>class Docstring:
"""Class for storing docstring information.
Following headers are used by this class:
* 'summary' - first line of the docstring
* 'extended' - blocks of extended description concerning the functionality of the code
* 'parameters' - parameters of a method
* 'other parameters' - parameters that are not commonly used
* 'returns' - returned value of a method
* 'yields' - generated value of a generator
* 'raises' - errors raised
* 'see also' - other related code
* 'notes' - blocks of other information about the code (e.g. implementation details)
* 'references' - references used
* 'examples' - examples of code usage
* 'attributes' - attributes of a class
* 'methods' - methods of a class/module
Attributes
----------
info : dict
Dictionary of the headers to contents under header.
Methods
-------
__init__(**headers_contents)
Initialize.
parse_google(docstring)
Return instance of Docstring that corresponds to given google docstring.
parse_numpy(docstring)
Return instance of Docstring that corresponds to given numpy docstring.
parse_instance(instance)
Return instance of Docstring that corresponds to provided instance.
make_google()
Return corresponding google docstring
make_numpy()
Return corresponding numpy docstring
Example
-------
For example, this docstring should be equivalent to
info = {'Attributes': {'info': {'type': ['dict'],
'docs':['Dictionary of the headers to contents under header.']}}}
info = {'Attributes': {'info': <ParamDocstring object>}}
"""
pass
class ParamDocstring:
"""Class for storing docstring information on parameters.
Attributes
----------
name : str
Name of the parameter.
types : list of str
Type of the parameters allowed.
docs : list of str
        Documentation for the parameter.
Methods
-------
__init__(name, type=None, docs=None)
Initialize.
parse_google(docstring)
Return instance of Docstring that corresponds to given google docstring.
parse_numpy(docstring)
Return instance of Docstring that corresponds to given numpy docstring.
"""
pass
class MethodDocstring:
"""Class for storing docstring information on methods.
Attributes
----------
name : str
Name of the method.
call_signature : str
Call signature of the method.
docs : list of str
        Documentation for the method.
Methods
-------
__init__(name, call_signature, docs=None)
Initialize.
parse_google(docstring)
Return instance of Docstring that corresponds to given google docstring.
parse_numpy(docstring)
Return instance of Docstring that corresponds to given numpy docstring.
"""
pass
|
|
444f69848b43d25469b2babd55317e44744e41cb
|
tests/test_highlevel.py
|
tests/test_highlevel.py
|
import collections
import pathlib
import numpy as np
import pytest
import eccodes
SAMPLE_DATA_FOLDER = pathlib.Path(__file__).parent / "sample-data"
TEST_GRIB_DATA = SAMPLE_DATA_FOLDER / "tiggelam_cnmc_sfc.grib2"
TEST_GRIB_DATA2 = SAMPLE_DATA_FOLDER / "era5-levels-members.grib"
def test_filereader():
with eccodes.FileReader(TEST_GRIB_DATA) as reader:
count = len([None for _ in reader])
assert count == 7
def test_read_message():
with eccodes.FileReader(TEST_GRIB_DATA) as reader:
message = next(reader)
assert isinstance(message, eccodes.GRIBMessage)
def test_message_get():
with eccodes.FileReader(TEST_GRIB_DATA) as reader:
message = next(reader)
assert message.get("edition") == 2
assert message.get("nonexistent") is None
assert message.get("nonexistent", 42) == 42
assert message.get("centre", ktype=int) == 250
vals = message.get("values")
assert len(vals) == message.get("numberOfValues")
assert message["Ni"] == 511
with pytest.raises(KeyError):
message["invalid"]
def test_message_set():
with eccodes.FileReader(TEST_GRIB_DATA) as reader:
message = next(reader)
message.set("centre", "ecmf")
vals = np.arange(message.get("numberOfValues"), dtype=np.float32)
message.set_array("values", vals)
assert message.get("centre") == "ecmf"
assert np.all(message.get("values") == vals)
def test_message_iter():
with eccodes.FileReader(TEST_GRIB_DATA2) as reader:
message = next(reader)
keys = list(message)
assert len(keys) == 192
assert keys[-1] == "7777"
assert "centre" in keys
assert "shortName" in keys
keys2 = list(message.keys())
assert keys == keys2
items = collections.OrderedDict(message.items())
assert list(items.keys()) == keys
assert items["shortName"] == "z"
assert items["centre"] == "ecmf"
values = list(message.values())
assert values[keys.index("shortName")] == "z"
assert values[keys.index("centre")] == "ecmf"
assert values[-1] == "7777"
def test_message_copy():
with eccodes.FileReader(TEST_GRIB_DATA2) as reader:
message = next(reader)
message2 = message.copy()
assert list(message.keys()) == list(message2.keys())
|
Add tests for the high-level interface
|
Add tests for the high-level interface
|
Python
|
apache-2.0
|
ecmwf/eccodes-python,ecmwf/eccodes-python
|
Add tests for the high-level interface
|
import collections
import pathlib
import numpy as np
import pytest
import eccodes
SAMPLE_DATA_FOLDER = pathlib.Path(__file__).parent / "sample-data"
TEST_GRIB_DATA = SAMPLE_DATA_FOLDER / "tiggelam_cnmc_sfc.grib2"
TEST_GRIB_DATA2 = SAMPLE_DATA_FOLDER / "era5-levels-members.grib"
def test_filereader():
with eccodes.FileReader(TEST_GRIB_DATA) as reader:
count = len([None for _ in reader])
assert count == 7
def test_read_message():
with eccodes.FileReader(TEST_GRIB_DATA) as reader:
message = next(reader)
assert isinstance(message, eccodes.GRIBMessage)
def test_message_get():
with eccodes.FileReader(TEST_GRIB_DATA) as reader:
message = next(reader)
assert message.get("edition") == 2
assert message.get("nonexistent") is None
assert message.get("nonexistent", 42) == 42
assert message.get("centre", ktype=int) == 250
vals = message.get("values")
assert len(vals) == message.get("numberOfValues")
assert message["Ni"] == 511
with pytest.raises(KeyError):
message["invalid"]
def test_message_set():
with eccodes.FileReader(TEST_GRIB_DATA) as reader:
message = next(reader)
message.set("centre", "ecmf")
vals = np.arange(message.get("numberOfValues"), dtype=np.float32)
message.set_array("values", vals)
assert message.get("centre") == "ecmf"
assert np.all(message.get("values") == vals)
def test_message_iter():
with eccodes.FileReader(TEST_GRIB_DATA2) as reader:
message = next(reader)
keys = list(message)
assert len(keys) == 192
assert keys[-1] == "7777"
assert "centre" in keys
assert "shortName" in keys
keys2 = list(message.keys())
assert keys == keys2
items = collections.OrderedDict(message.items())
assert list(items.keys()) == keys
assert items["shortName"] == "z"
assert items["centre"] == "ecmf"
values = list(message.values())
assert values[keys.index("shortName")] == "z"
assert values[keys.index("centre")] == "ecmf"
assert values[-1] == "7777"
def test_message_copy():
with eccodes.FileReader(TEST_GRIB_DATA2) as reader:
message = next(reader)
message2 = message.copy()
assert list(message.keys()) == list(message2.keys())
|
<commit_before><commit_msg>Add tests for the high-level interface<commit_after>
|
import collections
import pathlib
import numpy as np
import pytest
import eccodes
SAMPLE_DATA_FOLDER = pathlib.Path(__file__).parent / "sample-data"
TEST_GRIB_DATA = SAMPLE_DATA_FOLDER / "tiggelam_cnmc_sfc.grib2"
TEST_GRIB_DATA2 = SAMPLE_DATA_FOLDER / "era5-levels-members.grib"
def test_filereader():
with eccodes.FileReader(TEST_GRIB_DATA) as reader:
count = len([None for _ in reader])
assert count == 7
def test_read_message():
with eccodes.FileReader(TEST_GRIB_DATA) as reader:
message = next(reader)
assert isinstance(message, eccodes.GRIBMessage)
def test_message_get():
with eccodes.FileReader(TEST_GRIB_DATA) as reader:
message = next(reader)
assert message.get("edition") == 2
assert message.get("nonexistent") is None
assert message.get("nonexistent", 42) == 42
assert message.get("centre", ktype=int) == 250
vals = message.get("values")
assert len(vals) == message.get("numberOfValues")
assert message["Ni"] == 511
with pytest.raises(KeyError):
message["invalid"]
def test_message_set():
with eccodes.FileReader(TEST_GRIB_DATA) as reader:
message = next(reader)
message.set("centre", "ecmf")
vals = np.arange(message.get("numberOfValues"), dtype=np.float32)
message.set_array("values", vals)
assert message.get("centre") == "ecmf"
assert np.all(message.get("values") == vals)
def test_message_iter():
with eccodes.FileReader(TEST_GRIB_DATA2) as reader:
message = next(reader)
keys = list(message)
assert len(keys) == 192
assert keys[-1] == "7777"
assert "centre" in keys
assert "shortName" in keys
keys2 = list(message.keys())
assert keys == keys2
items = collections.OrderedDict(message.items())
assert list(items.keys()) == keys
assert items["shortName"] == "z"
assert items["centre"] == "ecmf"
values = list(message.values())
assert values[keys.index("shortName")] == "z"
assert values[keys.index("centre")] == "ecmf"
assert values[-1] == "7777"
def test_message_copy():
with eccodes.FileReader(TEST_GRIB_DATA2) as reader:
message = next(reader)
message2 = message.copy()
assert list(message.keys()) == list(message2.keys())
|
Add tests for the high-level interfaceimport collections
import pathlib
import numpy as np
import pytest
import eccodes
SAMPLE_DATA_FOLDER = pathlib.Path(__file__).parent / "sample-data"
TEST_GRIB_DATA = SAMPLE_DATA_FOLDER / "tiggelam_cnmc_sfc.grib2"
TEST_GRIB_DATA2 = SAMPLE_DATA_FOLDER / "era5-levels-members.grib"
def test_filereader():
with eccodes.FileReader(TEST_GRIB_DATA) as reader:
count = len([None for _ in reader])
assert count == 7
def test_read_message():
with eccodes.FileReader(TEST_GRIB_DATA) as reader:
message = next(reader)
assert isinstance(message, eccodes.GRIBMessage)
def test_message_get():
with eccodes.FileReader(TEST_GRIB_DATA) as reader:
message = next(reader)
assert message.get("edition") == 2
assert message.get("nonexistent") is None
assert message.get("nonexistent", 42) == 42
assert message.get("centre", ktype=int) == 250
vals = message.get("values")
assert len(vals) == message.get("numberOfValues")
assert message["Ni"] == 511
with pytest.raises(KeyError):
message["invalid"]
def test_message_set():
with eccodes.FileReader(TEST_GRIB_DATA) as reader:
message = next(reader)
message.set("centre", "ecmf")
vals = np.arange(message.get("numberOfValues"), dtype=np.float32)
message.set_array("values", vals)
assert message.get("centre") == "ecmf"
assert np.all(message.get("values") == vals)
def test_message_iter():
with eccodes.FileReader(TEST_GRIB_DATA2) as reader:
message = next(reader)
keys = list(message)
assert len(keys) == 192
assert keys[-1] == "7777"
assert "centre" in keys
assert "shortName" in keys
keys2 = list(message.keys())
assert keys == keys2
items = collections.OrderedDict(message.items())
assert list(items.keys()) == keys
assert items["shortName"] == "z"
assert items["centre"] == "ecmf"
values = list(message.values())
assert values[keys.index("shortName")] == "z"
assert values[keys.index("centre")] == "ecmf"
assert values[-1] == "7777"
def test_message_copy():
with eccodes.FileReader(TEST_GRIB_DATA2) as reader:
message = next(reader)
message2 = message.copy()
assert list(message.keys()) == list(message2.keys())
|
<commit_before><commit_msg>Add tests for the high-level interface<commit_after>import collections
import pathlib
import numpy as np
import pytest
import eccodes
SAMPLE_DATA_FOLDER = pathlib.Path(__file__).parent / "sample-data"
TEST_GRIB_DATA = SAMPLE_DATA_FOLDER / "tiggelam_cnmc_sfc.grib2"
TEST_GRIB_DATA2 = SAMPLE_DATA_FOLDER / "era5-levels-members.grib"
def test_filereader():
with eccodes.FileReader(TEST_GRIB_DATA) as reader:
count = len([None for _ in reader])
assert count == 7
def test_read_message():
with eccodes.FileReader(TEST_GRIB_DATA) as reader:
message = next(reader)
assert isinstance(message, eccodes.GRIBMessage)
def test_message_get():
with eccodes.FileReader(TEST_GRIB_DATA) as reader:
message = next(reader)
assert message.get("edition") == 2
assert message.get("nonexistent") is None
assert message.get("nonexistent", 42) == 42
assert message.get("centre", ktype=int) == 250
vals = message.get("values")
assert len(vals) == message.get("numberOfValues")
assert message["Ni"] == 511
with pytest.raises(KeyError):
message["invalid"]
def test_message_set():
with eccodes.FileReader(TEST_GRIB_DATA) as reader:
message = next(reader)
message.set("centre", "ecmf")
vals = np.arange(message.get("numberOfValues"), dtype=np.float32)
message.set_array("values", vals)
assert message.get("centre") == "ecmf"
assert np.all(message.get("values") == vals)
def test_message_iter():
with eccodes.FileReader(TEST_GRIB_DATA2) as reader:
message = next(reader)
keys = list(message)
assert len(keys) == 192
assert keys[-1] == "7777"
assert "centre" in keys
assert "shortName" in keys
keys2 = list(message.keys())
assert keys == keys2
items = collections.OrderedDict(message.items())
assert list(items.keys()) == keys
assert items["shortName"] == "z"
assert items["centre"] == "ecmf"
values = list(message.values())
assert values[keys.index("shortName")] == "z"
assert values[keys.index("centre")] == "ecmf"
assert values[-1] == "7777"
def test_message_copy():
with eccodes.FileReader(TEST_GRIB_DATA2) as reader:
message = next(reader)
message2 = message.copy()
assert list(message.keys()) == list(message2.keys())
|
|
efac0ccf8357c5bee978513722331ee196b9936f
|
bin/resize_regions.py
|
bin/resize_regions.py
|
#!/usr/bin/env python
#
# resize_ranges
# Resizes ranges in a BED file around a center point
#
import sys
import argparse
import csv
def resize_row(row, width):
start, end = int(row[1]), int(row[2])
original_range = end-start
    margin = (original_range - width) // 2
start = start + margin
end = end - margin
row[1] = start
row[2] = end
return row
def resize_ranges(input, output, width):
"""
Resizes regions in a bed file to the given width
:param input: An input stream or open file
:param output: An output stream
:param width: the desired width
:return:
"""
reader = csv.reader(input, delimiter='\t')
writer = csv.writer(output, delimiter='\t')
for row in reader:
writer.writerow(resize_row(row, width))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description = 'Resize ranges in a BED file')
parser.add_argument('inputfile', type=argparse.FileType('rb'))
parser.add_argument('width', type=int)
args = parser.parse_args()
resize_ranges(args.inputfile, sys.stdout, args.width)
|
Add python script to resize regions
|
Add python script to resize regions
|
Python
|
mit
|
Duke-GCB/TrackHubGenerator,Duke-GCB/TrackHubGenerator
|
Add python script to resize regions
|
#!/usr/bin/env python
#
# resize_ranges
# Resizes ranges in a BED file around a center point
#
import sys
import argparse
import csv
def resize_row(row, width):
start, end = int(row[1]), int(row[2])
original_range = end-start
    margin = (original_range - width) // 2
start = start + margin
end = end - margin
row[1] = start
row[2] = end
return row
def resize_ranges(input, output, width):
"""
Resizes regions in a bed file to the given width
:param input: An input stream or open file
:param output: An output stream
:param width: the desired width
:return:
"""
reader = csv.reader(input, delimiter='\t')
writer = csv.writer(output, delimiter='\t')
for row in reader:
writer.writerow(resize_row(row, width))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description = 'Resize ranges in a BED file')
parser.add_argument('inputfile', type=argparse.FileType('rb'))
parser.add_argument('width', type=int)
args = parser.parse_args()
resize_ranges(args.inputfile, sys.stdout, args.width)
|
<commit_before><commit_msg>Add python script to resize regions<commit_after>
|
#!/usr/bin/env python
#
# resize_ranges
# Resizes ranges in a BED file around a center point
#
import sys
import argparse
import csv
def resize_row(row, width):
start, end = int(row[1]), int(row[2])
original_range = end-start
    margin = (original_range - width) // 2
start = start + margin
end = end - margin
row[1] = start
row[2] = end
return row
def resize_ranges(input, output, width):
"""
Resizes regions in a bed file to the given width
:param input: An input stream or open file
:param output: An output stream
:param width: the desired width
:return:
"""
reader = csv.reader(input, delimiter='\t')
writer = csv.writer(output, delimiter='\t')
for row in reader:
writer.writerow(resize_row(row, width))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description = 'Resize ranges in a BED file')
parser.add_argument('inputfile', type=argparse.FileType('rb'))
parser.add_argument('width', type=int)
args = parser.parse_args()
resize_ranges(args.inputfile, sys.stdout, args.width)
|
Add python script to resize regions#!/usr/bin/env python
#
# resize_ranges
# Resizes ranges in a BED file around a center point
#
import sys
import argparse
import csv
def resize_row(row, width):
start, end = int(row[1]), int(row[2])
original_range = end-start
    margin = (original_range - width) // 2
start = start + margin
end = end - margin
row[1] = start
row[2] = end
return row
def resize_ranges(input, output, width):
"""
Resizes regions in a bed file to the given width
:param input: An input stream or open file
:param output: An output stream
:param width: the desired width
:return:
"""
reader = csv.reader(input, delimiter='\t')
writer = csv.writer(output, delimiter='\t')
for row in reader:
writer.writerow(resize_row(row, width))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description = 'Resize ranges in a BED file')
parser.add_argument('inputfile', type=argparse.FileType('rb'))
parser.add_argument('width', type=int)
args = parser.parse_args()
resize_ranges(args.inputfile, sys.stdout, args.width)
|
<commit_before><commit_msg>Add python script to resize regions<commit_after>#!/usr/bin/env python
#
# resize_ranges
# Resizes ranges in a BED file around a center point
#
import sys
import argparse
import csv
def resize_row(row, width):
start, end = int(row[1]), int(row[2])
original_range = end-start
    margin = (original_range - width) // 2
start = start + margin
end = end - margin
row[1] = start
row[2] = end
return row
def resize_ranges(input, output, width):
"""
Resizes regions in a bed file to the given width
:param input: An input stream or open file
:param output: An output stream
:param width: the desired width
:return:
"""
reader = csv.reader(input, delimiter='\t')
writer = csv.writer(output, delimiter='\t')
for row in reader:
writer.writerow(resize_row(row, width))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description = 'Resize ranges in a BED file')
parser.add_argument('inputfile', type=argparse.FileType('rb'))
parser.add_argument('width', type=int)
args = parser.parse_args()
resize_ranges(args.inputfile, sys.stdout, args.width)
|
|
c96468a38c06a4c439d52754bfd93d86ca9aeace
|
tests/db.py
|
tests/db.py
|
'''Test case for MongoDB database backend
'''
import unittest
from lighty.db import fields, models
class ModelTestCase(unittest.TestCase):
'''Test case
'''
def testClassExtending(self):
        '''Test that a child class inherits all the fields from its parent class
'''
class Base(models.Model):
title = fields.CharField()
class Child(Base):
text = fields.TextField()
assert 'title' in Child._fields, (
'Error on model class extending: %s' % Child._fields)
assert isinstance(Child.title, fields.CharField), (
'Error on model class extending: %s' % Child.title)
def test():
suite = unittest.TestSuite()
suite.addTest(ModelTestCase('testClassExtending'))
return suite
|
Add test case for model class inheritance
|
Add test case for model class inheritance
|
Python
|
bsd-3-clause
|
GrAndSE/lighty
|
Add test case for model class inheritance
|
'''Test case for MongoDB database backend
'''
import unittest
from lighty.db import fields, models
class ModelTestCase(unittest.TestCase):
'''Test case
'''
def testClassExtending(self):
        '''Test that a child class inherits all the fields from its parent class
'''
class Base(models.Model):
title = fields.CharField()
class Child(Base):
text = fields.TextField()
assert 'title' in Child._fields, (
'Error on model class extending: %s' % Child._fields)
assert isinstance(Child.title, fields.CharField), (
'Error on model class extending: %s' % Child.title)
def test():
suite = unittest.TestSuite()
suite.addTest(ModelTestCase('testClassExtending'))
return suite
|
<commit_before><commit_msg>Add test case for model class inheritance<commit_after>
|
'''Test case for MongoDB database backend
'''
import unittest
from lighty.db import fields, models
class ModelTestCase(unittest.TestCase):
'''Test case
'''
def testClassExtending(self):
        '''Test that a child class inherits all the fields from its parent class
'''
class Base(models.Model):
title = fields.CharField()
class Child(Base):
text = fields.TextField()
assert 'title' in Child._fields, (
'Error on model class extending: %s' % Child._fields)
assert isinstance(Child.title, fields.CharField), (
'Error on model class extending: %s' % Child.title)
def test():
suite = unittest.TestSuite()
suite.addTest(ModelTestCase('testClassExtending'))
return suite
|
Add test case for model class inheritance'''Test case for MongoDB database backend
'''
import unittest
from lighty.db import fields, models
class ModelTestCase(unittest.TestCase):
'''Test case
'''
def testClassExtending(self):
        '''Test that a child class inherits all the fields from its parent class
'''
class Base(models.Model):
title = fields.CharField()
class Child(Base):
text = fields.TextField()
assert 'title' in Child._fields, (
'Error on model class extending: %s' % Child._fields)
assert isinstance(Child.title, fields.CharField), (
'Error on model class extending: %s' % Child.title)
def test():
suite = unittest.TestSuite()
suite.addTest(ModelTestCase('testClassExtending'))
return suite
|
<commit_before><commit_msg>Add test case for model class inheritance<commit_after>'''Test case for MongoDB database backend
'''
import unittest
from lighty.db import fields, models
class ModelTestCase(unittest.TestCase):
'''Test case
'''
def testClassExtending(self):
        '''Test that a child class inherits all the fields from its parent class
'''
class Base(models.Model):
title = fields.CharField()
class Child(Base):
text = fields.TextField()
assert 'title' in Child._fields, (
'Error on model class extending: %s' % Child._fields)
assert isinstance(Child.title, fields.CharField), (
'Error on model class extending: %s' % Child.title)
def test():
suite = unittest.TestSuite()
suite.addTest(ModelTestCase('testClassExtending'))
return suite
|
|
88473f3486a98a869928f0945998337084bfe3f3
|
Lib/test/outstanding_bugs.py
|
Lib/test/outstanding_bugs.py
|
#
# This file is for everybody to add tests for bugs that aren't
# fixed yet. Please add a test case and appropriate bug description.
#
# When you fix one of the bugs, please move the test to the correct
# test_ module.
#
import unittest
from test import test_support
class TestBug1385040(unittest.TestCase):
def testSyntaxError(self):
import compiler
# The following snippet gives a SyntaxError in the interpreter
#
# If you compile and exec it, the call foo(7) returns (7, 1)
self.assertRaises(SyntaxError, compiler.compile,
"def foo(a=1, b): return a, b\n\n", "<string>", "exec")
def test_main():
test_support.run_unittest(TestBug1385040)
|
Add a test file (which isn't run by regrtest) for bugs which aren't fixed yet.
|
Add a test file (which isn't run by regrtest) for bugs which
aren't fixed yet.
Includes a first test (for compiler).
|
Python
|
mit
|
sk-/python2.7-type-annotator,sk-/python2.7-type-annotator,sk-/python2.7-type-annotator
|
Add a test file (which isn't run by regrtest) for bugs which
aren't fixed yet.
Includes a first test (for compiler).
|
#
# This file is for everybody to add tests for bugs that aren't
# fixed yet. Please add a test case and appropriate bug description.
#
# When you fix one of the bugs, please move the test to the correct
# test_ module.
#
import unittest
from test import test_support
class TestBug1385040(unittest.TestCase):
def testSyntaxError(self):
import compiler
# The following snippet gives a SyntaxError in the interpreter
#
# If you compile and exec it, the call foo(7) returns (7, 1)
self.assertRaises(SyntaxError, compiler.compile,
"def foo(a=1, b): return a, b\n\n", "<string>", "exec")
def test_main():
test_support.run_unittest(TestBug1385040)
|
<commit_before><commit_msg>Add a test file (which isn't run by regrtest) for bugs which
aren't fixed yet.
Includes a first test (for compiler).<commit_after>
|
#
# This file is for everybody to add tests for bugs that aren't
# fixed yet. Please add a test case and appropriate bug description.
#
# When you fix one of the bugs, please move the test to the correct
# test_ module.
#
import unittest
from test import test_support
class TestBug1385040(unittest.TestCase):
def testSyntaxError(self):
import compiler
# The following snippet gives a SyntaxError in the interpreter
#
# If you compile and exec it, the call foo(7) returns (7, 1)
self.assertRaises(SyntaxError, compiler.compile,
"def foo(a=1, b): return a, b\n\n", "<string>", "exec")
def test_main():
test_support.run_unittest(TestBug1385040)
|
Add a test file (which isn't run by regrtest) for bugs which
aren't fixed yet.
Includes a first test (for compiler).#
# This file is for everybody to add tests for bugs that aren't
# fixed yet. Please add a test case and appropriate bug description.
#
# When you fix one of the bugs, please move the test to the correct
# test_ module.
#
import unittest
from test import test_support
class TestBug1385040(unittest.TestCase):
def testSyntaxError(self):
import compiler
# The following snippet gives a SyntaxError in the interpreter
#
# If you compile and exec it, the call foo(7) returns (7, 1)
self.assertRaises(SyntaxError, compiler.compile,
"def foo(a=1, b): return a, b\n\n", "<string>", "exec")
def test_main():
test_support.run_unittest(TestBug1385040)
|
<commit_before><commit_msg>Add a test file (which isn't run by regrtest) for bugs which
aren't fixed yet.
Includes a first test (for compiler).<commit_after>#
# This file is for everybody to add tests for bugs that aren't
# fixed yet. Please add a test case and appropriate bug description.
#
# When you fix one of the bugs, please move the test to the correct
# test_ module.
#
import unittest
from test import test_support
class TestBug1385040(unittest.TestCase):
def testSyntaxError(self):
import compiler
# The following snippet gives a SyntaxError in the interpreter
#
# If you compile and exec it, the call foo(7) returns (7, 1)
self.assertRaises(SyntaxError, compiler.compile,
"def foo(a=1, b): return a, b\n\n", "<string>", "exec")
def test_main():
test_support.run_unittest(TestBug1385040)
|
|
431f14a19549d9d22fe35e87403695d68eb7f906
|
distarray/__init__.py
|
distarray/__init__.py
|
# encoding: utf-8
# ---------------------------------------------------------------------------
# Copyright (C) 2008-2014, IPython Development Team and Enthought, Inc.
# Distributed under the terms of the BSD License. See COPYING.rst.
# ---------------------------------------------------------------------------
from distarray.__version__ import __version__
from distarray.run_tests import test
DISTARRAY_BASE_NAME = '__distarray__'
|
# encoding: utf-8
# ---------------------------------------------------------------------------
# Copyright (C) 2008-2014, IPython Development Team and Enthought, Inc.
# Distributed under the terms of the BSD License. See COPYING.rst.
# ---------------------------------------------------------------------------
"""
DistArray: Distributed NumPy-like arrays
========================================
Documentation is available in docstrings and online at
http://distarray.readthedocs.org.
Check out the ``examples`` directory in the source distribution for several
example modules and IPython notebooks using DistArray.
"""
from distarray.__version__ import __version__
from distarray.run_tests import test
DISTARRAY_BASE_NAME = '__distarray__'
|
Add a package-level DistArray docstring.
|
Add a package-level DistArray docstring.
|
Python
|
bsd-3-clause
|
enthought/distarray,RaoUmer/distarray,RaoUmer/distarray,enthought/distarray
|
# encoding: utf-8
# ---------------------------------------------------------------------------
# Copyright (C) 2008-2014, IPython Development Team and Enthought, Inc.
# Distributed under the terms of the BSD License. See COPYING.rst.
# ---------------------------------------------------------------------------
from distarray.__version__ import __version__
from distarray.run_tests import test
DISTARRAY_BASE_NAME = '__distarray__'
Add a package-level DistArray docstring.
|
# encoding: utf-8
# ---------------------------------------------------------------------------
# Copyright (C) 2008-2014, IPython Development Team and Enthought, Inc.
# Distributed under the terms of the BSD License. See COPYING.rst.
# ---------------------------------------------------------------------------
"""
DistArray: Distributed NumPy-like arrays
========================================
Documentation is available in docstrings and online at
http://distarray.readthedocs.org.
Check out the ``examples`` directory in the source distribution for several
example modules and IPython notebooks using DistArray.
"""
from distarray.__version__ import __version__
from distarray.run_tests import test
DISTARRAY_BASE_NAME = '__distarray__'
|
<commit_before># encoding: utf-8
# ---------------------------------------------------------------------------
# Copyright (C) 2008-2014, IPython Development Team and Enthought, Inc.
# Distributed under the terms of the BSD License. See COPYING.rst.
# ---------------------------------------------------------------------------
from distarray.__version__ import __version__
from distarray.run_tests import test
DISTARRAY_BASE_NAME = '__distarray__'
<commit_msg>Add a package-level DistArray docstring.<commit_after>
|
# encoding: utf-8
# ---------------------------------------------------------------------------
# Copyright (C) 2008-2014, IPython Development Team and Enthought, Inc.
# Distributed under the terms of the BSD License. See COPYING.rst.
# ---------------------------------------------------------------------------
"""
DistArray: Distributed NumPy-like arrays
========================================
Documentation is available in docstrings and online at
http://distarray.readthedocs.org.
Check out the ``examples`` directory in the source distribution for several
example modules and IPython notebooks using DistArray.
"""
from distarray.__version__ import __version__
from distarray.run_tests import test
DISTARRAY_BASE_NAME = '__distarray__'
|
# encoding: utf-8
# ---------------------------------------------------------------------------
# Copyright (C) 2008-2014, IPython Development Team and Enthought, Inc.
# Distributed under the terms of the BSD License. See COPYING.rst.
# ---------------------------------------------------------------------------
from distarray.__version__ import __version__
from distarray.run_tests import test
DISTARRAY_BASE_NAME = '__distarray__'
Add a package-level DistArray docstring.# encoding: utf-8
# ---------------------------------------------------------------------------
# Copyright (C) 2008-2014, IPython Development Team and Enthought, Inc.
# Distributed under the terms of the BSD License. See COPYING.rst.
# ---------------------------------------------------------------------------
"""
DistArray: Distributed NumPy-like arrays
========================================
Documentation is available in docstrings and online at
http://distarray.readthedocs.org.
Check out the ``examples`` directory in the source distribution for several
example modules and IPython notebooks using DistArray.
"""
from distarray.__version__ import __version__
from distarray.run_tests import test
DISTARRAY_BASE_NAME = '__distarray__'
|
<commit_before># encoding: utf-8
# ---------------------------------------------------------------------------
# Copyright (C) 2008-2014, IPython Development Team and Enthought, Inc.
# Distributed under the terms of the BSD License. See COPYING.rst.
# ---------------------------------------------------------------------------
from distarray.__version__ import __version__
from distarray.run_tests import test
DISTARRAY_BASE_NAME = '__distarray__'
<commit_msg>Add a package-level DistArray docstring.<commit_after># encoding: utf-8
# ---------------------------------------------------------------------------
# Copyright (C) 2008-2014, IPython Development Team and Enthought, Inc.
# Distributed under the terms of the BSD License. See COPYING.rst.
# ---------------------------------------------------------------------------
"""
DistArray: Distributed NumPy-like arrays
========================================
Documentation is available in docstrings and online at
http://distarray.readthedocs.org.
Check out the ``examples`` directory in the source distribution for several
example modules and IPython notebooks using DistArray.
"""
from distarray.__version__ import __version__
from distarray.run_tests import test
DISTARRAY_BASE_NAME = '__distarray__'
|
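The docstring added above becomes the package's __doc__ attribute, which help() and other introspection tools display; a tiny sketch with a hypothetical package name:
"""examplepkg: a minimal module with a top-level docstring."""
__version__ = '0.1.0'
if __name__ == '__main__':
    # The leading triple-quoted string is exposed as the module's __doc__,
    # which is what help(examplepkg) would print.
    print(__doc__)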
7aff8534b2b579efbc403882ade471f6ce4f3157
|
dropout_rnn.py
|
dropout_rnn.py
|
import tensorflow as tf
import numpy as np
#First Option
keep_prob = 0.5
cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons)
cell_drop = tf.contrib.rnn.DropoutWrapper(cell, input_keep_prob=keep_prob)
multi_layer_cell = tf.contrib.rnn.MultiRNNCell([cell_drop] * n_layers)
rnn_outputs, states = tf.nn.dynamic_rnn(multi_layer_cell, X, dtype=tf.float32)
# Second option
import sys
is_training = (sys.argv[-1] == 'train')
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.float32, [None, n_steps, n_outputs])
cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons)
if is_training:
cell = tf.contrib.rnn.DropoutWrapper(cell, input_keep_prob=keep_prob)
multi_layer_cell = tf.contrib.rnn.MultiRNNCell([cell] * n_layers)
rnn_outputs, states = tf.nn.dynamic_rnn(multi_layer_cell, X, dtype=tf.float32)
# [....] rest of graph
init = tf.global_variables_initializer()
saver = tf.train.Saver()
with tf.Session() as sess:
if is_training:
init.run()
for k in range(n_iterations):
# [.... train model]
save_path = saver.save(sess, '/tmp/my_model.ckpt')
else:
saver.restore(sess, '/tmp/my_model.ckpt')
# [...] use model
|
Add rudimentary code for dropout RNN
|
Add rudimentary code for dropout RNN
Non-functioning code for illustration
|
Python
|
mit
|
KT12/hands_on_machine_learning
|
Add rudimentary code for dropout RNN
Non-functioning code for illustration
|
import tensorflow as tf
import numpy as np
#First Option
keep_prob = 0.5
cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons)
cell_drop = tf.contrib.rnn.DropoutWrapper(cell, input_keep_prob=keep_prob)
multi_layer_cell = tf.contrib.rnn.MultiRNNCell([cell_drop] * n_layers)
rnn_outputs, states = tf.nn.dynamic_rnn(multi_layer_cell, X, dtype=tf.float32)
# Second option
import sys
is_training = (sys.argv[-1] == 'train')
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.float32, [None, n_steps, n_outputs])
cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons)
if is_training:
cell = tf.contrib.rnn.DropoutWrapper(cell, input_keep_prob=keep_prob)
multi_layer_cell = tf.contrib.rnn.MultiRNNCell([cell] * n_layers)
rnn_outputs, states = tf.nn.dynamic_rnn(multi_layer_cell, X, dtype=tf.float32)
# [....] rest of graph
init = tf.global_variables_initializer()
saver = tf.train.Saver()
with tf.Session() as sess:
if is_training:
init.run()
for k in range(n_iterations):
# [.... train model]
save_path = saver.save(sess, '/tmp/my_model.ckpt')
else:
saver.restore(sess, '/tmp/my_model.ckpt')
# [...] use model
|
<commit_before><commit_msg>Add rudimentary code for dropout RNN
Non-functioning code for illustration<commit_after>
|
import tensorflow as tf
import numpy as np
#First Option
keep_prob = 0.5
cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons)
cell_drop = tf.contrib.rnn.DropoutWrapper(cell, input_keep_prob=keep_prob)
multi_layer_cell = tf.contrib.rnn.MultiRNNCell([cell_drop] * n_layers)
rnn_outputs, states = tf.nn.dynamic_rnn(multi_layer_cell, X, dtype=tf.float32)
# Second option
import sys
is_training = (sys.argv[-1] == 'train')
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.float32, [None, n_steps, n_outputs])
cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons)
if is_training:
cell = tf.contrib.rnn.DropoutWrapper(cell, input_keep_prob=keep_prob)
multi_layer_cell = tf.contrib.rnn.MultiRNNCell([cell] * n_layers)
rnn_outputs, states = tf.nn.dynamic_rnn(multi_layer_cell, X, dtype=tf.float32)
# [....] rest of graph
init = tf.global_variables_initializer()
saver = tf.train.Saver()
with tf.Session() as sess:
if is_training:
init.run()
for k in range(n_iterations):
# [.... train model]
save_path = saver.save(sess, '/tmp/my_model.ckpt')
else:
saver.restore(sess, '/tmp/my_model.ckpt')
# [...] use model
|
Add rudimentary code for dropout RNN
Non-functioning code for illustrationimport tensorflow as tf
import numpy as np
#First Option
keep_prob = 0.5
cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons)
cell_drop = tf.contrib.rnn.DropoutWrapper(cell, input_keep_prob=keep_prob)
multi_layer_cell = tf.contrib.rnn.MultiRNNCell([cell_drop] * n_layers)
rnn_outputs, states = tf.nn.dynamic_rnn(multi_layer_cell, X, dtype=tf.float32)
# Second option
import sys
is_training = (sys.argv[-1] == 'train')
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.float32, [None, n_steps, n_outputs])
cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons)
if is_training:
cell = tf.contrib.rnn.DropoutWrapper(cell, input_keep_prob=keep_prob)
multi_layer_cell = tf.contrib.rnn.MultiRNNCell([cell] * n_layers)
rnn_outputs, states = tf.nn.dynamic_rnn(multi_layer_cell, X, dtype=tf.float32)
# [....] rest of graph
init = tf.global_variables_initializer()
saver = tf.train.Saver()
with tf.Session() as sess:
if is_training:
init.run()
for k in range(n_iterations):
# [.... train model]
save_path = saver.save(sess, '/tmp/my_model.ckpt')
else:
saver.restore(sess, '/tmp/my_model.ckpt')
# [...] use model
|
<commit_before><commit_msg>Add rudimentary code for dropout RNN
Non-functioning code for illustration<commit_after>import tensorflow as tf
import numpy as np
#First Option
keep_prob = 0.5
cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons)
cell_drop = tf.contrib.rnn.DropoutWrapper(cell, input_keep_prob=keep_prob)
multi_layer_cell = tf.contrib.rnn.MultiRNNCell([cell_drop] * n_layers)
rnn_outputs, states = tf.nn.dynamic_rnn(multi_layer_cell, X, dtype=tf.float32)
# Second option
import sys
is_training = (sys.argv[-1] == 'train')
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.float32, [None, n_steps, n_outputs])
cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons)
if is_training:
cell = tf.contrib.rnn.DropoutWrapper(cell, input_keep_prob=keep_prob)
multi_layer_cell = tf.contrib.rnn.MultiRNNCell([cell] * n_layers)
rnn_outputs, states = tf.nn.dynamic_rnn(multi_layer_cell, X, dtype=tf.float32)
# [....] rest of graph
init = tf.global_variables_initializer()
saver = tf.train.Saver()
with tf.Session() as sess:
if is_training:
init.run()
for k in range(n_iterations):
# [.... train model]
save_path = saver.save(sess, '/tmp/my_model.ckpt')
else:
saver.restore(sess, '/tmp/my_model.ckpt')
# [...] use model
|
|
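The snippet above is explicitly non-functional: its hyperparameters are undefined, and [cell] * n_layers reuses a single cell object, which ties the layers' weights together in TensorFlow 1.x. A hedged sketch of the first option with hypothetical sizes filled in and one distinct cell per layer (TF 1.x contrib API assumed):
import tensorflow as tf  # TensorFlow 1.x, matching the contrib API above
# Hypothetical shapes; the original snippet never defines these.
n_steps, n_inputs, n_neurons, n_layers = 20, 10, 100, 3
keep_prob = 0.5
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
# Build a separate wrapped cell per layer so layers do not share weights.
cells = [tf.contrib.rnn.DropoutWrapper(
             tf.contrib.rnn.BasicRNNCell(num_units=n_neurons),
             input_keep_prob=keep_prob)
         for _ in range(n_layers)]
multi_layer_cell = tf.contrib.rnn.MultiRNNCell(cells)
rnn_outputs, states = tf.nn.dynamic_rnn(multi_layer_cell, X, dtype=tf.float32)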
bff010be0ee1a8e512486777c47228449a766cd3
|
webhooks/admin.py
|
webhooks/admin.py
|
from django.contrib import admin
from .models import Webhook
admin.site.register(Webhook)
|
from django.contrib import admin
from .models import Webhook
class WebhookAdmin(admin.ModelAdmin):
list_display = ('__unicode__', 'event', 'url')
list_editable = ('event', 'url')
list_filter = ('event',)
admin.site.register(Webhook, WebhookAdmin)
|
Add custom ModelAdmin for easier editing from list view
|
Add custom ModelAdmin for easier editing from list view
|
Python
|
bsd-2-clause
|
chop-dbhi/django-webhooks,pombredanne/django-webhooks,chop-dbhi/django-webhooks,pombredanne/django-webhooks
|
from django.contrib import admin
from .models import Webhook
admin.site.register(Webhook)
Add custom ModelAdmin for easier editing from list view
|
from django.contrib import admin
from .models import Webhook
class WebhookAdmin(admin.ModelAdmin):
list_display = ('__unicode__', 'event', 'url')
list_editable = ('event', 'url')
list_filter = ('event',)
admin.site.register(Webhook, WebhookAdmin)
|
<commit_before>from django.contrib import admin
from .models import Webhook
admin.site.register(Webhook)
<commit_msg>Add custom ModelAdmin for easier editing from list view<commit_after>
|
from django.contrib import admin
from .models import Webhook
class WebhookAdmin(admin.ModelAdmin):
list_display = ('__unicode__', 'event', 'url')
list_editable = ('event', 'url')
list_filter = ('event',)
admin.site.register(Webhook, WebhookAdmin)
|
from django.contrib import admin
from .models import Webhook
admin.site.register(Webhook)
Add custom ModelAdmin for easier editing from list viewfrom django.contrib import admin
from .models import Webhook
class WebhookAdmin(admin.ModelAdmin):
list_display = ('__unicode__', 'event', 'url')
list_editable = ('event', 'url')
list_filter = ('event',)
admin.site.register(Webhook, WebhookAdmin)
|
<commit_before>from django.contrib import admin
from .models import Webhook
admin.site.register(Webhook)
<commit_msg>Add custom ModelAdmin for easier editing from list view<commit_after>from django.contrib import admin
from .models import Webhook
class WebhookAdmin(admin.ModelAdmin):
list_display = ('__unicode__', 'event', 'url')
list_editable = ('event', 'url')
list_filter = ('event',)
admin.site.register(Webhook, WebhookAdmin)
|
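Django only accepts this configuration because every list_editable field also appears in list_display, and the first list_display column (the change-form link) is left non-editable. The same admin in the decorator registration style (Django 1.7+, with __str__ for Python 3) would look roughly like:
from django.contrib import admin
from .models import Webhook  # same app-relative import as the record
@admin.register(Webhook)
class WebhookAdmin(admin.ModelAdmin):
    # Editable fields must also be listed in list_display; the leading
    # column stays the change-link and cannot be edited inline.
    list_display = ('__str__', 'event', 'url')
    list_editable = ('event', 'url')
    list_filter = ('event',)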
147218ee982598703ae59880892beb076552ea4c
|
fetch-binary-build.py
|
fetch-binary-build.py
|
import urllib2
import os
pieces = """
Security.hdrs.tar.gz
Security.root.tar.gz
SecurityTokend.hdrs.tar.gz
SecurityTokend.root.tar.gz
libsecurity_cdsa_client.hdrs.tar.gz
libsecurity_cdsa_client.root.tar.gz
libsecurity_cdsa_utilities.hdrs.tar.gz
libsecurity_cdsa_utilities.root.tar.gz
libsecurity_utilities.root.tar.gz
libsecurity_utilities.hdrs.tar.gz
"""
pieces = pieces.rstrip().lstrip().split("\n")
snowleopard = ["10A432", "10B504", "10C540", "10D573"]
snowleopard.sort()
snowleopard.reverse()
leopard = ["9A581", "9B18", "9C31", "9D34", "9E17", "9F33", "9G55", "9J61", "9L30"]
leopard.sort()
leopard.reverse()
ROOT = "http://src.macosforge.org/Roots/"
try:
os.mkdir("build-snowleopard")
except OSError:
pass
os.chdir("build-snowleopard")
for release in snowleopard:
for piece in pieces:
url = "%s%s/%s" % (ROOT, release, piece)
filename = piece
if not os.path.exists(filename):
try:
print "Trying %s" % (url)
req = urllib2.urlopen(url)
print "Fetching %s" % (url)
f = open(filename, "w")
f.write(req.read())
f.close()
except urllib2.HTTPError as problem:
if problem.getcode() != 404:
raise
|
Add script to fetch the binary pieces from Apple.
|
Add script to fetch the binary pieces from Apple.
|
Python
|
lgpl-2.1
|
dirkx/OpenSC.tokend,dirkx/OpenSC.tokend,dirkx/OpenSC.tokend,dirkx/OpenSC.tokend,dirkx/OpenSC.tokend,dirkx/OpenSC.tokend
|
Add script to fetch the binary pieces from Apple.
|
import urllib2
import os
pieces = """
Security.hdrs.tar.gz
Security.root.tar.gz
SecurityTokend.hdrs.tar.gz
SecurityTokend.root.tar.gz
libsecurity_cdsa_client.hdrs.tar.gz
libsecurity_cdsa_client.root.tar.gz
libsecurity_cdsa_utilities.hdrs.tar.gz
libsecurity_cdsa_utilities.root.tar.gz
libsecurity_utilities.root.tar.gz
libsecurity_utilities.hdrs.tar.gz
"""
pieces = pieces.rstrip().lstrip().split("\n")
snowleopard = ["10A432", "10B504", "10C540", "10D573"]
snowleopard.sort()
snowleopard.reverse()
leopard = ["9A581", "9B18", "9C31", "9D34", "9E17", "9F33", "9G55", "9J61", "9L30"]
leopard.sort()
leopard.reverse()
ROOT = "http://src.macosforge.org/Roots/"
try:
os.mkdir("build-snowleopard")
except OSError:
pass
os.chdir("build-snowleopard")
for release in snowleopard:
for piece in pieces:
url = "%s%s/%s" % (ROOT, release, piece)
filename = piece
if not os.path.exists(filename):
try:
print "Trying %s" % (url)
req = urllib2.urlopen(url)
print "Fetching %s" % (url)
f = open(filename, "w")
f.write(req.read())
f.close()
except urllib2.HTTPError as problem:
if problem.getcode() != 404:
raise
|
<commit_before><commit_msg>Add script to fetch the binary pieces from Apple.<commit_after>
|
import urllib2
import os
pieces = """
Security.hdrs.tar.gz
Security.root.tar.gz
SecurityTokend.hdrs.tar.gz
SecurityTokend.root.tar.gz
libsecurity_cdsa_client.hdrs.tar.gz
libsecurity_cdsa_client.root.tar.gz
libsecurity_cdsa_utilities.hdrs.tar.gz
libsecurity_cdsa_utilities.root.tar.gz
libsecurity_utilities.root.tar.gz
libsecurity_utilities.hdrs.tar.gz
"""
pieces = pieces.rstrip().lstrip().split("\n")
snowleopard = ["10A432", "10B504", "10C540", "10D573"]
snowleopard.sort()
snowleopard.reverse()
leopard = ["9A581", "9B18", "9C31", "9D34", "9E17", "9F33", "9G55", "9J61", "9L30"]
leopard.sort()
leopard.reverse()
ROOT = "http://src.macosforge.org/Roots/"
try:
os.mkdir("build-snowleopard")
except OSError:
pass
os.chdir("build-snowleopard")
for release in snowleopard:
for piece in pieces:
url = "%s%s/%s" % (ROOT, release, piece)
filename = piece
if not os.path.exists(filename):
try:
print "Trying %s" % (url)
req = urllib2.urlopen(url)
print "Fetching %s" % (url)
f = open(filename, "w")
f.write(req.read())
f.close()
except urllib2.HTTPError as problem:
if problem.getcode() != 404:
raise
|
Add script to fetch the binary pieces from Apple.import urllib2
import os
pieces = """
Security.hdrs.tar.gz
Security.root.tar.gz
SecurityTokend.hdrs.tar.gz
SecurityTokend.root.tar.gz
libsecurity_cdsa_client.hdrs.tar.gz
libsecurity_cdsa_client.root.tar.gz
libsecurity_cdsa_utilities.hdrs.tar.gz
libsecurity_cdsa_utilities.root.tar.gz
libsecurity_utilities.root.tar.gz
libsecurity_utilities.hdrs.tar.gz
"""
pieces = pieces.rstrip().lstrip().split("\n")
snowleopard = ["10A432", "10B504", "10C540", "10D573"]
snowleopard.sort()
snowleopard.reverse()
leopard = ["9A581", "9B18", "9C31", "9D34", "9E17", "9F33", "9G55", "9J61", "9L30"]
leopard.sort()
leopard.reverse()
ROOT = "http://src.macosforge.org/Roots/"
try:
os.mkdir("build-snowleopard")
except OSError:
pass
os.chdir("build-snowleopard")
for release in snowleopard:
for piece in pieces:
url = "%s%s/%s" % (ROOT, release, piece)
filename = piece
if not os.path.exists(filename):
try:
print "Trying %s" % (url)
req = urllib2.urlopen(url)
print "Fetching %s" % (url)
f = open(filename, "w")
f.write(req.read())
f.close()
except urllib2.HTTPError as problem:
if problem.getcode() != 404:
raise
|
<commit_before><commit_msg>Add script to fetch the binary pieces from Apple.<commit_after>import urllib2
import os
pieces = """
Security.hdrs.tar.gz
Security.root.tar.gz
SecurityTokend.hdrs.tar.gz
SecurityTokend.root.tar.gz
libsecurity_cdsa_client.hdrs.tar.gz
libsecurity_cdsa_client.root.tar.gz
libsecurity_cdsa_utilities.hdrs.tar.gz
libsecurity_cdsa_utilities.root.tar.gz
libsecurity_utilities.root.tar.gz
libsecurity_utilities.hdrs.tar.gz
"""
pieces = pieces.rstrip().lstrip().split("\n")
snowleopard = ["10A432", "10B504", "10C540", "10D573"]
snowleopard.sort()
snowleopard.reverse()
leopard = ["9A581", "9B18", "9C31", "9D34", "9E17", "9F33", "9G55", "9J61", "9L30"]
leopard.sort()
leopard.reverse()
ROOT = "http://src.macosforge.org/Roots/"
try:
os.mkdir("build-snowleopard")
except OSError:
pass
os.chdir("build-snowleopard")
for release in snowleopard:
for piece in pieces:
url = "%s%s/%s" % (ROOT, release, piece)
filename = piece
if not os.path.exists(filename):
try:
print "Trying %s" % (url)
req = urllib2.urlopen(url)
print "Fetching %s" % (url)
f = open(filename, "w")
f.write(req.read())
f.close()
except urllib2.HTTPError as problem:
if problem.getcode() != 404:
raise
|
|
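The script is Python 2 (print statements, urllib2). A rough Python 3 translation of the inner fetch logic, keeping the same skip-on-404 behaviour, might read:
import os
import urllib.error
import urllib.request
ROOT = "http://src.macosforge.org/Roots/"
def fetch(release, piece):
    url = "%s%s/%s" % (ROOT, release, piece)
    if os.path.exists(piece):
        return
    try:
        print("Fetching %s" % url)
        with urllib.request.urlopen(url) as req, open(piece, "wb") as f:
            f.write(req.read())
    except urllib.error.HTTPError as problem:
        # Missing roots are expected for some builds; re-raise anything
        # other than a 404.
        if problem.code != 404:
            raise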
5c37027a2d1e686523db6aa75556c4148d27ca70
|
yithlibraryserver/tests/test_config.py
|
yithlibraryserver/tests/test_config.py
|
import os
import unittest
from yithlibraryserver.config import read_setting_from_env
class ConfigTests(unittest.TestCase):
def test_read_setting_from_env(self):
settings = {
'foo_bar': '1',
}
self.assertEqual('1', read_setting_from_env(settings, 'foo_bar'))
self.assertEqual('default',
read_setting_from_env(settings, 'new_option', 'default'))
self.assertEqual(None,
read_setting_from_env(settings, 'new_option'))
os.environ['FOO_BAR'] = '2'
self.assertEqual('2', read_setting_from_env(settings, 'foo_bar'))
|
Add a test for the read_setting_from_env function
|
Add a test for the read_setting_from_env function
|
Python
|
agpl-3.0
|
lorenzogil/yith-library-server,Yaco-Sistemas/yith-library-server,lorenzogil/yith-library-server,Yaco-Sistemas/yith-library-server,Yaco-Sistemas/yith-library-server,lorenzogil/yith-library-server
|
Add a test for the read_setting_from_env function
|
import os
import unittest
from yithlibraryserver.config import read_setting_from_env
class ConfigTests(unittest.TestCase):
def test_read_setting_from_env(self):
settings = {
'foo_bar': '1',
}
self.assertEqual('1', read_setting_from_env(settings, 'foo_bar'))
self.assertEqual('default',
read_setting_from_env(settings, 'new_option', 'default'))
self.assertEqual(None,
read_setting_from_env(settings, 'new_option'))
os.environ['FOO_BAR'] = '2'
self.assertEqual('2', read_setting_from_env(settings, 'foo_bar'))
|
<commit_before><commit_msg>Add a test for the read_setting_from_env function<commit_after>
|
import os
import unittest
from yithlibraryserver.config import read_setting_from_env
class ConfigTests(unittest.TestCase):
def test_read_setting_from_env(self):
settings = {
'foo_bar': '1',
}
self.assertEqual('1', read_setting_from_env(settings, 'foo_bar'))
self.assertEqual('default',
read_setting_from_env(settings, 'new_option', 'default'))
self.assertEqual(None,
read_setting_from_env(settings, 'new_option'))
os.environ['FOO_BAR'] = '2'
self.assertEqual('2', read_setting_from_env(settings, 'foo_bar'))
|
Add a test for the read_setting_from_env functionimport os
import unittest
from yithlibraryserver.config import read_setting_from_env
class ConfigTests(unittest.TestCase):
def test_read_setting_from_env(self):
settings = {
'foo_bar': '1',
}
self.assertEqual('1', read_setting_from_env(settings, 'foo_bar'))
self.assertEqual('default',
read_setting_from_env(settings, 'new_option', 'default'))
self.assertEqual(None,
read_setting_from_env(settings, 'new_option'))
os.environ['FOO_BAR'] = '2'
self.assertEqual('2', read_setting_from_env(settings, 'foo_bar'))
|
<commit_before><commit_msg>Add a test for the read_setting_from_env function<commit_after>import os
import unittest
from yithlibraryserver.config import read_setting_from_env
class ConfigTests(unittest.TestCase):
def test_read_setting_from_env(self):
settings = {
'foo_bar': '1',
}
self.assertEqual('1', read_setting_from_env(settings, 'foo_bar'))
self.assertEqual('default',
read_setting_from_env(settings, 'new_option', 'default'))
self.assertEqual(None,
read_setting_from_env(settings, 'new_option'))
os.environ['FOO_BAR'] = '2'
self.assertEqual('2', read_setting_from_env(settings, 'foo_bar'))
|
|
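One caveat in the test above: it sets FOO_BAR in os.environ and never removes it, so the value can leak into later tests. A variant sketch that scopes the override with unittest.mock.patch.dict (same read_setting_from_env call as the record; assumes FOO_BAR is unset in the outer environment):
import os
import unittest
from unittest import mock
from yithlibraryserver.config import read_setting_from_env
class ConfigEnvScopedTests(unittest.TestCase):
    def test_env_overrides_setting(self):
        settings = {'foo_bar': '1'}
        with mock.patch.dict(os.environ, {'FOO_BAR': '2'}):
            # Inside the block the environment variable wins.
            self.assertEqual('2', read_setting_from_env(settings, 'foo_bar'))
        # patch.dict restores the previous environment on exit, so the
        # settings value is visible again.
        self.assertEqual('1', read_setting_from_env(settings, 'foo_bar'))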
5e74e5f527a5ce85f730afa5944e46d6bf843794
|
httpavail.py
|
httpavail.py
|
import sys, requests, argparse
from retrying import retry
argparser = argparse.ArgumentParser()
argparser.add_argument('url')
argparser.add_argument('-t', '--timeout', type=int, default=120)
argparser.add_argument('-d', '--delay', type=int, default=1)
args = argparser.parse_args()
@retry(stop_max_delay = args.timeout * 1000, wait_fixed = args.delay * 1000)
def check():
print('Checking', args.url)
sys.stdout.flush()
response = requests.get(args.url, timeout = 1)
response.raise_for_status()
return response.status_code
if check() == 200:
sys.exit(0)
else:
sys.exit(1)
|
Add http availability checker script
|
Add http availability checker script
|
Python
|
mit
|
ulich/docker-httpavail
|
Add http availability checker script
|
import sys, requests, argparse
from retrying import retry
argparser = argparse.ArgumentParser()
argparser.add_argument('url')
argparser.add_argument('-t', '--timeout', type=int, default=120)
argparser.add_argument('-d', '--delay', type=int, default=1)
args = argparser.parse_args()
@retry(stop_max_delay = args.timeout * 1000, wait_fixed = args.delay * 1000)
def check():
print('Checking', args.url)
sys.stdout.flush()
response = requests.get(args.url, timeout = 1)
response.raise_for_status()
return response.status_code
if check() == 200:
sys.exit(0)
else:
sys.exit(1)
|
<commit_before><commit_msg>Add http availability checker script<commit_after>
|
import sys, requests, argparse
from retrying import retry
argparser = argparse.ArgumentParser()
argparser.add_argument('url')
argparser.add_argument('-t', '--timeout', type=int, default=120)
argparser.add_argument('-d', '--delay', type=int, default=1)
args = argparser.parse_args()
@retry(stop_max_delay = args.timeout * 1000, wait_fixed = args.delay * 1000)
def check():
print('Checking', args.url)
sys.stdout.flush()
response = requests.get(args.url, timeout = 1)
response.raise_for_status()
return response.status_code
if check() == 200:
sys.exit(0)
else:
sys.exit(1)
|
Add http availability checker scriptimport sys, requests, argparse
from retrying import retry
argparser = argparse.ArgumentParser()
argparser.add_argument('url')
argparser.add_argument('-t', '--timeout', type=int, default=120)
argparser.add_argument('-d', '--delay', type=int, default=1)
args = argparser.parse_args()
@retry(stop_max_delay = args.timeout * 1000, wait_fixed = args.delay * 1000)
def check():
print('Checking', args.url)
sys.stdout.flush()
response = requests.get(args.url, timeout = 1)
response.raise_for_status()
return response.status_code
if check() == 200:
sys.exit(0)
else:
sys.exit(1)
|
<commit_before><commit_msg>Add http availability checker script<commit_after>import sys, requests, argparse
from retrying import retry
argparser = argparse.ArgumentParser()
argparser.add_argument('url')
argparser.add_argument('-t', '--timeout', type=int, default=120)
argparser.add_argument('-d', '--delay', type=int, default=1)
args = argparser.parse_args()
@retry(stop_max_delay = args.timeout * 1000, wait_fixed = args.delay * 1000)
def check():
print('Checking', args.url)
sys.stdout.flush()
response = requests.get(args.url, timeout = 1)
response.raise_for_status()
return response.status_code
if check() == 200:
sys.exit(0)
else:
sys.exit(1)
|
|
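The retrying decorator used above re-invokes the wrapped function every wait_fixed milliseconds until it returns without raising or stop_max_delay milliseconds elapse; a minimal self-contained illustration of those two parameters:
from retrying import retry
attempts = []
@retry(stop_max_delay=3000, wait_fixed=500)
def flaky():
    # Fails twice, then succeeds; retry keeps calling until it returns.
    attempts.append(1)
    if len(attempts) < 3:
        raise RuntimeError("not ready yet")
    return "ok"
print(flaky(), "after", len(attempts), "attempts")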
42e7c630e10d651108166646ae45d7b8682eee18
|
test/Test_sliding_window_tree.py
|
test/Test_sliding_window_tree.py
|
import unittest
import sliding_window_tree
# For now, use a *.remap.sam file (paired end reads aligned to a consensus sequence with indels removed).
SAM_FILENAME = "./data/TestSample-RT_S17.HIV1B-vif.remap.sam"
MAPQ_CUTOFF = 0 # alignment quality cutoff
MAX_PROP_N = 0 # maximum proportion of N bases in MSA-aligned sequence
READ_QUAL_CUTOFF = 20 # Phred quality score cutoff [0,40]
REFERENCE_FASTA = "./data/TestSample-RT_S17.HIV1B-vif.10.conseq"
MIN_WINDOW_BREADTH_COV_FRACTION = 0.5
MIN_WINDOW_DEPTH_COV = 2
class TestSlidingWindowTree(unittest.TestCase):
def test_process_windows(self):
sliding_window_tree.process_windows(sam_filename=SAM_FILENAME,
ref_fasta_filename=REFERENCE_FASTA,
mapping_cutoff=MAPQ_CUTOFF,
read_qual_cutoff=READ_QUAL_CUTOFF,
max_prop_N=MAX_PROP_N,
window_breadth_thresh=MIN_WINDOW_BREADTH_COV_FRACTION,
window_depth_thresh=MIN_WINDOW_DEPTH_COV)
if __name__ == '__main__':
unittest.main()
|
Add unit test for full pipeline to get sliding window dn/ds
|
Add unit test for full pipeline to get sliding window dn/ds
|
Python
|
bsd-2-clause
|
cfe-lab/Umberjack,cfe-lab/Umberjack
|
Add unit test for full pipeline to get sliding window dn/ds
|
import unittest
import sliding_window_tree
# For now, use a *.remap.sam file (paired end reads aligned to a consensus sequence with indels removed).
SAM_FILENAME = "./data/TestSample-RT_S17.HIV1B-vif.remap.sam"
MAPQ_CUTOFF = 0 # alignment quality cutoff
MAX_PROP_N = 0 # maximum proportion of N bases in MSA-aligned sequence
READ_QUAL_CUTOFF = 20 # Phred quality score cutoff [0,40]
REFERENCE_FASTA = "./data/TestSample-RT_S17.HIV1B-vif.10.conseq"
MIN_WINDOW_BREADTH_COV_FRACTION = 0.5
MIN_WINDOW_DEPTH_COV = 2
class TestSlidingWindowTree(unittest.TestCase):
def test_process_windows(self):
sliding_window_tree.process_windows(sam_filename=SAM_FILENAME,
ref_fasta_filename=REFERENCE_FASTA,
mapping_cutoff=MAPQ_CUTOFF,
read_qual_cutoff=READ_QUAL_CUTOFF,
max_prop_N=MAX_PROP_N,
window_breadth_thresh=MIN_WINDOW_BREADTH_COV_FRACTION,
window_depth_thresh=MIN_WINDOW_DEPTH_COV)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add unit test for full pipeline to get sliding window dn/ds<commit_after>
|
import unittest
import sliding_window_tree
# For now, use a *.remap.sam file (paired end reads aligned to a consensus sequence with indels removed).
SAM_FILENAME = "./data/TestSample-RT_S17.HIV1B-vif.remap.sam"
MAPQ_CUTOFF = 0 # alignment quality cutoff
MAX_PROP_N = 0 # maximum proportion of N bases in MSA-aligned sequence
READ_QUAL_CUTOFF = 20 # Phred quality score cutoff [0,40]
REFERENCE_FASTA = "./data/TestSample-RT_S17.HIV1B-vif.10.conseq"
MIN_WINDOW_BREADTH_COV_FRACTION = 0.5
MIN_WINDOW_DEPTH_COV = 2
class TestSlidingWindowTree(unittest.TestCase):
def test_process_windows(self):
sliding_window_tree.process_windows(sam_filename=SAM_FILENAME,
ref_fasta_filename=REFERENCE_FASTA,
mapping_cutoff=MAPQ_CUTOFF,
read_qual_cutoff=READ_QUAL_CUTOFF,
max_prop_N=MAX_PROP_N,
window_breadth_thresh=MIN_WINDOW_BREADTH_COV_FRACTION,
window_depth_thresh=MIN_WINDOW_DEPTH_COV)
if __name__ == '__main__':
unittest.main()
|
Add unit test for full pipeline to get sliding window dn/dsimport unittest
import sliding_window_tree
# For now, use a *.remap.sam file (paired end reads aligned to a consensus sequence with indels removed).
SAM_FILENAME = "./data/TestSample-RT_S17.HIV1B-vif.remap.sam"
MAPQ_CUTOFF = 0 # alignment quality cutoff
MAX_PROP_N = 0 # maximum proportion of N bases in MSA-aligned sequence
READ_QUAL_CUTOFF = 20 # Phred quality score cutoff [0,40]
REFERENCE_FASTA = "./data/TestSample-RT_S17.HIV1B-vif.10.conseq"
MIN_WINDOW_BREADTH_COV_FRACTION = 0.5
MIN_WINDOW_DEPTH_COV = 2
class TestSlidingWindowTree(unittest.TestCase):
def test_process_windows(self):
sliding_window_tree.process_windows(sam_filename=SAM_FILENAME,
ref_fasta_filename=REFERENCE_FASTA,
mapping_cutoff=MAPQ_CUTOFF,
read_qual_cutoff=READ_QUAL_CUTOFF,
max_prop_N=MAX_PROP_N,
window_breadth_thresh=MIN_WINDOW_BREADTH_COV_FRACTION,
window_depth_thresh=MIN_WINDOW_DEPTH_COV)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add unit test for full pipeline to get sliding window dn/ds<commit_after>import unittest
import sliding_window_tree
# For now, use a *.remap.sam file (paired end reads aligned to a consensus sequence with indels removed).
SAM_FILENAME = "./data/TestSample-RT_S17.HIV1B-vif.remap.sam"
MAPQ_CUTOFF = 0 # alignment quality cutoff
MAX_PROP_N = 0 # maximum proportion of N bases in MSA-aligned sequence
READ_QUAL_CUTOFF = 20 # Phred quality score cutoff [0,40]
REFERENCE_FASTA = "./data/TestSample-RT_S17.HIV1B-vif.10.conseq"
MIN_WINDOW_BREADTH_COV_FRACTION = 0.5
MIN_WINDOW_DEPTH_COV = 2
class TestSlidingWindowTree(unittest.TestCase):
def test_process_windows(self):
sliding_window_tree.process_windows(sam_filename=SAM_FILENAME,
ref_fasta_filename=REFERENCE_FASTA,
mapping_cutoff=MAPQ_CUTOFF,
read_qual_cutoff=READ_QUAL_CUTOFF,
max_prop_N=MAX_PROP_N,
window_breadth_thresh=MIN_WINDOW_BREADTH_COV_FRACTION,
window_depth_thresh=MIN_WINDOW_DEPTH_COV)
if __name__ == '__main__':
unittest.main()
|
|
b348d261468a21ef80fc0e42d0a8ebc25d2c6cea
|
generic_filter/itimer.py
|
generic_filter/itimer.py
|
# IPython log file
import genfilt as c
import genfilt_py as p
import numpy as np
image = np.random.rand(500, 500)
get_ipython().magic('timeit out = c.maximum_filter(image)')
get_ipython().magic('timeit out = p.maximum_filter(image)')
|
Add IPython script to run benchmarks
|
Add IPython script to run benchmarks
The `itimer.py` script uses IPython magic so it must be run using
`ipython -i`, *not* with the vanilla Python interpreter.
|
Python
|
mit
|
jni/performance-tests
|
Add IPython script to run benchmarks
The `itimer.py` script uses IPython magic so it must be run using
`ipython -i`, *not* with the vanilla Python interpreter.
|
# IPython log file
import genfilt as c
import genfilt_py as p
import numpy as np
image = np.random.rand(500, 500)
get_ipython().magic('timeit out = c.maximum_filter(image)')
get_ipython().magic('timeit out = p.maximum_filter(image)')
|
<commit_before><commit_msg>Add IPython script to run benchmarks
The `itimer.py` script uses IPython magic so it must be run using
`ipython -i`, *not* with the vanilla Python interpreter.<commit_after>
|
# IPython log file
import genfilt as c
import genfilt_py as p
import numpy as np
image = np.random.rand(500, 500)
get_ipython().magic('timeit out = c.maximum_filter(image)')
get_ipython().magic('timeit out = p.maximum_filter(image)')
|
Add IPython script to run benchmarks
The `itimer.py` script uses IPython magic so it must be run using
`ipython -i`, *not* with the vanilla Python interpreter.# IPython log file
import genfilt as c
import genfilt_py as p
import numpy as np
image = np.random.rand(500, 500)
get_ipython().magic('timeit out = c.maximum_filter(image)')
get_ipython().magic('timeit out = p.maximum_filter(image)')
|
<commit_before><commit_msg>Add IPython script to run benchmarks
The `itimer.py` script uses IPython magic so it must be run using
`ipython -i`, *not* with the vanilla Python interpreter.<commit_after># IPython log file
import genfilt as c
import genfilt_py as p
import numpy as np
image = np.random.rand(500, 500)
get_ipython().magic('timeit out = c.maximum_filter(image)')
get_ipython().magic('timeit out = p.maximum_filter(image)')
|
|
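get_ipython().magic(...) was later deprecated in favour of run_line_magic(name, line); the same benchmark in the newer style (still requiring an IPython session, with a NumPy call standing in for the project-local genfilt modules):
import numpy as np
image = np.random.rand(500, 500)
ip = get_ipython()  # defined only when running under IPython
ip.run_line_magic('timeit', 'out = np.maximum.reduce(image)')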
b66838605bac08fbd889c4efe9fc9f68407c8eb0
|
libgstc/python/gstcerror.py
|
libgstc/python/gstcerror.py
|
# GStreamer Daemon - gst-launch on steroids
# Python client library abstracting gstd interprocess communication
# Copyright (c) 2015-2019 RidgeRun, LLC (http://www.ridgerun.com)
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
import traceback
"""
GSTC - GstdError Class
"""
class GstdError(Exception):
"""Raised when Gstd IPC fails"""
pass
"""
GSTC - GstcError Class
"""
class GstcError(Exception):
"""Raised when the Gstd python client fails internally"""
pass
|
Add gstc error handler class
|
Add gstc error handler class
|
Python
|
lgpl-2.1
|
RidgeRun/gstd-1.x,RidgeRun/gstd-1.x,RidgeRun/gstd-1.x,RidgeRun/gstd-1.x
|
Add gstc error handler class
|
# GStreamer Daemon - gst-launch on steroids
# Python client library abstracting gstd interprocess communication
# Copyright (c) 2015-2019 RidgeRun, LLC (http://www.ridgerun.com)
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
import traceback
"""
GSTC - GstdError Class
"""
class GstdError(Exception):
"""Raised when Gstd IPC fails"""
pass
"""
GSTC - GstcError Class
"""
class GstcError(Exception):
"""Raised when the Gstd python client fails internally"""
pass
|
<commit_before><commit_msg>Add gstc error handler class<commit_after>
|
# GStreamer Daemon - gst-launch on steroids
# Python client library abstracting gstd interprocess communication
# Copyright (c) 2015-2019 RidgeRun, LLC (http://www.ridgerun.com)
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
import traceback
"""
GSTC - GstdError Class
"""
class GstdError(Exception):
"""Raised when Gstd IPC fails"""
pass
"""
GSTC - GstcError Class
"""
class GstcError(Exception):
"""Raised when the Gstd python client fails internally"""
pass
|
Add gstc error handler class# GStreamer Daemon - gst-launch on steroids
# Python client library abstracting gstd interprocess communication
# Copyright (c) 2015-2019 RidgeRun, LLC (http://www.ridgerun.com)
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
import traceback
"""
GSTC - GstdError Class
"""
class GstdError(Exception):
"""Raised when Gstd IPC fails"""
pass
"""
GSTC - GstcError Class
"""
class GstcError(Exception):
"""Raised when the Gstd python client fails internally"""
pass
|
<commit_before><commit_msg>Add gstc error handler class<commit_after># GStreamer Daemon - gst-launch on steroids
# Python client library abstracting gstd interprocess communication
# Copyright (c) 2015-2019 RidgeRun, LLC (http://www.ridgerun.com)
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
import traceback
"""
GSTC - GstdError Class
"""
class GstdError(Exception):
"""Raised when Gstd IPC fails"""
pass
"""
GSTC - GstcError Class
"""
class GstcError(Exception):
"""Raised when the Gstd python client fails internally"""
pass
|
|
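A sketch of how a caller might use the split between the two classes, with a hypothetical reply-parsing helper (GstdError for failures the daemon reports, GstcError for failures inside the client itself); it assumes the two classes above are in scope:
def parse_daemon_reply(reply):
    if reply is None:
        raise GstcError("no reply received from gstd")
    if reply.get("code", 0) != 0:
        raise GstdError(reply.get("description", "gstd returned an error"))
    return reply
try:
    parse_daemon_reply({"code": 1, "description": "resource not found"})
except GstdError as err:
    print("daemon error:", err)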
ae65e73c8ec9564613aaa9acd3868b9fe15f9c63
|
IPython/utils/tempdir.py
|
IPython/utils/tempdir.py
|
"""TemporaryDirectory class, copied from Python 3.2.
This is copied from the stdlib and will be standard in Python 3.2 and onwards.
"""
# This code should only be used in Python versions < 3.2, since after that we
# can rely on the stdlib itself.
try:
from tempfile import TemporaryDirectory
except ImportError:
import os as _os
from tempfile import mkdtemp, template
class TemporaryDirectory(object):
"""Create and return a temporary directory. This has the same
behavior as mkdtemp but can be used as a context manager. For
example:
with TemporaryDirectory() as tmpdir:
...
Upon exiting the context, the directory and everything contained
in it are removed.
"""
def __init__(self, suffix="", prefix=template, dir=None):
self.name = mkdtemp(suffix, prefix, dir)
self._closed = False
def __enter__(self):
return self.name
def cleanup(self):
if not self._closed:
self._rmtree(self.name)
self._closed = True
def __exit__(self, exc, value, tb):
self.cleanup()
__del__ = cleanup
# XXX (ncoghlan): The following code attempts to make
# this class tolerant of the module nulling out process
# that happens during CPython interpreter shutdown
# Alas, it doesn't actually manage it. See issue #10188
_listdir = staticmethod(_os.listdir)
_path_join = staticmethod(_os.path.join)
_isdir = staticmethod(_os.path.isdir)
_remove = staticmethod(_os.remove)
_rmdir = staticmethod(_os.rmdir)
_os_error = _os.error
def _rmtree(self, path):
# Essentially a stripped down version of shutil.rmtree. We can't
# use globals because they may be None'ed out at shutdown.
for name in self._listdir(path):
fullname = self._path_join(path, name)
try:
isdir = self._isdir(fullname)
except self._os_error:
isdir = False
if isdir:
self._rmtree(fullname)
else:
try:
self._remove(fullname)
except self._os_error:
pass
try:
self._rmdir(path)
except self._os_error:
pass
|
Add context manager for temporary directories from Python 3.2
|
Add context manager for temporary directories from Python 3.2
This is very useful in tests, and after writing my own version I found
out that python 3.2 has now a basically identical implementation to
mine, so I copied that instead. We can remove our copy once we're not
supporting python 2.x anymore.
|
Python
|
bsd-3-clause
|
ipython/ipython,ipython/ipython
|
Add context manager for temporary directories from Python 3.2
This is very useful in tests, and after writing my own version I found
out that python 3.2 has now a basically identical implementation to
mine, so I copied that instead. We can remove our copy once we're not
supporting python 2.x anymore.
|
"""TemporaryDirectory class, copied from Python 3.2.
This is copied from the stdlib and will be standard in Python 3.2 and onwards.
"""
# This code should only be used in Python versions < 3.2, since after that we
# can rely on the stdlib itself.
try:
from tempfile import TemporaryDirectory
except ImportError:
import os as _os
from tempfile import mkdtemp, template
class TemporaryDirectory(object):
"""Create and return a temporary directory. This has the same
behavior as mkdtemp but can be used as a context manager. For
example:
with TemporaryDirectory() as tmpdir:
...
Upon exiting the context, the directory and everything contained
in it are removed.
"""
def __init__(self, suffix="", prefix=template, dir=None):
self.name = mkdtemp(suffix, prefix, dir)
self._closed = False
def __enter__(self):
return self.name
def cleanup(self):
if not self._closed:
self._rmtree(self.name)
self._closed = True
def __exit__(self, exc, value, tb):
self.cleanup()
__del__ = cleanup
# XXX (ncoghlan): The following code attempts to make
# this class tolerant of the module nulling out process
# that happens during CPython interpreter shutdown
# Alas, it doesn't actually manage it. See issue #10188
_listdir = staticmethod(_os.listdir)
_path_join = staticmethod(_os.path.join)
_isdir = staticmethod(_os.path.isdir)
_remove = staticmethod(_os.remove)
_rmdir = staticmethod(_os.rmdir)
_os_error = _os.error
def _rmtree(self, path):
# Essentially a stripped down version of shutil.rmtree. We can't
# use globals because they may be None'ed out at shutdown.
for name in self._listdir(path):
fullname = self._path_join(path, name)
try:
isdir = self._isdir(fullname)
except self._os_error:
isdir = False
if isdir:
self._rmtree(fullname)
else:
try:
self._remove(fullname)
except self._os_error:
pass
try:
self._rmdir(path)
except self._os_error:
pass
|
<commit_before><commit_msg>Add context manager for temporary directories from Python 3.2
This is very useful in tests, and after writing my own version I found
out that python 3.2 has now a basically identical implementation to
mine, so I copied that instead. We can remove our copy once we're not
supporting python 2.x anymore.<commit_after>
|
"""TemporaryDirectory class, copied from Python 3.2.
This is copied from the stdlib and will be standard in Python 3.2 and onwards.
"""
# This code should only be used in Python versions < 3.2, since after that we
# can rely on the stdlib itself.
try:
from tempfile import TemporaryDirectory
except ImportError:
import os as _os
from tempfile import mkdtemp, template
class TemporaryDirectory(object):
"""Create and return a temporary directory. This has the same
behavior as mkdtemp but can be used as a context manager. For
example:
with TemporaryDirectory() as tmpdir:
...
Upon exiting the context, the directory and everything contained
in it are removed.
"""
def __init__(self, suffix="", prefix=template, dir=None):
self.name = mkdtemp(suffix, prefix, dir)
self._closed = False
def __enter__(self):
return self.name
def cleanup(self):
if not self._closed:
self._rmtree(self.name)
self._closed = True
def __exit__(self, exc, value, tb):
self.cleanup()
__del__ = cleanup
# XXX (ncoghlan): The following code attempts to make
# this class tolerant of the module nulling out process
# that happens during CPython interpreter shutdown
# Alas, it doesn't actually manage it. See issue #10188
_listdir = staticmethod(_os.listdir)
_path_join = staticmethod(_os.path.join)
_isdir = staticmethod(_os.path.isdir)
_remove = staticmethod(_os.remove)
_rmdir = staticmethod(_os.rmdir)
_os_error = _os.error
def _rmtree(self, path):
# Essentially a stripped down version of shutil.rmtree. We can't
# use globals because they may be None'ed out at shutdown.
for name in self._listdir(path):
fullname = self._path_join(path, name)
try:
isdir = self._isdir(fullname)
except self._os_error:
isdir = False
if isdir:
self._rmtree(fullname)
else:
try:
self._remove(fullname)
except self._os_error:
pass
try:
self._rmdir(path)
except self._os_error:
pass
|
Add context manager for temporary directories from Python 3.2
This is very useful in tests, and after writing my own version I found
out that python 3.2 has now a basically identical implementation to
mine, so I copied that instead. We can remove our copy once we're not
supporting python 2.x anymore."""TemporaryDirectory class, copied from Python 3.2.
This is copied from the stdlib and will be standard in Python 3.2 and onwards.
"""
# This code should only be used in Python versions < 3.2, since after that we
# can rely on the stdlib itself.
try:
from tempfile import TemporaryDirectory
except ImportError:
import os as _os
from tempfile import mkdtemp, template
class TemporaryDirectory(object):
"""Create and return a temporary directory. This has the same
behavior as mkdtemp but can be used as a context manager. For
example:
with TemporaryDirectory() as tmpdir:
...
Upon exiting the context, the directory and everything contained
in it are removed.
"""
def __init__(self, suffix="", prefix=template, dir=None):
self.name = mkdtemp(suffix, prefix, dir)
self._closed = False
def __enter__(self):
return self.name
def cleanup(self):
if not self._closed:
self._rmtree(self.name)
self._closed = True
def __exit__(self, exc, value, tb):
self.cleanup()
__del__ = cleanup
# XXX (ncoghlan): The following code attempts to make
# this class tolerant of the module nulling out process
# that happens during CPython interpreter shutdown
# Alas, it doesn't actually manage it. See issue #10188
_listdir = staticmethod(_os.listdir)
_path_join = staticmethod(_os.path.join)
_isdir = staticmethod(_os.path.isdir)
_remove = staticmethod(_os.remove)
_rmdir = staticmethod(_os.rmdir)
_os_error = _os.error
def _rmtree(self, path):
# Essentially a stripped down version of shutil.rmtree. We can't
# use globals because they may be None'ed out at shutdown.
for name in self._listdir(path):
fullname = self._path_join(path, name)
try:
isdir = self._isdir(fullname)
except self._os_error:
isdir = False
if isdir:
self._rmtree(fullname)
else:
try:
self._remove(fullname)
except self._os_error:
pass
try:
self._rmdir(path)
except self._os_error:
pass
|
<commit_before><commit_msg>Add context manager for temporary directories from Python 3.2
This is very useful in tests, and after writing my own version I found
out that python 3.2 has now a basically identical implementation to
mine, so I copied that instead. We can remove our copy once we're not
supporting python 2.x anymore.<commit_after>"""TemporaryDirectory class, copied from Python 3.2.
This is copied from the stdlib and will be standard in Python 3.2 and onwards.
"""
# This code should only be used in Python versions < 3.2, since after that we
# can rely on the stdlib itself.
try:
from tempfile import TemporaryDirectory
except ImportError:
import os as _os
from tempfile import mkdtemp, template
class TemporaryDirectory(object):
"""Create and return a temporary directory. This has the same
behavior as mkdtemp but can be used as a context manager. For
example:
with TemporaryDirectory() as tmpdir:
...
Upon exiting the context, the directory and everything contained
in it are removed.
"""
def __init__(self, suffix="", prefix=template, dir=None):
self.name = mkdtemp(suffix, prefix, dir)
self._closed = False
def __enter__(self):
return self.name
def cleanup(self):
if not self._closed:
self._rmtree(self.name)
self._closed = True
def __exit__(self, exc, value, tb):
self.cleanup()
__del__ = cleanup
# XXX (ncoghlan): The following code attempts to make
# this class tolerant of the module nulling out process
# that happens during CPython interpreter shutdown
# Alas, it doesn't actually manage it. See issue #10188
_listdir = staticmethod(_os.listdir)
_path_join = staticmethod(_os.path.join)
_isdir = staticmethod(_os.path.isdir)
_remove = staticmethod(_os.remove)
_rmdir = staticmethod(_os.rmdir)
_os_error = _os.error
def _rmtree(self, path):
# Essentially a stripped down version of shutil.rmtree. We can't
# use globals because they may be None'ed out at shutdown.
for name in self._listdir(path):
fullname = self._path_join(path, name)
try:
isdir = self._isdir(fullname)
except self._os_error:
isdir = False
if isdir:
self._rmtree(fullname)
else:
try:
self._remove(fullname)
except self._os_error:
pass
try:
self._rmdir(path)
except self._os_error:
pass
|
|
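Typical usage, identical whichever branch of the try/except supplies the class; a short sketch assuming the module path in the record:
import os
from IPython.utils.tempdir import TemporaryDirectory
with TemporaryDirectory(prefix="ipython-test-") as tmpdir:
    path = os.path.join(tmpdir, "scratch.txt")
    with open(path, "w") as f:
        f.write("transient data")
    assert os.path.exists(path)
# The directory and everything in it are removed when the block exits.
assert not os.path.exists(path)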
c7868451f5387f8c7d10303b268498633bbd4a2f
|
functional/tests/compute/v2/test_server.py
|
functional/tests/compute/v2/test_server.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from functional.common import test
class ServerTests(test.TestCase):
"""Functional tests for server. """
NAME = uuid.uuid4().hex
HEADERS = ['"Name"']
FIELDS = ['name']
@classmethod
def setUpClass(cls):
opts = cls.get_show_opts(cls.FIELDS)
# TODO(thowe): pull these values from clouds.yaml
flavor = '4'
image = 'cirros-0.3.4-x86_64-uec'
netid = ''
if netid:
nicargs = ' --nic net-id=' + netid
else:
nicargs = ''
raw_output = cls.openstack('server create --flavor ' + flavor +
' --image ' + image + nicargs + ' ' +
cls.NAME + opts)
expected = cls.NAME + '\n'
cls.assertOutput(expected, raw_output)
@classmethod
def tearDownClass(cls):
raw_output = cls.openstack('server delete ' + cls.NAME)
cls.assertOutput('', raw_output)
def test_server_list(self):
opts = self.get_list_opts(self.HEADERS)
raw_output = self.openstack('server list' + opts)
self.assertIn(self.NAME, raw_output)
def test_server_show(self):
opts = self.get_show_opts(self.FIELDS)
raw_output = self.openstack('server show ' + self.NAME + opts)
self.assertEqual(self.NAME + "\n", raw_output)
|
Add functional tests for server CRUD
|
Add functional tests for server CRUD
Change-Id: I77f292d47a9bea6a5b486ce513c0c19ec8c845dd
|
Python
|
apache-2.0
|
openstack/python-openstackclient,redhat-openstack/python-openstackclient,BjoernT/python-openstackclient,BjoernT/python-openstackclient,openstack/python-openstackclient,dtroyer/python-openstackclient,dtroyer/python-openstackclient,redhat-openstack/python-openstackclient
|
Add functional tests for server CRUD
Change-Id: I77f292d47a9bea6a5b486ce513c0c19ec8c845dd
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from functional.common import test
class ServerTests(test.TestCase):
"""Functional tests for server. """
NAME = uuid.uuid4().hex
HEADERS = ['"Name"']
FIELDS = ['name']
@classmethod
def setUpClass(cls):
opts = cls.get_show_opts(cls.FIELDS)
# TODO(thowe): pull these values from clouds.yaml
flavor = '4'
image = 'cirros-0.3.4-x86_64-uec'
netid = ''
if netid:
nicargs = ' --nic net-id=' + netid
else:
nicargs = ''
raw_output = cls.openstack('server create --flavor ' + flavor +
' --image ' + image + nicargs + ' ' +
cls.NAME + opts)
expected = cls.NAME + '\n'
cls.assertOutput(expected, raw_output)
@classmethod
def tearDownClass(cls):
raw_output = cls.openstack('server delete ' + cls.NAME)
cls.assertOutput('', raw_output)
def test_server_list(self):
opts = self.get_list_opts(self.HEADERS)
raw_output = self.openstack('server list' + opts)
self.assertIn(self.NAME, raw_output)
def test_server_show(self):
opts = self.get_show_opts(self.FIELDS)
raw_output = self.openstack('server show ' + self.NAME + opts)
self.assertEqual(self.NAME + "\n", raw_output)
|
<commit_before><commit_msg>Add functional tests for server CRUD
Change-Id: I77f292d47a9bea6a5b486ce513c0c19ec8c845dd<commit_after>
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from functional.common import test
class ServerTests(test.TestCase):
"""Functional tests for server. """
NAME = uuid.uuid4().hex
HEADERS = ['"Name"']
FIELDS = ['name']
@classmethod
def setUpClass(cls):
opts = cls.get_show_opts(cls.FIELDS)
# TODO(thowe): pull these values from clouds.yaml
flavor = '4'
image = 'cirros-0.3.4-x86_64-uec'
netid = ''
if netid:
nicargs = ' --nic net-id=' + netid
else:
nicargs = ''
raw_output = cls.openstack('server create --flavor ' + flavor +
' --image ' + image + nicargs + ' ' +
cls.NAME + opts)
expected = cls.NAME + '\n'
cls.assertOutput(expected, raw_output)
@classmethod
def tearDownClass(cls):
raw_output = cls.openstack('server delete ' + cls.NAME)
cls.assertOutput('', raw_output)
def test_server_list(self):
opts = self.get_list_opts(self.HEADERS)
raw_output = self.openstack('server list' + opts)
self.assertIn(self.NAME, raw_output)
def test_server_show(self):
opts = self.get_show_opts(self.FIELDS)
raw_output = self.openstack('server show ' + self.NAME + opts)
self.assertEqual(self.NAME + "\n", raw_output)
|
Add functional tests for server CRUD
Change-Id: I77f292d47a9bea6a5b486ce513c0c19ec8c845dd# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from functional.common import test
class ServerTests(test.TestCase):
"""Functional tests for server. """
NAME = uuid.uuid4().hex
HEADERS = ['"Name"']
FIELDS = ['name']
@classmethod
def setUpClass(cls):
opts = cls.get_show_opts(cls.FIELDS)
# TODO(thowe): pull these values from clouds.yaml
flavor = '4'
image = 'cirros-0.3.4-x86_64-uec'
netid = ''
if netid:
nicargs = ' --nic net-id=' + netid
else:
nicargs = ''
raw_output = cls.openstack('server create --flavor ' + flavor +
' --image ' + image + nicargs + ' ' +
cls.NAME + opts)
expected = cls.NAME + '\n'
cls.assertOutput(expected, raw_output)
@classmethod
def tearDownClass(cls):
raw_output = cls.openstack('server delete ' + cls.NAME)
cls.assertOutput('', raw_output)
def test_server_list(self):
opts = self.get_list_opts(self.HEADERS)
raw_output = self.openstack('server list' + opts)
self.assertIn(self.NAME, raw_output)
def test_server_show(self):
opts = self.get_show_opts(self.FIELDS)
raw_output = self.openstack('server show ' + self.NAME + opts)
self.assertEqual(self.NAME + "\n", raw_output)
|
<commit_before><commit_msg>Add functional tests for server CRUD
Change-Id: I77f292d47a9bea6a5b486ce513c0c19ec8c845dd<commit_after># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from functional.common import test
class ServerTests(test.TestCase):
"""Functional tests for server. """
NAME = uuid.uuid4().hex
HEADERS = ['"Name"']
FIELDS = ['name']
@classmethod
def setUpClass(cls):
opts = cls.get_show_opts(cls.FIELDS)
# TODO(thowe): pull these values from clouds.yaml
flavor = '4'
image = 'cirros-0.3.4-x86_64-uec'
netid = ''
if netid:
nicargs = ' --nic net-id=' + netid
else:
nicargs = ''
raw_output = cls.openstack('server create --flavor ' + flavor +
' --image ' + image + nicargs + ' ' +
cls.NAME + opts)
expected = cls.NAME + '\n'
cls.assertOutput(expected, raw_output)
@classmethod
def tearDownClass(cls):
raw_output = cls.openstack('server delete ' + cls.NAME)
cls.assertOutput('', raw_output)
def test_server_list(self):
opts = self.get_list_opts(self.HEADERS)
raw_output = self.openstack('server list' + opts)
self.assertIn(self.NAME, raw_output)
def test_server_show(self):
opts = self.get_show_opts(self.FIELDS)
raw_output = self.openstack('server show ' + self.NAME + opts)
self.assertEqual(self.NAME + "\n", raw_output)
|
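The TODO in setUpClass above suggests pulling the hard-coded flavor/image defaults from clouds.yaml; a minimal sketch of that, assuming PyYAML is available and a simple 'functional' section layout (the path and key names are illustration only, not the real clouds.yaml schema):

import yaml

def load_test_defaults(path='clouds.yaml'):
    """Return (flavor, image, netid), falling back to the record's values."""
    try:
        with open(path) as f:
            data = yaml.safe_load(f) or {}
    except IOError:
        data = {}
    section = data.get('functional', {})
    return (section.get('flavor', '4'),
            section.get('image', 'cirros-0.3.4-x86_64-uec'),
            section.get('netid', ''))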
|
42a81521776c3e0fb1ad740bbf98e9a37e59bc1a
|
scripts/tests/test_remove_wiki_title_forward_slashes.py
|
scripts/tests/test_remove_wiki_title_forward_slashes.py
|
from nose.tools import *
from framework.mongo import database as db
from scripts.remove_wiki_title_forward_slashes import main
from tests.base import OsfTestCase
from tests.factories import NodeWikiFactory, ProjectFactory
class TestRemoveWikiTitleForwardSlashes(OsfTestCase):
def test_forward_slash_is_removed_from_wiki_title(self):
project = ProjectFactory()
wiki = NodeWikiFactory(node=project, is_current=True)
invalid_name = 'invalid/name'
db.nodewikipage.update({'_id': wiki._id}, {'$set': {'page_name': invalid_name}})
project.wiki_pages_current['invalid/name'] = project.wiki_pages_current[wiki.page_name]
project.wiki_pages_versions['invalid/name'] = project.wiki_pages_versions[wiki.page_name]
project.save()
main()
wiki.reload()
assert_equal(wiki.page_name, 'invalidname')
assert_in('invalidname', project.wiki_pages_current)
assert_in('invalidname', project.wiki_pages_versions)
def test_valid_wiki_title(self):
project = ProjectFactory()
wiki = NodeWikiFactory(node=project, is_current=True)
page_name = wiki.page_name
main()
wiki.reload()
assert_equal(page_name, wiki.page_name)
assert_in(page_name, project.wiki_pages_current)
assert_in(page_name, project.wiki_pages_versions)
|
Add tests for removing "/" from wiki titles
|
Add tests for removing "/" from wiki titles
|
Python
|
apache-2.0
|
abought/osf.io,CenterForOpenScience/osf.io,brianjgeiger/osf.io,HalcyonChimera/osf.io,hmoco/osf.io,HalcyonChimera/osf.io,doublebits/osf.io,rdhyee/osf.io,monikagrabowska/osf.io,kch8qx/osf.io,Johnetordoff/osf.io,kwierman/osf.io,cwisecarver/osf.io,laurenrevere/osf.io,wearpants/osf.io,acshi/osf.io,emetsger/osf.io,TomHeatwole/osf.io,mattclark/osf.io,RomanZWang/osf.io,cwisecarver/osf.io,acshi/osf.io,cwisecarver/osf.io,zachjanicki/osf.io,pattisdr/osf.io,emetsger/osf.io,leb2dg/osf.io,abought/osf.io,felliott/osf.io,emetsger/osf.io,cslzchen/osf.io,mattclark/osf.io,icereval/osf.io,caneruguz/osf.io,mluke93/osf.io,rdhyee/osf.io,kch8qx/osf.io,TomBaxter/osf.io,sloria/osf.io,zachjanicki/osf.io,crcresearch/osf.io,abought/osf.io,baylee-d/osf.io,mfraezz/osf.io,alexschiller/osf.io,kch8qx/osf.io,cwisecarver/osf.io,mattclark/osf.io,caneruguz/osf.io,monikagrabowska/osf.io,icereval/osf.io,SSJohns/osf.io,RomanZWang/osf.io,wearpants/osf.io,DanielSBrown/osf.io,RomanZWang/osf.io,zachjanicki/osf.io,samchrisinger/osf.io,hmoco/osf.io,RomanZWang/osf.io,jnayak1/osf.io,chrisseto/osf.io,kch8qx/osf.io,brianjgeiger/osf.io,rdhyee/osf.io,amyshi188/osf.io,caseyrollins/osf.io,saradbowman/osf.io,monikagrabowska/osf.io,mluo613/osf.io,DanielSBrown/osf.io,CenterForOpenScience/osf.io,mfraezz/osf.io,sloria/osf.io,erinspace/osf.io,binoculars/osf.io,Nesiehr/osf.io,chennan47/osf.io,mluo613/osf.io,felliott/osf.io,mfraezz/osf.io,doublebits/osf.io,TomHeatwole/osf.io,binoculars/osf.io,HalcyonChimera/osf.io,Johnetordoff/osf.io,sloria/osf.io,cslzchen/osf.io,leb2dg/osf.io,rdhyee/osf.io,Nesiehr/osf.io,cslzchen/osf.io,abought/osf.io,caneruguz/osf.io,Nesiehr/osf.io,Nesiehr/osf.io,doublebits/osf.io,SSJohns/osf.io,kwierman/osf.io,HalcyonChimera/osf.io,baylee-d/osf.io,brianjgeiger/osf.io,TomBaxter/osf.io,hmoco/osf.io,doublebits/osf.io,wearpants/osf.io,aaxelb/osf.io,adlius/osf.io,wearpants/osf.io,zamattiac/osf.io,kwierman/osf.io,amyshi188/osf.io,zamattiac/osf.io,aaxelb/osf.io,aaxelb/osf.io,TomBaxter/osf.io,zachjanicki/osf.io,chrisseto/osf.io,RomanZWang/osf.io,mluo613/osf.io,TomHeatwole/osf.io,adlius/osf.io,SSJohns/osf.io,chrisseto/osf.io,samchrisinger/osf.io,felliott/osf.io,pattisdr/osf.io,zamattiac/osf.io,alexschiller/osf.io,kwierman/osf.io,erinspace/osf.io,caseyrollins/osf.io,cslzchen/osf.io,pattisdr/osf.io,DanielSBrown/osf.io,acshi/osf.io,doublebits/osf.io,icereval/osf.io,adlius/osf.io,acshi/osf.io,CenterForOpenScience/osf.io,jnayak1/osf.io,Johnetordoff/osf.io,adlius/osf.io,crcresearch/osf.io,monikagrabowska/osf.io,mfraezz/osf.io,zamattiac/osf.io,chrisseto/osf.io,acshi/osf.io,baylee-d/osf.io,TomHeatwole/osf.io,samchrisinger/osf.io,samchrisinger/osf.io,saradbowman/osf.io,alexschiller/osf.io,felliott/osf.io,alexschiller/osf.io,emetsger/osf.io,amyshi188/osf.io,jnayak1/osf.io,erinspace/osf.io,mluke93/osf.io,leb2dg/osf.io,brianjgeiger/osf.io,binoculars/osf.io,mluo613/osf.io,chennan47/osf.io,hmoco/osf.io,DanielSBrown/osf.io,CenterForOpenScience/osf.io,caneruguz/osf.io,leb2dg/osf.io,Johnetordoff/osf.io,amyshi188/osf.io,aaxelb/osf.io,crcresearch/osf.io,laurenrevere/osf.io,kch8qx/osf.io,SSJohns/osf.io,laurenrevere/osf.io,monikagrabowska/osf.io,caseyrollins/osf.io,alexschiller/osf.io,chennan47/osf.io,mluke93/osf.io,mluo613/osf.io,mluke93/osf.io,jnayak1/osf.io
|
Add tests for removing "/" from wiki titles
|
from nose.tools import *
from framework.mongo import database as db
from scripts.remove_wiki_title_forward_slashes import main
from tests.base import OsfTestCase
from tests.factories import NodeWikiFactory, ProjectFactory
class TestRemoveWikiTitleForwardSlashes(OsfTestCase):
def test_forward_slash_is_removed_from_wiki_title(self):
project = ProjectFactory()
wiki = NodeWikiFactory(node=project, is_current=True)
invalid_name = 'invalid/name'
db.nodewikipage.update({'_id': wiki._id}, {'$set': {'page_name': invalid_name}})
project.wiki_pages_current['invalid/name'] = project.wiki_pages_current[wiki.page_name]
project.wiki_pages_versions['invalid/name'] = project.wiki_pages_versions[wiki.page_name]
project.save()
main()
wiki.reload()
assert_equal(wiki.page_name, 'invalidname')
assert_in('invalidname', project.wiki_pages_current)
assert_in('invalidname', project.wiki_pages_versions)
def test_valid_wiki_title(self):
project = ProjectFactory()
wiki = NodeWikiFactory(node=project, is_current=True)
page_name = wiki.page_name
main()
wiki.reload()
assert_equal(page_name, wiki.page_name)
assert_in(page_name, project.wiki_pages_current)
assert_in(page_name, project.wiki_pages_versions)
|
<commit_before><commit_msg>Add tests for removing "/" from wiki titles<commit_after>
|
from nose.tools import *
from framework.mongo import database as db
from scripts.remove_wiki_title_forward_slashes import main
from tests.base import OsfTestCase
from tests.factories import NodeWikiFactory, ProjectFactory
class TestRemoveWikiTitleForwardSlashes(OsfTestCase):
def test_forward_slash_is_removed_from_wiki_title(self):
project = ProjectFactory()
wiki = NodeWikiFactory(node=project, is_current=True)
invalid_name = 'invalid/name'
db.nodewikipage.update({'_id': wiki._id}, {'$set': {'page_name': invalid_name}})
project.wiki_pages_current['invalid/name'] = project.wiki_pages_current[wiki.page_name]
project.wiki_pages_versions['invalid/name'] = project.wiki_pages_versions[wiki.page_name]
project.save()
main()
wiki.reload()
assert_equal(wiki.page_name, 'invalidname')
assert_in('invalidname', project.wiki_pages_current)
assert_in('invalidname', project.wiki_pages_versions)
def test_valid_wiki_title(self):
project = ProjectFactory()
wiki = NodeWikiFactory(node=project, is_current=True)
page_name = wiki.page_name
main()
wiki.reload()
assert_equal(page_name, wiki.page_name)
assert_in(page_name, project.wiki_pages_current)
assert_in(page_name, project.wiki_pages_versions)
|
Add tests for removing "/" from wiki titlesfrom nose.tools import *
from framework.mongo import database as db
from scripts.remove_wiki_title_forward_slashes import main
from tests.base import OsfTestCase
from tests.factories import NodeWikiFactory, ProjectFactory
class TestRemoveWikiTitleForwardSlashes(OsfTestCase):
def test_forward_slash_is_removed_from_wiki_title(self):
project = ProjectFactory()
wiki = NodeWikiFactory(node=project, is_current=True)
invalid_name = 'invalid/name'
db.nodewikipage.update({'_id': wiki._id}, {'$set': {'page_name': invalid_name}})
project.wiki_pages_current['invalid/name'] = project.wiki_pages_current[wiki.page_name]
project.wiki_pages_versions['invalid/name'] = project.wiki_pages_versions[wiki.page_name]
project.save()
main()
wiki.reload()
assert_equal(wiki.page_name, 'invalidname')
assert_in('invalidname', project.wiki_pages_current)
assert_in('invalidname', project.wiki_pages_versions)
def test_valid_wiki_title(self):
project = ProjectFactory()
wiki = NodeWikiFactory(node=project, is_current=True)
page_name = wiki.page_name
main()
wiki.reload()
assert_equal(page_name, wiki.page_name)
assert_in(page_name, project.wiki_pages_current)
assert_in(page_name, project.wiki_pages_versions)
|
<commit_before><commit_msg>Add tests for removing "/" from wiki titles<commit_after>from nose.tools import *
from framework.mongo import database as db
from scripts.remove_wiki_title_forward_slashes import main
from tests.base import OsfTestCase
from tests.factories import NodeWikiFactory, ProjectFactory
class TestRemoveWikiTitleForwardSlashes(OsfTestCase):
def test_forward_slash_is_removed_from_wiki_title(self):
project = ProjectFactory()
wiki = NodeWikiFactory(node=project, is_current=True)
invalid_name = 'invalid/name'
db.nodewikipage.update({'_id': wiki._id}, {'$set': {'page_name': invalid_name}})
project.wiki_pages_current['invalid/name'] = project.wiki_pages_current[wiki.page_name]
project.wiki_pages_versions['invalid/name'] = project.wiki_pages_versions[wiki.page_name]
project.save()
main()
wiki.reload()
assert_equal(wiki.page_name, 'invalidname')
assert_in('invalidname', project.wiki_pages_current)
assert_in('invalidname', project.wiki_pages_versions)
def test_valid_wiki_title(self):
project = ProjectFactory()
wiki = NodeWikiFactory(node=project, is_current=True)
page_name = wiki.page_name
main()
wiki.reload()
assert_equal(page_name, wiki.page_name)
assert_in(page_name, project.wiki_pages_current)
assert_in(page_name, project.wiki_pages_versions)
|
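The module under test, scripts.remove_wiki_title_forward_slashes, is not included in this record. A plausible minimal core consistent with the assertions above would drop forward slashes from page names and from the wiki dictionaries' keys; this is a sketch under that assumption, not the actual script.

def strip_slashes(name):
    # 'invalid/name' -> 'invalidname', matching the expected values in the test
    return name.replace('/', '')

def rename_wiki_keys(pages):
    """Return a copy of a {title: data} mapping with '/' removed from keys."""
    return {strip_slashes(key): value for key, value in pages.items()}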
|
dbaca46d0f5a852e22d056a261f432b501d70d14
|
openstack_auth/tests/unit/test_password.py
|
openstack_auth/tests/unit/test_password.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from django import test
from openstack_auth import forms
from openstack_auth.tests.unit.test_auth import IsA
class ChangePasswordTests(test.TestCase):
@test.override_settings(
ALLOW_USERS_CHANGE_EXPIRED_PASSWORD=True,
AVAILABLE_REGIONS=[
("x", 'region1'),
("y", 'region2'),
], # we need at least two regions for the choice field to be visible
)
def test_change_password(self):
form_data = {
'region': '0',
'original_password': 'oldpwd',
'password': 'normalpwd',
'confirm_password': 'normalpwd',
}
initial = {
'user_id': 'user',
'region': '0',
}
form = forms.Password(form_data, initial=initial)
client = mock.Mock()
with mock.patch(
'openstack_auth.utils.get_session',
return_value=mock.sentinel.session
) as mock_get_session:
with mock.patch(
'openstack_auth.utils.get_keystone_client',
return_value=client
) as mock_get_keystone_client:
form.is_valid()
self.assertFalse(form.errors)
mock_get_session.assert_called_once_with(auth=IsA(forms.DummyAuth))
mock_get_keystone_client.assert_called_once_with()
client.Client.assert_called_once_with(
session=mock.sentinel.session,
user_id='user',
auth_url='x',
endpoint='x',
)
@test.override_settings(
AVAILABLE_REGIONS=[
("x", 'region1'),
("y", 'region2'),
],
)
def test_change_password_with_error(self):
form_data = {
'region': '0',
'original_password': 'oldpwd',
'password': 'normalpwd',
'confirm_password': 'normalpwd1',
}
initial = {
'user_id': 'user',
'region': '0',
}
form = forms.Password(form_data, initial=initial)
self.assertTrue(form.errors)
self.assertIn(['Passwords do not match.'], form.errors.values())
|
Add a unit test for the password change form
|
Add a unit test for the password change form
Change-Id: I5eeacefc3a0bd7d7f958f00befeb18e949c789db
|
Python
|
apache-2.0
|
openstack/horizon,openstack/horizon,openstack/horizon,openstack/horizon
|
Add a unit test for the password change form
Change-Id: I5eeacefc3a0bd7d7f958f00befeb18e949c789db
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from django import test
from openstack_auth import forms
from openstack_auth.tests.unit.test_auth import IsA
class ChangePasswordTests(test.TestCase):
@test.override_settings(
ALLOW_USERS_CHANGE_EXPIRED_PASSWORD=True,
AVAILABLE_REGIONS=[
("x", 'region1'),
("y", 'region2'),
], # we need at least two regions for the choice field to be visible
)
def test_change_password(self):
form_data = {
'region': '0',
'original_password': 'oldpwd',
'password': 'normalpwd',
'confirm_password': 'normalpwd',
}
initial = {
'user_id': 'user',
'region': '0',
}
form = forms.Password(form_data, initial=initial)
client = mock.Mock()
with mock.patch(
'openstack_auth.utils.get_session',
return_value=mock.sentinel.session
) as mock_get_session:
with mock.patch(
'openstack_auth.utils.get_keystone_client',
return_value=client
) as mock_get_keystone_client:
form.is_valid()
self.assertFalse(form.errors)
mock_get_session.assert_called_once_with(auth=IsA(forms.DummyAuth))
mock_get_keystone_client.assert_called_once_with()
client.Client.assert_called_once_with(
session=mock.sentinel.session,
user_id='user',
auth_url='x',
endpoint='x',
)
@test.override_settings(
AVAILABLE_REGIONS=[
("x", 'region1'),
("y", 'region2'),
],
)
def test_change_password_with_error(self):
form_data = {
'region': '0',
'original_password': 'oldpwd',
'password': 'normalpwd',
'confirm_password': 'normalpwd1',
}
initial = {
'user_id': 'user',
'region': '0',
}
form = forms.Password(form_data, initial=initial)
self.assertTrue(form.errors)
self.assertIn(['Passwords do not match.'], form.errors.values())
|
<commit_before><commit_msg>Add a unit test for the password change form
Change-Id: I5eeacefc3a0bd7d7f958f00befeb18e949c789db<commit_after>
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from django import test
from openstack_auth import forms
from openstack_auth.tests.unit.test_auth import IsA
class ChangePasswordTests(test.TestCase):
@test.override_settings(
ALLOW_USERS_CHANGE_EXPIRED_PASSWORD=True,
AVAILABLE_REGIONS=[
("x", 'region1'),
("y", 'region2'),
], # we need at least two regions for the choice field to be visible
)
def test_change_password(self):
form_data = {
'region': '0',
'original_password': 'oldpwd',
'password': 'normalpwd',
'confirm_password': 'normalpwd',
}
initial = {
'user_id': 'user',
'region': '0',
}
form = forms.Password(form_data, initial=initial)
client = mock.Mock()
with mock.patch(
'openstack_auth.utils.get_session',
return_value=mock.sentinel.session
) as mock_get_session:
with mock.patch(
'openstack_auth.utils.get_keystone_client',
return_value=client
) as mock_get_keystone_client:
form.is_valid()
self.assertFalse(form.errors)
mock_get_session.assert_called_once_with(auth=IsA(forms.DummyAuth))
mock_get_keystone_client.assert_called_once_with()
client.Client.assert_called_once_with(
session=mock.sentinel.session,
user_id='user',
auth_url='x',
endpoint='x',
)
@test.override_settings(
AVAILABLE_REGIONS=[
("x", 'region1'),
("y", 'region2'),
],
)
def test_change_password_with_error(self):
form_data = {
'region': '0',
'original_password': 'oldpwd',
'password': 'normalpwd',
'confirm_password': 'normalpwd1',
}
initial = {
'user_id': 'user',
'region': '0',
}
form = forms.Password(form_data, initial=initial)
self.assertTrue(form.errors)
self.assertIn(['Passwords do not match.'], form.errors.values())
|
Add a unit test for the password change form
Change-Id: I5eeacefc3a0bd7d7f958f00befeb18e949c789db# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from django import test
from openstack_auth import forms
from openstack_auth.tests.unit.test_auth import IsA
class ChangePasswordTests(test.TestCase):
@test.override_settings(
ALLOW_USERS_CHANGE_EXPIRED_PASSWORD=True,
AVAILABLE_REGIONS=[
("x", 'region1'),
("y", 'region2'),
], # we need at least two regions for the choice field to be visible
)
def test_change_password(self):
form_data = {
'region': '0',
'original_password': 'oldpwd',
'password': 'normalpwd',
'confirm_password': 'normalpwd',
}
initial = {
'user_id': 'user',
'region': '0',
}
form = forms.Password(form_data, initial=initial)
client = mock.Mock()
with mock.patch(
'openstack_auth.utils.get_session',
return_value=mock.sentinel.session
) as mock_get_session:
with mock.patch(
'openstack_auth.utils.get_keystone_client',
return_value=client
) as mock_get_keystone_client:
form.is_valid()
self.assertFalse(form.errors)
mock_get_session.assert_called_once_with(auth=IsA(forms.DummyAuth))
mock_get_keystone_client.assert_called_once_with()
client.Client.assert_called_once_with(
session=mock.sentinel.session,
user_id='user',
auth_url='x',
endpoint='x',
)
@test.override_settings(
AVAILABLE_REGIONS=[
("x", 'region1'),
("y", 'region2'),
],
)
def test_change_password_with_error(self):
form_data = {
'region': '0',
'original_password': 'oldpwd',
'password': 'normalpwd',
'confirm_password': 'normalpwd1',
}
initial = {
'user_id': 'user',
'region': '0',
}
form = forms.Password(form_data, initial=initial)
self.assertTrue(form.errors)
self.assertIn(['Passwords do not match.'], form.errors.values())
|
<commit_before><commit_msg>Add a unit test for the password change form
Change-Id: I5eeacefc3a0bd7d7f958f00befeb18e949c789db<commit_after># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from django import test
from openstack_auth import forms
from openstack_auth.tests.unit.test_auth import IsA
class ChangePasswordTests(test.TestCase):
@test.override_settings(
ALLOW_USERS_CHANGE_EXPIRED_PASSWORD=True,
AVAILABLE_REGIONS=[
("x", 'region1'),
("y", 'region2'),
], # we need at least two regions for the choice field to be visible
)
def test_change_password(self):
form_data = {
'region': '0',
'original_password': 'oldpwd',
'password': 'normalpwd',
'confirm_password': 'normalpwd',
}
initial = {
'user_id': 'user',
'region': '0',
}
form = forms.Password(form_data, initial=initial)
client = mock.Mock()
with mock.patch(
'openstack_auth.utils.get_session',
return_value=mock.sentinel.session
) as mock_get_session:
with mock.patch(
'openstack_auth.utils.get_keystone_client',
return_value=client
) as mock_get_keystone_client:
form.is_valid()
self.assertFalse(form.errors)
mock_get_session.assert_called_once_with(auth=IsA(forms.DummyAuth))
mock_get_keystone_client.assert_called_once_with()
client.Client.assert_called_once_with(
session=mock.sentinel.session,
user_id='user',
auth_url='x',
endpoint='x',
)
@test.override_settings(
AVAILABLE_REGIONS=[
("x", 'region1'),
("y", 'region2'),
],
)
def test_change_password_with_error(self):
form_data = {
'region': '0',
'original_password': 'oldpwd',
'password': 'normalpwd',
'confirm_password': 'normalpwd1',
}
initial = {
'user_id': 'user',
'region': '0',
}
form = forms.Password(form_data, initial=initial)
self.assertTrue(form.errors)
self.assertIn(['Passwords do not match.'], form.errors.values())
|
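A small readability note on the test above: the two nested mock.patch context managers can be written as a single with statement (supported since Python 2.7/3.1) with identical behaviour. The snippet assumes openstack_auth is importable and reuses the client and form names built in the test body.

from unittest import mock

with mock.patch('openstack_auth.utils.get_session',
                return_value=mock.sentinel.session) as mock_get_session, \
     mock.patch('openstack_auth.utils.get_keystone_client',
                return_value=client) as mock_get_keystone_client:
    form.is_valid()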
|
c2dff4281ec50786eb82e729f1d6d5ce69376046
|
notes/s3utils.py
|
notes/s3utils.py
|
from storages.backends.s3boto import S3BotoStorage
StaticRootS3BotoStorage = lambda: S3BotoStorage(location='static')
MediaRootS3BotoStorage = lambda: S3BotoStorage(location='media')
|
Add missing S3 boto storage classes
|
Add missing S3 boto storage classes
|
Python
|
mit
|
creynaud/notes-server
|
Add missing S3 boto storage classes
|
from storages.backends.s3boto import S3BotoStorage
StaticRootS3BotoStorage = lambda: S3BotoStorage(location='static')
MediaRootS3BotoStorage = lambda: S3BotoStorage(location='media')
|
<commit_before><commit_msg>Add missing S3 boto storage classes<commit_after>
|
from storages.backends.s3boto import S3BotoStorage
StaticRootS3BotoStorage = lambda: S3BotoStorage(location='static')
MediaRootS3BotoStorage = lambda: S3BotoStorage(location='media')
|
Add missing S3 boto storage classesfrom storages.backends.s3boto import S3BotoStorage
StaticRootS3BotoStorage = lambda: S3BotoStorage(location='static')
MediaRootS3BotoStorage = lambda: S3BotoStorage(location='media')
|
<commit_before><commit_msg>Add missing S3 boto storage classes<commit_after>from storages.backends.s3boto import S3BotoStorage
StaticRootS3BotoStorage = lambda: S3BotoStorage(location='static')
MediaRootS3BotoStorage = lambda: S3BotoStorage(location='media')
|
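For context, factories like these are normally referenced by dotted path from Django settings. A sketch of the usual wiring; the module path follows this record's file name and the bucket host is a placeholder, so treat both as assumptions.

# settings.py (sketch)
STATICFILES_STORAGE = 'notes.s3utils.StaticRootS3BotoStorage'
DEFAULT_FILE_STORAGE = 'notes.s3utils.MediaRootS3BotoStorage'
STATIC_URL = 'https://your-bucket.s3.amazonaws.com/static/'
MEDIA_URL = 'https://your-bucket.s3.amazonaws.com/media/'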
|
7d32f720f45903761c05d4b622705551c742c425
|
profile_dgim.py
|
profile_dgim.py
|
import time
from dgim.utils import generate_random_stream
from dgim.dgim import Dgim
def profile_dgim(dgim, stream):
for elt in stream:
dgim.update(elt)
def main():
N = 100
r = 2
length = 1000000
dgim = Dgim(N=N, r=r)
stream = generate_random_stream(length=length)
time_start = time.time()
profile_dgim(dgim, stream)
time_stop = time.time()
print "Took: {}s".format(time_stop - time_start)
if __name__ == "__main__":
main()
|
Create a script to profile code.
|
Create a script to profile code.
|
Python
|
bsd-3-clause
|
simondolle/dgim,simondolle/dgim
|
Create a script to profile code.
|
import time
from dgim.utils import generate_random_stream
from dgim.dgim import Dgim
def profile_dgim(dgim, stream):
for elt in stream:
dgim.update(elt)
def main():
N = 100
r = 2
length = 1000000
dgim = Dgim(N=N, r=r)
stream = generate_random_stream(length=length)
time_start = time.time()
profile_dgim(dgim, stream)
time_stop = time.time()
print "Took: {}s".format(time_stop - time_start)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Create a script to profile code.<commit_after>
|
import time
from dgim.utils import generate_random_stream
from dgim.dgim import Dgim
def profile_dgim(dgim, stream):
for elt in stream:
dgim.update(elt)
def main():
N = 100
r = 2
length = 1000000
dgim = Dgim(N=N, r=r)
stream = generate_random_stream(length=length)
time_start = time.time()
profile_dgim(dgim, stream)
time_stop = time.time()
print "Took: {}s".format(time_stop - time_start)
if __name__ == "__main__":
main()
|
Create a script to profile code.import time
from dgim.utils import generate_random_stream
from dgim.dgim import Dgim
def profile_dgim(dgim, stream):
for elt in stream:
dgim.update(elt)
def main():
N = 100
r = 2
length = 1000000
dgim = Dgim(N=N, r=r)
stream = generate_random_stream(length=length)
time_start = time.time()
profile_dgim(dgim, stream)
time_stop = time.time()
print "Took: {}s".format(time_stop - time_start)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Create a script to profile code.<commit_after>import time
from dgim.utils import generate_random_stream
from dgim.dgim import Dgim
def profile_dgim(dgim, stream):
for elt in stream:
dgim.update(elt)
def main():
N = 100
r = 2
length = 1000000
dgim = Dgim(N=N, r=r)
stream = generate_random_stream(length=length)
time_start = time.time()
profile_dgim(dgim, stream)
time_stop = time.time()
print "Took: {}s".format(time_stop - time_start)
if __name__ == "__main__":
main()
|
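The script above reports one wall-clock number; for a per-function breakdown the standard-library profiler can wrap the same loop. A sketch, assuming the dgim package from this record is importable:

import cProfile
import pstats

from dgim.dgim import Dgim
from dgim.utils import generate_random_stream

def profile_with_cprofile(N=100, r=2, length=1000000):
    dgim = Dgim(N=N, r=r)
    stream = generate_random_stream(length=length)
    profiler = cProfile.Profile()
    profiler.enable()
    for elt in stream:
        dgim.update(elt)  # same hot loop as profile_dgim above
    profiler.disable()
    pstats.Stats(profiler).sort_stats('cumulative').print_stats(10)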
|
431ca01ca4d62c68c3e8ab858138f5fa9b1f2d4c
|
validity.py
|
validity.py
|
import pandas as pd
import numpy as np
import operator,os
def extract( file_name ):
with open(file_name) as f:
for i,line in enumerate(f,1):
if "SCN" in line:
return i
def main(lta_file):
os.system('ltahdr -i ' + lta_file + '> lta_header')
dictionary = {}
    skipped_rows = extract('lta_header')-1
    header = pd.read_csv('lta_header',skiprows=skipped_rows,delimiter=r"\s+")
flux = list(set(header["OBJECT"]))
header['Nrecs'] = header['Nrecs'].astype(float)
for i in flux :
temp = header.loc[header.OBJECT==i,'Nrecs'].values
temp = np.mean(temp)
dictionary[i]=temp
source = max(dictionary.iteritems(),key=operator.itemgetter(1))[0]
return source
|
Add file to extract source name from LTA header
|
Add file to extract source name from LTA header
|
Python
|
mit
|
NCRA-TIFR/gadpu,NCRA-TIFR/gadpu
|
Add file to extract source name from LTA header
|
import pandas as pd
import numpy as np
import operator,os
def extract( file_name ):
with open(file_name) as f:
for i,line in enumerate(f,1):
if "SCN" in line:
return i
def main(lta_file):
os.system('ltahdr -i ' + lta_file + '> lta_header')
dictionary = {}
    skipped_rows = extract('lta_header')-1
    header = pd.read_csv('lta_header',skiprows=skipped_rows,delimiter=r"\s+")
flux = list(set(header["OBJECT"]))
header['Nrecs'] = header['Nrecs'].astype(float)
for i in flux :
temp = header.loc[header.OBJECT==i,'Nrecs'].values
temp = np.mean(temp)
dictionary[i]=temp
source = max(dictionary.iteritems(),key=operator.itemgetter(1))[0]
return source
|
<commit_before><commit_msg>Add file to extract source name from LTA header<commit_after>
|
import pandas as pd
import numpy as np
import operator,os
def extract( file_name ):
with open(file_name) as f:
for i,line in enumerate(f,1):
if "SCN" in line:
return i
def main(lta_file):
os.system('ltahdr -i ' + lta_file + '> lta_header')
dictionary = {}
    skipped_rows = extract('lta_header')-1
    header = pd.read_csv('lta_header',skiprows=skipped_rows,delimiter=r"\s+")
flux = list(set(header["OBJECT"]))
header['Nrecs'] = header['Nrecs'].astype(float)
for i in flux :
temp = header.loc[header.OBJECT==i,'Nrecs'].values
temp = np.mean(temp)
dictionary[i]=temp
source = max(dictionary.iteritems(),key=operator.itemgetter(1))[0]
return source
|
Add file to extract source name from LTA headerimport pandas as pd
import numpy as np
import operator,os
def extract( file_name ):
with open(file_name) as f:
for i,line in enumerate(f,1):
if "SCN" in line:
return i
def main(lta_file):
os.system('ltahdr -i ' + lta_file + '> lta_header')
dictionary = {}
    skipped_rows = extract('lta_header')-1
    header = pd.read_csv('lta_header',skiprows=skipped_rows,delimiter=r"\s+")
flux = list(set(header["OBJECT"]))
header['Nrecs'] = header['Nrecs'].astype(float)
for i in flux :
temp = header.loc[header.OBJECT==i,'Nrecs'].values
temp = np.mean(temp)
dictionary[i]=temp
source = max(dictionary.iteritems(),key=operator.itemgetter(1))[0]
return source
|
<commit_before><commit_msg>Add file to extract source name from LTA header<commit_after>import pandas as pd
import numpy as np
import operator,os
def extract( file_name ):
with open(file_name) as f:
for i,line in enumerate(f,1):
if "SCN" in line:
return i
def main(lta_file):
os.system('ltahdr -i ' + lta_file + '> lta_header')
dictionary = {}
    skipped_rows = extract('lta_header')-1
    header = pd.read_csv('lta_header',skiprows=skipped_rows,delimiter=r"\s+")
flux = list(set(header["OBJECT"]))
header['Nrecs'] = header['Nrecs'].astype(float)
for i in flux :
temp = header.loc[header.OBJECT==i,'Nrecs'].values
temp = np.mean(temp)
dictionary[i]=temp
source = max(dictionary.iteritems(),key=operator.itemgetter(1))[0]
return source
|
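Note that dict.iteritems() pins the record above to Python 2; the Python 3 spelling of the final selection (the source with the highest mean Nrecs) is:

source = max(dictionary.items(), key=operator.itemgetter(1))[0]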
|
4fa68b92ef31ff4b95d846d08a1259d2ccba5670
|
test/selenium/src/run_selenium.py
|
test/selenium/src/run_selenium.py
|
#!/usr/bin/env python2.7
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
import sys
import os
import commands
import logging
import pytest
from lib import constants
from lib import file_ops
from lib import log
from lib import virtual_env
from lib import environment
PROJECT_ROOT_PATH = os.path.dirname(os.path.abspath(__file__)) + "/../"
logger = logging.getLogger("selenium.webdriver.remote.remote_connection")
if __name__ == "__main__":
file_ops.create_directory(environment.LOG_PATH)
file_ops.delete_directory_contents(environment.LOG_PATH)
log.set_default_file_handler(
logger,
PROJECT_ROOT_PATH + constants.path.LOGS_DIR +
constants.path.TEST_RUNNER
)
logger.setLevel(environment.LOGGING_LEVEL)
pytest.main()
|
Add new test runner for selenium tests
|
Add new test runner for selenium tests
This is an extra test runner that is meant only for use inside docker
containers. The original test runner will remain until we handle the
screen capture of the test runs, so we can run the tests on our local
machine and view the browser window.
|
Python
|
apache-2.0
|
prasannav7/ggrc-core,plamut/ggrc-core,kr41/ggrc-core,josthkko/ggrc-core,VinnieJohns/ggrc-core,jmakov/ggrc-core,j0gurt/ggrc-core,jmakov/ggrc-core,selahssea/ggrc-core,NejcZupec/ggrc-core,plamut/ggrc-core,josthkko/ggrc-core,andrei-karalionak/ggrc-core,selahssea/ggrc-core,AleksNeStu/ggrc-core,AleksNeStu/ggrc-core,andrei-karalionak/ggrc-core,j0gurt/ggrc-core,josthkko/ggrc-core,AleksNeStu/ggrc-core,jmakov/ggrc-core,kr41/ggrc-core,NejcZupec/ggrc-core,andrei-karalionak/ggrc-core,AleksNeStu/ggrc-core,plamut/ggrc-core,kr41/ggrc-core,NejcZupec/ggrc-core,prasannav7/ggrc-core,edofic/ggrc-core,edofic/ggrc-core,VinnieJohns/ggrc-core,kr41/ggrc-core,selahssea/ggrc-core,VinnieJohns/ggrc-core,j0gurt/ggrc-core,josthkko/ggrc-core,edofic/ggrc-core,andrei-karalionak/ggrc-core,prasannav7/ggrc-core,selahssea/ggrc-core,VinnieJohns/ggrc-core,prasannav7/ggrc-core,jmakov/ggrc-core,edofic/ggrc-core,jmakov/ggrc-core,plamut/ggrc-core,NejcZupec/ggrc-core,j0gurt/ggrc-core
|
Add new test runner for selenium tests
This is an extra test runner that is meant only for use inside docker
containers. The original test runner will remain until we handle the
screen capture of the test runs, so we can run the tests on our local
machine and view the browser window.
|
#!/usr/bin/env python2.7
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
import sys
import os
import commands
import logging
import pytest
from lib import constants
from lib import file_ops
from lib import log
from lib import virtual_env
from lib import environment
PROJECT_ROOT_PATH = os.path.dirname(os.path.abspath(__file__)) + "/../"
logger = logging.getLogger("selenium.webdriver.remote.remote_connection")
if __name__ == "__main__":
file_ops.create_directory(environment.LOG_PATH)
file_ops.delete_directory_contents(environment.LOG_PATH)
log.set_default_file_handler(
logger,
PROJECT_ROOT_PATH + constants.path.LOGS_DIR +
constants.path.TEST_RUNNER
)
logger.setLevel(environment.LOGGING_LEVEL)
pytest.main()
|
<commit_before><commit_msg>Add new test runner for selenium tests
This is an extra test runner that is meant only for use inside docker
containers. The original test runner will remain until we handle the
screen capture of the test runs, so we can run the tests on our local
machine and view the browser window.<commit_after>
|
#!/usr/bin/env python2.7
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
import sys
import os
import commands
import logging
import pytest
from lib import constants
from lib import file_ops
from lib import log
from lib import virtual_env
from lib import environment
PROJECT_ROOT_PATH = os.path.dirname(os.path.abspath(__file__)) + "/../"
logger = logging.getLogger("selenium.webdriver.remote.remote_connection")
if __name__ == "__main__":
file_ops.create_directory(environment.LOG_PATH)
file_ops.delete_directory_contents(environment.LOG_PATH)
log.set_default_file_handler(
logger,
PROJECT_ROOT_PATH + constants.path.LOGS_DIR +
constants.path.TEST_RUNNER
)
logger.setLevel(environment.LOGGING_LEVEL)
pytest.main()
|
Add new test runner for selenium tests
This is an extra test runner that is meant only for use inside docker
containers. The original test runner will remain until we handle the
screen capture of the test runs, so we can run the tests on our local
machine and view the browser window.#!/usr/bin/env python2.7
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
import sys
import os
import commands
import logging
import pytest
from lib import constants
from lib import file_ops
from lib import log
from lib import virtual_env
from lib import environment
PROJECT_ROOT_PATH = os.path.dirname(os.path.abspath(__file__)) + "/../"
logger = logging.getLogger("selenium.webdriver.remote.remote_connection")
if __name__ == "__main__":
file_ops.create_directory(environment.LOG_PATH)
file_ops.delete_directory_contents(environment.LOG_PATH)
log.set_default_file_handler(
logger,
PROJECT_ROOT_PATH + constants.path.LOGS_DIR +
constants.path.TEST_RUNNER
)
logger.setLevel(environment.LOGGING_LEVEL)
pytest.main()
|
<commit_before><commit_msg>Add new test runner for selenium tests
This is an extra test runner that is meant only for use inside docker
containers. The original test runner will remain until we handle the
screen capture of the test runs, so we can run the tests on our local
machine and view the browser window.<commit_after>#!/usr/bin/env python2.7
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
import sys
import os
import commands
import logging
import pytest
from lib import constants
from lib import file_ops
from lib import log
from lib import virtual_env
from lib import environment
PROJECT_ROOT_PATH = os.path.dirname(os.path.abspath(__file__)) + "/../"
logger = logging.getLogger("selenium.webdriver.remote.remote_connection")
if __name__ == "__main__":
file_ops.create_directory(environment.LOG_PATH)
file_ops.delete_directory_contents(environment.LOG_PATH)
log.set_default_file_handler(
logger,
PROJECT_ROOT_PATH + constants.path.LOGS_DIR +
constants.path.TEST_RUNNER
)
logger.setLevel(environment.LOGGING_LEVEL)
pytest.main()
|
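pytest.main() with no arguments falls back to sys.argv; inside a container it can help to pin the arguments explicitly. A sketch only, since the test directory and report file name below are assumptions, not values from this record:

pytest.main(['tests/', '--junitxml=' + os.path.join(environment.LOG_PATH, 'results.xml')])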
|
01b6ec639eb35c2d9b978839442481afc1f66422
|
quiz/4-substr.py
|
quiz/4-substr.py
|
#!/usr/bin/env python3
def arr_from_str(text):
arr = list()
for i in text:
if ord('A') <= ord(i) <= ord('Z'):
arr.append(ord(i) - ord('A'))
return arr
def solve_q1(pat, radix, target):
dfa = [[0] * radix]
dfa[0][pat[0]] = 1
pre = 0
for i in range(1, len(pat)):
v = list()
for j in range(radix):
v.append(dfa[pre][j])
v[pat[i]] = i + 1
pre = dfa[pre][pat[i]]
dfa.append(v)
for i in dfa:
print(i[target], end=' ')
print()
def solve_q2(txt, pat):
fallback = [-1] * 26
for i in range(len(pat)):
fallback[pat[i]] = i
answer = ''
i = 0
while i < len(txt) - len(pat):
f = 0
answer += chr(ord('A') + txt[i + len(pat) - 1])
for j in range(len(pat) - 1, -1, -1):
if pat[j] == txt[i + j]:
continue
f = j - fallback[txt[i + j]]
if f < 1:
f = 1
break
if f == 0:
print(' '.join(answer))
return
i += f
def solve_q3(head, tail, hash, Q, RM):
print(((hash + Q - head * RM % Q) * 10 + tail) % Q)
q1 = 'C C B C C C B A '
q1_target = 'C'
q2_pat = 'Y F A T H E R '
q2_txt = 'B R O T H E R T H A T F A T H E R W A S M Y F A T H E R T H '
q3_head = 7
q3_tail = 4
q3_hash = 35
q3_Q = 71
q3_RM = 5
solve_q1(arr_from_str(q1), 3, ord(q1_target) - ord('A'))
solve_q2(arr_from_str(q2_txt), arr_from_str(q2_pat))
solve_q3(q3_head, q3_tail, q3_hash, q3_Q, q3_RM)
|
Add autosolver for quiz in week 4.
|
Add autosolver for quiz in week 4.
|
Python
|
mit
|
hghwng/mooc-algs2,hghwng/mooc-algs2
|
Add autosolver for quiz in week 4.
|
#!/usr/bin/env python3
def arr_from_str(text):
arr = list()
for i in text:
if ord('A') <= ord(i) <= ord('Z'):
arr.append(ord(i) - ord('A'))
return arr
def solve_q1(pat, radix, target):
dfa = [[0] * radix]
dfa[0][pat[0]] = 1
pre = 0
for i in range(1, len(pat)):
v = list()
for j in range(radix):
v.append(dfa[pre][j])
v[pat[i]] = i + 1
pre = dfa[pre][pat[i]]
dfa.append(v)
for i in dfa:
print(i[target], end=' ')
print()
def solve_q2(txt, pat):
fallback = [-1] * 26
for i in range(len(pat)):
fallback[pat[i]] = i
answer = ''
i = 0
while i < len(txt) - len(pat):
f = 0
answer += chr(ord('A') + txt[i + len(pat) - 1])
for j in range(len(pat) - 1, -1, -1):
if pat[j] == txt[i + j]:
continue
f = j - fallback[txt[i + j]]
if f < 1:
f = 1
break
if f == 0:
print(' '.join(answer))
return
i += f
def solve_q3(head, tail, hash, Q, RM):
print(((hash + Q - head * RM % Q) * 10 + tail) % Q)
q1 = 'C C B C C C B A '
q1_target = 'C'
q2_pat = 'Y F A T H E R '
q2_txt = 'B R O T H E R T H A T F A T H E R W A S M Y F A T H E R T H '
q3_head = 7
q3_tail = 4
q3_hash = 35
q3_Q = 71
q3_RM = 5
solve_q1(arr_from_str(q1), 3, ord(q1_target) - ord('A'))
solve_q2(arr_from_str(q2_txt), arr_from_str(q2_pat))
solve_q3(q3_head, q3_tail, q3_hash, q3_Q, q3_RM)
|
<commit_before><commit_msg>Add autosolver for quiz in week 4.<commit_after>
|
#!/usr/bin/env python3
def arr_from_str(text):
arr = list()
for i in text:
if ord('A') <= ord(i) <= ord('Z'):
arr.append(ord(i) - ord('A'))
return arr
def solve_q1(pat, radix, target):
dfa = [[0] * radix]
dfa[0][pat[0]] = 1
pre = 0
for i in range(1, len(pat)):
v = list()
for j in range(radix):
v.append(dfa[pre][j])
v[pat[i]] = i + 1
pre = dfa[pre][pat[i]]
dfa.append(v)
for i in dfa:
print(i[target], end=' ')
print()
def solve_q2(txt, pat):
fallback = [-1] * 26
for i in range(len(pat)):
fallback[pat[i]] = i
answer = ''
i = 0
while i < len(txt) - len(pat):
f = 0
answer += chr(ord('A') + txt[i + len(pat) - 1])
for j in range(len(pat) - 1, -1, -1):
if pat[j] == txt[i + j]:
continue
f = j - fallback[txt[i + j]]
if f < 1:
f = 1
break
if f == 0:
print(' '.join(answer))
return
i += f
def solve_q3(head, tail, hash, Q, RM):
print(((hash + Q - head * RM % Q) * 10 + tail) % Q)
q1 = 'C C B C C C B A '
q1_target = 'C'
q2_pat = 'Y F A T H E R '
q2_txt = 'B R O T H E R T H A T F A T H E R W A S M Y F A T H E R T H '
q3_head = 7
q3_tail = 4
q3_hash = 35
q3_Q = 71
q3_RM = 5
solve_q1(arr_from_str(q1), 3, ord(q1_target) - ord('A'))
solve_q2(arr_from_str(q2_txt), arr_from_str(q2_pat))
solve_q3(q3_head, q3_tail, q3_hash, q3_Q, q3_RM)
|
Add autosolver for quiz in week 4.#!/usr/bin/env python3
def arr_from_str(text):
arr = list()
for i in text:
if ord('A') <= ord(i) <= ord('Z'):
arr.append(ord(i) - ord('A'))
return arr
def solve_q1(pat, radix, target):
dfa = [[0] * radix]
dfa[0][pat[0]] = 1
pre = 0
for i in range(1, len(pat)):
v = list()
for j in range(radix):
v.append(dfa[pre][j])
v[pat[i]] = i + 1
pre = dfa[pre][pat[i]]
dfa.append(v)
for i in dfa:
print(i[target], end=' ')
print()
def solve_q2(txt, pat):
fallback = [-1] * 26
for i in range(len(pat)):
fallback[pat[i]] = i
answer = ''
i = 0
while i < len(txt) - len(pat):
f = 0
answer += chr(ord('A') + txt[i + len(pat) - 1])
for j in range(len(pat) - 1, -1, -1):
if pat[j] == txt[i + j]:
continue
f = j - fallback[txt[i + j]]
if f < 1:
f = 1
break
if f == 0:
print(' '.join(answer))
return
i += f
def solve_q3(head, tail, hash, Q, RM):
print(((hash + Q - head * RM % Q) * 10 + tail) % Q)
q1 = 'C C B C C C B A '
q1_target = 'C'
q2_pat = 'Y F A T H E R '
q2_txt = 'B R O T H E R T H A T F A T H E R W A S M Y F A T H E R T H '
q3_head = 7
q3_tail = 4
q3_hash = 35
q3_Q = 71
q3_RM = 5
solve_q1(arr_from_str(q1), 3, ord(q1_target) - ord('A'))
solve_q2(arr_from_str(q2_txt), arr_from_str(q2_pat))
solve_q3(q3_head, q3_tail, q3_hash, q3_Q, q3_RM)
|
<commit_before><commit_msg>Add autosolver for quiz in week 4.<commit_after>#!/usr/bin/env python3
def arr_from_str(text):
arr = list()
for i in text:
if ord('A') <= ord(i) <= ord('Z'):
arr.append(ord(i) - ord('A'))
return arr
def solve_q1(pat, radix, target):
dfa = [[0] * radix]
dfa[0][pat[0]] = 1
pre = 0
for i in range(1, len(pat)):
v = list()
for j in range(radix):
v.append(dfa[pre][j])
v[pat[i]] = i + 1
pre = dfa[pre][pat[i]]
dfa.append(v)
for i in dfa:
print(i[target], end=' ')
print()
def solve_q2(txt, pat):
fallback = [-1] * 26
for i in range(len(pat)):
fallback[pat[i]] = i
answer = ''
i = 0
while i < len(txt) - len(pat):
f = 0
answer += chr(ord('A') + txt[i + len(pat) - 1])
for j in range(len(pat) - 1, -1, -1):
if pat[j] == txt[i + j]:
continue
f = j - fallback[txt[i + j]]
if f < 1:
f = 1
break
if f == 0:
print(' '.join(answer))
return
i += f
def solve_q3(head, tail, hash, Q, RM):
print(((hash + Q - head * RM % Q) * 10 + tail) % Q)
q1 = 'C C B C C C B A '
q1_target = 'C'
q2_pat = 'Y F A T H E R '
q2_txt = 'B R O T H E R T H A T F A T H E R W A S M Y F A T H E R T H '
q3_head = 7
q3_tail = 4
q3_hash = 35
q3_Q = 71
q3_RM = 5
solve_q1(arr_from_str(q1), 3, ord(q1_target) - ord('A'))
solve_q2(arr_from_str(q2_txt), arr_from_str(q2_pat))
solve_q3(q3_head, q3_tail, q3_hash, q3_Q, q3_RM)
|
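For the record above, solve_q3 is the Rabin-Karp rolling-hash update: remove the leading digit's contribution (head * R**(m-1) mod Q, with R**(m-1) mod Q precomputed as RM), shift by the radix, append the trailing digit, and reduce mod Q. Restated with the quiz numbers, just to double-check the arithmetic:

def roll(hash_value, head, tail, Q=71, RM=5, radix=10):
    # (35 + 71 - 7*5 % 71) * 10 + 4 = 71 * 10 + 4 = 714, and 714 % 71 = 4
    return ((hash_value + Q - head * RM % Q) * radix + tail) % Q

assert roll(35, 7, 4) == 4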
|
7b1108b596a48ab1d837aa0a3bba07e6d50abe32
|
tests/test_classifier.py
|
tests/test_classifier.py
|
import pytest
import numpy as np
from sklearn.utils.estimator_checks import check_estimator
from sklearn.model_selection import train_test_split
import legendary_potato.classifiers as classifiers
def classifier_iterator():
"""Return an iterator over classifier.
"""
return (classifiers.svdd,)
def two_class_generator(random_state=1576):
"""Generate classic multidimensional toy set.
    For each label, a set of centers is generated following a normal
    distribution. A set of samples is then generated following a normal
    distribution centered on these centers.
The generated set is a 2-class balanced dataset.
"""
    np.random.seed(random_state)
for dim in range(1, 10):
orig = np.zeros(dim)
orig[0] = -1
a_centers = np.random.normal(orig, 2, size=(3, dim))
orig[0] = 1
b_centers = np.random.normal(orig, 2, size=(3, dim))
a_data = np.vstack(
np.random.normal(center, 0.5, size=(5, dim))
for center in a_centers
)
b_data = np.vstack(
np.random.normal(center, 0.5, size=(5, dim))
for center in b_centers
)
labels = [1 for _ in range(15)] + [-1 for _ in range(15)]
data = np.hstack([np.vstack([a_data, b_data]), np.transpose([labels])])
np.random.shuffle(data)
yield data[:, 0:-1], data[:, -1]
@pytest.mark.parametrize('classifier', classifier_iterator())
def test_sklearn_compatibility(classifier):
"""Check the compatibility.
"""
check_estimator(classifier)
@pytest.mark.parametrize('classifier', classifier_iterator())
@pytest.mark.parametrize('dataset', two_class_generator())
def test_oneclass(classifier, dataset):
"""Perform one class classification.
"""
X, y = dataset
X_train, X_test, y_train, y_test = train_test_split(X, y, shuffle=False)
y_train = np.ones(X_train.shape[0])
classif = classifier()
classif.fit(X_train, y_train)
classif.predict(X_test)
|
Add tests for (not yet implemented) classifier
|
[TESTS] Add tests for (not yet implemented) classifier
|
Python
|
mit
|
manu3618/legendary-potato
|
[TESTS] Add tests for (not yet implemented) classifier
|
import pytest
import numpy as np
from sklearn.utils.estimator_checks import check_estimator
from sklearn.model_selection import train_test_split
import legendary_potato.classifiers as classifiers
def classifier_iterator():
"""Return an iterator over classifier.
"""
return (classifiers.svdd,)
def two_class_generator(random_state=1576):
"""Generate classic multidimensional toy set.
    For each label, a set of centers is generated following a normal
    distribution. A set of samples is then generated following a normal
    distribution centered on these centers.
The generated set is a 2-class balanced dataset.
"""
    np.random.seed(random_state)
for dim in range(1, 10):
orig = np.zeros(dim)
orig[0] = -1
a_centers = np.random.normal(orig, 2, size=(3, dim))
orig[0] = 1
b_centers = np.random.normal(orig, 2, size=(3, dim))
a_data = np.vstack(
np.random.normal(center, 0.5, size=(5, dim))
for center in a_centers
)
b_data = np.vstack(
np.random.normal(center, 0.5, size=(5, dim))
for center in b_centers
)
labels = [1 for _ in range(15)] + [-1 for _ in range(15)]
data = np.hstack([np.vstack([a_data, b_data]), np.transpose([labels])])
np.random.shuffle(data)
yield data[:, 0:-1], data[:, -1]
@pytest.mark.parametrize('classifier', classifier_iterator())
def test_sklearn_compatibility(classifier):
"""Check the compatibility.
"""
check_estimator(classifier)
@pytest.mark.parametrize('classifier', classifier_iterator())
@pytest.mark.parametrize('dataset', two_class_generator())
def test_oneclass(classifier, dataset):
"""Perform one class classification.
"""
X, y = dataset
X_train, X_test, y_train, y_test = train_test_split(X, y, shuffle=False)
y_train = np.ones(X_train.shape[0])
classif = classifier()
classif.fit(X_train, y_train)
classif.predict(X_test)
|
<commit_before><commit_msg>[TESTS] Add tests for (not yet implemented) classifier<commit_after>
|
import pytest
import numpy as np
from sklearn.utils.estimator_checks import check_estimator
from sklearn.model_selection import train_test_split
import legendary_potato.classifiers as classifiers
def classifier_iterator():
"""Return an iterator over classifier.
"""
return (classifiers.svdd,)
def two_class_generator(random_state=1576):
"""Generate classic multidimensional toy set.
    For each label, a set of centers is generated following a normal
    distribution. A set of samples is then generated following a normal
    distribution centered on these centers.
The generated set is a 2-class balanced dataset.
"""
    np.random.seed(random_state)
for dim in range(1, 10):
orig = np.zeros(dim)
orig[0] = -1
a_centers = np.random.normal(orig, 2, size=(3, dim))
orig[0] = 1
b_centers = np.random.normal(orig, 2, size=(3, dim))
a_data = np.vstack(
np.random.normal(center, 0.5, size=(5, dim))
for center in a_centers
)
b_data = np.vstack(
np.random.normal(center, 0.5, size=(5, dim))
for center in b_centers
)
labels = [1 for _ in range(15)] + [-1 for _ in range(15)]
data = np.hstack([np.vstack([a_data, b_data]), np.transpose([labels])])
np.random.shuffle(data)
yield data[:, 0:-1], data[:, -1]
@pytest.mark.parametrize('classifier', classifier_iterator())
def test_sklearn_compatibility(classifier):
"""Check the compatibility.
"""
check_estimator(classifier)
@pytest.mark.parametrize('classifier', classifier_iterator())
@pytest.mark.parametrize('dataset', two_class_generator())
def test_oneclass(classifier, dataset):
"""Perform one class classification.
"""
X, y = dataset
X_train, X_test, y_train, y_test = train_test_split(X, y, shuffle=False)
y_train = np.ones(X_train.shape[0])
classif = classifier()
classif.fit(X_train, y_train)
classif.predict(X_test)
|
[TESTS] Add tests for (not yet implemented) classifierimport pytest
import numpy as np
from sklearn.utils.estimator_checks import check_estimator
from sklearn.model_selection import train_test_split
import legendary_potato.classifiers as classifiers
def classifier_iterator():
"""Return an iterator over classifier.
"""
return (classifiers.svdd,)
def two_class_generator(random_state=1576):
"""Generate classic multidimensional toy set.
    For each label, a set of centers is generated following a normal
    distribution. A set of samples is then generated following a normal
    distribution centered on these centers.
The generated set is a 2-class balanced dataset.
"""
    np.random.seed(random_state)
for dim in range(1, 10):
orig = np.zeros(dim)
orig[0] = -1
a_centers = np.random.normal(orig, 2, size=(3, dim))
orig[0] = 1
b_centers = np.random.normal(orig, 2, size=(3, dim))
a_data = np.vstack(
np.random.normal(center, 0.5, size=(5, dim))
for center in a_centers
)
b_data = np.vstack(
np.random.normal(center, 0.5, size=(5, dim))
for center in b_centers
)
labels = [1 for _ in range(15)] + [-1 for _ in range(15)]
data = np.hstack([np.vstack([a_data, b_data]), np.transpose([labels])])
np.random.shuffle(data)
yield data[:, 0:-1], data[:, -1]
@pytest.mark.parametrize('classifier', classifier_iterator())
def test_sklearn_compatibility(classifier):
"""Check the compatibility.
"""
check_estimator(classifier)
@pytest.mark.parametrize('classifier', classifier_iterator())
@pytest.mark.parametrize('dataset', two_class_generator())
def test_oneclass(classifier, dataset):
"""Perform one class classification.
"""
X, y = dataset
X_train, X_test, y_train, y_test = train_test_split(X, y, shuffle=False)
y_train = np.ones(X_train.shape[0])
classif = classifier()
classif.fit(X_train, y_train)
classif.predict(X_test)
|
<commit_before><commit_msg>[TESTS] Add tests for (not yet implemented) classifier<commit_after>import pytest
import numpy as np
from sklearn.utils.estimator_checks import check_estimator
from sklearn.model_selection import train_test_split
import legendary_potato.classifiers as classifiers
def classifier_iterator():
"""Return an iterator over classifier.
"""
return (classifiers.svdd,)
def two_class_generator(random_state=1576):
"""Generate classic multidimensional toy set.
    For each label, a set of centers is generated following a normal
    distribution. A set of samples is then generated following a normal
    distribution centered on these centers.
The generated set is a 2-class balanced dataset.
"""
    np.random.seed(random_state)
for dim in range(1, 10):
orig = np.zeros(dim)
orig[0] = -1
a_centers = np.random.normal(orig, 2, size=(3, dim))
orig[0] = 1
b_centers = np.random.normal(orig, 2, size=(3, dim))
a_data = np.vstack(
np.random.normal(center, 0.5, size=(5, dim))
for center in a_centers
)
b_data = np.vstack(
np.random.normal(center, 0.5, size=(5, dim))
for center in b_centers
)
labels = [1 for _ in range(15)] + [-1 for _ in range(15)]
data = np.hstack([np.vstack([a_data, b_data]), np.transpose([labels])])
np.random.shuffle(data)
yield data[:, 0:-1], data[:, -1]
@pytest.mark.parametrize('classifier', classifier_iterator())
def test_sklearn_compatibility(classifier):
"""Check the compatibility.
"""
check_estimator(classifier)
@pytest.mark.parametrize('classifier', classifier_iterator())
@pytest.mark.parametrize('dataset', two_class_generator())
def test_oneclass(classifier, dataset):
"""Perform one class classification.
"""
X, y = dataset
X_train, X_test, y_train, y_test = train_test_split(X, y, shuffle=False)
y_train = np.ones(X_train.shape[0])
classif = classifier()
classif.fit(X_train, y_train)
classif.predict(X_test)
|
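A quick sanity check of the toy-set generator above: each yielded X holds 30 rows (15 per class) with dim columns, and the labels are a balanced set of +1/-1. It assumes the test module is importable and a NumPy old enough to accept the generator arguments passed to np.vstack above.

import numpy as np

for X, y in two_class_generator():
    assert X.shape[0] == 30
    assert sorted(set(y)) == [-1.0, 1.0]
    assert np.sum(y == 1) == np.sum(y == -1) == 15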
|
34b1d1de57022e2d7cbfb1d3c1fac8c29632caef
|
tests/test_converters.py
|
tests/test_converters.py
|
import unittest
from beaker.converters import asbool, aslist
class AsBool(unittest.TestCase):
def test_truth_str(self):
for v in ('true', 'yes', 'on', 'y', 't', '1'):
self.assertTrue(asbool(v), "%s should be considered True" % (v,))
v = v.upper()
self.assertTrue(asbool(v), "%s should be considered True" % (v,))
def test_false_str(self):
for v in ('false', 'no', 'off', 'n', 'f', '0'):
self.assertFalse(asbool(v), v)
v = v.upper()
self.assertFalse(asbool(v), v)
def test_coerce(self):
"""Things that can coerce right straight to booleans."""
self.assertTrue(asbool(True))
self.assertTrue(asbool(1))
self.assertTrue(asbool(42))
self.assertFalse(asbool(False))
self.assertFalse(asbool(0))
def test_bad_values(self):
self.assertRaises(ValueError, asbool, ('mommy!'))
self.assertRaises(ValueError, asbool, (u'Blargl?'))
class AsList(unittest.TestCase):
def test_string(self):
self.assertEqual(aslist('abc'), ['abc'])
self.assertEqual(aslist('1a2a3'), [1, 2, 3])
def test_None(self):
self.assertEqual(aslist(None), [])
def test_listy_noops(self):
"""Lists and tuples should come back unchanged."""
x = [1, 2, 3]
self.assertEqual(aslist(x), x)
y = ('z', 'y', 'x')
self.assertEqual(aslist(y), y)
def test_listify(self):
"""Other objects should just result in a single item list."""
self.assertEqual(aslist(dict()), {})
if __name__ == '__main__':
unittest.main()
|
Bring coverage for the converters module up to 100%.
|
Bring coverage for the converters module up to 100%.
--HG--
branch : trunk
|
Python
|
bsd-3-clause
|
enomado/beaker,jvanasco/beaker,masayuko/beaker
|
Bring coverage for the converters module up to 100%.
--HG--
branch : trunk
|
import unittest
from beaker.converters import asbool, aslist
class AsBool(unittest.TestCase):
def test_truth_str(self):
for v in ('true', 'yes', 'on', 'y', 't', '1'):
self.assertTrue(asbool(v), "%s should be considered True" % (v,))
v = v.upper()
self.assertTrue(asbool(v), "%s should be considered True" % (v,))
def test_false_str(self):
for v in ('false', 'no', 'off', 'n', 'f', '0'):
self.assertFalse(asbool(v), v)
v = v.upper()
self.assertFalse(asbool(v), v)
def test_coerce(self):
"""Things that can coerce right straight to booleans."""
self.assertTrue(asbool(True))
self.assertTrue(asbool(1))
self.assertTrue(asbool(42))
self.assertFalse(asbool(False))
self.assertFalse(asbool(0))
def test_bad_values(self):
self.assertRaises(ValueError, asbool, ('mommy!'))
self.assertRaises(ValueError, asbool, (u'Blargl?'))
class AsList(unittest.TestCase):
def test_string(self):
self.assertEqual(aslist('abc'), ['abc'])
self.assertEqual(aslist('1a2a3'), [1, 2, 3])
def test_None(self):
self.assertEqual(aslist(None), [])
def test_listy_noops(self):
"""Lists and tuples should come back unchanged."""
x = [1, 2, 3]
self.assertEqual(aslist(x), x)
y = ('z', 'y', 'x')
self.assertEqual(aslist(y), y)
def test_listify(self):
"""Other objects should just result in a single item list."""
self.assertEqual(aslist(dict()), {})
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Bring coverage for the converters module up to 100%.
--HG--
branch : trunk<commit_after>
|
import unittest
from beaker.converters import asbool, aslist
class AsBool(unittest.TestCase):
def test_truth_str(self):
for v in ('true', 'yes', 'on', 'y', 't', '1'):
self.assertTrue(asbool(v), "%s should be considered True" % (v,))
v = v.upper()
self.assertTrue(asbool(v), "%s should be considered True" % (v,))
def test_false_str(self):
for v in ('false', 'no', 'off', 'n', 'f', '0'):
self.assertFalse(asbool(v), v)
v = v.upper()
self.assertFalse(asbool(v), v)
def test_coerce(self):
"""Things that can coerce right straight to booleans."""
self.assertTrue(asbool(True))
self.assertTrue(asbool(1))
self.assertTrue(asbool(42))
self.assertFalse(asbool(False))
self.assertFalse(asbool(0))
def test_bad_values(self):
self.assertRaises(ValueError, asbool, ('mommy!'))
self.assertRaises(ValueError, asbool, (u'Blargl?'))
class AsList(unittest.TestCase):
def test_string(self):
self.assertEqual(aslist('abc'), ['abc'])
self.assertEqual(aslist('1a2a3'), [1, 2, 3])
def test_None(self):
self.assertEqual(aslist(None), [])
def test_listy_noops(self):
"""Lists and tuples should come back unchanged."""
x = [1, 2, 3]
self.assertEqual(aslist(x), x)
y = ('z', 'y', 'x')
self.assertEqual(aslist(y), y)
def test_listify(self):
"""Other objects should just result in a single item list."""
self.assertEqual(aslist(dict()), {})
if __name__ == '__main__':
unittest.main()
|
Bring coverage for the converters module up to 100%.
--HG--
branch : trunk
import unittest
from beaker.converters import asbool, aslist
class AsBool(unittest.TestCase):
def test_truth_str(self):
for v in ('true', 'yes', 'on', 'y', 't', '1'):
self.assertTrue(asbool(v), "%s should be considered True" % (v,))
v = v.upper()
self.assertTrue(asbool(v), "%s should be considered True" % (v,))
def test_false_str(self):
for v in ('false', 'no', 'off', 'n', 'f', '0'):
self.assertFalse(asbool(v), v)
v = v.upper()
self.assertFalse(asbool(v), v)
def test_coerce(self):
"""Things that can coerce right straight to booleans."""
self.assertTrue(asbool(True))
self.assertTrue(asbool(1))
self.assertTrue(asbool(42))
self.assertFalse(asbool(False))
self.assertFalse(asbool(0))
def test_bad_values(self):
self.assertRaises(ValueError, asbool, ('mommy!'))
self.assertRaises(ValueError, asbool, (u'Blargl?'))
class AsList(unittest.TestCase):
def test_string(self):
self.assertEqual(aslist('abc'), ['abc'])
self.assertEqual(aslist('1a2a3'), [1, 2, 3])
def test_None(self):
self.assertEqual(aslist(None), [])
def test_listy_noops(self):
"""Lists and tuples should come back unchanged."""
x = [1, 2, 3]
self.assertEqual(aslist(x), x)
y = ('z', 'y', 'x')
self.assertEqual(aslist(y), y)
def test_listify(self):
"""Other objects should just result in a single item list."""
self.assertEqual(aslist(dict()), {})
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Bring coverage for the converters module up to 100%.
--HG--
branch : trunk<commit_after>import unittest
from beaker.converters import asbool, aslist
class AsBool(unittest.TestCase):
def test_truth_str(self):
for v in ('true', 'yes', 'on', 'y', 't', '1'):
self.assertTrue(asbool(v), "%s should be considered True" % (v,))
v = v.upper()
self.assertTrue(asbool(v), "%s should be considered True" % (v,))
def test_false_str(self):
for v in ('false', 'no', 'off', 'n', 'f', '0'):
self.assertFalse(asbool(v), v)
v = v.upper()
self.assertFalse(asbool(v), v)
def test_coerce(self):
"""Things that can coerce right straight to booleans."""
self.assertTrue(asbool(True))
self.assertTrue(asbool(1))
self.assertTrue(asbool(42))
self.assertFalse(asbool(False))
self.assertFalse(asbool(0))
def test_bad_values(self):
self.assertRaises(ValueError, asbool, ('mommy!'))
self.assertRaises(ValueError, asbool, (u'Blargl?'))
class AsList(unittest.TestCase):
def test_string(self):
self.assertEqual(aslist('abc'), ['abc'])
self.assertEqual(aslist('1a2a3'), [1, 2, 3])
def test_None(self):
self.assertEqual(aslist(None), [])
def test_listy_noops(self):
"""Lists and tuples should come back unchanged."""
x = [1, 2, 3]
self.assertEqual(aslist(x), x)
y = ('z', 'y', 'x')
self.assertEqual(aslist(y), y)
def test_listify(self):
"""Other objects should just result in a single item list."""
self.assertEqual(aslist(dict()), {})
if __name__ == '__main__':
unittest.main()
|
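For context, a rough sketch of converters consistent with most of the asserts above — an illustration only, not beaker's actual implementation; the aslist('1a2a3') and aslist(dict()) expectations hinge on beaker-specific behavior that is not reproduced here:

def asbool(obj):
    """Coerce common truthy/falsy strings and values to a bool."""
    if isinstance(obj, str):
        obj = obj.strip().lower()
        if obj in ('true', 'yes', 'on', 'y', 't', '1'):
            return True
        if obj in ('false', 'no', 'off', 'n', 'f', '0'):
            return False
        raise ValueError("String is not true/false: %r" % obj)
    return bool(obj)

def aslist(obj, sep=None):
    """Return obj as a list: None -> [], lists/tuples unchanged, else wrap."""
    if obj is None:
        return []
    if isinstance(obj, (list, tuple)):
        return obj
    return [obj]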
|
5e60c2c7e794ad1cac8340d10d9d913ad486a28c
|
tests/test_getproject.py
|
tests/test_getproject.py
|
"""Tests for the ``getproject`` subcommand."""
import os
from pew._utils import temp_environ, invoke_pew as invoke
from utils import TemporaryDirectory
def test_getproject(env1):
"""Check that ``getproject`` prints an environment’s project directory."""
with temp_environ():
os.environ.pop('VIRTUAL_ENV', None)
with TemporaryDirectory() as tmpdir:
invoke('setproject', 'env1', tmpdir)
res = invoke('getproject', 'env1')
assert not res.err
assert res.out == tmpdir
def test_project_directory_not_set(env1):
"""Check the error message if no project directory was set.
If no project directory has been configured for an environment,
``getproject`` should quit with an error message.
"""
name = 'env1'
with temp_environ():
os.environ.pop('VIRTUAL_ENV', None)
with TemporaryDirectory() as tmpdir:
res = invoke('getproject', name)
assert not res.out
assert res.err == (
"ERROR: no project directory set for Environment '{0}'"
.format(name)
)
def test_unknown_environment():
"""Check the error message if passed an unknown environment name.
If ``getproject`` is invoked with the name of an environment that
does not exist, the call should fail with an appropriate error
message.
"""
name = 'bogus-environment-that-/hopefully/-does-not-exist'
res = invoke('getproject', name)
assert not res.out
assert res.err == "ERROR: Environment '{0}' does not exist.".format(name)
def test_call_without_args_outside_active_venv():
"""Check the error message if called without args outside a virtualenv.
If ``getproject`` is called without additional arguments outside of
an active virtualenv, it should print an error message.
"""
os.environ.pop('VIRTUAL_ENV', None)
res = invoke('getproject')
assert not res.out
assert res.err == "ERROR: no virtualenv active"
|
Add tests for the `getproject` subcommand
|
Add tests for the `getproject` subcommand
|
Python
|
mit
|
berdario/pew,berdario/pew
|
Add tests for the `getproject` subcommand
|
"""Tests for the ``getproject`` subcommand."""
import os
from pew._utils import temp_environ, invoke_pew as invoke
from utils import TemporaryDirectory
def test_getproject(env1):
"""Check that ``getproject`` prints an environment’s project directory."""
with temp_environ():
os.environ.pop('VIRTUAL_ENV', None)
with TemporaryDirectory() as tmpdir:
invoke('setproject', 'env1', tmpdir)
res = invoke('getproject', 'env1')
assert not res.err
assert res.out == tmpdir
def test_project_directory_not_set(env1):
"""Check the error message if no project directory was set.
If no project directory has been configured for an environment,
``getproject`` should quit with an error message.
"""
name = 'env1'
with temp_environ():
os.environ.pop('VIRTUAL_ENV', None)
with TemporaryDirectory() as tmpdir:
res = invoke('getproject', name)
assert not res.out
assert res.err == (
"ERROR: no project directory set for Environment '{0}'"
.format(name)
)
def test_unknown_environment():
"""Check the error message if passed an unknown environment name.
If ``getproject`` is invoked with the name of an environment that
does not exist, the call should fail with an appropriate error
message.
"""
name = 'bogus-environment-that-/hopefully/-does-not-exist'
res = invoke('getproject', name)
assert not res.out
assert res.err == "ERROR: Environment '{0}' does not exist.".format(name)
def test_call_without_args_outside_active_venv():
"""Check the error message if called without args outside a virtualenv.
If ``getproject`` is called without additional arguments outside of
an active virtualenv, it should print an error message.
"""
os.environ.pop('VIRTUAL_ENV', None)
res = invoke('getproject')
assert not res.out
assert res.err == "ERROR: no virtualenv active"
|
<commit_before><commit_msg>Add tests for the `getproject` subcommand<commit_after>
|
"""Tests for the ``getproject`` subcommand."""
import os
from pew._utils import temp_environ, invoke_pew as invoke
from utils import TemporaryDirectory
def test_getproject(env1):
"""Check that ``getproject`` prints an environment’s project directory."""
with temp_environ():
os.environ.pop('VIRTUAL_ENV', None)
with TemporaryDirectory() as tmpdir:
invoke('setproject', 'env1', tmpdir)
res = invoke('getproject', 'env1')
assert not res.err
assert res.out == tmpdir
def test_project_directory_not_set(env1):
"""Check the error message if no project directory was set.
If no project directory has been configured for an environment,
``getproject`` should quit with an error message.
"""
name = 'env1'
with temp_environ():
os.environ.pop('VIRTUAL_ENV', None)
with TemporaryDirectory() as tmpdir:
res = invoke('getproject', name)
assert not res.out
assert res.err == (
"ERROR: no project directory set for Environment '{0}'"
.format(name)
)
def test_unknown_environment():
"""Check the error message if passed an unknown environment name.
If ``getproject`` is invoked with the name of an environment that
does not exist, the call should fail with an appropriate error
message.
"""
name = 'bogus-environment-that-/hopefully/-does-not-exist'
res = invoke('getproject', name)
assert not res.out
assert res.err == "ERROR: Environment '{0}' does not exist.".format(name)
def test_call_without_args_outside_active_venv():
"""Check the error message if called without args outside a virtualenv.
If ``getproject`` is called without additional arguments outside of
an active virtualenv, it should print an error message.
"""
os.environ.pop('VIRTUAL_ENV', None)
res = invoke('getproject')
assert not res.out
assert res.err == "ERROR: no virtualenv active"
|
Add tests for the `getproject` subcommand
"""Tests for the ``getproject`` subcommand."""
import os
from pew._utils import temp_environ, invoke_pew as invoke
from utils import TemporaryDirectory
def test_getproject(env1):
"""Check that ``getproject`` prints an environment’s project directory."""
with temp_environ():
os.environ.pop('VIRTUAL_ENV', None)
with TemporaryDirectory() as tmpdir:
invoke('setproject', 'env1', tmpdir)
res = invoke('getproject', 'env1')
assert not res.err
assert res.out == tmpdir
def test_project_directory_not_set(env1):
"""Check the error message if no project directory was set.
If no project directory has been configured for an environment,
``getproject`` should quit with an error message.
"""
name = 'env1'
with temp_environ():
os.environ.pop('VIRTUAL_ENV', None)
with TemporaryDirectory() as tmpdir:
res = invoke('getproject', name)
assert not res.out
assert res.err == (
"ERROR: no project directory set for Environment '{0}'"
.format(name)
)
def test_unknown_environment():
"""Check the error message if passed an unknown environment name.
If ``getproject`` is invoked with the name of an environment that
does not exist, the call should fail with an appropriate error
message.
"""
name = 'bogus-environment-that-/hopefully/-does-not-exist'
res = invoke('getproject', name)
assert not res.out
assert res.err == "ERROR: Environment '{0}' does not exist.".format(name)
def test_call_without_args_outside_active_venv():
"""Check the error message if called without args outside a virtualenv.
If ``getproject`` is called without additional arguments outside of
an active virtualenv, it should print an error message.
"""
os.environ.pop('VIRTUAL_ENV', None)
res = invoke('getproject')
assert not res.out
assert res.err == "ERROR: no virtualenv active"
|
<commit_before><commit_msg>Add tests for the `getproject` subcommand<commit_after>"""Tests for the ``getproject`` subcommand."""
import os
from pew._utils import temp_environ, invoke_pew as invoke
from utils import TemporaryDirectory
def test_getproject(env1):
"""Check that ``getproject`` prints an environment’s project directory."""
with temp_environ():
os.environ.pop('VIRTUAL_ENV', None)
with TemporaryDirectory() as tmpdir:
invoke('setproject', 'env1', tmpdir)
res = invoke('getproject', 'env1')
assert not res.err
assert res.out == tmpdir
def test_project_directory_not_set(env1):
"""Check the error message if no project directory was set.
If no project directory has been configured for an environment,
``getproject`` should quit with an error message.
"""
name = 'env1'
with temp_environ():
os.environ.pop('VIRTUAL_ENV', None)
with TemporaryDirectory() as tmpdir:
res = invoke('getproject', name)
assert not res.out
assert res.err == (
"ERROR: no project directory set for Environment '{0}'"
.format(name)
)
def test_unknown_environment():
"""Check the error message if passed an unknown environment name.
If ``getproject`` is invoked with the name of an environment that
does not exist, the call should fail with an appropriate error
message.
"""
name = 'bogus-environment-that-/hopefully/-does-not-exist'
res = invoke('getproject', name)
assert not res.out
assert res.err == "ERROR: Environment '{0}' does not exist.".format(name)
def test_call_without_args_outside_active_venv():
"""Check the error message if called without args outside a virtualenv.
If ``getproject`` is called without additional arguments outside of
an active virtualenv, it should print an error message.
"""
os.environ.pop('VIRTUAL_ENV', None)
res = invoke('getproject')
assert not res.out
assert res.err == "ERROR: no virtualenv active"
|
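The tests above depend on an env1 fixture that is not shown here; it presumably lives in the suite's conftest.py. A minimal sketch of what such a fixture could look like — the new/rm subcommand names and the -d (don't activate) flag are assumptions about pew's CLI:

import pytest
from pew._utils import invoke_pew as invoke

@pytest.fixture
def env1():
    # Create a throwaway virtualenv without activating it, hand its
    # name to the test, then clean up afterwards.
    invoke('new', 'env1', '-d')
    yield 'env1'
    invoke('rm', 'env1')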
|
bc37951f9ff2064d70b7cee42f92ccc4a1284140
|
tests/test_chaining.py
|
tests/test_chaining.py
|
import mr_streams as ms
import unittest
# :::: auxiliary functions ::::
def add_one(x):
return x + 1
def triplicate(x):
return (x,x,x)
def no_op(*args, **kwargs):
pass
class TestChaining(unittest.TestCase):
def test_MaFiTaFlTkTpDr(self):
_ = ms.stream(range(20))
_.map(add_one)\
.filter(lambda x: x%2 == 0)\
.take(3)\
.flatmap(triplicate)\
.take(8)\
.tap(no_op)
_.drain()
|
Fix bug in reduce and tap.
|
Fix bug in reduce and tap.
|
Python
|
mit
|
caffeine-potent/Streamer-Datastructure
|
Fix bug in reduce and tap.
|
import mr_streams as ms
import unittest
# :::: auxiliary functions ::::
def add_one(x):
return x + 1
def triplicate(x):
return (x,x,x)
def no_op(*args, **kwargs):
pass
class TestChaining(unittest.TestCase):
def test_MaFiTaFlTkTpDr(self):
_ = ms.stream(range(20))
_.map(add_one)\
.filter(lambda x: x%2 == 0)\
.take(3)\
.flatmap(triplicate)\
.take(8)\
.tap(no_op)
_.drain()
|
<commit_before><commit_msg>Fix bug in reduce and tap.<commit_after>
|
import mr_streams as ms
import unittest
# :::: auxiliary functions ::::
def add_one(x):
return x + 1
def triplicate(x):
return (x,x,x)
def no_op(*args, **kwargs):
pass
class TestChaining(unittest.TestCase):
def test_MaFiTaFlTkTpDr(self):
_ = ms.stream(range(20))
_.map(add_one)\
.filter(lambda x: x%2 == 0)\
.take(3)\
.flatmap(triplicate)\
.take(8)\
.tap(no_op)
_.drain()
|
Fix bug in reduce and tap.
import mr_streams as ms
import unittest
# :::: auxilary functions ::::
def add_one(x):
return x + 1
def triplicate(x):
return (x,x,x)
def no_op(*args, **kwargs):
pass
class TestChaining(unittest.TestCase):
def test_MaFiTaFlTkTpDr(self):
_ = ms.stream(range(20))
_.map(add_one)\
.filter(lambda x: x%2 == 0)\
.take(3)\
.flatmap(triplicate)\
.take(8)\
.tap(no_op)
_.drain()
|
<commit_before><commit_msg>Fix bug in reduce and tap.<commit_after>import mr_streams as ms
import unittest
# :::: auxiliary functions ::::
def add_one(x):
return x + 1
def triplicate(x):
return (x,x,x)
def no_op(*args, **kwargs):
pass
class TestChaining(unittest.TestCase):
def test_MaFiTaFlTkTpDr(self):
_ = ms.stream(range(20))
_.map(add_one)\
.filter(lambda x: x%2 == 0)\
.take(3)\
.flatmap(triplicate)\
.take(8)\
.tap(no_op)
_.drain()
|
|
39da7b8fff5420187c1c3f388cdb744cbde38e7d
|
tests/test_database.py
|
tests/test_database.py
|
from StringIO import StringIO
from django.core.management import call_command
import pytest
def test_for_missing_migrations():
output = StringIO()
try:
call_command(
'makemigrations', interactive=False, dry_run=True, exit_code=True,
stdout=output)
except SystemExit as e:
# The exit code will be 1 when there are no missing migrations
assert unicode(e) == '1'
else:
pytest.fail("There are missing migrations:\n %s" % output.getvalue())
|
Add test to fail if there are migrations missing.
|
Add test to fail if there are migrations missing.
|
Python
|
mpl-2.0
|
mozilla/telemetry-analysis-service,mozilla/telemetry-analysis-service,mozilla/telemetry-analysis-service,mozilla/telemetry-analysis-service
|
Add test to fail if there are migrations missing.
|
from StringIO import StringIO
from django.core.management import call_command
import pytest
def test_for_missing_migrations():
output = StringIO()
try:
call_command(
'makemigrations', interactive=False, dry_run=True, exit_code=True,
stdout=output)
except SystemExit as e:
# The exit code will be 1 when there are no missing migrations
assert unicode(e) == '1'
else:
pytest.fail("There are missing migrations:\n %s" % output.getvalue())
|
<commit_before><commit_msg>Add test to fail if there are migrations missing.<commit_after>
|
from StringIO import StringIO
from django.core.management import call_command
import pytest
def test_for_missing_migrations():
output = StringIO()
try:
call_command(
'makemigrations', interactive=False, dry_run=True, exit_code=True,
stdout=output)
except SystemExit as e:
# The exit code will be 1 when there are no missing migrations
assert unicode(e) == '1'
else:
pytest.fail("There are missing migrations:\n %s" % output.getvalue())
|
Add test to fail if there are migrations missing.
from StringIO import StringIO
from django.core.management import call_command
import pytest
def test_for_missing_migrations():
output = StringIO()
try:
call_command(
'makemigrations', interactive=False, dry_run=True, exit_code=True,
stdout=output)
except SystemExit as e:
# The exit code will be 1 when there are no missing migrations
assert unicode(e) == '1'
else:
pytest.fail("There are missing migrations:\n %s" % output.getvalue())
|
<commit_before><commit_msg>Add test to fail if there are migrations missing.<commit_after>from StringIO import StringIO
from django.core.management import call_command
import pytest
def test_for_missing_migrations():
output = StringIO()
try:
call_command(
'makemigrations', interactive=False, dry_run=True, exit_code=True,
stdout=output)
except SystemExit as e:
# The exit code will be 1 when there are no missing migrations
assert unicode(e) == '1'
else:
pytest.fail("There are missing migrations:\n %s" % output.getvalue())
|
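On Django 1.10 and later, the same guard is usually written with makemigrations --check, which makes the command exit non-zero when model changes lack migrations; a sketch, assuming a configured Django test settings module:

import pytest
from django.core.management import call_command

def test_for_missing_migrations():
    try:
        # --check exits with a non-zero status when model changes
        # are not covered by migrations.
        call_command('makemigrations', '--check', '--dry-run', interactive=False)
    except SystemExit:
        pytest.fail("There are missing migrations.")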
|
40148403575885640d7b90a33e2497081503757e
|
pygraphc/evaluation/CalinskiHarabaszIndex2.py
|
pygraphc/evaluation/CalinskiHarabaszIndex2.py
|
from pygraphc.similarity.CosineSimilarity import CosineSimilarity
class CalinskiHarabaszIndex(object):
def __init__(self, clusters, preprocessed_logs, log_length):
self.clusters = clusters
self.preprocessed_logs = preprocessed_logs
self.log_length = log_length
self.cluster_centroids = {}
self.cluster_total_nodes = {}
def __get_centroid(self, cluster=None):
centroid = ''
# centroid for a particular cluster
if cluster:
for log_id in cluster:
centroid += self.preprocessed_logs[log_id]
# centroid for the whole logs
else:
for log_id in self.preprocessed_logs:
centroid += self.preprocessed_logs[log_id]
return centroid
def __get_all_cluster_properties(self):
for cluster_id, log_ids in self.clusters.iteritems():
self.cluster_centroids[cluster_id] = self.__get_centroid(log_ids)
self.cluster_total_nodes[cluster_id] = len(log_ids)
@staticmethod
def __get_distance(source, dest):
cs = CosineSimilarity()
distance = cs.get_cosine_similarity(source, dest)
return distance
def __get_trace_b(self):
traces_b = []
logs_centroid = self.__get_centroid()
for cluster_id, log_ids in self.clusters.iteritems():
trace_b = self.cluster_total_nodes[cluster_id] * \
(self.__get_distance(self.cluster_centroids[cluster_id], logs_centroid) ** 2)
traces_b.append(trace_b)
total_trace_b = sum(traces_b)
return total_trace_b
def __get_trace_w(self):
traces_w = []
for cluster_id, log_ids in self.clusters.iteritems():
trace_w_cluster = []
for log_id in log_ids:
# compare the log text (not its integer id) with the cluster centroid
trace_w = self.__get_distance(self.preprocessed_logs[log_id], self.cluster_centroids[cluster_id]) ** 2
trace_w_cluster.append(trace_w)
traces_w.append(sum(trace_w_cluster))
total_traces_w = sum(traces_w)
return total_traces_w
def get_calinski_harabasz(self):
# populate the centroid and cluster-size caches before computing the traces
self.__get_all_cluster_properties()
total_cluster = len(self.clusters.keys())
ch_index = (self.__get_trace_b() / (total_cluster - 1)) / \
(self.__get_trace_w() / (self.log_length - total_cluster))
return ch_index
|
Create new generic Calinski Harabasz index
|
Create new generic Calinski Harabasz index
|
Python
|
mit
|
studiawan/pygraphc
|
Create new generic Calinski Harabasz index
|
from pygraphc.similarity.CosineSimilarity import CosineSimilarity
class CalinskiHarabaszIndex(object):
def __init__(self, clusters, preprocessed_logs, log_length):
self.clusters = clusters
self.preprocessed_logs = preprocessed_logs
self.log_length = log_length
self.cluster_centroids = {}
self.cluster_total_nodes = {}
def __get_centroid(self, cluster=None):
centroid = ''
# centroid for a particular cluster
if cluster:
for log_id in cluster:
centroid += self.preprocessed_logs[log_id]
# centroid for the whole logs
else:
for log_id in self.preprocessed_logs:
centroid += self.preprocessed_logs[log_id]
return centroid
def __get_all_cluster_properties(self):
for cluster_id, log_ids in self.clusters.iteritems():
self.cluster_centroids[cluster_id] = self.__get_centroid(log_ids)
self.cluster_total_nodes[cluster_id] = len(log_ids)
@staticmethod
def __get_distance(source, dest):
cs = CosineSimilarity()
distance = cs.get_cosine_similarity(source, dest)
return distance
def __get_trace_b(self):
traces_b = []
logs_centroid = self.__get_centroid()
for cluster_id, log_ids in self.clusters.iteritems():
trace_b = self.cluster_total_nodes[cluster_id] * \
(self.__get_distance(self.cluster_centroids[cluster_id], logs_centroid) ** 2)
traces_b.append(trace_b)
total_trace_b = sum(traces_b)
return total_trace_b
def __get_trace_w(self):
traces_w = []
for cluster_id, log_ids in self.clusters.iteritems():
trace_w_cluster = []
for log_id in log_ids:
# compare the log text (not its integer id) with the cluster centroid
trace_w = self.__get_distance(self.preprocessed_logs[log_id], self.cluster_centroids[cluster_id]) ** 2
trace_w_cluster.append(trace_w)
traces_w.append(sum(trace_w_cluster))
total_traces_w = sum(traces_w)
return total_traces_w
def get_calinski_harabasz(self):
# populate the centroid and cluster-size caches before computing the traces
self.__get_all_cluster_properties()
total_cluster = len(self.clusters.keys())
ch_index = (self.__get_trace_b() / (total_cluster - 1)) / \
(self.__get_trace_w() / (self.log_length - total_cluster))
return ch_index
|
<commit_before><commit_msg>Create new generic Calinski Harabasz index<commit_after>
|
from pygraphc.similarity.CosineSimilarity import CosineSimilarity
class CalinskiHarabaszIndex(object):
def __init__(self, clusters, preprocessed_logs, log_length):
self.clusters = clusters
self.preprocessed_logs = preprocessed_logs
self.log_length = log_length
self.cluster_centroids = {}
self.cluster_total_nodes = {}
def __get_centroid(self, cluster=None):
centroid = ''
# centroid for a particular cluster
if cluster:
for log_id in cluster:
centroid += self.preprocessed_logs[log_id]
# centroid for the whole logs
else:
for log_id in self.preprocessed_logs:
centroid += self.preprocessed_logs[log_id]
return centroid
def __get_all_cluster_properties(self):
for cluster_id, log_ids in self.clusters.iteritems():
self.cluster_centroids[cluster_id] = self.__get_centroid(log_ids)
self.cluster_total_nodes[cluster_id] = len(log_ids)
@staticmethod
def __get_distance(source, dest):
cs = CosineSimilarity()
distance = cs.get_cosine_similarity(source, dest)
return distance
def __get_trace_b(self):
traces_b = []
logs_centroid = self.__get_centroid()
for cluster_id, log_ids in self.clusters.iteritems():
trace_b = self.cluster_total_nodes[cluster_id] * \
(self.__get_distance(self.cluster_centroids[cluster_id], logs_centroid) ** 2)
traces_b.append(trace_b)
total_trace_b = sum(traces_b)
return total_trace_b
def __get_trace_w(self):
traces_w = []
for cluster_id, log_ids in self.clusters.iteritems():
trace_w_cluster = []
for log_id in log_ids:
# compare the log text (not its integer id) with the cluster centroid
trace_w = self.__get_distance(self.preprocessed_logs[log_id], self.cluster_centroids[cluster_id]) ** 2
trace_w_cluster.append(trace_w)
traces_w.append(sum(trace_w_cluster))
total_traces_w = sum(traces_w)
return total_traces_w
def get_calinski_harabasz(self):
# populate the centroid and cluster-size caches before computing the traces
self.__get_all_cluster_properties()
total_cluster = len(self.clusters.keys())
ch_index = (self.__get_trace_b() / (total_cluster - 1)) / \
(self.__get_trace_w() / (self.log_length - total_cluster))
return ch_index
|
Create new generic Calinski Harabasz index
from pygraphc.similarity.CosineSimilarity import CosineSimilarity
class CalinskiHarabaszIndex(object):
def __init__(self, clusters, preprocessed_logs, log_length):
self.clusters = clusters
self.preprocessed_logs = preprocessed_logs
self.log_length = log_length
self.cluster_centroids = {}
self.cluster_total_nodes = {}
def __get_centroid(self, cluster=None):
centroid = ''
# centroid for a particular cluster
if cluster:
for log_id in cluster:
centroid += self.preprocessed_logs[log_id]
# centroid for the whole logs
else:
for log_id in self.preprocessed_logs:
centroid += self.preprocessed_logs[log_id]
return centroid
def __get_all_cluster_properties(self):
for cluster_id, log_ids in self.clusters.iteritems():
self.cluster_centroids[cluster_id] = self.__get_centroid(log_ids)
self.cluster_total_nodes[cluster_id] = len(log_ids)
@staticmethod
def __get_distance(source, dest):
cs = CosineSimilarity()
distance = cs.get_cosine_similarity(source, dest)
return distance
def __get_trace_b(self):
traces_b = []
logs_centroid = self.__get_centroid()
for cluster_id, log_ids in self.clusters.iteritems():
trace_b = self.cluster_total_nodes[cluster_id] * \
(self.__get_distance(self.cluster_centroids[cluster_id], logs_centroid) ** 2)
traces_b.append(trace_b)
total_trace_b = sum(traces_b)
return total_trace_b
def __get_trace_w(self):
traces_w = []
for cluster_id, log_ids in self.clusters.iteritems():
trace_w_cluster = []
for log_id in log_ids:
# compare the log text (not its integer id) with the cluster centroid
trace_w = self.__get_distance(self.preprocessed_logs[log_id], self.cluster_centroids[cluster_id]) ** 2
trace_w_cluster.append(trace_w)
traces_w.append(sum(trace_w_cluster))
total_traces_w = sum(traces_w)
return total_traces_w
def get_calinski_harabasz(self):
# populate the centroid and cluster-size caches before computing the traces
self.__get_all_cluster_properties()
total_cluster = len(self.clusters.keys())
ch_index = (self.__get_trace_b() / (total_cluster - 1)) / \
(self.__get_trace_w() / (self.log_length - total_cluster))
return ch_index
|
<commit_before><commit_msg>Create new generic Calinski Harabasz index<commit_after>from pygraphc.similarity.CosineSimilarity import CosineSimilarity
class CalinskiHarabaszIndex(object):
def __init__(self, clusters, preprocessed_logs, log_length):
self.clusters = clusters
self.preprocessed_logs = preprocessed_logs
self.log_length = log_length
self.cluster_centroids = {}
self.cluster_total_nodes = {}
def __get_centroid(self, cluster=None):
centroid = ''
# centroid for a particular cluster
if cluster:
for log_id in cluster:
centroid += self.preprocessed_logs[log_id]
# centroid for the whole logs
else:
for log_id in self.preprocessed_logs:
centroid += self.preprocessed_logs[log_id]
return centroid
def __get_all_cluster_properties(self):
for cluster_id, log_ids in self.clusters.iteritems():
self.cluster_centroids[cluster_id] = self.__get_centroid(log_ids)
self.cluster_total_nodes[cluster_id] = len(log_ids)
@staticmethod
def __get_distance(source, dest):
cs = CosineSimilarity()
distance = cs.get_cosine_similarity(source, dest)
return distance
def __get_trace_b(self):
traces_b = []
logs_centroid = self.__get_centroid()
for cluster_id, log_ids in self.clusters.iteritems():
trace_b = self.cluster_total_nodes[cluster_id] * \
(self.__get_distance(self.cluster_centroids[cluster_id], logs_centroid) ** 2)
traces_b.append(trace_b)
total_trace_b = sum(traces_b)
return total_trace_b
def __get_trace_w(self):
traces_w = []
for cluster_id, log_ids in self.clusters.iteritems():
trace_w_cluster = []
for log_id in log_ids:
# compare the log text (not its integer id) with the cluster centroid
trace_w = self.__get_distance(self.preprocessed_logs[log_id], self.cluster_centroids[cluster_id]) ** 2
trace_w_cluster.append(trace_w)
traces_w.append(sum(trace_w_cluster))
total_traces_w = sum(traces_w)
return total_traces_w
def get_calinski_harabasz(self):
# populate the centroid and cluster-size caches before computing the traces
self.__get_all_cluster_properties()
total_cluster = len(self.clusters.keys())
ch_index = (self.__get_trace_b() / (total_cluster - 1)) / \
(self.__get_trace_w() / (self.log_length - total_cluster))
return ch_index
|
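A hedged usage sketch for the class above; the inputs are hypothetical, the "centroids" are concatenated log strings compared by cosine similarity, and the iteritems calls imply a Python 2 runtime:

from pygraphc.evaluation.CalinskiHarabaszIndex2 import CalinskiHarabaszIndex

# Toy inputs: two clusters over four preprocessed log lines.
preprocessed_logs = {
    0: 'connection from 10.0.0.1',
    1: 'connection from 10.0.0.2',
    2: 'disk failure on sda',
    3: 'disk failure on sdb',
}
clusters = {0: [0, 1], 1: [2, 3]}  # cluster_id -> member log ids
ch = CalinskiHarabaszIndex(clusters, preprocessed_logs, log_length=len(preprocessed_logs))
print(ch.get_calinski_harabasz())  # larger values suggest better-separated clusters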
|
f29ac339dba7bb90327cdf9b41245ce7c383b126
|
vimap-test.py
|
vimap-test.py
|
import imap_cli
from imap_cli import config
from imap_cli import search
connect_conf = config.new_context_from_file(section='imap')
display_conf = config.new_context_from_file(section='display')
imap_account = imap_cli.connect(**connect_conf)
display_conf['format_list'] = u'{uid:>5} : {from:<40} : {subject}'
for truc in search.fetch_mails_info(imap_account, limit=10):
truc['from'] = truncate_string(truc['from'], 40)
print display_conf['format_list'].format(**truc)
|
Add a python test file
|
Add a python test file
|
Python
|
mit
|
Gentux/vimap
|
Add a python test file
|
import imap_cli
from imap_cli import config
from imap_cli import search
connect_conf = config.new_context_from_file(section='imap')
display_conf = config.new_context_from_file(section='display')
imap_account = imap_cli.connect(**connect_conf)
display_conf['format_list'] = u'{uid:>5} : {from:<40} : {subject}'
for truc in search.fetch_mails_info(imap_account, limit=10):
truc['from'] = truncate_string(truc['from'], 40)
print display_conf['format_list'].format(**truc)
|
<commit_before><commit_msg>Add a python test file<commit_after>
|
import imap_cli
from imap_cli import config
from imap_cli import search
connect_conf = config.new_context_from_file(section='imap')
display_conf = config.new_context_from_file(section='display')
imap_account = imap_cli.connect(**connect_conf)
display_conf['format_list'] = u'{uid:>5} : {from:<40} : {subject}'
for truc in search.fetch_mails_info(imap_account, limit=10):
truc['from'] = truncate_string(truc['from'], 40)
print display_conf['format_list'].format(**truc)
|
Add a python test file
import imap_cli
from imap_cli import config
from imap_cli import search
connect_conf = config.new_context_from_file(section='imap')
display_conf = config.new_context_from_file(section='display')
imap_account = imap_cli.connect(**connect_conf)
display_conf['format_list'] = u'{uid:>5} : {from:<40} : {subject}'
for truc in search.fetch_mails_info(imap_account, limit=10):
truc['from'] = truncate_string(truc['from'], 40)
print display_conf['format_list'].format(**truc)
|
<commit_before><commit_msg>Add a python test file<commit_after>import imap_cli
from imap_cli import config
from imap_cli import search
connect_conf = config.new_context_from_file(section='imap')
display_conf = config.new_context_from_file(section='display')
imap_account = imap_cli.connect(**connect_conf)
display_conf['format_list'] = u'{uid:>5} : {from:<40} : {subject}'
for truc in search.fetch_mails_info(imap_account, limit=10):
truc['from'] = truncate_string(truc['from'], 40)
print display_conf['format_list'].format(**truc)
|
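truncate_string is called but never defined or imported in the script above; a minimal sketch of what it presumably does — the name, signature, and ellipsis behavior are assumptions:

def truncate_string(value, max_length):
    """Clip value to max_length characters, marking the cut with '...'."""
    if len(value) <= max_length:
        return value
    return value[:max_length - 3] + '...'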
|
2540d4d9383d8c2cb67ef85fd1f2e3206b006902
|
app/views.py
|
app/views.py
|
from django.shortcuts import render_to_response
from django.http import HttpResponse
from app.models import Event
def home(request):
if request.method == 'POST':
if request.POST.has_key('what') and len(request.POST['what']) > 0:
e = Event.objects.create(what=request.POST['what'])
e.save()
else:
return HttpResponse('Illegal Request')
context = {}
events = Event.objects.all()
return render_to_response('log.html', {'events': events})
|
Add a view that handles POST and GET requests for the useless log
|
Add a view that handles POST and GET requests for the useless log
|
Python
|
mit
|
schatten/logan
|
Add a view that handles POST and GET requests for the useless log
|
from django.shortcuts import render_to_response
from django.http import HttpResponse
from app.models import Event
def home(request):
if request.method == 'POST':
if request.POST.has_key('what') and len(request.POST['what']) > 0:
e = Event.objects.create(what=request.POST['what'])
e.save()
else:
return HttpResponse('Illegal Request')
context = {}
events = Event.objects.all()
return render_to_response('log.html', {'events': events})
|
<commit_before><commit_msg>Add a view that handles POST and GET requests for the useless log<commit_after>
|
from django.shortcuts import render_to_response
from django.http import HttpResponse
from app.models import Event
def home(request):
if request.method == 'POST':
if request.POST.has_key('what') and len(request.POST['what']) > 0:
e = Event.objects.create(what=request.POST['what'])
e.save()
else:
return HttpResponse('Illegal Request')
context = {}
events = Event.objects.all()
return render_to_response('log.html', {'events': events})
|
Add a view that handles POST and GET requests for the useless log
from django.shortcuts import render_to_response
from django.http import HttpResponse
from app.models import Event
def home(request):
if request.method == 'POST':
if request.POST.has_key('what') and len(request.POST['what']) > 0:
e = Event.objects.create(what=request.POST['what'])
e.save()
else:
return HttpResponse('Illegal Request')
context = {}
events = Event.objects.all()
return render_to_response('log.html', {'events': events})
|
<commit_before><commit_msg>Add a view that handles POST and GET requests for the useless log<commit_after>from django.shortcuts import render_to_response
from django.http import HttpResponse
from app.models import Event
def home(request):
if request.method == 'POST':
if request.POST.has_key('what') and len(request.POST['what']) > 0:
e = Event.objects.create(what=request.POST['what'])
e.save()
else:
return HttpResponse('Illegal Request')
context = {}
events = Event.objects.all()
return render_to_response('log.html', {'events': events})
|
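QueryDict.has_key is Python-2-only and was removed in Python 3; on current Django the same check is written with .get or the in operator, and render replaces render_to_response. A rough modern equivalent — URL wiring and the log.html template are assumed:

from django.http import HttpResponse
from django.shortcuts import render

from app.models import Event

def home(request):
    if request.method == 'POST':
        what = request.POST.get('what', '')
        if not what:
            return HttpResponse('Illegal Request')
        # objects.create() already saves; no separate save() call is needed.
        Event.objects.create(what=what)
    events = Event.objects.all()
    return render(request, 'log.html', {'events': events})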
|
299cb28627228465a1cde6bb21682bc91314cdf6
|
tools/clang-format/clang-format-sublime.py
|
tools/clang-format/clang-format-sublime.py
|
# This file is a minimal clang-format sublime-integration. To install:
# - Change 'binary' if clang-format is not on the path (see below).
# - Put this file into your sublime Packages directory, e.g. on Linux:
# ~/.config/sublime-text-2/Packages/User/clang-format-sublime.py
# - Add a key binding:
# { "keys": ["ctrl+shift+c"], "command": "clang_format" },
#
# With this integration you can press the bound key and clang-format will
# format the current lines and selections for all cursor positions. The lines
# or regions are extended to the next bigger syntactic entities.
#
# It operates on the current, potentially unsaved buffer and does not create
# or save any files. To revert a formatting, just undo.
import sublime
import sublime_plugin
import subprocess
# Change this to the full path if clang-format is not on the path.
binary = 'clang-format'
# Change this to format according to other formatting styles
# (see clang-format -help).
style = 'LLVM'
class ClangFormatCommand(sublime_plugin.TextCommand):
def run(self, edit):
encoding = self.view.encoding()
if encoding == 'Undefined':
encoding = 'utf-8'
regions = []
command = [binary, '-style', style]
for region in self.view.sel():
regions.append(region)
region_offset = min(region.a, region.b)
region_length = abs(region.b - region.a)
command.extend(['-offset', str(region_offset),
'-length', str(region_length)])
old_viewport_position = self.view.viewport_position()
buf = self.view.substr(sublime.Region(0, self.view.size()))
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, stdin=subprocess.PIPE)
output, error = p.communicate(buf.encode(encoding))
if not error:
self.view.replace(
edit, sublime.Region(0, self.view.size()),
output.decode(encoding))
self.view.sel().clear()
for region in regions:
self.view.sel().add(region)
# FIXME: Without the 10ms delay, the viewport sometimes jumps.
sublime.set_timeout(lambda: self.view.set_viewport_position(
old_viewport_position, False), 10)
else:
print error
|
Add basic clang-format integration for sublime text.
|
Add basic clang-format integration for sublime text.
git-svn-id: ffe668792ed300d6c2daa1f6eba2e0aa28d7ec6c@182015 91177308-0d34-0410-b5e6-96231b3b80d8
|
Python
|
apache-2.0
|
llvm-mirror/clang,apple/swift-clang,llvm-mirror/clang,apple/swift-clang,llvm-mirror/clang,apple/swift-clang,llvm-mirror/clang,llvm-mirror/clang,apple/swift-clang,apple/swift-clang,apple/swift-clang,apple/swift-clang,llvm-mirror/clang,apple/swift-clang,apple/swift-clang,apple/swift-clang,llvm-mirror/clang,llvm-mirror/clang,llvm-mirror/clang,llvm-mirror/clang
|
Add basic clang-format integration for sublime text.
git-svn-id: ffe668792ed300d6c2daa1f6eba2e0aa28d7ec6c@182015 91177308-0d34-0410-b5e6-96231b3b80d8
|
# This file is a minimal clang-format sublime-integration. To install:
# - Change 'binary' if clang-format is not on the path (see below).
# - Put this file into your sublime Packages directory, e.g. on Linux:
# ~/.config/sublime-text-2/Packages/User/clang-format-sublime.py
# - Add a key binding:
# { "keys": ["ctrl+shift+c"], "command": "clang_format" },
#
# With this integration you can press the bound key and clang-format will
# format the current lines and selections for all cursor positions. The lines
# or regions are extended to the next bigger syntactic entities.
#
# It operates on the current, potentially unsaved buffer and does not create
# or save any files. To revert a formatting, just undo.
import sublime
import sublime_plugin
import subprocess
# Change this to the full path if clang-format is not on the path.
binary = 'clang-format'
# Change this to format according to other formatting styles
# (see clang-format -help).
style = 'LLVM'
class ClangFormatCommand(sublime_plugin.TextCommand):
def run(self, edit):
encoding = self.view.encoding()
if encoding == 'Undefined':
encoding = 'utf-8'
regions = []
command = [binary, '-style', style]
for region in self.view.sel():
regions.append(region)
region_offset = min(region.a, region.b)
region_length = abs(region.b - region.a)
command.extend(['-offset', str(region_offset),
'-length', str(region_length)])
old_viewport_position = self.view.viewport_position()
buf = self.view.substr(sublime.Region(0, self.view.size()))
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, stdin=subprocess.PIPE)
output, error = p.communicate(buf.encode(encoding))
if not error:
self.view.replace(
edit, sublime.Region(0, self.view.size()),
output.decode(encoding))
self.view.sel().clear()
for region in regions:
self.view.sel().add(region)
# FIXME: Without the 10ms delay, the viewport sometimes jumps.
sublime.set_timeout(lambda: self.view.set_viewport_position(
old_viewport_position, False), 10)
else:
print error
|
<commit_before><commit_msg>Add basic clang-format integration for sublime text.
git-svn-id: ffe668792ed300d6c2daa1f6eba2e0aa28d7ec6c@182015 91177308-0d34-0410-b5e6-96231b3b80d8<commit_after>
|
# This file is a minimal clang-format sublime-integration. To install:
# - Change 'binary' if clang-format is not on the path (see below).
# - Put this file into your sublime Packages directory, e.g. on Linux:
# ~/.config/sublime-text-2/Packages/User/clang-format-sublime.py
# - Add a key binding:
# { "keys": ["ctrl+shift+c"], "command": "clang_format" },
#
# With this integration you can press the bound key and clang-format will
# format the current lines and selections for all cursor positions. The lines
# or regions are extended to the next bigger syntactic entities.
#
# It operates on the current, potentially unsaved buffer and does not create
# or save any files. To revert a formatting, just undo.
import sublime
import sublime_plugin
import subprocess
# Change this to the full path if clang-format is not on the path.
binary = 'clang-format'
# Change this to format according to other formatting styles
# (see clang-format -help).
style = 'LLVM'
class ClangFormatCommand(sublime_plugin.TextCommand):
def run(self, edit):
encoding = self.view.encoding()
if encoding == 'Undefined':
encoding = 'utf-8'
regions = []
command = [binary, '-style', style]
for region in self.view.sel():
regions.append(region)
region_offset = min(region.a, region.b)
region_length = abs(region.b - region.a)
command.extend(['-offset', str(region_offset),
'-length', str(region_length)])
old_viewport_position = self.view.viewport_position()
buf = self.view.substr(sublime.Region(0, self.view.size()))
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, stdin=subprocess.PIPE)
output, error = p.communicate(buf.encode(encoding))
if not error:
self.view.replace(
edit, sublime.Region(0, self.view.size()),
output.decode(encoding))
self.view.sel().clear()
for region in regions:
self.view.sel().add(region)
# FIXME: Without the 10ms delay, the viewport sometimes jumps.
sublime.set_timeout(lambda: self.view.set_viewport_position(
old_viewport_position, False), 10)
else:
print error
|
Add basic clang-format integration for sublime text.
git-svn-id: ffe668792ed300d6c2daa1f6eba2e0aa28d7ec6c@182015 91177308-0d34-0410-b5e6-96231b3b80d8
# This file is a minimal clang-format sublime-integration. To install:
# - Change 'binary' if clang-format is not on the path (see below).
# - Put this file into your sublime Packages directory, e.g. on Linux:
# ~/.config/sublime-text-2/Packages/User/clang-format-sublime.py
# - Add a key binding:
# { "keys": ["ctrl+shift+c"], "command": "clang_format" },
#
# With this integration you can press the bound key and clang-format will
# format the current lines and selections for all cursor positions. The lines
# or regions are extended to the next bigger syntactic entities.
#
# It operates on the current, potentially unsaved buffer and does not create
# or save any files. To revert a formatting, just undo.
import sublime
import sublime_plugin
import subprocess
# Change this to the full path if clang-format is not on the path.
binary = 'clang-format'
# Change this to format according to other formatting styles
# (see clang-format -help).
style = 'LLVM'
class ClangFormatCommand(sublime_plugin.TextCommand):
def run(self, edit):
encoding = self.view.encoding()
if encoding == 'Undefined':
encoding = 'utf-8'
regions = []
command = [binary, '-style', style]
for region in self.view.sel():
regions.append(region)
region_offset = min(region.a, region.b)
region_length = abs(region.b - region.a)
command.extend(['-offset', str(region_offset),
'-length', str(region_length)])
old_viewport_position = self.view.viewport_position()
buf = self.view.substr(sublime.Region(0, self.view.size()))
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, stdin=subprocess.PIPE)
output, error = p.communicate(buf.encode(encoding))
if not error:
self.view.replace(
edit, sublime.Region(0, self.view.size()),
output.decode(encoding))
self.view.sel().clear()
for region in regions:
self.view.sel().add(region)
# FIXME: Without the 10ms delay, the viewport sometimes jumps.
sublime.set_timeout(lambda: self.view.set_viewport_position(
old_viewport_position, False), 10)
else:
print error
|
<commit_before><commit_msg>Add basic clang-format integration for sublime text.
git-svn-id: ffe668792ed300d6c2daa1f6eba2e0aa28d7ec6c@182015 91177308-0d34-0410-b5e6-96231b3b80d8<commit_after># This file is a minimal clang-format sublime-integration. To install:
# - Change 'binary' if clang-format is not on the path (see below).
# - Put this file into your sublime Packages directory, e.g. on Linux:
# ~/.config/sublime-text-2/Packages/User/clang-format-sublime.py
# - Add a key binding:
# { "keys": ["ctrl+shift+c"], "command": "clang_format" },
#
# With this integration you can press the bound key and clang-format will
# format the current lines and selections for all cursor positions. The lines
# or regions are extended to the next bigger syntactic entities.
#
# It operates on the current, potentially unsaved buffer and does not create
# or save any files. To revert a formatting, just undo.
import sublime
import sublime_plugin
import subprocess
# Change this to the full path if clang-format is not on the path.
binary = 'clang-format'
# Change this to format according to other formatting styles
# (see clang-format -help).
style = 'LLVM'
class ClangFormatCommand(sublime_plugin.TextCommand):
def run(self, edit):
encoding = self.view.encoding()
if encoding == 'Undefined':
encoding = 'utf-8'
regions = []
command = [binary, '-style', style]
for region in self.view.sel():
regions.append(region)
region_offset = min(region.a, region.b)
region_length = abs(region.b - region.a)
command.extend(['-offset', str(region_offset),
'-length', str(region_length)])
old_viewport_position = self.view.viewport_position()
buf = self.view.substr(sublime.Region(0, self.view.size()))
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, stdin=subprocess.PIPE)
output, error = p.communicate(buf.encode(encoding))
if not error:
self.view.replace(
edit, sublime.Region(0, self.view.size()),
output.decode(encoding))
self.view.sel().clear()
for region in regions:
self.view.sel().add(region)
# FIXME: Without the 10ms delay, the viewport sometimes jumps.
sublime.set_timeout(lambda: self.view.set_viewport_position(
old_viewport_position, False), 10)
else:
print error
|
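print error in the plugin above is Python 2 syntax, which matches Sublime Text 2's embedded interpreter; under Sublime Text 3+ (Python 3) the error branch needs the function form, and the pipe output arrives as bytes. A standalone sketch of the Python 3 pattern, assuming clang-format is on the PATH:

import subprocess

p = subprocess.Popen(['clang-format', '--version'],
                     stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = p.communicate()
if error:
    # decode the bytes before printing under Python 3
    print(error.decode('utf-8', errors='replace'))
else:
    print(output.decode('utf-8'))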
|
5ec8f36c2831c0870afbf2926e3fb473cea4780d
|
document/migrations/0008_auto_20160519_2253.py
|
document/migrations/0008_auto_20160519_2253.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-05-19 20:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('document', '0007_auto_20160518_1825'),
]
operations = [
migrations.AlterModelOptions(
name='kamerstuk',
options={'ordering': ['id_sub'], 'verbose_name_plural': 'Kamerstukken'},
),
migrations.AlterField(
model_name='kamerstuk',
name='id_main',
field=models.CharField(blank=True, default='', max_length=40),
preserve_default=False,
),
migrations.AlterField(
model_name='kamerstuk',
name='id_sub',
field=models.CharField(blank=True, default='', max_length=40),
preserve_default=False,
),
]
|
Change kamerstuk id from integer to string
|
Change kamerstuk id from integer to string
Anything can be the id. No documentation on standards.
|
Python
|
mit
|
openkamer/openkamer,openkamer/openkamer,openkamer/openkamer,openkamer/openkamer
|
Change kamerstuk id from integer to string
Anything can be the id. No documentation on standards.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-05-19 20:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('document', '0007_auto_20160518_1825'),
]
operations = [
migrations.AlterModelOptions(
name='kamerstuk',
options={'ordering': ['id_sub'], 'verbose_name_plural': 'Kamerstukken'},
),
migrations.AlterField(
model_name='kamerstuk',
name='id_main',
field=models.CharField(blank=True, default='', max_length=40),
preserve_default=False,
),
migrations.AlterField(
model_name='kamerstuk',
name='id_sub',
field=models.CharField(blank=True, default='', max_length=40),
preserve_default=False,
),
]
|
<commit_before><commit_msg>Change kamerstuk id from integer to string
Anything can be the id. No documentation on standards.<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-05-19 20:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('document', '0007_auto_20160518_1825'),
]
operations = [
migrations.AlterModelOptions(
name='kamerstuk',
options={'ordering': ['id_sub'], 'verbose_name_plural': 'Kamerstukken'},
),
migrations.AlterField(
model_name='kamerstuk',
name='id_main',
field=models.CharField(blank=True, default='', max_length=40),
preserve_default=False,
),
migrations.AlterField(
model_name='kamerstuk',
name='id_sub',
field=models.CharField(blank=True, default='', max_length=40),
preserve_default=False,
),
]
|
Change kamerstuk id from integer to string
Anything can be the id. No documentation on standards.
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-05-19 20:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('document', '0007_auto_20160518_1825'),
]
operations = [
migrations.AlterModelOptions(
name='kamerstuk',
options={'ordering': ['id_sub'], 'verbose_name_plural': 'Kamerstukken'},
),
migrations.AlterField(
model_name='kamerstuk',
name='id_main',
field=models.CharField(blank=True, default='', max_length=40),
preserve_default=False,
),
migrations.AlterField(
model_name='kamerstuk',
name='id_sub',
field=models.CharField(blank=True, default='', max_length=40),
preserve_default=False,
),
]
|
<commit_before><commit_msg>Change kamerstuk id from integer to string
Anything can be the id. No documentation on standards.<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-05-19 20:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('document', '0007_auto_20160518_1825'),
]
operations = [
migrations.AlterModelOptions(
name='kamerstuk',
options={'ordering': ['id_sub'], 'verbose_name_plural': 'Kamerstukken'},
),
migrations.AlterField(
model_name='kamerstuk',
name='id_main',
field=models.CharField(blank=True, default='', max_length=40),
preserve_default=False,
),
migrations.AlterField(
model_name='kamerstuk',
name='id_sub',
field=models.CharField(blank=True, default='', max_length=40),
preserve_default=False,
),
]
|
|
3c301335a4c39b1099c9fd29d7eebbf6506c0979
|
codes/20180922/test.py
|
codes/20180922/test.py
|
# coding: utf-8
import matplotlib.pyplot as plt
import numpy as np
import os
from tqdm import trange
os.makedirs('./images/', exist_ok=True)
i = 0
for i in trange(10000, desc='saving images'):
img = np.full((64, 64, 3), 128)
plt.imshow(img / 255.)
plt.axis('off')
plt.tick_params(labelbottom=False, labelleft=False, labelright=False, labeltop=False)
plt.savefig('./images/%s.jpg'%str(i).zfill(4), bbox_inches='tight', pad_inches=0)
plt.close()
|
Add code that makes many images being clear memory.
|
Add code that makes many images being clear memory.
|
Python
|
mit
|
iShoto/testpy
|
Add code that makes many images being clear memory.
|
# coding: utf-8
import matplotlib.pyplot as plt
import numpy as np
import os
from tqdm import trange
os.makedirs('./images/', exist_ok=True)
i = 0
for i in trange(10000, desc='saving images'):
img = np.full((64, 64, 3), 128)
plt.imshow(img / 255.)
plt.axis('off')
plt.tick_params(labelbottom=False, labelleft=False, labelright=False, labeltop=False)
plt.savefig('./images/%s.jpg'%str(i).zfill(4), bbox_inches='tight', pad_inches=0)
plt.close()
|
<commit_before><commit_msg>Add code that makes many images being clear memory.<commit_after>
|
# coding: utf-8
import matplotlib.pyplot as plt
import numpy as np
import os
from tqdm import trange
os.makedirs('./images/', exist_ok=True)
i = 0
for i in trange(10000, desc='saving images'):
img = np.full((64, 64, 3), 128)
plt.imshow(img / 255.)
plt.axis('off')
plt.tick_params(labelbottom=False, labelleft=False, labelright=False, labeltop=False)
plt.savefig('./images/%s.jpg'%str(i).zfill(4), bbox_inches='tight', pad_inches=0)
plt.close()
|
Add code that makes many images being clear memory.
# coding: utf-8
import matplotlib.pyplot as plt
import numpy as np
import os
from tqdm import trange
os.makedirs('./images/', exist_ok=True)
i = 0
for i in trange(10000, desc='saving images'):
img = np.full((64, 64, 3), 128)
plt.imshow(img / 255.)
plt.axis('off')
plt.tick_params(labelbottom=False, labelleft=False, labelright=False, labeltop=False)
plt.savefig('./images/%s.jpg'%str(i).zfill(4), bbox_inches='tight', pad_inches=0)
plt.close()
|
<commit_before><commit_msg>Add code that makes many images being clear memory.<commit_after># coding: utf-8
import matplotlib.pyplot as plt
import numpy as np
import os
from tqdm import trange
os.makedirs('./images/', exist_ok=True)
i = 0
for i in trange(10000, desc='saving images'):
img = np.full((64, 64, 3), 128)
plt.imshow(img / 255.)
plt.axis('off')
plt.tick_params(labelbottom=False, labelleft=False, labelright=False, labeltop=False)
plt.savefig('./images/%s.jpg'%str(i).zfill(4), bbox_inches='tight', pad_inches=0)
plt.close()
|
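The loop above sidesteps pyplot's figure-accumulation leak by calling plt.close() each iteration; when no axes decoration is wanted at all, plt.imsave writes the array directly and never creates a Figure, as in this sketch (JPEG output assumes Pillow is installed):

import os
import numpy as np
import matplotlib.pyplot as plt

os.makedirs('./images/', exist_ok=True)
img = np.full((64, 64, 3), 128, dtype=np.uint8)
for i in range(10):
    # imsave bypasses the Figure machinery entirely: nothing to close.
    plt.imsave('./images/%s.jpg' % str(i).zfill(4), img)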
|
b8143d73d8f557ea9612b056c457d31a20de7aae
|
h2o-py/tests/testdir_algos/deepwater/pyunit_inception_bn_feature_extraction.py
|
h2o-py/tests/testdir_algos/deepwater/pyunit_inception_bn_feature_extraction.py
|
from __future__ import print_function
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils
from h2o.estimators.deepwater import H2ODeepWaterEstimator
import urllib
def deepwater_inception_bn_feature_extraction():
if not H2ODeepWaterEstimator.available(): return
frame = h2o.import_file(pyunit_utils.locate("bigdata/laptop/deepwater/imagenet/cat_dog_mouse.csv"))
print(frame.head(5))
nclasses = frame[1].nlevels()[0]
print("Downloading the model")
if not os.path.exists("model.json"):
urllib.urlretrieve ("https://raw.githubusercontent.com/h2oai/deepwater/master/mxnet/src/main/resources/deepwater/backends/mxnet/models/Inception/Inception_BN-symbol.json", "model.json")
if not os.path.exists("model.params"):
urllib.urlretrieve ("https://raw.githubusercontent.com/h2oai/deepwater/master/mxnet/src/main/resources/deepwater/backends/mxnet/models/Inception/Inception_BN-0039.params", "model.params")
if not os.path.exists("mean_224.nd"):
urllib.urlretrieve ("https://raw.githubusercontent.com/h2oai/deepwater/master/mxnet/src/main/resources/deepwater/backends/mxnet/models/Inception/mean_224.nd", "mean_224.nd")
print("Importing the model architecture for training in H2O")
model = H2ODeepWaterEstimator(epochs=0, ## no training - just load the state - NOTE: training for this 3-class problem wouldn't work since the model has 1k classes
mini_batch_size=32, ## mini-batch size is used for scoring
## all parameters below are needed
network='user',
network_definition_file=os.getcwd() + "/model.json",
network_parameters_file=os.getcwd() + "/model.params",
mean_image_file=os.getcwd() + "/mean_224.nd",
image_shape=[224,224],
channels=3
)
model.train(x=[0],y=1, training_frame=frame) ## must call train() to initialize the model, but it isn't training
## Extract deep features from final layer before going into Softmax.
extracted_features = model.deepfeatures(frame, "global_pool_output")
print(extracted_features.ncol)
print(extracted_features.head(1,1024))
## Cleanup
os.remove("model.json")
os.remove("model.params")
os.remove("mean_224.nd")
assert extracted_features.ncol == 1024
if __name__ == "__main__":
pyunit_utils.standalone_test(deepwater_inception_bn_feature_extraction)
else:
deepwater_inception_bn_feature_extraction()
|
Add feature extraction (transfer learning) for Inception 1k model for cat/dog/mouse dataset (no training).
|
Add feature extraction (transfer learning) for Inception 1k model for cat/dog/mouse dataset (no training).
|
Python
|
apache-2.0
|
h2oai/h2o-dev,spennihana/h2o-3,mathemage/h2o-3,h2oai/h2o-3,h2oai/h2o-3,mathemage/h2o-3,mathemage/h2o-3,michalkurka/h2o-3,michalkurka/h2o-3,h2oai/h2o-3,h2oai/h2o-3,h2oai/h2o-3,michalkurka/h2o-3,h2oai/h2o-dev,michalkurka/h2o-3,mathemage/h2o-3,michalkurka/h2o-3,h2oai/h2o-3,h2oai/h2o-dev,spennihana/h2o-3,h2oai/h2o-dev,h2oai/h2o-dev,spennihana/h2o-3,spennihana/h2o-3,spennihana/h2o-3,mathemage/h2o-3,spennihana/h2o-3,mathemage/h2o-3,h2oai/h2o-dev,mathemage/h2o-3,michalkurka/h2o-3,h2oai/h2o-dev,h2oai/h2o-3,h2oai/h2o-3,spennihana/h2o-3,michalkurka/h2o-3
|
Add feature extraction (transfer learning) for Inception 1k model for cat/dog/mouse dataset (no training).
|
from __future__ import print_function
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils
from h2o.estimators.deepwater import H2ODeepWaterEstimator
import urllib
def deepwater_inception_bn_feature_extraction():
if not H2ODeepWaterEstimator.available(): return
frame = h2o.import_file(pyunit_utils.locate("bigdata/laptop/deepwater/imagenet/cat_dog_mouse.csv"))
print(frame.head(5))
nclasses = frame[1].nlevels()[0]
print("Downloading the model")
if not os.path.exists("model.json"):
urllib.urlretrieve ("https://raw.githubusercontent.com/h2oai/deepwater/master/mxnet/src/main/resources/deepwater/backends/mxnet/models/Inception/Inception_BN-symbol.json", "model.json")
if not os.path.exists("model.params"):
urllib.urlretrieve ("https://raw.githubusercontent.com/h2oai/deepwater/master/mxnet/src/main/resources/deepwater/backends/mxnet/models/Inception/Inception_BN-0039.params", "model.params")
if not os.path.exists("mean_224.nd"):
urllib.urlretrieve ("https://raw.githubusercontent.com/h2oai/deepwater/master/mxnet/src/main/resources/deepwater/backends/mxnet/models/Inception/mean_224.nd", "mean_224.nd")
print("Importing the model architecture for training in H2O")
model = H2ODeepWaterEstimator(epochs=0, ## no training - just load the state - NOTE: training for this 3-class problem wouldn't work since the model has 1k classes
mini_batch_size=32, ## mini-batch size is used for scoring
## all parameters below are needed
network='user',
network_definition_file=os.getcwd() + "/model.json",
network_parameters_file=os.getcwd() + "/model.params",
mean_image_file=os.getcwd() + "/mean_224.nd",
image_shape=[224,224],
channels=3
)
model.train(x=[0],y=1, training_frame=frame) ## must call train() to initialize the model, but it isn't training
## Extract deep features from final layer before going into Softmax.
extracted_features = model.deepfeatures(frame, "global_pool_output")
print(extracted_features.ncol)
print(extracted_features.head(1,1024))
## Cleanup
os.remove("model.json")
os.remove("model.params")
os.remove("mean_224.nd")
assert extracted_features.ncol == 1024
if __name__ == "__main__":
pyunit_utils.standalone_test(deepwater_inception_bn_feature_extraction)
else:
deepwater_inception_bn_feature_extraction()
|
<commit_before><commit_msg>Add feature extraction (transfer learning) for Inception 1k model for cat/dog/mouse dataset (no training).<commit_after>
|
from __future__ import print_function
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils
from h2o.estimators.deepwater import H2ODeepWaterEstimator
import urllib
def deepwater_inception_bn_feature_extraction():
if not H2ODeepWaterEstimator.available(): return
frame = h2o.import_file(pyunit_utils.locate("bigdata/laptop/deepwater/imagenet/cat_dog_mouse.csv"))
print(frame.head(5))
nclasses = frame[1].nlevels()[0]
print("Downloading the model")
if not os.path.exists("model.json"):
urllib.urlretrieve ("https://raw.githubusercontent.com/h2oai/deepwater/master/mxnet/src/main/resources/deepwater/backends/mxnet/models/Inception/Inception_BN-symbol.json", "model.json")
if not os.path.exists("model.params"):
urllib.urlretrieve ("https://raw.githubusercontent.com/h2oai/deepwater/master/mxnet/src/main/resources/deepwater/backends/mxnet/models/Inception/Inception_BN-0039.params", "model.params")
if not os.path.exists("mean_224.nd"):
urllib.urlretrieve ("https://raw.githubusercontent.com/h2oai/deepwater/master/mxnet/src/main/resources/deepwater/backends/mxnet/models/Inception/mean_224.nd", "mean_224.nd")
print("Importing the model architecture for training in H2O")
model = H2ODeepWaterEstimator(epochs=0, ## no training - just load the state - NOTE: training for this 3-class problem wouldn't work since the model has 1k classes
mini_batch_size=32, ## mini-batch size is used for scoring
## all parameters below are needed
network='user',
network_definition_file=os.getcwd() + "/model.json",
network_parameters_file=os.getcwd() + "/model.params",
mean_image_file=os.getcwd() + "/mean_224.nd",
image_shape=[224,224],
channels=3
)
model.train(x=[0],y=1, training_frame=frame) ## must call train() to initialize the model, but it isn't training
## Extract deep features from final layer before going into Softmax.
extracted_features = model.deepfeatures(frame, "global_pool_output")
print(extracted_features.ncol)
print(extracted_features.head(1,1024))
## Cleanup
os.remove("model.json")
os.remove("model.params")
os.remove("mean_224.nd")
assert extracted_features.ncol == 1024
if __name__ == "__main__":
pyunit_utils.standalone_test(deepwater_inception_bn_feature_extraction)
else:
deepwater_inception_bn_feature_extraction()
|
Add feature extraction (transfer learning) for Inception 1k model for cat/dog/mouse dataset (no training).from __future__ import print_function
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils
from h2o.estimators.deepwater import H2ODeepWaterEstimator
import urllib
def deepwater_inception_bn_feature_extraction():
if not H2ODeepWaterEstimator.available(): return
frame = h2o.import_file(pyunit_utils.locate("bigdata/laptop/deepwater/imagenet/cat_dog_mouse.csv"))
print(frame.head(5))
nclasses = frame[1].nlevels()[0]
print("Downloading the model")
if not os.path.exists("model.json"):
urllib.urlretrieve ("https://raw.githubusercontent.com/h2oai/deepwater/master/mxnet/src/main/resources/deepwater/backends/mxnet/models/Inception/Inception_BN-symbol.json", "model.json")
if not os.path.exists("model.params"):
urllib.urlretrieve ("https://raw.githubusercontent.com/h2oai/deepwater/master/mxnet/src/main/resources/deepwater/backends/mxnet/models/Inception/Inception_BN-0039.params", "model.params")
if not os.path.exists("mean_224.nd"):
urllib.urlretrieve ("https://raw.githubusercontent.com/h2oai/deepwater/master/mxnet/src/main/resources/deepwater/backends/mxnet/models/Inception/mean_224.nd", "mean_224.nd")
print("Importing the model architecture for training in H2O")
model = H2ODeepWaterEstimator(epochs=0, ## no training - just load the state - NOTE: training for this 3-class problem wouldn't work since the model has 1k classes
mini_batch_size=32, ## mini-batch size is used for scoring
## all parameters below are needed
network='user',
network_definition_file=os.getcwd() + "/model.json",
network_parameters_file=os.getcwd() + "/model.params",
mean_image_file=os.getcwd() + "/mean_224.nd",
image_shape=[224,224],
channels=3
)
model.train(x=[0],y=1, training_frame=frame) ## must call train() to initialize the model, but it isn't training
## Extract deep features from final layer before going into Softmax.
extracted_features = model.deepfeatures(frame, "global_pool_output")
print(extracted_features.ncol)
print(extracted_features.head(1,1024))
## Cleanup
os.remove("model.json")
os.remove("model.params")
os.remove("mean_224.nd")
assert extracted_features.ncol == 1024
if __name__ == "__main__":
pyunit_utils.standalone_test(deepwater_inception_bn_feature_extraction)
else:
deepwater_inception_bn_feature_extraction()
|
<commit_before><commit_msg>Add feature extraction (transfer learning) for Inception 1k model for cat/dog/mouse dataset (no training).<commit_after>from __future__ import print_function
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils
from h2o.estimators.deepwater import H2ODeepWaterEstimator
import urllib
def deepwater_inception_bn_feature_extraction():
if not H2ODeepWaterEstimator.available(): return
frame = h2o.import_file(pyunit_utils.locate("bigdata/laptop/deepwater/imagenet/cat_dog_mouse.csv"))
print(frame.head(5))
nclasses = frame[1].nlevels()[0]
print("Downloading the model")
if not os.path.exists("model.json"):
urllib.urlretrieve ("https://raw.githubusercontent.com/h2oai/deepwater/master/mxnet/src/main/resources/deepwater/backends/mxnet/models/Inception/Inception_BN-symbol.json", "model.json")
if not os.path.exists("model.params"):
urllib.urlretrieve ("https://raw.githubusercontent.com/h2oai/deepwater/master/mxnet/src/main/resources/deepwater/backends/mxnet/models/Inception/Inception_BN-0039.params", "model.params")
if not os.path.exists("mean_224.nd"):
urllib.urlretrieve ("https://raw.githubusercontent.com/h2oai/deepwater/master/mxnet/src/main/resources/deepwater/backends/mxnet/models/Inception/mean_224.nd", "mean_224.nd")
print("Importing the model architecture for training in H2O")
model = H2ODeepWaterEstimator(epochs=0, ## no training - just load the state - NOTE: training for this 3-class problem wouldn't work since the model has 1k classes
mini_batch_size=32, ## mini-batch size is used for scoring
## all parameters below are needed
network='user',
network_definition_file=os.getcwd() + "/model.json",
network_parameters_file=os.getcwd() + "/model.params",
mean_image_file=os.getcwd() + "/mean_224.nd",
image_shape=[224,224],
channels=3
)
model.train(x=[0],y=1, training_frame=frame) ## must call train() to initialize the model, but it isn't training
## Extract deep features from final layer before going into Softmax.
extracted_features = model.deepfeatures(frame, "global_pool_output")
print(extracted_features.ncol)
print(extracted_features.head(1,1024))
## Cleanup
os.remove("model.json")
os.remove("model.params")
os.remove("mean_224.nd")
assert extracted_features.ncol == 1024
if __name__ == "__main__":
pyunit_utils.standalone_test(deepwater_inception_bn_feature_extraction)
else:
deepwater_inception_bn_feature_extraction()
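A possible follow-up to the extraction above — a sketch only, not part of the test: the 1024 deep features can feed a small downstream learner for the 3-class problem. This assumes the frames from the test are still in the H2O cluster and uses the standard h2o-py GLM estimator.

from h2o.estimators.glm import H2OGeneralizedLinearEstimator

train = extracted_features.cbind(frame[1])  # append the class label column
glm = H2OGeneralizedLinearEstimator(family="multinomial")
glm.train(x=extracted_features.columns, y=train.columns[-1], training_frame=train)
print(glm.model_performance(train))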
|
|
3edec9bf4b71e2cfde537f5f443d184323e5d8f8
|
GetCallLogs.py
|
GetCallLogs.py
|
from twilio.rest import TwilioRestClient
import config
# To find these visit https://www.twilio.com/user/account
account_sid = config.account_sid
auth_token = config.auth_token
client = TwilioRestClient(account_sid, auth_token)
for call in client.calls.list():
print("From: " + call.from_formatted + " To: " + call.to_formatted)
|
Add snippet to retrieve call logs from Twilio account
|
Add snippet to retrieve call logs from Twilio account
|
Python
|
mit
|
mattstibbs/twilio-snippets
|
Add snippet to retrieve call logs from Twilio account
|
from twilio.rest import TwilioRestClient
import config
# To find these visit https://www.twilio.com/user/account
account_sid = config.account_sid
auth_token = config.auth_token
client = TwilioRestClient(account_sid, auth_token)
for call in client.calls.list():
print("From: " + call.from_formatted + " To: " + call.to_formatted)
|
<commit_before><commit_msg>Add snippet to retrieve call logs from Twilio account<commit_after>
|
from twilio.rest import TwilioRestClient
import config
# To find these visit https://www.twilio.com/user/account
account_sid = config.account_sid
auth_token = config.auth_token
client = TwilioRestClient(account_sid, auth_token)
for call in client.calls.list():
print("From: " + call.from_formatted + " To: " + call.to_formatted)
|
Add snippet to retrieve call logs from Twilio accountfrom twilio.rest import TwilioRestClient
import config
# To find these visit https://www.twilio.com/user/account
account_sid = config.account_sid
auth_token = config.auth_token
client = TwilioRestClient(account_sid, auth_token)
for call in client.calls.list():
print("From: " + call.from_formatted + " To: " + call.to_formatted)
|
<commit_before><commit_msg>Add snippet to retrieve call logs from Twilio account<commit_after>from twilio.rest import TwilioRestClient
import config
# To find these visit https://www.twilio.com/user/account
account_sid = config.account_sid
auth_token = config.auth_token
client = TwilioRestClient(account_sid, auth_token)
for call in client.calls.list():
print("From: " + call.from_formatted + " To: " + call.to_formatted)
|
|
11a962099ea8735227623443c62c0248b98e4805
|
examples/hwapi/hwconfig_z_96b_carbon.py
|
examples/hwapi/hwconfig_z_96b_carbon.py
|
from machine import Pin, Signal
# 96Boards Carbon board
# USR1 - User controlled led, connected to PD2
# USR2 - User controlled led, connected to PA15
# BT - Bluetooth indicator, connected to PB5.
# Note - 96b_carbon uses (at the time of writing) non-standard
# for Zephyr port device naming convention.
LED = Signal(("GPIOA", 15), Pin.OUT)
|
Add config for Zephyr port of 96Boards Carbon.
|
examples/hwapi: Add config for Zephyr port of 96Boards Carbon.
|
Python
|
mit
|
infinnovation/micropython,HenrikSolver/micropython,henriknelson/micropython,cwyark/micropython,tralamazza/micropython,tobbad/micropython,alex-robbins/micropython,deshipu/micropython,chrisdearman/micropython,hiway/micropython,Timmenem/micropython,adafruit/circuitpython,selste/micropython,toolmacher/micropython,MrSurly/micropython-esp32,ryannathans/micropython,chrisdearman/micropython,PappaPeppar/micropython,deshipu/micropython,infinnovation/micropython,dmazzella/micropython,TDAbboud/micropython,pozetroninc/micropython,micropython/micropython-esp32,micropython/micropython-esp32,micropython/micropython-esp32,adafruit/circuitpython,torwag/micropython,HenrikSolver/micropython,MrSurly/micropython,ryannathans/micropython,hiway/micropython,TDAbboud/micropython,PappaPeppar/micropython,henriknelson/micropython,tralamazza/micropython,ryannathans/micropython,selste/micropython,pozetroninc/micropython,MrSurly/micropython,tobbad/micropython,SHA2017-badge/micropython-esp32,SHA2017-badge/micropython-esp32,HenrikSolver/micropython,henriknelson/micropython,AriZuu/micropython,chrisdearman/micropython,micropython/micropython-esp32,selste/micropython,pozetroninc/micropython,deshipu/micropython,Timmenem/micropython,blazewicz/micropython,dmazzella/micropython,lowRISC/micropython,cwyark/micropython,torwag/micropython,torwag/micropython,pramasoul/micropython,AriZuu/micropython,adafruit/circuitpython,puuu/micropython,adafruit/circuitpython,MrSurly/micropython-esp32,kerneltask/micropython,blazewicz/micropython,puuu/micropython,MrSurly/micropython-esp32,TDAbboud/micropython,deshipu/micropython,alex-robbins/micropython,bvernoux/micropython,blazewicz/micropython
|
examples/hwapi: Add config for Zephyr port of 96Boards Carbon.
|
from machine import Pin, Signal
# 96Boards Carbon board
# USR1 - User controlled led, connected to PD2
# USR2 - User controlled led, connected to PA15
# BT - Bluetooth indicator, connected to PB5.
# Note - 96b_carbon uses (at the time of writing) non-standard
# for Zephyr port device naming convention.
LED = Signal(("GPIOA", 15), Pin.OUT)
|
<commit_before><commit_msg>examples/hwapi: Add config for Zephyr port of 96Boards Carbon.<commit_after>
|
from machine import Pin, Signal
# 96Boards Carbon board
# USR1 - User controlled led, connected to PD2
# USR2 - User controlled led, connected to PA15
# BT - Bluetooth indicator, connected to PB5.
# Note - 96b_carbon uses (at the time of writing) non-standard
# for Zephyr port device naming convention.
LED = Signal(("GPIOA", 15), Pin.OUT)
|
examples/hwapi: Add config for Zephyr port of 96Boards Carbon.from machine import Pin, Signal
# 96Boards Carbon board
# USR1 - User controlled led, connected to PD2
# USR2 - User controlled led, connected to PA15
# BT - Bluetooth indicator, connected to PB5.
# Note - 96b_carbon uses (at the time of writing) non-standard
# for Zephyr port device naming convention.
LED = Signal(("GPIOA", 15), Pin.OUT)
|
<commit_before><commit_msg>examples/hwapi: Add config for Zephyr port of 96Boards Carbon.<commit_after>from machine import Pin, Signal
# 96Boards Carbon board
# USR1 - User controlled led, connected to PD2
# USR2 - User controlled led, connected to PA15
# BT - Bluetooth indicator, connected to PB5.
# Note - 96b_carbon uses (at the time of writing) non-standard
# for Zephyr port device naming convention.
LED = Signal(("GPIOA", 15), Pin.OUT)
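A usage sketch for the config above (assuming a board where this Zephyr-style pin naming resolves; time.sleep is the portable MicroPython delay):

from machine import Pin, Signal
import time

LED = Signal(("GPIOA", 15), Pin.OUT)
LED.on()        # light the USR2 LED regardless of pin polarity
time.sleep(1)
LED.off()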
|
|
1d7e4aa94288db515a673f223e4b4488a80580be
|
tests/app/test_accessibility_statement.py
|
tests/app/test_accessibility_statement.py
|
import re
import subprocess
from datetime import datetime
def test_last_review_date():
statement_file_path = "app/templates/views/accessibility_statement.html"
# test local changes against master for a full diff of what will be merged
statement_diff = subprocess.run([f"git diff --exit-code origin/master -- {statement_file_path}"],
stdout=subprocess.PIPE, shell=True)
# if statement has changed, test the review date was part of those changes
if statement_diff.returncode == 1:
raw_diff = statement_diff.stdout.decode('utf-8')
today = datetime.now().strftime('%d %B %Y')
with open(statement_file_path, 'r') as statement_file:
current_review_date = re.search((r'This statement was prepared on 23 September 2020\. '
r'It was last reviewed on (\d{1,2} [A-Z]{1}[a-z]+ \d{4})'),
statement_file.read()).group(1)
# guard against changes that don't need to update the review date
if current_review_date != today:
assert 'This statement was prepared on 23 September 2020. It was last reviewed on' in raw_diff
|
Add test for accessibility statement last review
|
Add test for accessibility statement last review
This is a proposal of a way to test that changes
to this page include updates to the 'last
reviewed' date, if needed.
|
Python
|
mit
|
alphagov/notifications-admin,alphagov/notifications-admin,alphagov/notifications-admin,alphagov/notifications-admin
|
Add test for accessibility statement last review
This is a proposal of a way to test that changes
to this page include updates to the 'last
reviewed' date, if needed.
|
import re
import subprocess
from datetime import datetime
def test_last_review_date():
statement_file_path = "app/templates/views/accessibility_statement.html"
# test local changes against master for a full diff of what will be merged
statement_diff = subprocess.run([f"git diff --exit-code origin/master -- {statement_file_path}"],
stdout=subprocess.PIPE, shell=True)
# if statement has changed, test the review date was part of those changes
if statement_diff.returncode == 1:
raw_diff = statement_diff.stdout.decode('utf-8')
today = datetime.now().strftime('%d %B %Y')
with open(statement_file_path, 'r') as statement_file:
current_review_date = re.search((r'This statement was prepared on 23 September 2020\. '
r'It was last reviewed on (\d{1,2} [A-Z]{1}[a-z]+ \d{4})'),
statement_file.read()).group(1)
# guard against changes that don't need to update the review date
if current_review_date != today:
assert 'This statement was prepared on 23 September 2020. It was last reviewed on' in raw_diff
|
<commit_before><commit_msg>Add test for accessibility statement last review
This is a proposal of a way to test that changes
to this page include updates to the 'last
reviewed' date, if needed.<commit_after>
|
import re
import subprocess
from datetime import datetime
def test_last_review_date():
statement_file_path = "app/templates/views/accessibility_statement.html"
# test local changes against master for a full diff of what will be merged
statement_diff = subprocess.run([f"git diff --exit-code origin/master -- {statement_file_path}"],
stdout=subprocess.PIPE, shell=True)
# if statement has changed, test the review date was part of those changes
if statement_diff.returncode == 1:
raw_diff = statement_diff.stdout.decode('utf-8')
today = datetime.now().strftime('%d %B %Y')
with open(statement_file_path, 'r') as statement_file:
current_review_date = re.search((r'This statement was prepared on 23 September 2020\. '
r'It was last reviewed on (\d{1,2} [A-Z]{1}[a-z]+ \d{4})'),
statement_file.read()).group(1)
# guard against changes that don't need to update the review date
if current_review_date != today:
assert 'This statement was prepared on 23 September 2020. It was last reviewed on' in raw_diff
|
Add test for accessibility statement last review
This is a proposal of a way to test that changes
to this page include updates to the 'last
reviewed' date, if needed.import re
import subprocess
from datetime import datetime
def test_last_review_date():
statement_file_path = "app/templates/views/accessibility_statement.html"
# test local changes against master for a full diff of what will be merged
statement_diff = subprocess.run([f"git diff --exit-code origin/master -- {statement_file_path}"],
stdout=subprocess.PIPE, shell=True)
# if statement has changed, test the review date was part of those changes
if statement_diff.returncode == 1:
raw_diff = statement_diff.stdout.decode('utf-8')
today = datetime.now().strftime('%d %B %Y')
with open(statement_file_path, 'r') as statement_file:
current_review_date = re.search((r'This statement was prepared on 23 September 2020\. '
r'It was last reviewed on (\d{1,2} [A-Z]{1}[a-z]+ \d{4})'),
statement_file.read()).group(1)
# guard against changes that don't need to update the review date
if current_review_date != today:
assert 'This statement was prepared on 23 September 2020. It was last reviewed on' in raw_diff
|
<commit_before><commit_msg>Add test for accessibility statement last review
This is a proposal of a way to test that changes
to this page include updates to the 'last
reviewed' date, if needed.<commit_after>import re
import subprocess
from datetime import datetime
def test_last_review_date():
statement_file_path = "app/templates/views/accessibility_statement.html"
# test local changes against master for a full diff of what will be merged
statement_diff = subprocess.run([f"git diff --exit-code origin/master -- {statement_file_path}"],
stdout=subprocess.PIPE, shell=True)
# if statement has changed, test the review date was part of those changes
if statement_diff.returncode == 1:
raw_diff = statement_diff.stdout.decode('utf-8')
today = datetime.now().strftime('%d %B %Y')
with open(statement_file_path, 'r') as statement_file:
current_review_date = re.search((r'This statement was prepared on 23 September 2020\. '
r'It was last reviewed on (\d{1,2} [A-Z]{1}[a-z]+ \d{4})'),
statement_file.read()).group(1)
# guard against changes that don't need to update the review date
if current_review_date != today:
assert 'This statement was prepared on 23 September 2020. It was last reviewed on' in raw_diff
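The pattern above generalizes to any file/marker pair — a hedged sketch, with a hypothetical helper name:

import subprocess

def assert_marker_updated(path, marker, base="origin/master"):
    # fail only when the file changed on this branch and the marker did not appear in the diff
    diff = subprocess.run(["git", "diff", "--exit-code", base, "--", path],
                          stdout=subprocess.PIPE)
    if diff.returncode == 1 and marker not in diff.stdout.decode("utf-8"):
        raise AssertionError(f"{path} changed without updating {marker!r}")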
|
|
db43c60e49a26fd178cb101b45ca1cd709853f91
|
samples/filter_vms.py
|
samples/filter_vms.py
|
#!/usr/bin/env python
"""
Written by Nathan Prziborowski
Github: https://github.com/prziborowski
This code is released under the terms of the Apache 2
http://www.apache.org/licenses/LICENSE-2.0.html
Example script to filter the list of VMs by a property's value.
Defaults for powered on VMs.
"""
import sys
from pyVmomi import vim, vmodl
from pyVim.connect import SmartConnect, Disconnect
from pyVim.task import WaitForTask
from tools import cli
__author__ = 'prziborowski'
def setup_args():
parser = cli.build_arg_parser()
parser.add_argument('-n', '--property', default='runtime.powerState',
help='Name of the property to filter by')
parser.add_argument('-v', '--value', default='poweredOn',
help='Value to filter with')
return cli.prompt_for_password(parser.parse_args())
def get_obj(si, root, vim_type):
container = si.content.viewManager.CreateContainerView(root, vim_type,
True)
view = container.view
container.Destroy()
return view
def create_filter_spec(pc, vms, prop):
objSpecs = []
for vm in vms:
objSpec = vmodl.query.PropertyCollector.ObjectSpec(obj=vm)
objSpecs.append(objSpec)
filterSpec = vmodl.query.PropertyCollector.FilterSpec()
filterSpec.objectSet = objSpecs
propSet = vmodl.query.PropertyCollector.PropertySpec(all=False)
propSet.type = vim.VirtualMachine
propSet.pathSet = [prop]
filterSpec.propSet = [propSet]
return filterSpec
def filter_results(result, value):
vms = []
for o in result.objects:
if o.propSet[0].val == value:
vms.append(o.obj)
return vms
def main():
args = setup_args()
si = SmartConnect(host=args.host, user=args.user, pwd=args.password)
# Start with all the VMs from container, which is easier to write than
# PropertyCollector to retrieve them.
vms = get_obj(si, si.content.rootFolder, [vim.VirtualMachine])
pc = si.content.propertyCollector
filter_spec = create_filter_spec(pc, vms, args.property)
options = vmodl.query.PropertyCollector.RetrieveOptions()
result = pc.RetrievePropertiesEx([filter_spec], options)
vms = filter_results(result, args.value)
print("VMs with %s = %s" % (args.property, args.value))
for vm in vms:
print(vm.name)
Disconnect(si)
if __name__ == '__main__':
main()
|
Add sample for using Property collector to assist in filtering VMs
|
Add sample for using Property collector to assist in filtering VMs
The property collector can be used to fetch a subset of properties
for a large number of objects with fewer round trips than iterating.
This sample shows how it could be used to fetch the power state of
all the VMs and post-process filter on them.
Note that printing vm.name does cause a round trip per VM, so it could
be extended in a real script/product to fetch the name property as
well.
I used the ViewManager to gather VMs as it seems easier than making
a traverse spec go through all the datacenters to gather VMs that
may also be in sub-folders.
|
Python
|
apache-2.0
|
vmware/pyvmomi-community-samples,pathcl/pyvmomi-community-samples,prziborowski/pyvmomi-community-samples,ddcrjlalumiere/pyvmomi-community-samples,jm66/pyvmomi-community-samples
|
Add sample for using Property collector to assist in filtering VMs
The property collector can be used to fetch a subset of properties
for a large number of objects with fewer round trips than iterating.
This sample shows how it could be used to fetch the power state of
all the VMs and post-process filter on them.
Note that printing vm.name does cause a round trip per VM, so it could
be extended in a real script/product to fetch the name property as
well.
I used the ViewManager to gather VMs as it seems easier than making
a traverse spec go through all the datacenters to gather VMs that
may also be in sub-folders.
|
#!/usr/bin/env python
"""
Written by Nathan Prziborowski
Github: https://github.com/prziborowski
This code is released under the terms of the Apache 2
http://www.apache.org/licenses/LICENSE-2.0.html
Example script to filter the list of VMs by a property's value.
Defaults for powered on VMs.
"""
import sys
from pyVmomi import vim, vmodl
from pyVim.connect import SmartConnect, Disconnect
from pyVim.task import WaitForTask
from tools import cli
__author__ = 'prziborowski'
def setup_args():
parser = cli.build_arg_parser()
parser.add_argument('-n', '--property', default='runtime.powerState',
help='Name of the property to filter by')
parser.add_argument('-v', '--value', default='poweredOn',
help='Value to filter with')
return cli.prompt_for_password(parser.parse_args())
def get_obj(si, root, vim_type):
container = si.content.viewManager.CreateContainerView(root, vim_type,
True)
view = container.view
container.Destroy()
return view
def create_filter_spec(pc, vms, prop):
objSpecs = []
for vm in vms:
objSpec = vmodl.query.PropertyCollector.ObjectSpec(obj=vm)
objSpecs.append(objSpec)
filterSpec = vmodl.query.PropertyCollector.FilterSpec()
filterSpec.objectSet = objSpecs
propSet = vmodl.query.PropertyCollector.PropertySpec(all=False)
propSet.type = vim.VirtualMachine
propSet.pathSet = [prop]
filterSpec.propSet = [propSet]
return filterSpec
def filter_results(result, value):
vms = []
for o in result.objects:
if o.propSet[0].val == value:
vms.append(o.obj)
return vms
def main():
args = setup_args()
si = SmartConnect(host=args.host, user=args.user, pwd=args.password)
# Start with all the VMs from container, which is easier to write than
# PropertyCollector to retrieve them.
vms = get_obj(si, si.content.rootFolder, [vim.VirtualMachine])
pc = si.content.propertyCollector
filter_spec = create_filter_spec(pc, vms, args.property)
options = vmodl.query.PropertyCollector.RetrieveOptions()
result = pc.RetrievePropertiesEx([filter_spec], options)
vms = filter_results(result, args.value)
print("VMs with %s = %s" % (args.property, args.value))
for vm in vms:
print(vm.name)
Disconnect(si)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add sample for using Property collector to assist in filtering VMs
The property collector can be used to fetch a subset of properties
for a large number of objects with fewer round trips than iterating.
This sample shows how it could be used to fetch the power state of
all the VMs and post-process filter on them.
Note that printing vm.name does cause a round trip per VM, so it could
be extended in a real script/product to fetch the name property as
well.
I used the ViewManager to gather VMs as it seems easier than making
a traverse spec go through all the datacenters to gather VMs that
may also be in sub-folders.<commit_after>
|
#!/usr/bin/env python
"""
Written by Nathan Prziborowski
Github: https://github.com/prziborowski
This code is released under the terms of the Apache 2
http://www.apache.org/licenses/LICENSE-2.0.html
Example script to filter the list of VMs by a property's value.
Defaults for powered on VMs.
"""
import sys
from pyVmomi import vim, vmodl
from pyVim.connect import SmartConnect, Disconnect
from pyVim.task import WaitForTask
from tools import cli
__author__ = 'prziborowski'
def setup_args():
parser = cli.build_arg_parser()
parser.add_argument('-n', '--property', default='runtime.powerState',
help='Name of the property to filter by')
parser.add_argument('-v', '--value', default='poweredOn',
help='Value to filter with')
return cli.prompt_for_password(parser.parse_args())
def get_obj(si, root, vim_type):
container = si.content.viewManager.CreateContainerView(root, vim_type,
True)
view = container.view
container.Destroy()
return view
def create_filter_spec(pc, vms, prop):
objSpecs = []
for vm in vms:
objSpec = vmodl.query.PropertyCollector.ObjectSpec(obj=vm)
objSpecs.append(objSpec)
filterSpec = vmodl.query.PropertyCollector.FilterSpec()
filterSpec.objectSet = objSpecs
propSet = vmodl.query.PropertyCollector.PropertySpec(all=False)
propSet.type = vim.VirtualMachine
propSet.pathSet = [prop]
filterSpec.propSet = [propSet]
return filterSpec
def filter_results(result, value):
vms = []
for o in result.objects:
if o.propSet[0].val == value:
vms.append(o.obj)
return vms
def main():
args = setup_args()
si = SmartConnect(host=args.host, user=args.user, pwd=args.password)
# Start with all the VMs from container, which is easier to write than
# PropertyCollector to retrieve them.
vms = get_obj(si, si.content.rootFolder, [vim.VirtualMachine])
pc = si.content.propertyCollector
filter_spec = create_filter_spec(pc, vms, args.property)
options = vmodl.query.PropertyCollector.RetrieveOptions()
result = pc.RetrievePropertiesEx([filter_spec], options)
vms = filter_results(result, args.value)
print("VMs with %s = %s" % (args.property, args.value))
for vm in vms:
print(vm.name)
Disconnect(si)
if __name__ == '__main__':
main()
|
Add sample for using Property collector to assist in filtering VMs
The property collector can be used to fetch a subset of properties
for a large number of objects with fewer round trips than iterating.
This sample shows how it could be used to fetch the power state of
all the VMs and post-process filter on them.
Note that printing vm.name does cause a round trip per VM, so it could
be extended in a real script/product to fetch the name property as
well.
I used the ViewManager to gather VMs as it seems easier than making
a traverse spec go through all the datacenters to gather VMs that
may also be in sub-folders.#!/usr/bin/env python
"""
Written by Nathan Prziborowski
Github: https://github.com/prziborowski
This code is released under the terms of the Apache 2
http://www.apache.org/licenses/LICENSE-2.0.html
Example script to filter the list of VMs by a property's value.
Defaults for powered on VMs.
"""
import sys
from pyVmomi import vim, vmodl
from pyVim.connect import SmartConnect, Disconnect
from pyVim.task import WaitForTask
from tools import cli
__author__ = 'prziborowski'
def setup_args():
parser = cli.build_arg_parser()
parser.add_argument('-n', '--property', default='runtime.powerState',
help='Name of the property to filter by')
parser.add_argument('-v', '--value', default='poweredOn',
help='Value to filter with')
return cli.prompt_for_password(parser.parse_args())
def get_obj(si, root, vim_type):
container = si.content.viewManager.CreateContainerView(root, vim_type,
True)
view = container.view
container.Destroy()
return view
def create_filter_spec(pc, vms, prop):
objSpecs = []
for vm in vms:
objSpec = vmodl.query.PropertyCollector.ObjectSpec(obj=vm)
objSpecs.append(objSpec)
filterSpec = vmodl.query.PropertyCollector.FilterSpec()
filterSpec.objectSet = objSpecs
propSet = vmodl.query.PropertyCollector.PropertySpec(all=False)
propSet.type = vim.VirtualMachine
propSet.pathSet = [prop]
filterSpec.propSet = [propSet]
return filterSpec
def filter_results(result, value):
vms = []
for o in result.objects:
if o.propSet[0].val == value:
vms.append(o.obj)
return vms
def main():
args = setup_args()
si = SmartConnect(host=args.host, user=args.user, pwd=args.password)
# Start with all the VMs from container, which is easier to write than
# PropertyCollector to retrieve them.
vms = get_obj(si, si.content.rootFolder, [vim.VirtualMachine])
pc = si.content.propertyCollector
filter_spec = create_filter_spec(pc, vms, args.property)
options = vmodl.query.PropertyCollector.RetrieveOptions()
result = pc.RetrievePropertiesEx([filter_spec], options)
vms = filter_results(result, args.value)
print("VMs with %s = %s" % (args.property, args.value))
for vm in vms:
print(vm.name)
Disconnect(si)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add sample for using Property collector to assist in filtering VMs
The property collector can be used to fetch a subset of properties
for a large number of objects with fewer round trips than iterating.
This sample shows how it could be used to fetch the power state of
all the VMs and post-process filter on them.
Note that printing vm.name does cause a round trip per VM, so it could
be extended in a real script/product to fetch the name property as
well.
I used the ViewManager to gather VMs as it seems easier than making
a traverse spec go through all the datacenters to gather VMs that
may also be in sub-folders.<commit_after>#!/usr/bin/env python
"""
Written by Nathan Prziborowski
Github: https://github.com/prziborowski
This code is released under the terms of the Apache 2
http://www.apache.org/licenses/LICENSE-2.0.html
Example script to filter the list of VMs by a property's value.
Defaults for powered on VMs.
"""
import sys
from pyVmomi import vim, vmodl
from pyVim.connect import SmartConnect, Disconnect
from pyVim.task import WaitForTask
from tools import cli
__author__ = 'prziborowski'
def setup_args():
parser = cli.build_arg_parser()
parser.add_argument('-n', '--property', default='runtime.powerState',
help='Name of the property to filter by')
parser.add_argument('-v', '--value', default='poweredOn',
help='Value to filter with')
return cli.prompt_for_password(parser.parse_args())
def get_obj(si, root, vim_type):
container = si.content.viewManager.CreateContainerView(root, vim_type,
True)
view = container.view
container.Destroy()
return view
def create_filter_spec(pc, vms, prop):
objSpecs = []
for vm in vms:
objSpec = vmodl.query.PropertyCollector.ObjectSpec(obj=vm)
objSpecs.append(objSpec)
filterSpec = vmodl.query.PropertyCollector.FilterSpec()
filterSpec.objectSet = objSpecs
propSet = vmodl.query.PropertyCollector.PropertySpec(all=False)
propSet.type = vim.VirtualMachine
propSet.pathSet = [prop]
filterSpec.propSet = [propSet]
return filterSpec
def filter_results(result, value):
vms = []
for o in result.objects:
if o.propSet[0].val == value:
vms.append(o.obj)
return vms
def main():
args = setup_args()
si = SmartConnect(host=args.host, user=args.user, pwd=args.password)
# Start with all the VMs from container, which is easier to write than
# PropertyCollector to retrieve them.
vms = get_obj(si, si.content.rootFolder, [vim.VirtualMachine])
pc = si.content.propertyCollector
filter_spec = create_filter_spec(pc, vms, args.property)
options = vmodl.query.PropertyCollector.RetrieveOptions()
result = pc.RetrievePropertiesEx([filter_spec], options)
vms = filter_results(result, args.value)
print("VMs with %s = %s" % (args.property, args.value))
for vm in vms:
print(vm.name)
Disconnect(si)
if __name__ == '__main__':
main()
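A sketch of the extension the commit message suggests — fetch the name alongside the filter property (i.e. build the spec with propSet.pathSet = [prop, "name"]) so printing names costs no extra round trips. The helper name is hypothetical:

def filter_results_with_names(result, prop, value):
    # assumes the filter spec requested both `prop` and "name" in pathSet
    names = []
    for o in result.objects:
        props = {p.name: p.val for p in o.propSet}
        if props.get(prop) == value:
            names.append(props.get("name"))
    return names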
|
|
53e25e5cb1ffb62cb54fed021b2d61144e422b05
|
download_from_Wikimedia_Commons.py
|
download_from_Wikimedia_Commons.py
|
#!/usr/bin/python
# -=- encoding: latin-1 -=-
"""Download files from Wikimedia Commons"""
import os
import logging
import argparse
from commonsdownloader import download_file
def get_file_names_from_textfile(textfile_handler):
"""Yield the file names and widths by parsing a given text fileahandler."""
for line in textfile_handler:
line = line.rstrip()
try:
(image_name, width) = line.split(',')
except ValueError:
image_name = line
width = None
yield (image_name, width)
class Folder(argparse.Action):
"""An argparse action for directories."""
def __call__(self, parser, namespace, values, option_string=None):
prospective_dir = values
if not os.path.isdir(prospective_dir):
msg = "Folder:{0} is not a valid path".format(prospective_dir)
raise argparse.ArgumentTypeError(msg)
else:
setattr(namespace, self.dest, prospective_dir)
def main():
"""Main method, entry point of the script."""
from argparse import ArgumentParser
description = "Download a bunch of thumbnails from Wikimedia Commons"
parser = ArgumentParser(description=description)
parser.add_argument("files",
nargs='*',
metavar="FILES",
help='A list of filenames')
parser.add_argument("-l", "--list", metavar="LIST",
dest="file_list",
type=argparse.FileType('r'),
help='A list of files <filename,width>')
parser.add_argument("-o", "--output", metavar="FOLDER",
dest="output_path",
action=Folder,
default=os.getcwd(),
help='The directory to download the files to')
parser.add_argument("-v", "--verbose",
action="store_true",
dest="verbose",
default=False,
help="Displays INFO messages")
args = parser.parse_args()
if args.verbose:
log_level = logging.INFO
else:
log_level = logging.WARNING
logging.basicConfig(level=log_level)
logging.info("Starting")
if args.file_list:
for (file_name, width) in get_file_names_from_textfile(args.file_list):
download_file(file_name, args.output_path, width=width)
elif args.files:
for file_name in args.files:
download_file(file_name, args.output_path)
else:
parser.print_help()
if __name__ == "__main__":
main()
|
Add script to download files from Commons
|
Add script to download files from Commons
This command line script interfaces with the commonsdownloader module.
It provides two ways to download files:
-give file names to the command line
-give a text file listing all the files to download
|
Python
|
mit
|
Commonists/CommonsDownloader
|
Add script to download files from Commons
This command line script interfaces with the commonsdownloader module.
It provides two ways to download files:
-give file names to the command line
-give a text file listing all the files to download
|
#!/usr/bin/python
# -=- encoding: latin-1 -=-
"""Download files from Wikimedia Commons"""
import os
import logging
import argparse
from commonsdownloader import download_file
def get_file_names_from_textfile(textfile_handler):
"""Yield the file names and widths by parsing a given text fileahandler."""
for line in textfile_handler:
line = line.rstrip()
try:
(image_name, width) = line.split(',')
except ValueError:
image_name = line
width = None
yield (image_name, width)
class Folder(argparse.Action):
"""An argparse action for directories."""
def __call__(self, parser, namespace, values, option_string=None):
prospective_dir = values
if not os.path.isdir(prospective_dir):
msg = "Folder:{0} is not a valid path".format(prospective_dir)
raise argparse.ArgumentTypeError(msg)
else:
setattr(namespace, self.dest, prospective_dir)
def main():
"""Main method, entry point of the script."""
from argparse import ArgumentParser
description = "Download a bunch of thumbnails from Wikimedia Commons"
parser = ArgumentParser(description=description)
parser.add_argument("files",
nargs='*',
metavar="FILES",
help='A list of filenames')
parser.add_argument("-l", "--list", metavar="LIST",
dest="file_list",
type=argparse.FileType('r'),
help='A list of files <filename,width>')
parser.add_argument("-o", "--output", metavar="FOLDER",
dest="output_path",
action=Folder,
default=os.getcwd(),
help='The directory to download the files to')
parser.add_argument("-v", "--verbose",
action="store_true",
dest="verbose",
default=False,
help="Displays INFO messages")
args = parser.parse_args()
if args.verbose:
log_level = logging.INFO
else:
log_level = logging.WARNING
logging.basicConfig(level=log_level)
logging.info("Starting")
if args.file_list:
for (file_name, width) in get_file_names_from_textfile(args.file_list):
download_file(file_name, args.output_path, width=width)
elif args.files:
for file_name in args.files:
download_file(file_name, args.output_path)
else:
parser.print_help()
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add script to download files from Commons
This command line script interfaces with the commonsdownloader module.
It provides two ways to download files:
-give file names to the command line
-give a text file listing all the files to download<commit_after>
|
#!/usr/bin/python
# -=- encoding: latin-1 -=-
"""Download files from Wikimedia Commons"""
import os
import logging
import argparse
from commonsdownloader import download_file
def get_file_names_from_textfile(textfile_handler):
"""Yield the file names and widths by parsing a given text fileahandler."""
for line in textfile_handler:
line = line.rstrip()
try:
(image_name, width) = line.split(',')
except ValueError:
image_name = line
width = None
yield (image_name, width)
class Folder(argparse.Action):
"""An argparse action for directories."""
def __call__(self, parser, namespace, values, option_string=None):
prospective_dir = values
if not os.path.isdir(prospective_dir):
msg = "Folder:{0} is not a valid path".format(prospective_dir)
raise argparse.ArgumentTypeError(msg)
else:
setattr(namespace, self.dest, prospective_dir)
def main():
"""Main method, entry point of the script."""
from argparse import ArgumentParser
description = "Download a bunch of thumbnails from Wikimedia Commons"
parser = ArgumentParser(description=description)
parser.add_argument("files",
nargs='*',
metavar="FILES",
help='A list of filenames')
parser.add_argument("-l", "--list", metavar="LIST",
dest="file_list",
type=argparse.FileType('r'),
help='A list of files <filename,width>')
parser.add_argument("-o", "--output", metavar="FOLDER",
dest="output_path",
action=Folder,
default=os.getcwd(),
help='The directory to download the files to')
parser.add_argument("-v", "--verbose",
action="store_true",
dest="verbose",
default=False,
help="Displays INFO messages")
args = parser.parse_args()
if args.verbose:
log_level = logging.INFO
else:
log_level = logging.WARNING
logging.basicConfig(level=log_level)
logging.info("Starting")
if args.file_list:
for (file_name, width) in get_file_names_from_textfile(args.file_list):
download_file(file_name, args.output_path, width=width)
elif args.files:
for file_name in args.files:
download_file(file_name, args.output_path)
else:
parser.print_help()
if __name__ == "__main__":
main()
|
Add script to download files from Commons
This command line script interfaces with the commonsdownloader module.
It provides two ways to download files:
-give file names to the command line
-give a text file listing all the files to download#!/usr/bin/python
# -=- encoding: latin-1 -=-
"""Download files from Wikimedia Commons"""
import os
import logging
import argparse
from commonsdownloader import download_file
def get_file_names_from_textfile(textfile_handler):
"""Yield the file names and widths by parsing a given text fileahandler."""
for line in textfile_handler:
line = line.rstrip()
try:
(image_name, width) = line.split(',')
except ValueError:
image_name = line
width = None
yield (image_name, width)
class Folder(argparse.Action):
"""An argparse action for directories."""
def __call__(self, parser, namespace, values, option_string=None):
prospective_dir = values
if not os.path.isdir(prospective_dir):
msg = "Folder:{0} is not a valid path".format(prospective_dir)
raise argparse.ArgumentTypeError(msg)
else:
setattr(namespace, self.dest, prospective_dir)
def main():
"""Main method, entry point of the script."""
from argparse import ArgumentParser
description = "Download a bunch of thumbnails from Wikimedia Commons"
parser = ArgumentParser(description=description)
parser.add_argument("files",
nargs='*',
metavar="FILES",
help='A list of filenames')
parser.add_argument("-l", "--list", metavar="LIST",
dest="file_list",
type=argparse.FileType('r'),
help='A list of files <filename,width>')
parser.add_argument("-o", "--output", metavar="FOLDER",
dest="output_path",
action=Folder,
default=os.getcwd(),
help='The directory to download the files to')
parser.add_argument("-v", "--verbose",
action="store_true",
dest="verbose",
default=False,
help="Displays INFO messages")
args = parser.parse_args()
if args.verbose:
log_level = logging.INFO
else:
log_level = logging.WARNING
logging.basicConfig(level=log_level)
logging.info("Starting")
if args.file_list:
for (file_name, width) in get_file_names_from_textfile(args.file_list):
download_file(file_name, args.output_path, width=width)
elif args.files:
for file_name in args.files:
download_file(file_name, args.output_path)
else:
parser.print_help()
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add script to download files from Commons
This command line script interfaces with the commonsdownloader module.
It provides two ways to download files:
-give file names to the command line
-give a text file listing all the files to download<commit_after>#!/usr/bin/python
# -=- encoding: latin-1 -=-
"""Download files from Wikimedia Commons"""
import os
import logging
import argparse
from commonsdownloader import download_file
def get_file_names_from_textfile(textfile_handler):
"""Yield the file names and widths by parsing a given text fileahandler."""
for line in textfile_handler:
line = line.rstrip()
try:
(image_name, width) = line.split(',')
except ValueError:
image_name = line
width = None
yield (image_name, width)
class Folder(argparse.Action):
"""An argparse action for directories."""
def __call__(self, parser, namespace, values, option_string=None):
prospective_dir = values
if not os.path.isdir(prospective_dir):
msg = "Folder:{0} is not a valid path".format(prospective_dir)
raise argparse.ArgumentTypeError(msg)
else:
setattr(namespace, self.dest, prospective_dir)
def main():
"""Main method, entry point of the script."""
from argparse import ArgumentParser
description = "Download a bunch of thumbnails from Wikimedia Commons"
parser = ArgumentParser(description=description)
parser.add_argument("files",
nargs='*',
metavar="FILES",
help='A list of filenames')
parser.add_argument("-l", "--list", metavar="LIST",
dest="file_list",
type=argparse.FileType('r'),
help='A list of files <filename,width>')
parser.add_argument("-o", "--output", metavar="FOLDER",
dest="output_path",
action=Folder,
default=os.getcwd(),
help='The directory to download the files to')
parser.add_argument("-v", "--verbose",
action="store_true",
dest="verbose",
default=False,
help="Displays INFO messages")
args = parser.parse_args()
if args.verbose:
log_level = logging.INFO
else:
log_level = logging.WARNING
logging.basicConfig(level=log_level)
logging.info("Starting")
if args.file_list:
for (file_name, width) in get_file_names_from_textfile(args.file_list):
download_file(file_name, args.output_path, width=width)
elif args.files:
for file_name in args.files:
download_file(file_name, args.output_path)
else:
parser.print_help()
if __name__ == "__main__":
main()
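The same module can also be driven programmatically — a sketch with hypothetical file names, relying only on the download_file import the script itself uses:

from commonsdownloader import download_file

download_file("Example.jpg", "/tmp", width=100)  # explicit thumbnail width
download_file("Example.jpg", "/tmp")             # library default size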
|
|
8268f13015b4b6da7f3b1ab25898bb403c2ae22d
|
scripts/compact_seriesly.py
|
scripts/compact_seriesly.py
|
from logger import logger
from seriesly import Seriesly
from perfrunner.settings import StatsSettings
def main():
s = Seriesly(StatsSettings.SERIESLY['host'])
for db in s.list_dbs():
logger.info('Compacting {}'.format(db))
result = s[db].compact()
logger.info('Compaction finished: {}'.format(result))
if __name__ == '__main__':
main()
|
Add a script for periodic compaction of Seriesly databases
|
CBPS-210: Add a script for periodic compaction of Seriesly databases
Change-Id: I95123c116c0dcdce4b4df02974d2a9fdeaef55dc
Reviewed-on: http://review.couchbase.org/69249
Tested-by: buildbot <80754af91bfb6d1073585b046fe0a474ce868509@couchbase.com>
Reviewed-by: Pavel Paulau <dd88eded64e90046a680e3a6c0828ceb8fe8a0e7@gmail.com>
|
Python
|
apache-2.0
|
couchbase/perfrunner,pavel-paulau/perfrunner,pavel-paulau/perfrunner,couchbase/perfrunner,couchbase/perfrunner,pavel-paulau/perfrunner,couchbase/perfrunner,couchbase/perfrunner,couchbase/perfrunner,pavel-paulau/perfrunner,pavel-paulau/perfrunner
|
CBPS-210: Add a script for periodic compaction of Seriesly databases
Change-Id: I95123c116c0dcdce4b4df02974d2a9fdeaef55dc
Reviewed-on: http://review.couchbase.org/69249
Tested-by: buildbot <80754af91bfb6d1073585b046fe0a474ce868509@couchbase.com>
Reviewed-by: Pavel Paulau <dd88eded64e90046a680e3a6c0828ceb8fe8a0e7@gmail.com>
|
from logger import logger
from seriesly import Seriesly
from perfrunner.settings import StatsSettings
def main():
s = Seriesly(StatsSettings.SERIESLY['host'])
for db in s.list_dbs():
logger.info('Compacting {}'.format(db))
result = s[db].compact()
logger.info('Compaction finished: {}'.format(result))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>CBPS-210: Add a script for periodic compaction of Seriesly databases
Change-Id: I95123c116c0dcdce4b4df02974d2a9fdeaef55dc
Reviewed-on: http://review.couchbase.org/69249
Tested-by: buildbot <80754af91bfb6d1073585b046fe0a474ce868509@couchbase.com>
Reviewed-by: Pavel Paulau <dd88eded64e90046a680e3a6c0828ceb8fe8a0e7@gmail.com><commit_after>
|
from logger import logger
from seriesly import Seriesly
from perfrunner.settings import StatsSettings
def main():
s = Seriesly(StatsSettings.SERIESLY['host'])
for db in s.list_dbs():
logger.info('Compacting {}'.format(db))
result = s[db].compact()
logger.info('Compaction finished: {}'.format(result))
if __name__ == '__main__':
main()
|
CBPS-210: Add a script for periodic compaction of Seriesly databases
Change-Id: I95123c116c0dcdce4b4df02974d2a9fdeaef55dc
Reviewed-on: http://review.couchbase.org/69249
Tested-by: buildbot <80754af91bfb6d1073585b046fe0a474ce868509@couchbase.com>
Reviewed-by: Pavel Paulau <dd88eded64e90046a680e3a6c0828ceb8fe8a0e7@gmail.com>from logger import logger
from seriesly import Seriesly
from perfrunner.settings import StatsSettings
def main():
s = Seriesly(StatsSettings.SERIESLY['host'])
for db in s.list_dbs():
logger.info('Compacting {}'.format(db))
result = s[db].compact()
logger.info('Compaction finished: {}'.format(result))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>CBPS-210: Add a script for periodic compaction of Seriesly databases
Change-Id: I95123c116c0dcdce4b4df02974d2a9fdeaef55dc
Reviewed-on: http://review.couchbase.org/69249
Tested-by: buildbot <80754af91bfb6d1073585b046fe0a474ce868509@couchbase.com>
Reviewed-by: Pavel Paulau <dd88eded64e90046a680e3a6c0828ceb8fe8a0e7@gmail.com><commit_after>from logger import logger
from seriesly import Seriesly
from perfrunner.settings import StatsSettings
def main():
s = Seriesly(StatsSettings.SERIESLY['host'])
for db in s.list_dbs():
logger.info('Compacting {}'.format(db))
result = s[db].compact()
logger.info('Compaction finished: {}'.format(result))
if __name__ == '__main__':
main()
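A sketch of one way to make the compaction periodic from Python itself (the module name is assumed; in practice a daily cron entry calling the script achieves the same):

import time

from compact_seriesly import main  # assumed importable module name

while True:
    main()
    time.sleep(24 * 60 * 60)  # compact all Seriesly databases once a day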
|
|
cf9d549d161536a343fde1e66cb21a195630d6f2
|
learning-python/ch07/try_catch_finally.py
|
learning-python/ch07/try_catch_finally.py
|
import json
def div(a, b):
try:
print("calculating {}/{}: ".format(a, b))
result = a / b
except ZeroDivisionError as ex:
print(ex)
else:
print("result is: {}".format(result))
finally:
print("Finally")
div(10, 5)
div(20, 0)
def parse_emp_json(json_data):
try:
return json.loads(json_data)
except (ValueError, TypeError) as ex:
print(type(ex), ex)
json_str = '{"id": 1, "first_name":"Scott"}'
emp1 = parse_emp_json(json_str)
emp2 = parse_emp_json("{{")
emp3 = parse_emp_json(2)
class CustomException(Exception):
pass
raise CustomException
|
Add try-catch-finally and exception demo.
|
Add try-catch-finally and exception demo.
|
Python
|
apache-2.0
|
precompiler/python-101
|
Add try-catch-finally and exception demo.
|
import json
def div(a, b):
try:
print("calculating {}/{}: ".format(a, b))
result = a / b
except ZeroDivisionError as ex:
print(ex)
else:
print("result is: {}".format(result))
finally:
print("Finally")
div(10, 5)
div(20, 0)
def parse_emp_json(json_data):
try:
return json.loads(json_data)
except (ValueError, TypeError) as ex:
print(type(ex), ex)
json_str = '{"id": 1, "first_name":"Scott"}'
emp1 = parse_emp_json(json_str)
emp2 = parse_emp_json("{{")
emp3 = parse_emp_json(2)
class CustomException(Exception):
pass
raise CustomException
|
<commit_before><commit_msg>Add try-catch-finally and exception demo.<commit_after>
|
import json
def div(a, b):
try:
print("calculating {}/{}: ".format(a, b))
result = a / b
except ZeroDivisionError as ex:
print(ex)
else:
print("result is: {}".format(result))
finally:
print("Finally")
div(10, 5)
div(20, 0)
def parse_emp_json(json_data):
try:
return json.loads(json_data)
except (ValueError, TypeError) as ex:
print(type(ex), ex)
json_str = '{"id": 1, "first_name":"Scott"}'
emp1 = parse_emp_json(json_str)
emp2 = parse_emp_json("{{")
emp3 = parse_emp_json(2)
class CustomException(Exception):
    pass
raise CustomException
|
Add try-catch-finally and exception demo.import json
def div(a, b):
try:
print("calculating {}/{}: ".format(a, b))
result = a / b
except ZeroDivisionError as ex:
print(ex)
else:
print("result is: {}".format(result))
finally:
print("Finally")
div(10, 5)
div(20, 0)
def parse_emp_json(json_data):
try:
return json.loads(json_data)
except (ValueError, TypeError) as ex:
print(type(ex), ex)
json_str = '{"id": 1, "first_name":"Scott"}'
emp1 = parse_emp_json(json_str)
emp2 = parse_emp_json("{{")
emp3 = parse_emp_json(2)
class CustomException(Exception):
    pass
raise CustomException
|
<commit_before><commit_msg>Add try-catch-finally and exception demo.<commit_after>import json
def div(a, b):
try:
print("calculating {}/{}: ".format(a, b))
result = a / b
except ZeroDivisionError as ex:
print(ex)
else:
print("result is: {}".format(result))
finally:
print("Finally")
div(10, 5)
div(20, 0)
def parse_emp_json(json_data):
try:
return json.loads(json_data)
except (ValueError, TypeError) as ex:
print(type(ex), ex)
json_str = '{"id": 1, "first_name":"Scott"}'
emp1 = parse_emp_json(json_str)
emp2 = parse_emp_json("{{")
emp3 = parse_emp_json(2)
class CustomException(Exception):
    pass
raise CustomException
|
|
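A brief follow-up sketch, not part of the original commit: a custom exception class like the one defined above is normally raised with a message and caught by type.

try:
    raise CustomException("something went wrong")  # raise with a message
except CustomException as ex:
    print("caught:", ex)                           # -> caught: something went wrong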
da0073ba679ac658d92859614a009343e13c3c8b
|
meta-iotqa/lib/oeqa/runtime/mraa_hello.py
|
meta-iotqa/lib/oeqa/runtime/mraa_hello.py
|
import os
from oeqa.oetest import oeRuntimeTest
class Mraa_hello(oeRuntimeTest):
'''Say hello to mraa library and get platform name through it'''
def test_mraa_hello(self):
'''Prepare test binaries to image'''
(status, output) = self.target.run('mkdir -p /opt/mraa-test/apps/')
(status, output) = self.target.run('ls /opt/mraa-test/apps/mraa-test')
if status != 0:
(status,output) = self.target.copy_to(os.path.join(oeRuntimeTest.tc.filesdir,
'mraa-test'), "/opt/mraa-test/apps/")
'''run test mraa app to get the platform information'''
client_cmd = "/opt/mraa-test/apps/mraa-test"
(status, output) = self.target.run(client_cmd)
self.assertEqual(status, 0, msg="Error messages: %s" % output)
|
Test case for saying hello to the target and getting platform information from it
|
Test case for saying hello to the target and getting platform information from it
|
Python
|
mit
|
ostroproject/meta-iotqa,ostroproject/meta-iotqa,wanghongjuan/meta-iotqa-1,wanghongjuan/meta-iotqa-1,wanghongjuan/meta-iotqa-1,wanghongjuan/meta-iotqa-1,daweiwu/meta-iotqa-1,daweiwu/meta-iotqa-1,daweiwu/meta-iotqa-1,ostroproject/meta-iotqa,daweiwu/meta-iotqa-1,wanghongjuan/meta-iotqa-1,ostroproject/meta-iotqa,daweiwu/meta-iotqa-1,ostroproject/meta-iotqa
|
Test case for saying hello to the target and getting platform information from it
|
import os
from oeqa.oetest import oeRuntimeTest
class Mraa_hello(oeRuntimeTest):
'''Say hello to mraa library and get platform name through it'''
def test_mraa_hello(self):
'''Prepare test binaries to image'''
(status, output) = self.target.run('mkdir -p /opt/mraa-test/apps/')
(status, output) = self.target.run('ls /opt/mraa-test/apps/mraa-test')
if status != 0:
(status,output) = self.target.copy_to(os.path.join(oeRuntimeTest.tc.filesdir,
'mraa-test'), "/opt/mraa-test/apps/")
'''run test mraa app to get the platform information'''
client_cmd = "/opt/mraa-test/apps/mraa-test"
(status, output) = self.target.run(client_cmd)
self.assertEqual(status, 0, msg="Error messages: %s" % output)
|
<commit_before><commit_msg>Test case for saying hello to the target and getting platform information from it<commit_after>
|
import os
from oeqa.oetest import oeRuntimeTest
class Mraa_hello(oeRuntimeTest):
'''Say hello to mraa library and get platform name through it'''
def test_mraa_hello(self):
'''Prepare test binaries to image'''
(status, output) = self.target.run('mkdir -p /opt/mraa-test/apps/')
(status, output) = self.target.run('ls /opt/mraa-test/apps/mraa-test')
if status != 0:
(status,output) = self.target.copy_to(os.path.join(oeRuntimeTest.tc.filesdir,
'mraa-test'), "/opt/mraa-test/apps/")
'''run test mraa app to get the platform information'''
client_cmd = "/opt/mraa-test/apps/mraa-test"
(status, output) = self.target.run(client_cmd)
self.assertEqual(status, 0, msg="Error messages: %s" % output)
|
Test case for saying hello to the target and getting platform information from itimport os
from oeqa.oetest import oeRuntimeTest
class Mraa_hello(oeRuntimeTest):
'''Say hello to mraa library and get platform name through it'''
def test_mraa_hello(self):
'''Prepare test binaries to image'''
(status, output) = self.target.run('mkdir -p /opt/mraa-test/apps/')
(status, output) = self.target.run('ls /opt/mraa-test/apps/mraa-test')
if status != 0:
(status,output) = self.target.copy_to(os.path.join(oeRuntimeTest.tc.filesdir,
'mraa-test'), "/opt/mraa-test/apps/")
'''run test mraa app to get the platform information'''
client_cmd = "/opt/mraa-test/apps/mraa-test"
(status, output) = self.target.run(client_cmd)
self.assertEqual(status, 0, msg="Error messages: %s" % output)
|
<commit_before><commit_msg>Test case for saying hello to the target and getting platform information from it<commit_after>import os
from oeqa.oetest import oeRuntimeTest
class Mraa_hello(oeRuntimeTest):
'''Say hello to mraa library and get platform name through it'''
def test_mraa_hello(self):
'''Prepare test binaries to image'''
(status, output) = self.target.run('mkdir -p /opt/mraa-test/apps/')
(status, output) = self.target.run('ls /opt/mraa-test/apps/mraa-test')
if status != 0:
(status,output) = self.target.copy_to(os.path.join(oeRuntimeTest.tc.filesdir,
'mraa-test'), "/opt/mraa-test/apps/")
'''run test mraa app to get the platform information'''
client_cmd = "/opt/mraa-test/apps/mraa-test"
(status, output) = self.target.run(client_cmd)
self.assertEqual(status, 0, msg="Error messages: %s" % output)
|
|
148af8380b93af6771541397e6956ed05f4fc0de
|
cogbot/extensions/faq.py
|
cogbot/extensions/faq.py
|
import json
import logging
import urllib.request
from discord.ext import commands
from discord.ext.commands import CommandError, Context
from cogbot import checks
from cogbot.cog_bot import CogBot
log = logging.getLogger(__name__)
class FaqConfig:
def __init__(self, **options):
self.database = options['database']
class Faq:
def __init__(self, bot: CogBot, ext: str):
self.bot = bot
options = bot.state.get_extension_state(ext)
self.config = FaqConfig(**options)
self.data = {}
def get_answer_by_key(self, key: str):
return self.data.get(key)
def get_all_keys(self):
return self.data.keys()
def reload_data(self):
log.info('Reloading FAQs from: {}'.format(self.config.database))
response = urllib.request.urlopen(self.config.database)
content = response.read().decode('utf8')
try:
data = json.loads(content)
except Exception as e:
raise CommandError('Failed to reload FAQs: {}'.format(e))
self.data = data
log.info('Successfully reloaded {} FAQs'.format(len(data)))
async def on_ready(self):
self.reload_data()
@commands.command(pass_context=True, name='faq')
async def cmd_faq(self, ctx: Context, *, key: str = ''):
if key:
answer = self.get_answer_by_key(key)
if answer:
await self.bot.say(answer)
else:
await self.bot.add_reaction(ctx.message, u'🤷')
else:
await self.bot.say(', '.join(self.get_all_keys()))
@checks.is_manager()
@commands.command(pass_context=True, name='faqreload', hidden=True)
async def cmd_faqreload(self, ctx: Context):
try:
self.reload_data()
await self.bot.react_success(ctx)
except:
await self.bot.react_failure(ctx)
def setup(bot):
bot.add_cog(Faq(bot, __name__))
|
Implement basic FAQ command with remote config
|
Implement basic FAQ command with remote config
|
Python
|
mit
|
Arcensoth/cogbot
|
Implement basic FAQ command with remote config
|
import json
import logging
import urllib.request
from discord.ext import commands
from discord.ext.commands import CommandError, Context
from cogbot import checks
from cogbot.cog_bot import CogBot
log = logging.getLogger(__name__)
class FaqConfig:
def __init__(self, **options):
self.database = options['database']
class Faq:
def __init__(self, bot: CogBot, ext: str):
self.bot = bot
options = bot.state.get_extension_state(ext)
self.config = FaqConfig(**options)
self.data = {}
def get_answer_by_key(self, key: str):
return self.data.get(key)
def get_all_keys(self):
return self.data.keys()
def reload_data(self):
log.info('Reloading FAQs from: {}'.format(self.config.database))
response = urllib.request.urlopen(self.config.database)
content = response.read().decode('utf8')
try:
data = json.loads(content)
except Exception as e:
raise CommandError('Failed to reload FAQs: {}'.format(e))
self.data = data
log.info('Successfully reloaded {} FAQs'.format(len(data)))
async def on_ready(self):
self.reload_data()
@commands.command(pass_context=True, name='faq')
async def cmd_faq(self, ctx: Context, *, key: str = ''):
if key:
answer = self.get_answer_by_key(key)
if answer:
await self.bot.say(answer)
else:
await self.bot.add_reaction(ctx.message, u'🤷')
else:
await self.bot.say(', '.join(self.get_all_keys()))
@checks.is_manager()
@commands.command(pass_context=True, name='faqreload', hidden=True)
async def cmd_faqreload(self, ctx: Context):
try:
self.reload_data()
await self.bot.react_success(ctx)
except:
await self.bot.react_failure(ctx)
def setup(bot):
bot.add_cog(Faq(bot, __name__))
|
<commit_before><commit_msg>Implement basic FAQ command with remote config<commit_after>
|
import json
import logging
import urllib.request
from discord.ext import commands
from discord.ext.commands import CommandError, Context
from cogbot import checks
from cogbot.cog_bot import CogBot
log = logging.getLogger(__name__)
class FaqConfig:
def __init__(self, **options):
self.database = options['database']
class Faq:
def __init__(self, bot: CogBot, ext: str):
self.bot = bot
options = bot.state.get_extension_state(ext)
self.config = FaqConfig(**options)
self.data = {}
def get_answer_by_key(self, key: str):
return self.data.get(key)
def get_all_keys(self):
return self.data.keys()
def reload_data(self):
log.info('Reloading FAQs from: {}'.format(self.config.database))
response = urllib.request.urlopen(self.config.database)
content = response.read().decode('utf8')
try:
data = json.loads(content)
except Exception as e:
raise CommandError('Failed to reload FAQs: {}'.format(e))
self.data = data
log.info('Successfully reloaded {} FAQs'.format(len(data)))
async def on_ready(self):
self.reload_data()
@commands.command(pass_context=True, name='faq')
async def cmd_faq(self, ctx: Context, *, key: str = ''):
if key:
answer = self.get_answer_by_key(key)
if answer:
await self.bot.say(answer)
else:
await self.bot.add_reaction(ctx.message, u'🤷')
else:
await self.bot.say(', '.join(self.get_all_keys()))
@checks.is_manager()
@commands.command(pass_context=True, name='faqreload', hidden=True)
async def cmd_faqreload(self, ctx: Context):
try:
self.reload_data()
await self.bot.react_success(ctx)
except:
await self.bot.react_failure(ctx)
def setup(bot):
bot.add_cog(Faq(bot, __name__))
|
Implement basic FAQ command with remote configimport json
import logging
import urllib.request
from discord.ext import commands
from discord.ext.commands import CommandError, Context
from cogbot import checks
from cogbot.cog_bot import CogBot
log = logging.getLogger(__name__)
class FaqConfig:
def __init__(self, **options):
self.database = options['database']
class Faq:
def __init__(self, bot: CogBot, ext: str):
self.bot = bot
options = bot.state.get_extension_state(ext)
self.config = FaqConfig(**options)
self.data = {}
def get_answer_by_key(self, key: str):
return self.data.get(key)
def get_all_keys(self):
return self.data.keys()
def reload_data(self):
log.info('Reloading FAQs from: {}'.format(self.config.database))
response = urllib.request.urlopen(self.config.database)
content = response.read().decode('utf8')
try:
data = json.loads(content)
except Exception as e:
raise CommandError('Failed to reload FAQs: {}'.format(e))
self.data = data
log.info('Successfully reloaded {} FAQs'.format(len(data)))
async def on_ready(self):
self.reload_data()
@commands.command(pass_context=True, name='faq')
async def cmd_faq(self, ctx: Context, *, key: str = ''):
if key:
answer = self.get_answer_by_key(key)
if answer:
await self.bot.say(answer)
else:
await self.bot.add_reaction(ctx.message, u'🤷')
else:
await self.bot.say(', '.join(self.get_all_keys()))
@checks.is_manager()
@commands.command(pass_context=True, name='faqreload', hidden=True)
async def cmd_faqreload(self, ctx: Context):
try:
self.reload_data()
await self.bot.react_success(ctx)
except:
await self.bot.react_failure(ctx)
def setup(bot):
bot.add_cog(Faq(bot, __name__))
|
<commit_before><commit_msg>Implement basic FAQ command with remote config<commit_after>import json
import logging
import urllib.request
from discord.ext import commands
from discord.ext.commands import CommandError, Context
from cogbot import checks
from cogbot.cog_bot import CogBot
log = logging.getLogger(__name__)
class FaqConfig:
def __init__(self, **options):
self.database = options['database']
class Faq:
def __init__(self, bot: CogBot, ext: str):
self.bot = bot
options = bot.state.get_extension_state(ext)
self.config = FaqConfig(**options)
self.data = {}
def get_answer_by_key(self, key: str):
return self.data.get(key)
def get_all_keys(self):
return self.data.keys()
def reload_data(self):
log.info('Reloading FAQs from: {}'.format(self.config.database))
response = urllib.request.urlopen(self.config.database)
content = response.read().decode('utf8')
try:
data = json.loads(content)
except Exception as e:
raise CommandError('Failed to reload FAQs: {}'.format(e))
self.data = data
log.info('Successfully reloaded {} FAQs'.format(len(data)))
async def on_ready(self):
self.reload_data()
@commands.command(pass_context=True, name='faq')
async def cmd_faq(self, ctx: Context, *, key: str = ''):
if key:
answer = self.get_answer_by_key(key)
if answer:
await self.bot.say(answer)
else:
await self.bot.add_reaction(ctx.message, u'🤷')
else:
await self.bot.say(', '.join(self.get_all_keys()))
@checks.is_manager()
@commands.command(pass_context=True, name='faqreload', hidden=True)
async def cmd_faqreload(self, ctx: Context):
try:
self.reload_data()
await self.bot.react_success(ctx)
except:
await self.bot.react_failure(ctx)
def setup(bot):
bot.add_cog(Faq(bot, __name__))
|
|
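For context, reload_data() expects the configured database URL to return a JSON object mapping FAQ keys to answer strings. A sketch of the lookup behaviour on such a payload (the entries here are hypothetical):

# What self.data looks like after json.loads() on a hypothetical FAQ file
data = {
    "nbt": "NBT is the binary format used for save data.",
    "scoreboard": "See the scoreboard docs for objectives and teams.",
}
print(data.get("nbt"))      # hit: the bot replies with the answer
print(data.get("missing"))  # None: the bot reacts with the shrug emoji instead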
6fd5333f1650cb12e81601bb73aefa9060e9c441
|
closures/closure-simple-example.py
|
closures/closure-simple-example.py
|
def printMsg(msg):
#This is the main outer function that encloses printer function
#this function is nested inside the printMsg function
def printer():
print(msg)
#We return a function when printMsg function is called
return printer
#here we call the function printMsg and store its output in another variable
another = printMsg("Hello")
another()
#we then call another and the output is
# Hello
#this is a simple example of closure that even when the execution
#of printMsg was finished, the message was still remembered and when we call
#another, the message Hello was displayed
|
Create this file as a simple example of closure
|
Create this file as a simple example of closure
I have added a full working example with comments so that whoever reads this can understand closures. This is the simplest closure example I can find.
|
Python
|
apache-2.0
|
Aneesh540/python-projects
|
Create this file as a simple example of closure
I have added a full working example with comments so that whoever reads this can understand closures. This is the simplest closure example I can find.
|
def printMsg(msg):
#This is the main outer function that encloses printer function
#this function is nested inside the printMsg function
def printer():
print(msg)
#We return a function when printMsg function is called
return printer
#here we call the function printMsg and store its output in another variable
another = printMsg("Hello")
another()
#we then call another and the output is
# Hello
#this is a simple example of closure that even when the execution
#of printMsg was finished, the message was still remembered and when we call
#another, the message Hello was displayed
|
<commit_before><commit_msg>Create this file as a simple example of closure
I have added a full working example with comments so that whoever reads this can understand closures. This is the simplest closure example I can find.<commit_after>
|
def printMsg(msg):
#This is the main outer function that encloses printer function
#this function is nested inside the printMsg function
def printer():
print(msg)
#We return a function when printMsg function is called
return printer
#here we call the function printMsg and store its output in another variable
another = printMsg("Hello")
another()
#we then call another and the output is
# Hello
#this is a simple example of closure that even when the execution
#of printMsg was finished, the message was still remembered and when we call
#another, the message Hello was displayed
|
Create this file as a simple example of closure
I have added a full working example with comments so that whoever reads this can understand closures. This is the simplest closure example I can find.def printMsg(msg):
#This is the main outer function that encloses printer function
#this function is nested inside the printMsg function
def printer():
print(msg)
#We return a function when printMsg function is called
return printer
#here we call the function printMsg and store its output in another variable
another = printMsg("Hello")
another()
#we then call another and the output is
# Hello
#this is a simple example of closure that even when the execution
#of printMsg was finished, the message was still remembered and when we call
#another, the message Hello was displayed
|
<commit_before><commit_msg>Create this file as a simple example of closure
I have added a full working example with comments so that whoever reads this can understand closures. This is the simplest closure example I can find.<commit_after>def printMsg(msg):
#This is the main outer function that encloses printer function
#this function is nested inside the printMsg function
def printer():
print(msg)
#We return a function when printMsg function is called
return printer
#here we call the function printMsg and store its output in another variable
another = printMsg("Hello")
another()
#we then call another and the output is
# Hello
#this is a simple example of closure that even when the execution
#of printMsg was finished, the message was still remembered and when we call
#another, the message Hello was displayed
|
|
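One extra sketch, assuming the printMsg function above is in scope: the captured value lives in the returned function's closure cells, which makes the "remembering" directly visible.

another = printMsg("Hello")
print(another.__code__.co_freevars)          # ('msg',) -- the free variable
print(another.__closure__[0].cell_contents)  # 'Hello' -- the captured value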
2fb4a2db2486248f4ffed867defd3ffec0cc0e12
|
get_lexer.py
|
get_lexer.py
|
#!/usr/bin/python
from pygments.lexers import (get_all_lexers)
for lexname, aliases, _, mimetypes in get_all_lexers():
print "%s" % (lexname)
|
Add small script to retrieve lexers from pygments
|
Add small script to retrieve lexers from pygments
|
Python
|
agpl-3.0
|
formorer/paste.pl,shlomif/paste.debian.net-paste.pl,formorer/paste.pl,shlomif/paste.debian.net-paste.pl,formorer/paste.pl
|
Add small script to retrieve lexers from pygments
|
#!/usr/bin/python
from pygments.lexers import (get_all_lexers)
for lexname, aliases, _, mimetypes in get_all_lexers():
print "%s" % (lexname)
|
<commit_before><commit_msg>Add small script to retrieve lexers from pygments<commit_after>
|
#!/usr/bin/python
from pygments.lexers import (get_all_lexers)
for lexname, aliases, _, mimetypes in get_all_lexers():
print "%s" % (lexname)
|
Add small script to retrieve lexers from pygments#!/usr/bin/python
from pygments.lexers import (get_all_lexers)
for lexname, aliases, _, mimetypes in get_all_lexers():
print "%s" % (lexname)
|
<commit_before><commit_msg>Add small script to retrieve lexers from pygments<commit_after>#!/usr/bin/python
from pygments.lexers import (get_all_lexers)
for lexname, aliases, _, mimetypes in get_all_lexers():
print "%s" % (lexname)
|
|
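A small variant sketch using the same pygments call, printing each lexer together with its aliases (keeping the script's Python 2 print style):

#!/usr/bin/python
from pygments.lexers import (get_all_lexers)

for lexname, aliases, _, mimetypes in get_all_lexers():
    print "%s: %s" % (lexname, ", ".join(aliases))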
802c48c67423d7db2a6e056606c4bbba9d24f842
|
guestbook.py
|
guestbook.py
|
# -*- coding: utf-8 -*-
import shelve
DATA_FILE = 'guestbook.dat'
def save_data(name, comment, create_at):
"""投稿データを保存します
"""
database = shelve.open(DATA_FILE)
if 'greeting_list' not in database:
greeting_list = []
else:
greeting_list = database['greeting_list']
greeting_list.insert(0, {
'name': name,
'comment': comment,
'create_at': create_at,
})
database['greeting_list'] = greeting_list
database.close()
def load_data():
"""投稿されたデータを返します
"""
database = shelve.open(DATA_FILE)
greeting_list = database.get('greeting_list', [])
database.close()
return greeting_list
|
Implement top level functions 'save_data()', 'load_data()'
|
Implement top level functions 'save_data()', 'load_data()'
|
Python
|
bsd-3-clause
|
raimon49/pypro2-guestbook-webapp,raimon49/pypro2-guestbook-webapp
|
Implement top level functions 'save_data()', 'load_data()'
|
# -*- coding: utf-8 -*-
import shelve
DATA_FILE = 'guestbook.dat'
def save_data(name, comment, create_at):
"""投稿データを保存します
"""
database = shelve.open(DATA_FILE)
if 'greeting_list' not in database:
greeting_list = []
else:
greeting_list = database['greeting_list']
greeting_list.insert(0, {
'name': name,
'comment': comment,
'create_at': create_at,
})
database['greeting_list'] = greeting_list
database.close()
def load_data():
"""投稿されたデータを返します
"""
database = shelve.open(DATA_FILE)
greeting_list = database.get('greeting_list', [])
database.close()
return greeting_list
|
<commit_before><commit_msg>Implement top level functions 'save_data()', 'load_data()'<commit_after>
|
# -*- coding: utf-8 -*-
import shelve
DATA_FILE = 'guestbook.dat'
def save_data(name, comment, create_at):
"""投稿データを保存します
"""
database = shelve.open(DATA_FILE)
if 'greeting_list' not in database:
greeting_list = []
else:
greeting_list = database['greeting_list']
greeting_list.insert(0, {
'name': name,
'comment': comment,
'create_at': create_at,
})
database['greeting_list'] = greeting_list
database.close()
def load_data():
"""投稿されたデータを返します
"""
database = shelve.open(DATA_FILE)
greeting_list = database.get('greeting_list', [])
database.close()
return greeting_list
|
Implement top level functions 'save_data()', 'load_data()'# -*- coding: utf-8 -*-
import shelve
DATA_FILE = 'guestbook.dat'
def save_data(name, comment, create_at):
"""投稿データを保存します
"""
database = shelve.open(DATA_FILE)
if 'greeting_list' not in database:
greeting_list = []
else:
greeting_list = database['greeting_list']
greeting_list.insert(0, {
'name': name,
'comment': comment,
'create_at': create_at,
})
database['greeting_list'] = greeting_list
database.close()
def load_data():
"""投稿されたデータを返します
"""
database = shelve.open(DATA_FILE)
greeting_list = database.get('greeting_list', [])
database.close()
return greeting_list
|
<commit_before><commit_msg>Implement top level functions 'save_data()', 'load_data()'<commit_after># -*- coding: utf-8 -*-
import shelve
DATA_FILE = 'guestbook.dat'
def save_data(name, comment, create_at):
"""投稿データを保存します
"""
database = shelve.open(DATA_FILE)
if 'greeting_list' not in database:
greeting_list = []
else:
greeting_list = database['greeting_list']
greeting_list.insert(0, {
'name': name,
'comment': comment,
'create_at': create_at,
})
database['greeting_list'] = greeting_list
database.close()
def load_data():
"""投稿されたデータを返します
"""
database = shelve.open(DATA_FILE)
greeting_list = database.get('greeting_list', [])
database.close()
return greeting_list
|
|
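A minimal usage sketch for the two functions, with a hypothetical entry:

from datetime import datetime
import guestbook

guestbook.save_data('alice', 'hello from the guestbook', datetime.now())
for greeting in guestbook.load_data():
    print(greeting['name'], greeting['comment'], greeting['create_at'])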
558fb57d2ce4a74f8cc1dbbc245f54f6dd1e55d5
|
scripts/text-files.py
|
scripts/text-files.py
|
from __future__ import print_function
import sys
from pyspark.sql import SparkSession, Row
from pyspark.sql.functions import col
if __name__ == "__main__":
if len(sys.argv) != 3:
print("Usage: text-files.py <input> <output>", file=sys.stderr)
exit(-1)
groupField = 'book'
spark = SparkSession.builder.appName('Load Whole Text Files').getOrCreate()
spark.sparkContext.setLogLevel('WARN')
spark.sparkContext._jsc.hadoopConfiguration()\
.set('mapreduce.input.fileinputformat.input.dir.recursive', 'true')
spark.sparkContext.wholeTextFiles(sys.argv[1])\
.map(lambda f: Row(id=f[0], text=f[1]))\
.toDF()\
.withColumn(groupField, col('id')) \
.write.save(sys.argv[2])
spark.stop()
|
Add text-files.py to make minimal records from whole text files.
|
Add text-files.py to make minimal records from whole text files.
|
Python
|
apache-2.0
|
ViralTexts/vt-passim,ViralTexts/vt-passim,ViralTexts/vt-passim
|
Add text-files.py to make minimal records from whole text files.
|
from __future__ import print_function
import sys
from pyspark.sql import SparkSession, Row
from pyspark.sql.functions import col
if __name__ == "__main__":
if len(sys.argv) != 3:
print("Usage: text-files.py <input> <output>", file=sys.stderr)
exit(-1)
groupField = 'book'
spark = SparkSession.builder.appName('Load Whole Text Files').getOrCreate()
spark.sparkContext.setLogLevel('WARN')
spark.sparkContext._jsc.hadoopConfiguration()\
.set('mapreduce.input.fileinputformat.input.dir.recursive', 'true')
spark.sparkContext.wholeTextFiles(sys.argv[1])\
.map(lambda f: Row(id=f[0], text=f[1]))\
.toDF()\
.withColumn(groupField, col('id')) \
.write.save(sys.argv[2])
spark.stop()
|
<commit_before><commit_msg>Add text-files.py to make minimal records from whole text files.<commit_after>
|
from __future__ import print_function
import sys
from pyspark.sql import SparkSession, Row
from pyspark.sql.functions import col
if __name__ == "__main__":
if len(sys.argv) != 3:
print("Usage: text-files.py <input> <output>", file=sys.stderr)
exit(-1)
groupField = 'book'
spark = SparkSession.builder.appName('Load Whole Text Files').getOrCreate()
spark.sparkContext.setLogLevel('WARN')
spark.sparkContext._jsc.hadoopConfiguration()\
.set('mapreduce.input.fileinputformat.input.dir.recursive', 'true')
spark.sparkContext.wholeTextFiles(sys.argv[1])\
.map(lambda f: Row(id=f[0], text=f[1]))\
.toDF()\
.withColumn(groupField, col('id')) \
.write.save(sys.argv[2])
spark.stop()
|
Add text-files.py to make minimal records from whole text files.from __future__ import print_function
import sys
from pyspark.sql import SparkSession, Row
from pyspark.sql.functions import col
if __name__ == "__main__":
if len(sys.argv) != 3:
print("Usage: text-files.py <input> <output>", file=sys.stderr)
exit(-1)
groupField = 'book'
spark = SparkSession.builder.appName('Load Whole Text Files').getOrCreate()
spark.sparkContext.setLogLevel('WARN')
spark.sparkContext._jsc.hadoopConfiguration()\
.set('mapreduce.input.fileinputformat.input.dir.recursive', 'true')
spark.sparkContext.wholeTextFiles(sys.argv[1])\
.map(lambda f: Row(id=f[0], text=f[1]))\
.toDF()\
.withColumn(groupField, col('id')) \
.write.save(sys.argv[2])
spark.stop()
|
<commit_before><commit_msg>Add text-files.py to make minimal records from whole text files.<commit_after>from __future__ import print_function
import sys
from pyspark.sql import SparkSession, Row
from pyspark.sql.functions import col
if __name__ == "__main__":
if len(sys.argv) != 3:
print("Usage: text-files.py <input> <output>", file=sys.stderr)
exit(-1)
groupField = 'book'
spark = SparkSession.builder.appName('Load Whole Text Files').getOrCreate()
spark.sparkContext.setLogLevel('WARN')
spark.sparkContext._jsc.hadoopConfiguration()\
.set('mapreduce.input.fileinputformat.input.dir.recursive', 'true')
spark.sparkContext.wholeTextFiles(sys.argv[1])\
.map(lambda f: Row(id=f[0], text=f[1]))\
.toDF()\
.withColumn(groupField, col('id')) \
.write.save(sys.argv[2])
spark.stop()
|
|
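A follow-up sketch for inspecting the output, assuming the job was run with the hypothetical paths /data/books and /data/books-records; write.save() with no format argument uses Spark's default Parquet output:

from pyspark.sql import SparkSession

spark = SparkSession.builder.appName('Inspect records').getOrCreate()
df = spark.read.load('/data/books-records')   # hypothetical output path from the job
df.select('id', 'book').show(truncate=False)  # one row per whole text file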
c12cf495e8d548b3b79cd994bb10369f4da6c146
|
test/benchmark_report.py
|
test/benchmark_report.py
|
'''
Make a report showing the speed of some critical commands
Check whether it is fast enough for your application
'''
from __future__ import print_function
from unrealcv import client
import docker_util
import time
import pytest
def run_command(cmd, num):
for _ in range(num):
client.request(cmd)
if __name__ == '__main__':
print('Start docker')
# Initialize the environment
docker_util.runner.start()
client.connect()
commands = [
('vget /unrealcv/status', 1000),
('vset /unrealcv/sync test message', 1000),
('vget /camera/0/lit', 100),
]
for (cmd, num) in commands:
tic = time.time()
run_command(cmd, num)
toc = time.time()
elapse = toc - tic
print('Run %s for %d, time = %.2f, %d FPS' % (cmd, num, elapse, float(num) / elapse))
docker_util.runner.stop()
|
Add a benchmark report to show the speed performance.
|
Add a benchmark report to show the speed performance.
|
Python
|
mit
|
unrealcv/unrealcv,unrealcv/unrealcv,unrealcv/unrealcv,unrealcv/unrealcv,unrealcv/unrealcv
|
Add a benchmark report to show the speed performance.
|
'''
Make a report showing the speed of some critical commands
Check whether it is fast enough for your application
'''
from __future__ import print_function
from unrealcv import client
import docker_util
import time
import pytest
def run_command(cmd, num):
for _ in range(num):
client.request(cmd)
if __name__ == '__main__':
print('Start docker')
# Initialize the environment
docker_util.runner.start()
client.connect()
commands = [
('vget /unrealcv/status', 1000),
('vset /unrealcv/sync test message', 1000),
('vget /camera/0/lit', 100),
]
for (cmd, num) in commands:
tic = time.time()
run_command(cmd, num)
toc = time.time()
elapse = toc - tic
print('Run %s for %d, time = %.2f, %d FPS' % (cmd, num, elapse, float(num) / elapse))
docker_util.runner.stop()
|
<commit_before><commit_msg>Add a benchmark report to show the speed performance.<commit_after>
|
'''
Make a report showing the speed of some critical commands
Check whether it is fast enough for your application
'''
from __future__ import print_function
from unrealcv import client
import docker_util
import time
import pytest
def run_command(cmd, num):
for _ in range(num):
client.request(cmd)
if __name__ == '__main__':
print('Start docker')
# Initialize the environment
docker_util.runner.start()
client.connect()
commands = [
('vget /unrealcv/status', 1000),
('vset /unrealcv/sync test message', 1000),
('vget /camera/0/lit', 100),
]
for (cmd, num) in commands:
tic = time.time()
run_command(cmd, num)
toc = time.time()
elapse = toc - tic
print('Run %s for %d, time = %.2f, %d FPS' % (cmd, num, elapse, float(num) / elapse))
docker_util.runner.stop()
|
Add a benchmark report to show the speed performance.'''
Make a report showing the speed of some critical commands
Check whether it is fast enough for your application
'''
from __future__ import print_function
from unrealcv import client
import docker_util
import time
import pytest
def run_command(cmd, num):
for _ in range(num):
client.request(cmd)
if __name__ == '__main__':
print('Start docker')
# Initialize the environment
docker_util.runner.start()
client.connect()
commands = [
('vget /unrealcv/status', 1000),
('vset /unrealcv/sync test message', 1000),
('vget /camera/0/lit', 100),
]
for (cmd, num) in commands:
tic = time.time()
run_command(cmd, num)
toc = time.time()
elapse = toc - tic
print('Run %s for %d, time = %.2f, %d FPS' % (cmd, num, elapse, float(num) / elapse))
docker_util.runner.stop()
|
<commit_before><commit_msg>Add a benchmark report to show the speed performance.<commit_after>'''
Make a report showing the speed of some critical commands
Check whether it is fast enough for your application
'''
from __future__ import print_function
from unrealcv import client
import docker_util
import time
import pytest
def run_command(cmd, num):
for _ in range(num):
client.request(cmd)
if __name__ == '__main__':
print('Start docker')
# Initialize the environment
docker_util.runner.start()
client.connect()
commands = [
('vget /unrealcv/status', 1000),
('vset /unrealcv/sync test message', 1000),
('vget /camera/0/lit', 100),
]
for (cmd, num) in commands:
tic = time.time()
run_command(cmd, num)
toc = time.time()
elapse = toc - tic
print('Run %s for %d, time = %.2f, %d FPS' % (cmd, num, elapse, float(num) / elapse))
docker_util.runner.stop()
|
|
b13cc8cc76bfa46e7a4fdcff664639fb18a12836
|
trex/filters.py
|
trex/filters.py
|
# -*- coding: utf-8 -*-
#
# (c) 2014 Bjoern Ricks <bjoern.ricks@gmail.com>
#
# See LICENSE coming with the source of 'trex' for details.
#
import django_filters
from trex.models import Entry
class EntryFilter(django_filters.FilterSet):
from_date = django_filters.DateFilter(name="date", lookup_type="gte")
to_date = django_filters.DateFilter(name="date", lookup_type="lte")
class Meta:
model = Entry
fields = ["from_date", "to_date", "state"]
|
Add a filter class for Entries
|
Add a filter class for Entries
|
Python
|
mit
|
bjoernricks/trex,bjoernricks/trex
|
Add a filter class for Entries
|
# -*- coding: utf-8 -*-
#
# (c) 2014 Bjoern Ricks <bjoern.ricks@gmail.com>
#
# See LICENSE coming with the source of 'trex' for details.
#
import django_filters
from trex.models import Entry
class EntryFilter(django_filters.FilterSet):
from_date = django_filters.DateFilter(name="date", lookup_type="gte")
to_date = django_filters.DateFilter(name="date", lookup_type="lte")
class Meta:
model = Entry
fields = ["from_date", "to_date", "state"]
|
<commit_before><commit_msg>Add a filter class for Entries<commit_after>
|
# -*- coding: utf-8 -*-
#
# (c) 2014 Bjoern Ricks <bjoern.ricks@gmail.com>
#
# See LICENSE coming with the source of 'trex' for details.
#
import django_filters
from trex.models import Entry
class EntryFilter(django_filters.FilterSet):
from_date = django_filters.DateFilter(name="date", lookup_type="gte")
to_date = django_filters.DateFilter(name="date", lookup_type="lte")
class Meta:
model = Entry
fields = ["from_date", "to_date", "state"]
|
Add a filter class for Entries# -*- coding: utf-8 -*-
#
# (c) 2014 Bjoern Ricks <bjoern.ricks@gmail.com>
#
# See LICENSE coming with the source of 'trex' for details.
#
import django_filters
from trex.models import Entry
class EntryFilter(django_filters.FilterSet):
from_date = django_filters.DateFilter(name="date", lookup_type="gte")
to_date = django_filters.DateFilter(name="date", lookup_type="lte")
class Meta:
model = Entry
fields = ["from_date", "to_date", "state"]
|
<commit_before><commit_msg>Add a filter class for Entries<commit_after># -*- coding: utf-8 -*-
#
# (c) 2014 Bjoern Ricks <bjoern.ricks@gmail.com>
#
# See LICENSE coming with the source of 'trex' for details.
#
import django_filters
from trex.models import Entry
class EntryFilter(django_filters.FilterSet):
from_date = django_filters.DateFilter(name="date", lookup_type="gte")
to_date = django_filters.DateFilter(name="date", lookup_type="lte")
class Meta:
model = Entry
fields = ["from_date", "to_date", "state"]
|
|
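A sketch of how a FilterSet like this is typically bound in a view, with hypothetical query parameters such as ?from_date=2014-01-01&to_date=2014-01-31&state=open:

# Standard django-filter usage: bind request parameters to the FilterSet
f = EntryFilter(request.GET, queryset=Entry.objects.all())
entries = f.qs  # Entry rows with from_date <= date <= to_date and a matching state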
e92c047dd6ba07d5ac4b0b79403d0c3b28e9f0d8
|
pombola/core/management/commands/core_end_positions.py
|
pombola/core/management/commands/core_end_positions.py
|
from optparse import make_option
import sys
from django.core.management.base import NoArgsCommand, CommandError
from django_date_extensions.fields import ApproximateDate
from pombola.core.models import Person, Position, PositionTitle, Place, Organisation
def yyyymmdd_to_approx(yyyymmdd):
year, month, day = map(int, yyyymmdd.split('-'))
return ApproximateDate(year, month, day)
class Command(NoArgsCommand):
help = 'End positions which meet the criteria'
option_list = NoArgsCommand.option_list + (
make_option('--commit', action='store_true', dest='commit', help='Actually update the database'),
make_option('--end-date', dest="end-date", help="The end date to apply to matching positions"),
make_option('--title', dest="title", help="The title to match positions"),
make_option('--organisation', dest="organisation", help="The organisation to match positions"),
make_option('--organisation-kind', dest="organisation-kind", help="The kind of organisation to match positions")
)
def handle_noargs(self, **options):
positions = Position.objects
if options['title']:
print 'Title filter: ' + options['title']
positions = positions.filter(title__slug = options['title'])
if options['organisation']:
print 'Organisation filter: ' + options['organisation']
positions = positions.filter(organisation__slug = options['organisation'])
if options['organisation-kind']:
print 'Organisation kind filter: ' + options['organisation-kind']
positions = positions.filter(organisation__kind__slug = options['organisation-kind'])
end_date = yyyymmdd_to_approx(options['end-date'])
for position in positions.currently_active():
print " Ending %s" % position
position.end_date = end_date
if options['commit']:
position.save()
print 'Ended a total of ' + str(positions.count()) + ' positions.'
|
Add script to end positions which meet the given criteria
|
Add script to end positions which meet the given criteria
|
Python
|
agpl-3.0
|
geoffkilpin/pombola,hzj123/56th,geoffkilpin/pombola,geoffkilpin/pombola,ken-muturi/pombola,ken-muturi/pombola,ken-muturi/pombola,geoffkilpin/pombola,hzj123/56th,mysociety/pombola,patricmutwiri/pombola,mysociety/pombola,geoffkilpin/pombola,hzj123/56th,hzj123/56th,mysociety/pombola,ken-muturi/pombola,mysociety/pombola,patricmutwiri/pombola,hzj123/56th,mysociety/pombola,patricmutwiri/pombola,patricmutwiri/pombola,ken-muturi/pombola,patricmutwiri/pombola,patricmutwiri/pombola,mysociety/pombola,geoffkilpin/pombola,ken-muturi/pombola,hzj123/56th
|
Add script to end positions which meet the given criteria
|
from optparse import make_option
import sys
from django.core.management.base import NoArgsCommand, CommandError
from django_date_extensions.fields import ApproximateDate
from pombola.core.models import Person, Position, PositionTitle, Place, Organisation
def yyyymmdd_to_approx(yyyymmdd):
year, month, day = map(int, yyyymmdd.split('-'))
return ApproximateDate(year, month, day)
class Command(NoArgsCommand):
help = 'End positions which meet the criteria'
option_list = NoArgsCommand.option_list + (
make_option('--commit', action='store_true', dest='commit', help='Actually update the database'),
make_option('--end-date', dest="end-date", help="The end date to apply to matching positions"),
make_option('--title', dest="title", help="The title to match positions"),
make_option('--organisation', dest="organisation", help="The organisation to match positions"),
make_option('--organisation-kind', dest="organisation-kind", help="The kind of organisation to match positions")
)
def handle_noargs(self, **options):
positions = Position.objects
if options['title']:
print 'Title filter: ' + options['title']
positions = positions.filter(title__slug = options['title'])
if options['organisation']:
print 'Organisation filter: ' + options['organisation']
positions = positions.filter(organisation__slug = options['organisation'])
if options['organisation-kind']:
print 'Organisation kind filter: ' + options['organisation-kind']
positions = positions.filter(organisation__kind__slug = options['organisation-kind'])
end_date = yyyymmdd_to_approx(options['end-date'])
for position in positions.currently_active():
print " Ending %s" % position
position.end_date = end_date
if options['commit']:
position.save()
print 'Ended a total of ' + str(positions.count()) + ' positions.'
|
<commit_before><commit_msg>Add script to end positions which meet the given criteria<commit_after>
|
from optparse import make_option
import sys
from django.core.management.base import NoArgsCommand, CommandError
from django_date_extensions.fields import ApproximateDate
from pombola.core.models import Person, Position, PositionTitle, Place, Organisation
def yyyymmdd_to_approx(yyyymmdd):
year, month, day = map(int, yyyymmdd.split('-'))
return ApproximateDate(year, month, day)
class Command(NoArgsCommand):
help = 'End positions which meet the criteria'
option_list = NoArgsCommand.option_list + (
make_option('--commit', action='store_true', dest='commit', help='Actually update the database'),
make_option('--end-date', dest="end-date", help="The end date to apply to matching positions"),
make_option('--title', dest="title", help="The title to match positions"),
make_option('--organisation', dest="organisation", help="The organisation to match positions"),
make_option('--organisation-kind', dest="organisation-kind", help="The kind of organisation to match positions")
)
def handle_noargs(self, **options):
positions = Position.objects
if options['title']:
print 'Title filter: ' + options['title']
positions = positions.filter(title__slug = options['title'])
if options['organisation']:
print 'Organisation filter: ' + options['organisation']
positions = positions.filter(organisation__slug = options['organisation'])
if options['organisation-kind']:
print 'Organisation kind filter: ' + options['organisation-kind']
positions = positions.filter(organisation__kind__slug = options['organisation-kind'])
end_date = yyyymmdd_to_approx(options['end-date'])
for position in positions.currently_active():
print " Ending %s" % position
position.end_date = end_date
if options['commit']:
position.save()
print 'Ended a total of ' + str(positions.count()) + ' positions.'
|
Add script to end positions which meet the given criteriafrom optparse import make_option
import sys
from django.core.management.base import NoArgsCommand, CommandError
from django_date_extensions.fields import ApproximateDate
from pombola.core.models import Person, Position, PositionTitle, Place, Organisation
def yyyymmdd_to_approx(yyyymmdd):
year, month, day = map(int, yyyymmdd.split('-'))
return ApproximateDate(year, month, day)
class Command(NoArgsCommand):
help = 'End positions which meet the criteria'
option_list = NoArgsCommand.option_list + (
make_option('--commit', action='store_true', dest='commit', help='Actually update the database'),
make_option('--end-date', dest="end-date", help="The end date to apply to matching positions"),
make_option('--title', dest="title", help="The title to match positions"),
make_option('--organisation', dest="organisation", help="The organisation to match positions"),
make_option('--organisation-kind', dest="organisation-kind", help="The kind of organisation to match positions")
)
def handle_noargs(self, **options):
positions = Position.objects
if options['title']:
print 'Title filter: ' + options['title']
positions = positions.filter(title__slug = options['title'])
if options['organisation']:
print 'Organisation filter: ' + options['organisation']
positions = positions.filter(organisation__slug = options['organisation'])
if options['organisation-kind']:
print 'Organisation kind filter: ' + options['organisation-kind']
positions = positions.filter(organisation__kind__slug = options['organisation-kind'])
end_date = yyyymmdd_to_approx(options['end-date'])
for position in positions.currently_active():
print " Ending %s" % position
position.end_date = end_date
if options['commit']:
position.save()
print 'Ended a total of ' + str(positions.count()) + ' positions.'
|
<commit_before><commit_msg>Add script to end positions which meet the given criteria<commit_after>from optparse import make_option
import sys
from django.core.management.base import NoArgsCommand, CommandError
from django_date_extensions.fields import ApproximateDate
from pombola.core.models import Person, Position, PositionTitle, Place, Organisation
def yyyymmdd_to_approx(yyyymmdd):
year, month, day = map(int, yyyymmdd.split('-'))
return ApproximateDate(year, month, day)
class Command(NoArgsCommand):
help = 'End positions which meet the criteria'
option_list = NoArgsCommand.option_list + (
make_option('--commit', action='store_true', dest='commit', help='Actually update the database'),
make_option('--end-date', dest="end-date", help="The end date to apply to matching positions"),
make_option('--title', dest="title", help="The title to match positions"),
make_option('--organisation', dest="organisation", help="The organisation to match positions"),
make_option('--organisation-kind', dest="organisation-kind", help="The kind of organisation to match positions")
)
def handle_noargs(self, **options):
positions = Position.objects
if options['title']:
print 'Title filter: ' + options['title']
positions = positions.filter(title__slug = options['title'])
if options['organisation']:
print 'Organisation filter: ' + options['organisation']
positions = positions.filter(organisation__slug = options['organisation'])
if options['organisation-kind']:
print 'Organisation kind filter: ' + options['organisation-kind']
positions = positions.filter(organisation__kind__slug = options['organisation-kind'])
end_date = yyyymmdd_to_approx(options['end-date'])
for position in positions.currently_active():
print " Ending %s" % position
position.end_date = end_date
if options['commit']:
position.save()
print 'Ended a total of ' + str(positions.count()) + ' positions.'
|
|
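An invocation sketch built from the options the command defines (the slug and date values are hypothetical); leaving out commit keeps it a dry run that only prints the matches:

from django.core.management import call_command

opts = {'title': 'mp', 'organisation': 'parliament', 'end-date': '2013-03-04'}
call_command('core_end_positions', **opts)               # dry run: report only
call_command('core_end_positions', commit=True, **opts)  # actually sets end_date and saves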
338bf2188bfe54f1a73ac565ccca65b056aaf616
|
pombola/kenya/management/commands/kenya_hide_people.py
|
pombola/kenya/management/commands/kenya_hide_people.py
|
from optparse import make_option
from django.core.management.base import NoArgsCommand, CommandError
from django.db.models import Q
from pombola.core.models import Person, Position
class Command(NoArgsCommand):
option_list = NoArgsCommand.option_list + (
make_option('--commit', action='store_true', dest='commit', help='Actually update the database'),
)
def handle_noargs(self, **options):
for possible_person in Person.objects.filter(
Q(position__title__slug='governor') |
Q(position__title__slug__contains='aspirant') |
Q(position__title__slug='ward-representative')):
interesting_positions = possible_person.position_set.filter(
Q(title__slug='mp', organisation__slug='parliament') |
Q(title__slug='aspirant-president', organisation__slug='republic-of-kenya') |
Q(title__slug='deputy-president-aspirant', organisation__slug='republic-of-kenya') |
Q(title__slug='president', organisation__slug='republic-of-kenya') |
Q(title__slug='vice-president', organisation__slug='cabinet') |
Q(title__slug='vice-president-of-kenya', organisation__slug='cabinet') |
Q(organisation__slug='cabinet') |
Q(title__slug='senator') |
Q(title__name='Minister')
)
if interesting_positions:
print "Ignoring", possible_person, "since they have the following positions:"
for p in interesting_positions:
print " ", p
else:
if options['commit']:
print "Hiding", possible_person, "; all their positions:"
possible_person.hidden = True
possible_person.save()
else:
print "Would hide", possible_person, " (no --commit); all their positions:"
for p in possible_person.position_set.all():
print " ", p
|
Add a script for hiding people on Mzalendo, as requested
|
KE: Add a script for hiding people on Mzalendo, as requested
|
Python
|
agpl-3.0
|
mysociety/pombola,mysociety/pombola,hzj123/56th,geoffkilpin/pombola,mysociety/pombola,ken-muturi/pombola,geoffkilpin/pombola,patricmutwiri/pombola,patricmutwiri/pombola,hzj123/56th,geoffkilpin/pombola,patricmutwiri/pombola,patricmutwiri/pombola,patricmutwiri/pombola,ken-muturi/pombola,hzj123/56th,geoffkilpin/pombola,patricmutwiri/pombola,geoffkilpin/pombola,hzj123/56th,mysociety/pombola,ken-muturi/pombola,geoffkilpin/pombola,ken-muturi/pombola,ken-muturi/pombola,hzj123/56th,mysociety/pombola,ken-muturi/pombola,hzj123/56th,mysociety/pombola
|
KE: Add a script for hiding people on Mzalendo, as requested
|
from optparse import make_option
from django.core.management.base import NoArgsCommand, CommandError
from django.db.models import Q
from pombola.core.models import Person, Position
class Command(NoArgsCommand):
option_list = NoArgsCommand.option_list + (
make_option('--commit', action='store_true', dest='commit', help='Actually update the database'),
)
def handle_noargs(self, **options):
for possible_person in Person.objects.filter(
Q(position__title__slug='governor') |
Q(position__title__slug__contains='aspirant') |
Q(position__title__slug='ward-representative')):
interesting_positions = possible_person.position_set.filter(
Q(title__slug='mp', organisation__slug='parliament') |
Q(title__slug='aspirant-president', organisation__slug='republic-of-kenya') |
Q(title__slug='deputy-president-aspirant', organisation__slug='republic-of-kenya') |
Q(title__slug='president', organisation__slug='republic-of-kenya') |
Q(title__slug='vice-president', organisation__slug='cabinet') |
Q(title__slug='vice-president-of-kenya', organisation__slug='cabinet') |
Q(organisation__slug='cabinet') |
Q(title__slug='senator') |
Q(title__name='Minister')
)
if interesting_positions:
print "Ignoring", possible_person, "since they have the following positions:"
for p in interesting_positions:
print " ", p
else:
if options['commit']:
print "Hiding", possible_person, "; all their positions:"
possible_person.hidden = True
possible_person.save()
else:
print "Would hide", possible_person, " (no --commit); all their positions:"
for p in possible_person.position_set.all():
print " ", p
|
<commit_before><commit_msg>KE: Add a script for hiding people on Mzalendo, as requested<commit_after>
|
from optparse import make_option
from django.core.management.base import NoArgsCommand, CommandError
from django.db.models import Q
from pombola.core.models import Person, Position
class Command(NoArgsCommand):
option_list = NoArgsCommand.option_list + (
make_option('--commit', action='store_true', dest='commit', help='Actually update the database'),
)
def handle_noargs(self, **options):
for possible_person in Person.objects.filter(
Q(position__title__slug='governor') |
Q(position__title__slug__contains='aspirant') |
Q(position__title__slug='ward-representative')):
interesting_positions = possible_person.position_set.filter(
Q(title__slug='mp', organisation__slug='parliament') |
Q(title__slug='aspirant-president', organisation__slug='republic-of-kenya') |
Q(title__slug='deputy-president-aspirant', organisation__slug='republic-of-kenya') |
Q(title__slug='president', organisation__slug='republic-of-kenya') |
Q(title__slug='vice-president', organisation__slug='cabinet') |
Q(title__slug='vice-president-of-kenya', organisation__slug='cabinet') |
Q(organisation__slug='cabinet') |
Q(title__slug='senator') |
Q(title__name='Minister')
)
if interesting_positions:
print "Ignoring", possible_person, "since they have the following positions:"
for p in interesting_positions:
print " ", p
else:
if options['commit']:
print "Hiding", possible_person, "; all their positions:"
possible_person.hidden = True
possible_person.save()
else:
print "Would hide", possible_person, " (no --commit); all their positions:"
for p in possible_person.position_set.all():
print " ", p
|
KE: Add a script for hiding people on Mzalendo, as requestedfrom optparse import make_option
from django.core.management.base import NoArgsCommand, CommandError
from django.db.models import Q
from pombola.core.models import Person, Position
class Command(NoArgsCommand):
option_list = NoArgsCommand.option_list + (
make_option('--commit', action='store_true', dest='commit', help='Actually update the database'),
)
def handle_noargs(self, **options):
for possible_person in Person.objects.filter(
Q(position__title__slug='governor') |
Q(position__title__slug__contains='aspirant') |
Q(position__title__slug='ward-representative')):
interesting_positions = possible_person.position_set.filter(
Q(title__slug='mp', organisation__slug='parliament') |
Q(title__slug='aspirant-president', organisation__slug='republic-of-kenya') |
Q(title__slug='deputy-president-aspirant', organisation__slug='republic-of-kenya') |
Q(title__slug='president', organisation__slug='republic-of-kenya') |
Q(title__slug='vice-president', organisation__slug='cabinet') |
Q(title__slug='vice-president-of-kenya', organisation__slug='cabinet') |
Q(organisation__slug='cabinet') |
Q(title__slug='senator') |
Q(title__name='Minister')
)
if interesting_positions:
print "Ignoring", possible_person, "since they have the following positions:"
for p in interesting_positions:
print " ", p
else:
if options['commit']:
print "Hiding", possible_person, "; all their positions:"
possible_person.hidden = True
possible_person.save()
else:
print "Would hide", possible_person, " (no --commit); all their positions:"
for p in possible_person.position_set.all():
print " ", p
|
<commit_before><commit_msg>KE: Add a script for hiding people on Mzalendo, as requested<commit_after>from optparse import make_option
from django.core.management.base import NoArgsCommand, CommandError
from django.db.models import Q
from pombola.core.models import Person, Position
class Command(NoArgsCommand):
option_list = NoArgsCommand.option_list + (
make_option('--commit', action='store_true', dest='commit', help='Actually update the database'),
)
def handle_noargs(self, **options):
for possible_person in Person.objects.filter(
Q(position__title__slug='governor') |
Q(position__title__slug__contains='aspirant') |
Q(position__title__slug='ward-representative')):
interesting_positions = possible_person.position_set.filter(
Q(title__slug='mp', organisation__slug='parliament') |
Q(title__slug='aspirant-president', organisation__slug='republic-of-kenya') |
Q(title__slug='deputy-president-aspirant', organisation__slug='republic-of-kenya') |
Q(title__slug='president', organisation__slug='republic-of-kenya') |
Q(title__slug='vice-president', organisation__slug='cabinet') |
Q(title__slug='vice-president-of-kenya', organisation__slug='cabinet') |
Q(organisation__slug='cabinet') |
Q(title__slug='senator') |
Q(title__name='Minister')
)
if interesting_positions:
print "Ignoring", possible_person, "since they have the following positions:"
for p in interesting_positions:
print " ", p
else:
if options['commit']:
print "Hiding", possible_person, "; all their positions:"
possible_person.hidden = True
possible_person.save()
else:
print "Would hide", possible_person, " (no --commit); all their positions:"
for p in possible_person.position_set.all():
print " ", p
|
|
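The same dry-run-then-commit pattern applies here; a minimal sketch via Django's programmatic entry point:

from django.core.management import call_command

call_command('kenya_hide_people')               # report only: prints "Would hide ..."
call_command('kenya_hide_people', commit=True)  # hides the matched people and saves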
4e2bcaa57bcf3bd05c40cf8723032845a20a5c60
|
py/minimum-index-sum-of-two-lists.py
|
py/minimum-index-sum-of-two-lists.py
|
from collections import defaultdict
class Solution(object):
def findRestaurant(self, list1, list2):
"""
:type list1: List[str]
:type list2: List[str]
:rtype: List[str]
"""
d1 = {x: i for (i, x) in enumerate(list1)}
min_idxes = []
min_idx_sum = len(list1) + len(list2)
for i, r in enumerate(list2):
if r in d1:
if i + d1[r] < min_idx_sum:
min_idx_sum = i + d1[r]
min_idxes = [r]
elif i + d1[r] == min_idx_sum:
min_idxes.append(r)
return min_idxes
|
Add py solution for 599. Minimum Index Sum of Two Lists
|
Add py solution for 599. Minimum Index Sum of Two Lists
599. Minimum Index Sum of Two Lists: https://leetcode.com/problems/minimum-index-sum-of-two-lists/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 599. Minimum Index Sum of Two Lists
599. Minimum Index Sum of Two Lists: https://leetcode.com/problems/minimum-index-sum-of-two-lists/
|
from collections import defaultdict
class Solution(object):
def findRestaurant(self, list1, list2):
"""
:type list1: List[str]
:type list2: List[str]
:rtype: List[str]
"""
d1 = {x: i for (i, x) in enumerate(list1)}
min_idxes = []
min_idx_sum = len(list1) + len(list2)
for i, r in enumerate(list2):
if r in d1:
if i + d1[r] < min_idx_sum:
min_idx_sum = i + d1[r]
min_idxes = [r]
elif i + d1[r] == min_idx_sum:
min_idxes.append(r)
return min_idxes
|
<commit_before><commit_msg>Add py solution for 599. Minimum Index Sum of Two Lists
599. Minimum Index Sum of Two Lists: https://leetcode.com/problems/minimum-index-sum-of-two-lists/<commit_after>
|
from collections import defaultdict
class Solution(object):
def findRestaurant(self, list1, list2):
"""
:type list1: List[str]
:type list2: List[str]
:rtype: List[str]
"""
d1 = {x: i for (i, x) in enumerate(list1)}
min_idxes = []
min_idx_sum = len(list1) + len(list2)
for i, r in enumerate(list2):
if r in d1:
if i + d1[r] < min_idx_sum:
min_idx_sum = i + d1[r]
min_idxes = [r]
elif i + d1[r] == min_idx_sum:
min_idxes.append(r)
return min_idxes
|
Add py solution for 599. Minimum Index Sum of Two Lists
599. Minimum Index Sum of Two Lists: https://leetcode.com/problems/minimum-index-sum-of-two-lists/from collections import defaultdict
class Solution(object):
def findRestaurant(self, list1, list2):
"""
:type list1: List[str]
:type list2: List[str]
:rtype: List[str]
"""
d1 = {x: i for (i, x) in enumerate(list1)}
min_idxes = []
min_idx_sum = len(list1) + len(list2)
for i, r in enumerate(list2):
if r in d1:
if i + d1[r] < min_idx_sum:
min_idx_sum = i + d1[r]
min_idxes = [r]
elif i + d1[r] == min_idx_sum:
min_idxes.append(r)
return min_idxes
|
<commit_before><commit_msg>Add py solution for 599. Minimum Index Sum of Two Lists
599. Minimum Index Sum of Two Lists: https://leetcode.com/problems/minimum-index-sum-of-two-lists/<commit_after>from collections import defaultdict
class Solution(object):
def findRestaurant(self, list1, list2):
"""
:type list1: List[str]
:type list2: List[str]
:rtype: List[str]
"""
d1 = {x: i for (i, x) in enumerate(list1)}
min_idxes = []
min_idx_sum = len(list1) + len(list2)
for i, r in enumerate(list2):
if r in d1:
if i + d1[r] < min_idx_sum:
min_idx_sum = i + d1[r]
min_idxes = [r]
elif i + d1[r] == min_idx_sum:
min_idxes.append(r)
return min_idxes
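A quick usage sketch with the classic example for this problem (assumes the Solution class above is importable as-is):

s = Solution()
# 'Shogun' is the only common restaurant; its index sum is 0 + 3 = 3
print(s.findRestaurant(
    ["Shogun", "Tapioca Express", "Burger King", "KFC"],
    ["Piatti", "The Grill at Torrey Pines", "Hungry Hunter Steakhouse", "Shogun"]))
# -> ['Shogun']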
|
|
45a46bec14c2c0a2793083cb391f29a632281f11
|
senlin/tests/tempest/api/profiles/test_profile_type.py
|
senlin/tests/tempest/api/profiles/test_profile_type.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin.tests.tempest.api import base
class TestProfileType(base.BaseSenlinTest):
@decorators.idempotent_id('fa0cf9e3-5b75-4d4d-9a0f-1748772b65d3')
def test_profile_type_list(self):
res = self.client.list_objs('profile-types')
# Verify resp of profile type list API
self.assertEqual(200, res['status'])
self.assertIsNotNone(res['body'])
profile_types = res['body']
for profile_type in profile_types:
self.assertIn('name', profile_type)
@decorators.idempotent_id('198165b3-1c1f-4801-8918-90c1adbf57c8')
def test_profile_type_show(self):
res = self.client.get_obj('profile-types', 'os.nova.server-1.0')
# Verify resp of profile type show API
self.assertEqual(200, res['status'])
self.assertIsNotNone(res['body'])
profile_type = res['body']
for key in ['name', 'schema']:
self.assertIn(key, profile_type)
self.assertEqual('os.nova.server-1.0', profile_type['name'])
|
Add API test for profile type show/list
|
Add API test for profile type show/list
Change-Id: Ibb260e41f5ddcc9ac1e6603a8e749167927efc54
|
Python
|
apache-2.0
|
openstack/senlin,stackforge/senlin,openstack/senlin,openstack/senlin,stackforge/senlin
|
Add API test for profile type show/list
Change-Id: Ibb260e41f5ddcc9ac1e6603a8e749167927efc54
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin.tests.tempest.api import base
class TestProfileType(base.BaseSenlinTest):
@decorators.idempotent_id('fa0cf9e3-5b75-4d4d-9a0f-1748772b65d3')
def test_profile_type_list(self):
res = self.client.list_objs('profile-types')
# Verify resp of profile type list API
self.assertEqual(200, res['status'])
self.assertIsNotNone(res['body'])
profile_types = res['body']
for profile_type in profile_types:
self.assertIn('name', profile_type)
@decorators.idempotent_id('198165b3-1c1f-4801-8918-90c1adbf57c8')
def test_profile_type_show(self):
res = self.client.get_obj('profile-types', 'os.nova.server-1.0')
# Verify resp of profile type show API
self.assertEqual(200, res['status'])
self.assertIsNotNone(res['body'])
profile_type = res['body']
for key in ['name', 'schema']:
self.assertIn(key, profile_type)
self.assertEqual('os.nova.server-1.0', profile_type['name'])
|
<commit_before><commit_msg>Add API test for profile type show/list
Change-Id: Ibb260e41f5ddcc9ac1e6603a8e749167927efc54<commit_after>
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin.tests.tempest.api import base
class TestProfileType(base.BaseSenlinTest):
@decorators.idempotent_id('fa0cf9e3-5b75-4d4d-9a0f-1748772b65d3')
def test_profile_type_list(self):
res = self.client.list_objs('profile-types')
# Verify resp of profile type list API
self.assertEqual(200, res['status'])
self.assertIsNotNone(res['body'])
profile_types = res['body']
for profile_type in profile_types:
self.assertIn('name', profile_type)
@decorators.idempotent_id('198165b3-1c1f-4801-8918-90c1adbf57c8')
def test_profile_type_show(self):
res = self.client.get_obj('profile-types', 'os.nova.server-1.0')
# Verify resp of profile type show API
self.assertEqual(200, res['status'])
self.assertIsNotNone(res['body'])
profile_type = res['body']
for key in ['name', 'schema']:
self.assertIn(key, profile_type)
self.assertEqual('os.nova.server-1.0', profile_type['name'])
|
Add API test for profile type show/list
Change-Id: Ibb260e41f5ddcc9ac1e6603a8e749167927efc54# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin.tests.tempest.api import base
class TestProfileType(base.BaseSenlinTest):
@decorators.idempotent_id('fa0cf9e3-5b75-4d4d-9a0f-1748772b65d3')
def test_profile_type_list(self):
res = self.client.list_objs('profile-types')
# Verify resp of profile type list API
self.assertEqual(200, res['status'])
self.assertIsNotNone(res['body'])
profile_types = res['body']
for profile_type in profile_types:
self.assertIn('name', profile_type)
@decorators.idempotent_id('198165b3-1c1f-4801-8918-90c1adbf57c8')
def test_profile_type_show(self):
res = self.client.get_obj('profile-types', 'os.nova.server-1.0')
# Verify resp of profile type show API
self.assertEqual(200, res['status'])
self.assertIsNotNone(res['body'])
profile_type = res['body']
for key in ['name', 'schema']:
self.assertIn(key, profile_type)
self.assertEqual('os.nova.server-1.0', profile_type['name'])
|
<commit_before><commit_msg>Add API test for profile type show/list
Change-Id: Ibb260e41f5ddcc9ac1e6603a8e749167927efc54<commit_after># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin.tests.tempest.api import base
class TestProfileType(base.BaseSenlinTest):
@decorators.idempotent_id('fa0cf9e3-5b75-4d4d-9a0f-1748772b65d3')
def test_profile_type_list(self):
res = self.client.list_objs('profile-types')
# Verify resp of profile type list API
self.assertEqual(200, res['status'])
self.assertIsNotNone(res['body'])
profile_types = res['body']
for profile_type in profile_types:
self.assertIn('name', profile_type)
@decorators.idempotent_id('198165b3-1c1f-4801-8918-90c1adbf57c8')
def test_profile_type_show(self):
res = self.client.get_obj('profile-types', 'os.nova.server-1.0')
# Verify resp of profile type show API
self.assertEqual(200, res['status'])
self.assertIsNotNone(res['body'])
profile_type = res['body']
for key in ['name', 'schema']:
self.assertIn(key, profile_type)
self.assertEqual('os.nova.server-1.0', profile_type['name'])
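A hedged sketch of an extra assertion that could live in the same test class, reusing only the client calls shown above, to check that the built-in profile type appears in the listing:

res = self.client.list_objs('profile-types')
names = [pt['name'] for pt in res['body']]
# 'os.nova.server-1.0' is the type exercised by the show test above
self.assertIn('os.nova.server-1.0', names)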
|
|
a4e87163af7be902829bbaccd2076e4ff0f9bb34
|
tools/plot_excit_dist.py
|
tools/plot_excit_dist.py
|
#!/usr/bin/env python
'''Plot excitation distribution from dmqmc output.'''
import os
import sys
import matplotlib.pyplot as pl
import argparse
try:
import pyhande as ph
except ImportError:
_script_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(_script_dir, '../pyhande'))
import pyhande as ph
def plot_excit_dist(filename, plotfile, calc, max_excit):
''' Plot excitation distribution.
    Parameters
    ----------
filename : string
file to plot from.
plotfile : string or None
name of file to plot to.
calc : int
calculation number to plot.
max_excit : int or None
maximum excitation level to plot to.
'''
data = ph.extract.extract_data(filename)
(m, d) = data[calc]
if not max_excit:
max_excit = int(m['system']['max_number_excitations'])
for e in range(0, max_excit):
pl.plot(d['iterations']*m['qmc']['tau'], d['Excit. level %s'%e],
label=r'$n_{\mathrm{ex}} = %s$'%e)
pl.legend(numpoints=1, loc='best')
if m['ipdmqmc']['propagate_to_beta']:
pl.xlabel(r'$\tau$')
else:
pl.xlabel(r'$\beta$')
pl.ylabel('Weight')
if plotfile:
        pl.savefig(plotfile+'.png', format='png')
else:
pl.show()
def parse_args(args):
parser = argparse.ArgumentParser(description='Plot the excitation'
                                     ' distribution of a DMQMC calculation.')
parser.add_argument('-p', '--plotfile', default=None, help='File to save '
'the graphs to. The graphs are shown interactively by '
'default.')
parser.add_argument('calc', type=int, help='Calculation number to plot. '
'C indexed.', default=0)
parser.add_argument('-m', '--max-excit', action='store', dest='max_excit',
type=int, help='Plot up to maximum excitation '
'distribution. Plot all by default.', default=None)
parser.add_argument('file', help='File to plot.')
opts = parser.parse_args(args)
return (opts.file, opts.plotfile, opts.calc, opts.max_excit)
if __name__ == '__main__':
(datafile, plotfile, calc, max_excit) = parse_args(sys.argv[1:])
plot_excit_dist(datafile, plotfile, calc, max_excit)
|
Add script to plot excitation distribution from output.
|
Add script to plot excitation distribution from output.
I think this is useful enough to warrant a script in the repo.
|
Python
|
lgpl-2.1
|
hande-qmc/hande,hande-qmc/hande,ruthfranklin/hande,hande-qmc/hande,hande-qmc/hande,hande-qmc/hande
|
Add script to plot excitation distribution from output.
I think this is useful enough to warrant a script in the repo.
|
#!/usr/bin/env python
'''Plot excitation distribution from dmqmc output.'''
import os
import sys
import matplotlib.pyplot as pl
import argparse
try:
import pyhande as ph
except ImportError:
_script_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(_script_dir, '../pyhande'))
import pyhande as ph
def plot_excit_dist(filename, plotfile, calc, max_excit):
''' Plot excitation distribution.
    Parameters
    ----------
filename : string
file to plot from.
plotfile : string or None
name of file to plot to.
calc : int
calculation number to plot.
max_excit : int or None
maximum excitation level to plot to.
'''
data = ph.extract.extract_data(filename)
(m, d) = data[calc]
if not max_excit:
max_excit = int(m['system']['max_number_excitations'])
for e in range(0, max_excit):
pl.plot(d['iterations']*m['qmc']['tau'], d['Excit. level %s'%e],
label=r'$n_{\mathrm{ex}} = %s$'%e)
pl.legend(numpoints=1, loc='best')
if m['ipdmqmc']['propagate_to_beta']:
pl.xlabel(r'$\tau$')
else:
pl.xlabel(r'$\beta$')
pl.ylabel('Weight')
if plotfile:
        pl.savefig(plotfile+'.png', format='png')
else:
pl.show()
def parse_args(args):
parser = argparse.ArgumentParser(description='Plot the excitation'
                                     ' distribution of a DMQMC calculation.')
parser.add_argument('-p', '--plotfile', default=None, help='File to save '
'the graphs to. The graphs are shown interactively by '
'default.')
parser.add_argument('calc', type=int, help='Calculation number to plot. '
'C indexed.', default=0)
parser.add_argument('-m', '--max-excit', action='store', dest='max_excit',
type=int, help='Plot up to maximum excitation '
'distribution. Plot all by default.', default=None)
parser.add_argument('file', help='File to plot.')
opts = parser.parse_args(args)
return (opts.file, opts.plotfile, opts.calc, opts.max_excit)
if __name__ == '__main__':
(datafile, plotfile, calc, max_excit) = parse_args(sys.argv[1:])
plot_excit_dist(datafile, plotfile, calc, max_excit)
|
<commit_before><commit_msg>Add script to plot excitation distribution from output.
I think this is useful enough to warrant a script in the repo.<commit_after>
|
#!/usr/bin/env python
'''Plot excitation distribution from dmqmc output.'''
import os
import sys
import matplotlib.pyplot as pl
import argparse
try:
import pyhande as ph
except ImportError:
_script_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(_script_dir, '../pyhande'))
import pyhande as ph
def plot_excit_dist(filename, plotfile, calc, max_excit):
''' Plot excitation distribution.
    Parameters
    ----------
filename : string
file to plot from.
plotfile : string or None
name of file to plot to.
calc : int
calculation number to plot.
max_excit : int or None
maximum excitation level to plot to.
'''
data = ph.extract.extract_data(filename)
(m, d) = data[calc]
if not max_excit:
max_excit = int(m['system']['max_number_excitations'])
for e in range(0, max_excit):
pl.plot(d['iterations']*m['qmc']['tau'], d['Excit. level %s'%e],
label=r'$n_{\mathrm{ex}} = %s$'%e)
pl.legend(numpoints=1, loc='best')
if m['ipdmqmc']['propagate_to_beta']:
pl.xlabel(r'$\tau$')
else:
pl.xlabel(r'$\beta$')
pl.ylabel('Weight')
if plotfile:
        pl.savefig(plotfile+'.png', format='png')
else:
pl.show()
def parse_args(args):
parser = argparse.ArgumentParser(description='Plot the excitation'
                                     ' distribution of a DMQMC calculation.')
parser.add_argument('-p', '--plotfile', default=None, help='File to save '
'the graphs to. The graphs are shown interactively by '
'default.')
parser.add_argument('calc', type=int, help='Calculation number to plot. '
'C indexed.', default=0)
parser.add_argument('-m', '--max-excit', action='store', dest='max_excit',
type=int, help='Plot up to maximum excitation '
'distribution. Plot all by default.', default=None)
parser.add_argument('file', help='File to plot.')
opts = parser.parse_args(args)
return (opts.file, opts.plotfile, opts.calc, opts.max_excit)
if __name__ == '__main__':
(datafile, plotfile, calc, max_excit) = parse_args(sys.argv[1:])
plot_excit_dist(datafile, plotfile, calc, max_excit)
|
Add script to plot excitation distribution from output.
I think this is useful enough to warrant a script in the repo.#!/usr/bin/env python
'''Plot excitation distribution from dmqmc output.'''
import os
import sys
import matplotlib.pyplot as pl
import argparse
try:
import pyhande as ph
except ImportError:
_script_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(_script_dir, '../pyhande'))
import pyhande as ph
def plot_excit_dist(filename, plotfile, calc, max_excit):
''' Plot excitation distribution.
    Parameters
    ----------
filename : string
file to plot from.
plotfile : string or None
name of file to plot to.
calc : int
calculation number to plot.
max_excit : int or None
maximum excitation level to plot to.
'''
data = ph.extract.extract_data(filename)
(m, d) = data[calc]
if not max_excit:
max_excit = int(m['system']['max_number_excitations'])
for e in range(0, max_excit):
pl.plot(d['iterations']*m['qmc']['tau'], d['Excit. level %s'%e],
label=r'$n_{\mathrm{ex}} = %s$'%e)
pl.legend(numpoints=1, loc='best')
if m['ipdmqmc']['propagate_to_beta']:
pl.xlabel(r'$\tau$')
else:
pl.xlabel(r'$\beta$')
pl.ylabel('Weight')
if plotfile:
        pl.savefig(plotfile+'.png', format='png')
else:
pl.show()
def parse_args(args):
parser = argparse.ArgumentParser(description='Plot the excitation'
                                     ' distribution of a DMQMC calculation.')
parser.add_argument('-p', '--plotfile', default=None, help='File to save '
'the graphs to. The graphs are shown interactively by '
'default.')
parser.add_argument('calc', type=int, help='Calculation number to plot. '
'C indexed.', default=0)
parser.add_argument('-m', '--max-excit', action='store', dest='max_excit',
type=int, help='Plot up to maximum excitation '
'distribution. Plot all by default.', default=None)
parser.add_argument('file', help='File to plot.')
opts = parser.parse_args(args)
return (opts.file, opts.plotfile, opts.calc, opts.max_excit)
if __name__ == '__main__':
(datafile, plotfile, calc, max_excit) = parse_args(sys.argv[1:])
plot_excit_dist(datafile, plotfile, calc, max_excit)
|
<commit_before><commit_msg>Add script to plot excitation distribution from output.
I think this is useful enough to warrant a script in the repo.<commit_after>#!/usr/bin/env python
'''Plot excitation distribution from dmqmc output.'''
import os
import sys
import matplotlib.pyplot as pl
import argparse
try:
import pyhande as ph
except ImportError:
_script_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(_script_dir, '../pyhande'))
import pyhande as ph
def plot_excit_dist(filename, plotfile, calc, max_excit):
''' Plot excitation distribution.
    Parameters
    ----------
filename : string
file to plot from.
plotfile : string or None
name of file to plot to.
calc : int
calculation number to plot.
max_excit : int or None
maximum excitation level to plot to.
'''
data = ph.extract.extract_data(filename)
(m, d) = data[calc]
if not max_excit:
max_excit = int(m['system']['max_number_excitations'])
for e in range(0, max_excit):
pl.plot(d['iterations']*m['qmc']['tau'], d['Excit. level %s'%e],
label=r'$n_{\mathrm{ex}} = %s$'%e)
pl.legend(numpoints=1, loc='best')
if m['ipdmqmc']['propagate_to_beta']:
pl.xlabel(r'$\tau$')
else:
pl.xlabel(r'$\beta$')
pl.ylabel('Weight')
if plotfile:
        pl.savefig(plotfile+'.png', format='png')
else:
pl.show()
def parse_args(args):
parser = argparse.ArgumentParser(description='Plot the excitation'
                                     ' distribution of a DMQMC calculation.')
parser.add_argument('-p', '--plotfile', default=None, help='File to save '
'the graphs to. The graphs are shown interactively by '
'default.')
parser.add_argument('calc', type=int, help='Calculation number to plot. '
'C indexed.', default=0)
parser.add_argument('-m', '--max-excit', action='store', dest='max_excit',
type=int, help='Plot up to maximum excitation '
'distribution. Plot all by default.', default=None)
parser.add_argument('file', help='File to plot.')
opts = parser.parse_args(args)
return (opts.file, opts.plotfile, opts.calc, opts.max_excit)
if __name__ == '__main__':
(datafile, plotfile, calc, max_excit) = parse_args(sys.argv[1:])
plot_excit_dist(datafile, plotfile, calc, max_excit)
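For reference, one possible invocation given the argparse setup above (the input and output file names are illustrative):

python plot_excit_dist.py -m 4 -p excit_dist 0 hande.out

This plots excitation levels 0-3 of the first calculation in hande.out and writes excit_dist.png.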
|
|
6967884fa4df44a0a7fb3b81986dc2d4dc2818b3
|
hera_mc/tests/test_default_db_schema.py
|
hera_mc/tests/test_default_db_schema.py
|
# -*- mode: python; coding: utf-8 -*-
# Copyright 2016 the HERA Collaboration
# Licensed under the 2-clause BSD license.
"""
Test that default database matches code schema.
"""
from sqlalchemy.orm import sessionmaker
from hera_mc import mc, MCDeclarativeBase
from hera_mc.db_check import is_sane_database
def test_default_db_schema():
default_db = mc.connect_to_mc_db(None)
engine = default_db.engine
conn = engine.connect()
trans = conn.begin()
Session = sessionmaker(bind=engine)
session = Session()
assert is_sane_database(MCDeclarativeBase, session) is True
|
Add test for default_db schema
|
Add test for default_db schema
|
Python
|
bsd-2-clause
|
HERA-Team/hera_mc,HERA-Team/Monitor_and_Control,HERA-Team/hera_mc
|
Add test for default_db schema
|
# -*- mode: python; coding: utf-8 -*-
# Copyright 2016 the HERA Collaboration
# Licensed under the 2-clause BSD license.
"""
Test that default database matches code schema.
"""
from sqlalchemy.orm import sessionmaker
from hera_mc import mc, MCDeclarativeBase
from hera_mc.db_check import is_sane_database
def test_default_db_schema():
default_db = mc.connect_to_mc_db(None)
engine = default_db.engine
conn = engine.connect()
trans = conn.begin()
Session = sessionmaker(bind=engine)
session = Session()
assert is_sane_database(MCDeclarativeBase, session) is True
|
<commit_before><commit_msg>Add test for default_db schema<commit_after>
|
# -*- mode: python; coding: utf-8 -*-
# Copyright 2016 the HERA Collaboration
# Licensed under the 2-clause BSD license.
"""
Test that default database matches code schema.
"""
from sqlalchemy.orm import sessionmaker
from hera_mc import mc, MCDeclarativeBase
from hera_mc.db_check import is_sane_database
def test_default_db_schema():
default_db = mc.connect_to_mc_db(None)
engine = default_db.engine
conn = engine.connect()
trans = conn.begin()
Session = sessionmaker(bind=engine)
session = Session()
assert is_sane_database(MCDeclarativeBase, session) is True
|
Add test for default_db schema# -*- mode: python; coding: utf-8 -*-
# Copyright 2016 the HERA Collaboration
# Licensed under the 2-clause BSD license.
"""
Test that default database matches code schema.
"""
from sqlalchemy.orm import sessionmaker
from hera_mc import mc, MCDeclarativeBase
from hera_mc.db_check import is_sane_database
def test_default_db_schema():
default_db = mc.connect_to_mc_db(None)
engine = default_db.engine
conn = engine.connect()
trans = conn.begin()
Session = sessionmaker(bind=engine)
session = Session()
assert is_sane_database(MCDeclarativeBase, session) is True
|
<commit_before><commit_msg>Add test for default_db schema<commit_after># -*- mode: python; coding: utf-8 -*-
# Copyright 2016 the HERA Collaboration
# Licensed under the 2-clause BSD license.
"""
Test that default database matches code schema.
"""
from sqlalchemy.orm import sessionmaker
from hera_mc import mc, MCDeclarativeBase
from hera_mc.db_check import is_sane_database
def test_default_db_schema():
default_db = mc.connect_to_mc_db(None)
engine = default_db.engine
conn = engine.connect()
trans = conn.begin()
Session = sessionmaker(bind=engine)
session = Session()
assert is_sane_database(MCDeclarativeBase, session) is True
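The same check can also be run ad hoc outside the test runner; a minimal sketch using only the calls already imported above:

from sqlalchemy.orm import sessionmaker
from hera_mc import mc, MCDeclarativeBase
from hera_mc.db_check import is_sane_database

db = mc.connect_to_mc_db(None)
session = sessionmaker(bind=db.engine)()
print(is_sane_database(MCDeclarativeBase, session))  # True if the schema matches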
|
|
85cf22ff02a7c4af576d6553ae721d174364e357
|
opps/core/admin/channel.py
|
opps/core/admin/channel.py
|
# -*- coding: utf-8 -*-
from datetime import datetime
from django.contrib import admin
from opps.core.models import Channel
class ChannelAdmin(admin.ModelAdmin):
prepopulated_fields = {"slug": ("name",)}
admin.site.register(Channel, ChannelAdmin)
|
Add basic core admin on Channel
|
Add basic core admin on Channel
|
Python
|
mit
|
williamroot/opps,williamroot/opps,YACOWS/opps,williamroot/opps,YACOWS/opps,opps/opps,YACOWS/opps,YACOWS/opps,jeanmask/opps,opps/opps,williamroot/opps,opps/opps,jeanmask/opps,opps/opps,jeanmask/opps,jeanmask/opps
|
Add basic core admin on Channel
|
# -*- coding: utf-8 -*-
from datetime import datetime
from django.contrib import admin
from opps.core.models import Channel
class ChannelAdmin(admin.ModelAdmin):
prepopulated_fields = {"slug": ("name",)}
admin.site.register(Channel, ChannelAdmin)
|
<commit_before><commit_msg>Add basic core admin on Channel<commit_after>
|
# -*- coding: utf-8 -*-
from datetime import datetime
from django.contrib import admin
from opps.core.models import Channel
class ChannelAdmin(admin.ModelAdmin):
prepopulated_fields = {"slug": ("name",)}
admin.site.register(Channel, ChannelAdmin)
|
Add basic core admin on Channel# -*- coding: utf-8 -*-
from datetime import datetime
from django.contrib import admin
from opps.core.models import Channel
class ChannelAdmin(admin.ModelAdmin):
prepopulated_fields = {"slug": ("name",)}
admin.site.register(Channel, ChannelAdmin)
|
<commit_before><commit_msg>Add basic core admin on Channel<commit_after># -*- coding: utf-8 -*-
from datetime import datetime
from django.contrib import admin
from opps.core.models import Channel
class ChannelAdmin(admin.ModelAdmin):
prepopulated_fields = {"slug": ("name",)}
admin.site.register(Channel, ChannelAdmin)
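A hedged sketch of an obvious next step (list_display is standard Django admin; the field names are assumptions about the Channel model):

class ChannelAdmin(admin.ModelAdmin):
    prepopulated_fields = {"slug": ("name",)}
    # columns shown on the changelist page; assumes Channel has these fields
    list_display = ("name", "slug")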
|
|
d89a95534e3cd3c847a1adc504bc23e271b2ecbe
|
ndtable/datashape/tests/test_records.py
|
ndtable/datashape/tests/test_records.py
|
from ndtable import RecordDecl
from ndtable import float32, int32
from numpy import dtype
class Simple(RecordDecl):
foo = int32
bar = float32
__dummy = True
def test_to_numpy():
converted = Simple.to_numpy()
assert converted == dtype([('foo', '<i4'), ('bar', '<f4')])
|
Test for record dshape <-> dtype conversions.
|
Test for record dshape <-> dtype conversions.
|
Python
|
bsd-2-clause
|
seibert/blaze-core,seibert/blaze-core,seibert/blaze-core,seibert/blaze-core,seibert/blaze-core
|
Test for record dshape <-> dtype conversions.
|
from ndtable import RecordDecl
from ndtable import float32, int32
from numpy import dtype
class Simple(RecordDecl):
foo = int32
bar = float32
__dummy = True
def test_to_numpy():
converted = Simple.to_numpy()
assert converted == dtype([('foo', '<i4'), ('bar', '<f4')])
|
<commit_before><commit_msg>Test for record dshape <-> dtype conversions.<commit_after>
|
from ndtable import RecordDecl
from ndtable import float32, int32
from numpy import dtype
class Simple(RecordDecl):
foo = int32
bar = float32
__dummy = True
def test_to_numpy():
converted = Simple.to_numpy()
assert converted == dtype([('foo', '<i4'), ('bar', '<f4')])
|
Test for record dshape <-> dtype conversions.from ndtable import RecordDecl
from ndtable import float32, int32
from numpy import dtype
class Simple(RecordDecl):
foo = int32
bar = float32
__dummy = True
def test_to_numpy():
converted = Simple.to_numpy()
assert converted == dtype([('foo', '<i4'), ('bar', '<f4')])
|
<commit_before><commit_msg>Test for record dshape <-> dtype conversions.<commit_after>from ndtable import RecordDecl
from ndtable import float32, int32
from numpy import dtype
class Simple(RecordDecl):
foo = int32
bar = float32
__dummy = True
def test_to_numpy():
converted = Simple.to_numpy()
assert converted == dtype([('foo', '<i4'), ('bar', '<f4')])
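The converted dtype behaves like any numpy structured dtype; a small sketch:

from numpy import dtype
dt = dtype([('foo', '<i4'), ('bar', '<f4')])
print(dt.names)     # ('foo', 'bar')
print(dt['foo'])    # int32
print(dt.itemsize)  # 8 -- four bytes for foo plus four for bar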
|
|
5ed02c90e85de86b54f423ed89cc0c8285944046
|
hfetch/tests/python/tryKeyError.py
|
hfetch/tests/python/tryKeyError.py
|
from hfetch import *
import time
contact_names = ['127.0.0.1']
nodePort = 9042
keyspace = 'test'
table = 'particle'
token_ranges = [(8070430489100699999,8070450532247928832)]
num_keys = 10001
non_existent_keys = 10
cache_size = num_keys+non_existent_keys
try:
connectCassandra(contact_names,nodePort)
except Exception:
    print 'can\'t connect, verify the contact points and port',contact_names,nodePort
cache = Hcache(keyspace,table,"",token_ranges,["partid","time"],["ciao","x","y","z"],{'cache_size':cache_size})
# Access the cache, which is empty and queries cassandra to retrieve the data
t1 = time.time()
error_counter = 0
for pk in xrange(0, num_keys+non_existent_keys):
ck = pk*10
try:
result = cache.get_row([pk, ck])
except KeyError as e:
error_counter = error_counter + 1
print 'Retrieved {0} keys in {1} seconds. {2} keys weren\'t found, {3} keys weren\'t supposed to be found'.format(unicode(str(num_keys),'utf-8'),
unicode(str(time.time()-t1),'utf-8'),unicode(str(error_counter),'utf-8'),unicode(str(non_existent_keys),'utf-8'))
assert(error_counter==non_existent_keys)
# Access the cache, which has already all the data and will ask cassandra only if
# the keys asked are not present
t1 = time.time()
error_counter = 0
for pk in xrange(0, num_keys+non_existent_keys):
ck = pk*10
try:
result = cache.get_row([pk, ck])
except KeyError as e:
error_counter = error_counter + 1
print 'Retrieved {0} keys in {1} seconds. {2} keys weren\'t found, {3} keys weren\'t supposed to be found'.format(unicode(str(num_keys),'utf-8'),
unicode(str(time.time()-t1),'utf-8'),unicode(str(error_counter),'utf-8'),unicode(str(non_existent_keys),'utf-8'))
assert(error_counter==non_existent_keys)
|
Test to verify behaviour when key not found
|
Test to verify behaviour when key not found
|
Python
|
apache-2.0
|
bsc-dd/hecuba,bsc-dd/hecuba,bsc-dd/hecuba,bsc-dd/hecuba
|
Test to verify behaviour when key not found
|
from hfetch import *
import time
contact_names = ['127.0.0.1']
nodePort = 9042
keyspace = 'test'
table = 'particle'
token_ranges = [(8070430489100699999,8070450532247928832)]
num_keys = 10001
non_existent_keys = 10
cache_size = num_keys+non_existent_keys
try:
connectCassandra(contact_names,nodePort)
except Exception:
    print 'can\'t connect, verify the contact points and port',contact_names,nodePort
cache = Hcache(keyspace,table,"",token_ranges,["partid","time"],["ciao","x","y","z"],{'cache_size':cache_size})
# Access the cache, which is empty and queries cassandra to retrieve the data
t1 = time.time()
error_counter = 0
for pk in xrange(0, num_keys+non_existent_keys):
ck = pk*10
try:
result = cache.get_row([pk, ck])
except KeyError as e:
error_counter = error_counter + 1
print 'Retrieved {0} keys in {1} seconds. {2} keys weren\'t found, {3} keys weren\'t supposed to be found'.format(unicode(str(num_keys),'utf-8'),
unicode(str(time.time()-t1),'utf-8'),unicode(str(error_counter),'utf-8'),unicode(str(non_existent_keys),'utf-8'))
assert(error_counter==non_existent_keys)
# Access the cache, which has already all the data and will ask cassandra only if
# the keys asked are not present
t1 = time.time()
error_counter = 0
for pk in xrange(0, num_keys+non_existent_keys):
ck = pk*10
try:
result = cache.get_row([pk, ck])
except KeyError as e:
error_counter = error_counter + 1
print 'Retrieved {0} keys in {1} seconds. {2} keys weren\'t found, {3} keys weren\'t supposed to be found'.format(unicode(str(num_keys),'utf-8'),
unicode(str(time.time()-t1),'utf-8'),unicode(str(error_counter),'utf-8'),unicode(str(non_existent_keys),'utf-8'))
assert(error_counter==non_existent_keys)
|
<commit_before><commit_msg>Test to verify behaviour when key not found<commit_after>
|
from hfetch import *
import time
contact_names = ['127.0.0.1']
nodePort = 9042
keyspace = 'test'
table = 'particle'
token_ranges = [(8070430489100699999,8070450532247928832)]
num_keys = 10001
non_existent_keys = 10
cache_size = num_keys+non_existent_keys
try:
connectCassandra(contact_names,nodePort)
except Exception:
    print 'can\'t connect, verify the contact points and port',contact_names,nodePort
cache = Hcache(keyspace,table,"",token_ranges,["partid","time"],["ciao","x","y","z"],{'cache_size':cache_size})
# Access the cache, which is empty and queries cassandra to retrieve the data
t1 = time.time()
error_counter = 0
for pk in xrange(0, num_keys+non_existent_keys):
ck = pk*10
try:
result = cache.get_row([pk, ck])
except KeyError as e:
error_counter = error_counter + 1
print 'Retrieved {0} keys in {1} seconds. {2} keys weren\'t found, {3} keys weren\'t supposed to be found'.format(unicode(str(num_keys),'utf-8'),
unicode(str(time.time()-t1),'utf-8'),unicode(str(error_counter),'utf-8'),unicode(str(non_existent_keys),'utf-8'))
assert(error_counter==non_existent_keys)
# Access the cache, which has already all the data and will ask cassandra only if
# the keys asked are not present
t1 = time.time()
error_counter = 0
for pk in xrange(0, num_keys+non_existent_keys):
ck = pk*10
try:
result = cache.get_row([pk, ck])
except KeyError as e:
error_counter = error_counter + 1
print 'Retrieved {0} keys in {1} seconds. {2} keys weren\'t found, {3} keys weren\'t supposed to be found'.format(unicode(str(num_keys),'utf-8'),
unicode(str(time.time()-t1),'utf-8'),unicode(str(error_counter),'utf-8'),unicode(str(non_existent_keys),'utf-8'))
assert(error_counter==non_existent_keys)
|
Test to verify behaviour when key not foundfrom hfetch import *
import time
contact_names = ['127.0.0.1']
nodePort = 9042
keyspace = 'test'
table = 'particle'
token_ranges = [(8070430489100699999,8070450532247928832)]
num_keys = 10001
non_existent_keys = 10
cache_size = num_keys+non_existent_keys
try:
connectCassandra(contact_names,nodePort)
except Exception:
    print 'can\'t connect, verify the contact points and port',contact_names,nodePort
cache = Hcache(keyspace,table,"",token_ranges,["partid","time"],["ciao","x","y","z"],{'cache_size':cache_size})
# Access the cache, which is empty and queries cassandra to retrieve the data
t1 = time.time()
error_counter = 0
for pk in xrange(0, num_keys+non_existent_keys):
ck = pk*10
try:
result = cache.get_row([pk, ck])
except KeyError as e:
error_counter = error_counter + 1
print 'Retrieved {0} keys in {1} seconds. {2} keys weren\'t found, {3} keys weren\'t supposed to be found'.format(unicode(str(num_keys),'utf-8'),
unicode(str(time.time()-t1),'utf-8'),unicode(str(error_counter),'utf-8'),unicode(str(non_existent_keys),'utf-8'))
assert(error_counter==non_existent_keys)
# Access the cache, which has already all the data and will ask cassandra only if
# the keys asked are not present
t1 = time.time()
error_counter = 0
for pk in xrange(0, num_keys+non_existent_keys):
ck = pk*10
try:
result = cache.get_row([pk, ck])
except KeyError as e:
error_counter = error_counter + 1
print 'Retrieved {0} keys in {1} seconds. {2} keys weren\'t found, {3} keys weren\'t supposed to be found'.format(unicode(str(num_keys),'utf-8'),
unicode(str(time.time()-t1),'utf-8'),unicode(str(error_counter),'utf-8'),unicode(str(non_existent_keys),'utf-8'))
assert(error_counter==non_existent_keys)
|
<commit_before><commit_msg>Test to verify behaviour when key not found<commit_after>from hfetch import *
import time
contact_names = ['127.0.0.1']
nodePort = 9042
keyspace = 'test'
table = 'particle'
token_ranges = [(8070430489100699999,8070450532247928832)]
num_keys = 10001
non_existent_keys = 10
cache_size = num_keys+non_existent_keys
try:
connectCassandra(contact_names,nodePort)
except Exception:
    print 'can\'t connect, verify the contact points and port',contact_names,nodePort
cache = Hcache(keyspace,table,"",token_ranges,["partid","time"],["ciao","x","y","z"],{'cache_size':cache_size})
# Access the cache, which is empty and queries cassandra to retrieve the data
t1 = time.time()
error_counter = 0
for pk in xrange(0, num_keys+non_existent_keys):
ck = pk*10
try:
result = cache.get_row([pk, ck])
except KeyError as e:
error_counter = error_counter + 1
print 'Retrieved {0} keys in {1} seconds. {2} keys weren\'t found, {3} keys weren\'t supposed to be found'.format(unicode(str(num_keys),'utf-8'),
unicode(str(time.time()-t1),'utf-8'),unicode(str(error_counter),'utf-8'),unicode(str(non_existent_keys),'utf-8'))
assert(error_counter==non_existent_keys)
# Access the cache, which has already all the data and will ask cassandra only if
# the keys asked are not present
t1 = time.time()
error_counter = 0
for pk in xrange(0, num_keys+non_existent_keys):
ck = pk*10
try:
result = cache.get_row([pk, ck])
except KeyError as e:
error_counter = error_counter + 1
print 'Retrieved {0} keys in {1} seconds. {2} keys weren\'t found, {3} keys weren\'t supposed to be found'.format(unicode(str(num_keys),'utf-8'),
unicode(str(time.time()-t1),'utf-8'),unicode(str(error_counter),'utf-8'),unicode(str(non_existent_keys),'utf-8'))
assert(error_counter==non_existent_keys)
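The try/except pattern above can be folded into a small helper; a sketch (only Hcache.get_row from above is assumed; returning None for a missing key is a design choice here, not hfetch API):

def get_row_or_none(cache, key):
    # translate the KeyError contract into an optional-style return
    try:
        return cache.get_row(key)
    except KeyError:
        return None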
|
|
4aaad60df38a2fd73caacca7b47fe91276760b53
|
sniffer/sniffer.py
|
sniffer/sniffer.py
|
import threading
from time import sleep
class Sniffer(threading.Thread):
    def __init__(self, arg):
        # Initialize the Thread base class before setting its attributes
        threading.Thread.__init__(self)
        # Set thread to run as daemon
        self.daemon = True
# Initialize object variables with parameters from arg dictionary
self.arg = arg
# Thread has not come to life yet
self.status = False
def run(self):
self.status = True
while self.is_alive():
sleep(0.1)
def is_alive(self):
# Return if thread is dead or alive
return self.status
def stop(self):
# Kill it with fire!
self.status = False
|
Add Sniffer thread class sketch
|
Add Sniffer thread class sketch
|
Python
|
mit
|
dimkarakostas/rupture,dimriou/rupture,dionyziz/rupture,dimkarakostas/rupture,dimkarakostas/rupture,dionyziz/rupture,dionyziz/rupture,dimkarakostas/rupture,esarafianou/rupture,dimriou/rupture,dimriou/rupture,esarafianou/rupture,dionyziz/rupture,dimriou/rupture,dionyziz/rupture,dimkarakostas/rupture,esarafianou/rupture,esarafianou/rupture,dimriou/rupture
|
Add Sniffer thread class sketch
|
import threading
from time import sleep
class Sniffer(threading.Thread):
    def __init__(self, arg):
        # Initialize the Thread base class before setting its attributes
        threading.Thread.__init__(self)
        # Set thread to run as daemon
        self.daemon = True
# Initialize object variables with parameters from arg dictionary
self.arg = arg
# Thread has not come to life yet
self.status = False
def run(self):
self.status = True
while self.is_alive():
sleep(0.1)
def is_alive(self):
# Return if thread is dead or alive
return self.status
def stop(self):
# Kill it with fire!
self.status = False
|
<commit_before><commit_msg>Add Sniffer thread class sketch<commit_after>
|
import threading
from time import sleep
class Sniffer(threading.Thread):
    def __init__(self, arg):
        # Initialize the Thread base class before setting its attributes
        threading.Thread.__init__(self)
        # Set thread to run as daemon
        self.daemon = True
# Initialize object variables with parameters from arg dictionary
self.arg = arg
# Thread has not come to life yet
self.status = False
def run(self):
self.status = True
while self.is_alive():
sleep(0.1)
def is_alive(self):
# Return if thread is dead or alive
return self.status
def stop(self):
# Kill it with fire!
self.status = False
|
Add Sniffer thread class sketchimport threading
from time import sleep
class Sniffer(threading.Thread):
    def __init__(self, arg):
        # Initialize the Thread base class before setting its attributes
        threading.Thread.__init__(self)
        # Set thread to run as daemon
        self.daemon = True
# Initialize object variables with parameters from arg dictionary
self.arg = arg
# Thread has not come to life yet
self.status = False
def run(self):
self.status = True
while self.is_alive():
sleep(0.1)
def is_alive(self):
# Return if thread is dead or alive
return self.status
def stop(self):
# Kill it with fire!
self.status = False
|
<commit_before><commit_msg>Add Sniffer thread class sketch<commit_after>import threading
from time import sleep
class Sniffer(threading.Thread):
    def __init__(self, arg):
        # Initialize the Thread base class before setting its attributes
        threading.Thread.__init__(self)
        # Set thread to run as daemon
        self.daemon = True
# Initialize object variables with parameters from arg dictionary
self.arg = arg
# Thread has not come to life yet
self.status = False
def run(self):
self.status = True
while self.is_alive():
sleep(0.1)
def is_alive(self):
# Return if thread is dead or alive
return self.status
def stop(self):
# Kill it with fire!
self.status = False
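A minimal driver sketch for the thread above (the arg dictionary content is a placeholder):

if __name__ == '__main__':
    sniffer = Sniffer({})  # placeholder arg dict
    sniffer.start()        # start() invokes run() on the daemon thread
    sleep(1)
    sniffer.stop()         # flips status, so the run loop exits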
|
|
04f336f400445b10f54a75110a733c582606d933
|
tests/test_validators.py
|
tests/test_validators.py
|
"""
test_validators
~~~~~~~~~~~~~~
Unittests for bundled validators.
:copyright: 2007-2008 by James Crasta, Thomas Johansson.
:license: MIT, see LICENSE.txt for details.
"""
from py.test import raises
from wtforms.validators import ValidationError, length, url, not_empty, email, ip_address
class DummyForm(object):
pass
class DummyField(object):
def __init__(self, data):
self.data = data
form = DummyForm()
def test_email():
assert email(form, DummyField('foo@bar.dk')) == None
assert email(form, DummyField('123@bar.dk')) == None
assert email(form, DummyField('foo@456.dk')) == None
assert email(form, DummyField('foo@bar456.info')) == None
    raises(ValidationError, email, form, DummyField('foo'))
    raises(ValidationError, email, form, DummyField('bar.dk'))
    raises(ValidationError, email, form, DummyField('foo@'))
    raises(ValidationError, email, form, DummyField('@bar.dk'))
    raises(ValidationError, email, form, DummyField('foo@bar'))
    raises(ValidationError, email, form, DummyField('foo@bar.ab12'))
    raises(ValidationError, email, form, DummyField('foo@bar.abcde'))
def test_length():
field = DummyField('foobar')
assert length(min=2, max=6)(form, field) == None
raises(ValidationError, length(min=7), form, field)
raises(ValidationError, length(max=5), form, field)
def test_url():
assert url()(form, DummyField('http://foobar.dk')) == None
assert url()(form, DummyField('http://foobar.dk/')) == None
assert url()(form, DummyField('http://foobar.dk/foobar')) == None
raises(ValidationError, url(), form, DummyField('http://foobar'))
raises(ValidationError, url(), form, DummyField('foobar.dk'))
raises(ValidationError, url(), form, DummyField('http://foobar.12'))
def test_not_empty():
assert not_empty()(form, DummyField('foobar')) == None
raises(ValidationError, not_empty(), form, DummyField(''))
raises(ValidationError, not_empty(), form, DummyField(' '))
def test_ip_address():
assert ip_address(form, DummyField('127.0.0.1')) == None
raises(ValidationError, ip_address, form, DummyField('abc.0.0.1'))
raises(ValidationError, ip_address, form, DummyField('1278.0.0.1'))
raises(ValidationError, ip_address, form, DummyField('127.0.0.abc'))
|
Add first basic unittests using py.test
|
Add first basic unittests using py.test
|
Python
|
bsd-3-clause
|
mobyle2-legacy/WTForms,mobyle2-legacy/WTForms
|
Add first basic unittests using py.test
|
"""
test_validators
~~~~~~~~~~~~~~
Unittests for bundled validators.
:copyright: 2007-2008 by James Crasta, Thomas Johansson.
:license: MIT, see LICENSE.txt for details.
"""
from py.test import raises
from wtforms.validators import ValidationError, length, url, not_empty, email, ip_address
class DummyForm(object):
pass
class DummyField(object):
def __init__(self, data):
self.data = data
form = DummyForm()
def test_email():
assert email(form, DummyField('foo@bar.dk')) == None
assert email(form, DummyField('123@bar.dk')) == None
assert email(form, DummyField('foo@456.dk')) == None
assert email(form, DummyField('foo@bar456.info')) == None
    raises(ValidationError, email, form, DummyField('foo'))
    raises(ValidationError, email, form, DummyField('bar.dk'))
    raises(ValidationError, email, form, DummyField('foo@'))
    raises(ValidationError, email, form, DummyField('@bar.dk'))
    raises(ValidationError, email, form, DummyField('foo@bar'))
    raises(ValidationError, email, form, DummyField('foo@bar.ab12'))
    raises(ValidationError, email, form, DummyField('foo@bar.abcde'))
def test_length():
field = DummyField('foobar')
assert length(min=2, max=6)(form, field) == None
raises(ValidationError, length(min=7), form, field)
raises(ValidationError, length(max=5), form, field)
def test_url():
assert url()(form, DummyField('http://foobar.dk')) == None
assert url()(form, DummyField('http://foobar.dk/')) == None
assert url()(form, DummyField('http://foobar.dk/foobar')) == None
raises(ValidationError, url(), form, DummyField('http://foobar'))
raises(ValidationError, url(), form, DummyField('foobar.dk'))
raises(ValidationError, url(), form, DummyField('http://foobar.12'))
def test_not_empty():
assert not_empty()(form, DummyField('foobar')) == None
raises(ValidationError, not_empty(), form, DummyField(''))
raises(ValidationError, not_empty(), form, DummyField(' '))
def test_ip_address():
assert ip_address(form, DummyField('127.0.0.1')) == None
raises(ValidationError, ip_address, form, DummyField('abc.0.0.1'))
raises(ValidationError, ip_address, form, DummyField('1278.0.0.1'))
raises(ValidationError, ip_address, form, DummyField('127.0.0.abc'))
|
<commit_before><commit_msg>Add first basic unittests using py.test<commit_after>
|
"""
test_validators
~~~~~~~~~~~~~~
Unittests for bundled validators.
:copyright: 2007-2008 by James Crasta, Thomas Johansson.
:license: MIT, see LICENSE.txt for details.
"""
from py.test import raises
from wtforms.validators import ValidationError, length, url, not_empty, email, ip_address
class DummyForm(object):
pass
class DummyField(object):
def __init__(self, data):
self.data = data
form = DummyForm()
def test_email():
assert email(form, DummyField('foo@bar.dk')) == None
assert email(form, DummyField('123@bar.dk')) == None
assert email(form, DummyField('foo@456.dk')) == None
assert email(form, DummyField('foo@bar456.info')) == None
    raises(ValidationError, email, form, DummyField('foo'))
    raises(ValidationError, email, form, DummyField('bar.dk'))
    raises(ValidationError, email, form, DummyField('foo@'))
    raises(ValidationError, email, form, DummyField('@bar.dk'))
    raises(ValidationError, email, form, DummyField('foo@bar'))
    raises(ValidationError, email, form, DummyField('foo@bar.ab12'))
    raises(ValidationError, email, form, DummyField('foo@bar.abcde'))
def test_length():
field = DummyField('foobar')
assert length(min=2, max=6)(form, field) == None
raises(ValidationError, length(min=7), form, field)
raises(ValidationError, length(max=5), form, field)
def test_url():
assert url()(form, DummyField('http://foobar.dk')) == None
assert url()(form, DummyField('http://foobar.dk/')) == None
assert url()(form, DummyField('http://foobar.dk/foobar')) == None
raises(ValidationError, url(), form, DummyField('http://foobar'))
raises(ValidationError, url(), form, DummyField('foobar.dk'))
raises(ValidationError, url(), form, DummyField('http://foobar.12'))
def test_not_empty():
assert not_empty()(form, DummyField('foobar')) == None
raises(ValidationError, not_empty(), form, DummyField(''))
raises(ValidationError, not_empty(), form, DummyField(' '))
def test_ip_address():
assert ip_address(form, DummyField('127.0.0.1')) == None
raises(ValidationError, ip_address, form, DummyField('abc.0.0.1'))
raises(ValidationError, ip_address, form, DummyField('1278.0.0.1'))
raises(ValidationError, ip_address, form, DummyField('127.0.0.abc'))
|
Add first basic unittests using py.test"""
test_validators
~~~~~~~~~~~~~~
Unittests for bundled validators.
:copyright: 2007-2008 by James Crasta, Thomas Johansson.
:license: MIT, see LICENSE.txt for details.
"""
from py.test import raises
from wtforms.validators import ValidationError, length, url, not_empty, email, ip_address
class DummyForm(object):
pass
class DummyField(object):
def __init__(self, data):
self.data = data
form = DummyForm()
def test_email():
assert email(form, DummyField('foo@bar.dk')) == None
assert email(form, DummyField('123@bar.dk')) == None
assert email(form, DummyField('foo@456.dk')) == None
assert email(form, DummyField('foo@bar456.info')) == None
    raises(ValidationError, email, form, DummyField('foo'))
    raises(ValidationError, email, form, DummyField('bar.dk'))
    raises(ValidationError, email, form, DummyField('foo@'))
    raises(ValidationError, email, form, DummyField('@bar.dk'))
    raises(ValidationError, email, form, DummyField('foo@bar'))
    raises(ValidationError, email, form, DummyField('foo@bar.ab12'))
    raises(ValidationError, email, form, DummyField('foo@bar.abcde'))
def test_length():
field = DummyField('foobar')
assert length(min=2, max=6)(form, field) == None
raises(ValidationError, length(min=7), form, field)
raises(ValidationError, length(max=5), form, field)
def test_url():
assert url()(form, DummyField('http://foobar.dk')) == None
assert url()(form, DummyField('http://foobar.dk/')) == None
assert url()(form, DummyField('http://foobar.dk/foobar')) == None
raises(ValidationError, url(), form, DummyField('http://foobar'))
raises(ValidationError, url(), form, DummyField('foobar.dk'))
raises(ValidationError, url(), form, DummyField('http://foobar.12'))
def test_not_empty():
assert not_empty()(form, DummyField('foobar')) == None
raises(ValidationError, not_empty(), form, DummyField(''))
raises(ValidationError, not_empty(), form, DummyField(' '))
def test_ip_address():
assert ip_address(form, DummyField('127.0.0.1')) == None
raises(ValidationError, ip_address, form, DummyField('abc.0.0.1'))
raises(ValidationError, ip_address, form, DummyField('1278.0.0.1'))
raises(ValidationError, ip_address, form, DummyField('127.0.0.abc'))
|
<commit_before><commit_msg>Add first basic unittests using py.test<commit_after>"""
test_validators
~~~~~~~~~~~~~~
Unittests for bundled validators.
:copyright: 2007-2008 by James Crasta, Thomas Johansson.
:license: MIT, see LICENSE.txt for details.
"""
from py.test import raises
from wtforms.validators import ValidationError, length, url, not_empty, email, ip_address
class DummyForm(object):
pass
class DummyField(object):
def __init__(self, data):
self.data = data
form = DummyForm()
def test_email():
assert email(form, DummyField('foo@bar.dk')) == None
assert email(form, DummyField('123@bar.dk')) == None
assert email(form, DummyField('foo@456.dk')) == None
assert email(form, DummyField('foo@bar456.info')) == None
    raises(ValidationError, email, form, DummyField('foo'))
    raises(ValidationError, email, form, DummyField('bar.dk'))
    raises(ValidationError, email, form, DummyField('foo@'))
    raises(ValidationError, email, form, DummyField('@bar.dk'))
    raises(ValidationError, email, form, DummyField('foo@bar'))
    raises(ValidationError, email, form, DummyField('foo@bar.ab12'))
    raises(ValidationError, email, form, DummyField('foo@bar.abcde'))
def test_length():
field = DummyField('foobar')
assert length(min=2, max=6)(form, field) == None
raises(ValidationError, length(min=7), form, field)
raises(ValidationError, length(max=5), form, field)
def test_url():
assert url()(form, DummyField('http://foobar.dk')) == None
assert url()(form, DummyField('http://foobar.dk/')) == None
assert url()(form, DummyField('http://foobar.dk/foobar')) == None
raises(ValidationError, url(), form, DummyField('http://foobar'))
raises(ValidationError, url(), form, DummyField('foobar.dk'))
raises(ValidationError, url(), form, DummyField('http://foobar.12'))
def test_not_empty():
assert not_empty()(form, DummyField('foobar')) == None
raises(ValidationError, not_empty(), form, DummyField(''))
raises(ValidationError, not_empty(), form, DummyField(' '))
def test_ip_address():
assert ip_address(form, DummyField('127.0.0.1')) == None
raises(ValidationError, ip_address, form, DummyField('abc.0.0.1'))
raises(ValidationError, ip_address, form, DummyField('1278.0.0.1'))
raises(ValidationError, ip_address, form, DummyField('127.0.0.abc'))
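In later py.test versions, raises also works as a context manager, which can read more clearly for a single call; a hedged equivalent for one of the email cases:

def test_email_invalid():
    with raises(ValidationError):
        email(form, DummyField('foo'))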
|
|
b0e629cf2451bf2f97d4723ccbdad85889057011
|
EAN/ean_check.py
|
EAN/ean_check.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import barcode
# Parameters:
file_ean = 'ean.csv'
ean_part = '8000001'
eans = []
double = 0
for ean in open(file_ean, 'r'):
    if ean.startswith(ean_part):
ean_code = ean[7:12]
if ean_code in eans:
double += 1
else:
eans.append(ean_code)
free = 0
EAN = barcode.get_barcode_class('ean13')
for i in range(1, 100000):
ean_code = '%05d' % i
if ean_code in eans:
continue
free += 1
ean12 = '%s%s' % (ean_part, ean_code)
ean = EAN(ean12)
ean13 = ean.get_fullcode()
print ean13
print 'Used: %s - Double: %s - Free %s' % (
len(eans),
double,
free,
)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Add ean check free code
|
Add ean check free code
|
Python
|
agpl-3.0
|
Micronaet/micronaet-script,Micronaet/micronaet-script
|
Add ean check free code
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import barcode
# Parameters:
file_ean = 'ean.csv'
ean_part = '8000001'
eans = []
double = 0
for ean in open(file_ean, 'r'):
    if ean.startswith(ean_part):
ean_code = ean[7:12]
if ean_code in eans:
double += 1
else:
eans.append(ean_code)
free = 0
EAN = barcode.get_barcode_class('ean13')
for i in range(1, 100000):
ean_code = '%05d' % i
if ean_code in eans:
continue
free += 1
ean12 = '%s%s' % (ean_part, ean_code)
ean = EAN(ean12)
ean13 = ean.get_fullcode()
print ean13
print 'Used: %s - Double: %s - Free %s' % (
len(eans),
double,
free,
)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
<commit_before><commit_msg>Add ean check free code<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import barcode
# Parameters:
file_ean = 'ean.csv'
ean_part = '8000001'
eans = []
double = 0
for ean in open(file_ean, 'r'):
    if ean.startswith(ean_part):
ean_code = ean[7:12]
if ean_code in eans:
double += 1
else:
eans.append(ean_code)
free = 0
EAN = barcode.get_barcode_class('ean13')
for i in range(1, 100000):
ean_code = '%05d' % i
if ean_code in eans:
continue
free += 1
ean12 = '%s%s' % (ean_part, ean_code)
ean = EAN(ean12)
ean13 = ean.get_fullcode()
print ean13
print 'Used: %s - Double: %s - Free %s' % (
len(eans),
double,
free,
)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Add ean check free code#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import barcode
# Parameters:
file_ean = 'ean.csv'
ean_part = '8000001'
eans = []
double = 0
for ean in open(file_ean, 'r'):
    if ean.startswith(ean_part):
ean_code = ean[7:12]
if ean_code in eans:
double += 1
else:
eans.append(ean_code)
free = 0
EAN = barcode.get_barcode_class('ean13')
for i in range(1, 100000):
ean_code = '%05d' % i
if ean_code in eans:
continue
free += 1
ean12 = '%s%s' % (ean_part, ean_code)
ean = EAN(ean12)
ean13 = ean.get_fullcode()
print ean13
print 'Used: %s - Double: %s - Free %s' % (
len(eans),
double,
free,
)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
<commit_before><commit_msg>Add ean check free code<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import barcode
# Parameters:
file_ean = 'ean.csv'
ean_part = '8000001'
eans = []
double = 0
for ean in open(file_ean, 'r'):
if ean.startswith(ean_part):
ean_code = ean[7:12]
if ean_code in eans:
double += 1
else:
eans.append(ean_code)
free = 0
EAN = barcode.get_barcode_class('ean13')
for i in range(1, 100000):
ean_code = '%05d' % i
if ean_code in eans:
continue
free += 1
ean12 = '%s%s' % (ean_part, ean_code)
ean = EAN(ean12)
ean13 = ean.get_fullcode()
print ean13
print 'Used: %s - Double: %s - Free %s' % (
len(eans),
double,
free,
)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
|
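A short aside on the record above: the script leans on the `barcode` package to produce the EAN-13 check digit from the 12-digit stem. A minimal standalone sketch of the standard EAN-13 rule it relies on (the sample value is a well-known published barcode, not taken from this record):

def ean13_checksum(stem12):
    # stem12 is the 12-digit stem (prefix + serial); digit weights alternate 1, 3.
    total = sum(int(d) * (3 if i % 2 else 1) for i, d in enumerate(stem12))
    return (10 - total % 10) % 10

assert ean13_checksum('400638133393') == 1  # full code: 4006381333931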
e4b99f34c316a36d1fa7adb5ca0cc9c68804708f
|
scripts/netrng-perftest.py
|
scripts/netrng-perftest.py
|
#!/bin/python
import logging
from netrng import NetRNGServer
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
mainHandler = logging.StreamHandler()
mainHandler.setFormatter(logging.Formatter('%(asctime)s - %(message)s'))
log.addHandler(mainHandler)
server = NetRNGServer()
server.calibrate()
|
Add HWRNG performance test script
|
Add HWRNG performance test script
|
Python
|
mit
|
infincia/NetRNG
|
Add HWRNG performance test script
|
#!/bin/python
import logging
from netrng import NetRNGServer
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
mainHandler = logging.StreamHandler()
mainHandler.setFormatter(logging.Formatter('%(asctime)s - %(message)s'))
log.addHandler(mainHandler)
server = NetRNGServer()
server.calibrate()
|
<commit_before><commit_msg>Add HWRNG performance test script<commit_after>
|
#!/bin/python
import logging
from netrng import NetRNGServer
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
mainHandler = logging.StreamHandler()
mainHandler.setFormatter(logging.Formatter('%(asctime)s - %(message)s'))
log.addHandler(mainHandler)
server = NetRNGServer()
server.calibrate()
|
Add HWRNG performance test script#!/bin/python
import logging
from netrng import NetRNGServer
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
mainHandler = logging.StreamHandler()
mainHandler.setFormatter(logging.Formatter('%(asctime)s - %(message)s'))
log.addHandler(mainHandler)
server = NetRNGServer()
server.calibrate()
|
<commit_before><commit_msg>Add HWRNG performance test script<commit_after>#!/bin/python
import logging
from netrng import NetRNGServer
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
mainHandler = logging.StreamHandler()
mainHandler.setFormatter(logging.Formatter('%(asctime)s - %(message)s'))
log.addHandler(mainHandler)
server = NetRNGServer()
server.calibrate()
|
|
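The record above only shows NetRNGServer.calibrate() being invoked, not what it does. Assuming calibration means measuring sustained read throughput of a randomness device, a rough stand-in might look like the sketch below (device path and numbers are illustrative; a real HWRNG would typically sit at /dev/hwrng):

import time

def measure_throughput(dev='/dev/urandom', chunk=4096, seconds=2.0):
    # Read fixed-size chunks for a fixed wall-clock window; report bytes/sec.
    read_bytes = 0
    deadline = time.time() + seconds
    with open(dev, 'rb') as f:
        while time.time() < deadline:
            read_bytes += len(f.read(chunk))
    return read_bytes / seconds

print('%.0f bytes/sec' % measure_throughput())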
b06803118b6ec9b7cb3d5b262e4d09d0492cc588
|
tests/testcoverage.py
|
tests/testcoverage.py
|
from glyphsets.codepoints import CodepointsInSubset
from fontTools.unicodedata.Scripts import NAMES
import pytest
import unicodedata
from collections import defaultdict
import warnings
import sys
try:
import gflanguages
except Exception as e:
pytest.skip(
"Coverage test requires gflanguages to be installed",
allow_module_level=True,
)
MAGIC_CODEPOINTS = set([0x2010, 0xA])
def test_coverage():
langs = gflanguages.LoadLanguages()
missing = defaultdict(set)
bad_langs = defaultdict(list)
failed = False
for langname, metadata in langs.items():
cps_used_in_sample = set()
for _, sample in metadata.sample_text.ListFields():
for cp in sample:
cps_used_in_sample.add(ord(cp))
lang, script = langname.split("_")
# Get Unicode name for script
if script not in NAMES:
continue
namefile = NAMES[script].lower().replace(" ", "-").replace("_", "-")
cps_in_subset = CodepointsInSubset(namefile, unique_glyphs=False)
cps_in_subset |= MAGIC_CODEPOINTS
if not cps_in_subset:
warnings.warn(f"No codepoints for {langname}")
failed = True
cps_in_subset = set(cps_in_subset)
cps_not_available = cps_used_in_sample - cps_in_subset
if cps_not_available:
failed = True
missing[namefile] |= cps_not_available
bad_langs[namefile].append(langname)
if not failed:
return
for namefile, missing_cps in missing.items():
print(
f"\n{namefile} missing the following codepoints to render samples "
f"in {', '.join(bad_langs[namefile])}:\n",
file=sys.stderr,
)
for x in missing_cps:
print(
"0x%04X %s %s" % (x, chr(x), unicodedata.name(chr(x))), file=sys.stderr
)
assert False, "Coverage test failed"
|
Add a coverage test using the sample text files in gflanguages
|
Add a coverage test using the sample text files in gflanguages
|
Python
|
apache-2.0
|
googlefonts/glyphsets,googlefonts/glyphsets
|
Add a coverage test using the sample text files in gflanguages
|
from glyphsets.codepoints import CodepointsInSubset
from fontTools.unicodedata.Scripts import NAMES
import pytest
import unicodedata
from collections import defaultdict
import warnings
import sys
try:
import gflanguages
except Exception as e:
pytest.skip(
"Coverage test requires gflanguages to be installed",
allow_module_level=True,
)
MAGIC_CODEPOINTS = set([0x2010, 0xA])
def test_coverage():
langs = gflanguages.LoadLanguages()
missing = defaultdict(set)
bad_langs = defaultdict(list)
failed = False
for langname, metadata in langs.items():
cps_used_in_sample = set()
for _, sample in metadata.sample_text.ListFields():
for cp in sample:
cps_used_in_sample.add(ord(cp))
lang, script = langname.split("_")
# Get Unicode name for script
if script not in NAMES:
continue
namefile = NAMES[script].lower().replace(" ", "-").replace("_", "-")
cps_in_subset = CodepointsInSubset(namefile, unique_glyphs=False)
cps_in_subset |= MAGIC_CODEPOINTS
if not cps_in_subset:
warnings.warn(f"No codepoints for {langname}")
failed = True
cps_in_subset = set(cps_in_subset)
cps_not_available = cps_used_in_sample - cps_in_subset
if cps_not_available:
failed = True
missing[namefile] |= cps_not_available
bad_langs[namefile].append(langname)
if not failed:
return
for namefile, missing_cps in missing.items():
print(
f"\n{namefile} missing the following codepoints to render samples "
f"in {', '.join(bad_langs[namefile])}:\n",
file=sys.stderr,
)
for x in missing_cps:
print(
"0x%04X %s %s" % (x, chr(x), unicodedata.name(chr(x))), file=sys.stderr
)
assert False, "Coverage test failed"
|
<commit_before><commit_msg>Add a coverage test using the sample text files in gflanguages<commit_after>
|
from glyphsets.codepoints import CodepointsInSubset
from fontTools.unicodedata.Scripts import NAMES
import pytest
import unicodedata
from collections import defaultdict
import warnings
import sys
try:
import gflanguages
except Exception as e:
pytest.skip(
"Coverage test requires gflanguages to be installed",
allow_module_level=True,
)
MAGIC_CODEPOINTS = set([0x2010, 0xA])
def test_coverage():
langs = gflanguages.LoadLanguages()
missing = defaultdict(set)
bad_langs = defaultdict(list)
failed = False
for langname, metadata in langs.items():
cps_used_in_sample = set()
for _, sample in metadata.sample_text.ListFields():
for cp in sample:
cps_used_in_sample.add(ord(cp))
lang, script = langname.split("_")
# Get Unicode name for script
if script not in NAMES:
continue
namefile = NAMES[script].lower().replace(" ", "-").replace("_", "-")
cps_in_subset = CodepointsInSubset(namefile, unique_glyphs=False)
cps_in_subset |= MAGIC_CODEPOINTS
if not cps_in_subset:
warnings.warn(f"No codepoints for {langname}")
failed = True
cps_in_subset = set(cps_in_subset)
cps_not_available = cps_used_in_sample - cps_in_subset
if cps_not_available:
failed = True
missing[namefile] |= cps_not_available
bad_langs[namefile].append(langname)
if not failed:
return
for namefile, missing_cps in missing.items():
print(
f"\n{namefile} missing the following codepoints to render samples "
f"in {', '.join(bad_langs[namefile])}:\n",
file=sys.stderr,
)
for x in missing_cps:
print(
"0x%04X %s %s" % (x, chr(x), unicodedata.name(chr(x))), file=sys.stderr
)
assert False, "Coverage test failed"
|
Add a coverage test using the sample text files in gflanguagesfrom glyphsets.codepoints import CodepointsInSubset
from fontTools.unicodedata.Scripts import NAMES
import pytest
import unicodedata
from collections import defaultdict
import warnings
import sys
try:
import gflanguages
except Exception as e:
pytest.skip(
"Coverage test requires gflanguages to be installed",
allow_module_level=True,
)
MAGIC_CODEPOINTS = set([0x2010, 0xA])
def test_coverage():
langs = gflanguages.LoadLanguages()
missing = defaultdict(set)
bad_langs = defaultdict(list)
failed = False
for langname, metadata in langs.items():
cps_used_in_sample = set()
for _, sample in metadata.sample_text.ListFields():
for cp in sample:
cps_used_in_sample.add(ord(cp))
lang, script = langname.split("_")
# Get Unicode name for script
if script not in NAMES:
continue
namefile = NAMES[script].lower().replace(" ", "-").replace("_", "-")
cps_in_subset = CodepointsInSubset(namefile, unique_glyphs=False)
cps_in_subset |= MAGIC_CODEPOINTS
if not cps_in_subset:
warnings.warn(f"No codepoints for {langname}")
failed = True
cps_in_subset = set(cps_in_subset)
cps_not_available = cps_used_in_sample - cps_in_subset
if cps_not_available:
failed = True
missing[namefile] |= cps_not_available
bad_langs[namefile].append(langname)
if not failed:
return
for namefile, missing_cps in missing.items():
print(
f"\n{namefile} missing the following codepoints to render samples "
f"in {', '.join(bad_langs[namefile])}:\n",
file=sys.stderr,
)
for x in missing_cps:
print(
"0x%04X %s %s" % (x, chr(x), unicodedata.name(chr(x))), file=sys.stderr
)
assert False, "Coverage test failed"
|
<commit_before><commit_msg>Add a coverage test using the sample text files in gflanguages<commit_after>from glyphsets.codepoints import CodepointsInSubset
from fontTools.unicodedata.Scripts import NAMES
import pytest
import unicodedata
from collections import defaultdict
import warnings
import sys
try:
import gflanguages
except Exception as e:
pytest.skip(
"Coverage test requires gflanguages to be installed",
allow_module_level=True,
)
MAGIC_CODEPOINTS = set([0x2010, 0xA])
def test_coverage():
langs = gflanguages.LoadLanguages()
missing = defaultdict(set)
bad_langs = defaultdict(list)
failed = False
for langname, metadata in langs.items():
cps_used_in_sample = set()
for _, sample in metadata.sample_text.ListFields():
for cp in sample:
cps_used_in_sample.add(ord(cp))
lang, script = langname.split("_")
# Get Unicode name for script
if script not in NAMES:
continue
namefile = NAMES[script].lower().replace(" ", "-").replace("_", "-")
cps_in_subset = CodepointsInSubset(namefile, unique_glyphs=False)
cps_in_subset |= MAGIC_CODEPOINTS
if not cps_in_subset:
warnings.warn(f"No codepoints for {langname}")
failed = True
cps_in_subset = set(cps_in_subset)
cps_not_available = cps_used_in_sample - cps_in_subset
if cps_not_available:
failed = True
missing[namefile] |= cps_not_available
bad_langs[namefile].append(langname)
if not failed:
return
for namefile, missing_cps in missing.items():
print(
f"\n{namefile} missing the following codepoints to render samples "
f"in {', '.join(bad_langs[namefile])}:\n",
file=sys.stderr,
)
for x in missing_cps:
print(
"0x%04X %s %s" % (x, chr(x), unicodedata.name(chr(x))), file=sys.stderr
)
assert False, "Coverage test failed"
|
|
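The core of the test above is a set difference between the codepoints a sample text uses and the codepoints a subset provides. A self-contained miniature of that check (the "subset" here is a plain-ASCII stand-in plus the test's magic codepoints, not a real glyphset):

import unicodedata

sample = "Grüß dich"
cps_used = {ord(ch) for ch in sample}
cps_available = set(range(0x20, 0x7F)) | {0x2010, 0xA}  # stand-in subset
for cp in sorted(cps_used - cps_available):
    # Prints e.g. 0x00DF ß LATIN SMALL LETTER SHARP S
    print("0x%04X %s %s" % (cp, chr(cp), unicodedata.name(chr(cp))))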
d0ddbbec91f37c1e2a5c5e429c3586c57c584f8f
|
dp/fibonacci_number/fibonacci_number_dp.py
|
dp/fibonacci_number/fibonacci_number_dp.py
|
# computes the n_th number of the fibonacci sequence
# with the help of dynamic programming to avoid recursive recomputations
def dyn_fib(n, memo):
if n < 1:
return 0
if n == 1:
return 1
if n in memo:
return memo[n]
memo[n] = dyn_fib(n-1, memo) + dyn_fib(n-2, memo)
return memo[n]
def test():
num = 500
assert dyn_fib(num, {}) == 139423224561697880139724382870407283950070256587697307264108962948325571622863290691557658876222521294125
if __name__ == "__main__":
test()
|
Add fibonacci nth num computation w/ DP in python
|
Add fibonacci nth num computation w/ DP in python
|
Python
|
cc0-1.0
|
ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms
|
Add fibonacci nth num computation w/ DP in python
|
# computes the n_th number of the fibonacci sequence
# with the help of dynamic programming to avoid recursive recomputations
def dyn_fib(n, memo):
if n < 1:
return 0
if n == 1:
return 1
if n in memo:
return memo[n]
memo[n] = dyn_fib(n-1, memo) + dyn_fib(n-2, memo)
return memo[n]
def test():
num = 500
assert dyn_fib(num, {}) == 139423224561697880139724382870407283950070256587697307264108962948325571622863290691557658876222521294125
if __name__ == "__main__":
test()
|
<commit_before><commit_msg>Add fibonacci nth num computation w/ DP in python<commit_after>
|
# computes the n_th number of the fibonacci sequence
# with the help of dynamic programming to avoid recursive recomputations
def dyn_fib(n, memo):
if n < 1:
return 0
if n == 1:
return 1
if n in memo:
return memo[n]
memo[n] = dyn_fib(n-1, memo) + dyn_fib(n-2, memo)
return memo[n]
def test():
num = 500
assert dyn_fib(num, {}) == 139423224561697880139724382870407283950070256587697307264108962948325571622863290691557658876222521294125
if __name__ == "__main__":
test()
|
Add fibonacci nth num computation w/ DP in python# computes the n_th number of the fibonacci sequence
# with the help of dynamic programming to avoid recursive recomputations
def dyn_fib(n, memo):
if n < 1:
return 0
if n == 1:
return 1
if n in memo:
return memo[n]
memo[n] = dyn_fib(n-1, memo) + dyn_fib(n-2, memo)
return memo[n]
def test():
num = 500
assert dyn_fib(num, {}) == 139423224561697880139724382870407283950070256587697307264108962948325571622863290691557658876222521294125
if __name__ == "__main__":
test()
|
<commit_before><commit_msg>Add fibonacci nth num computation w/ DP in python<commit_after># computes the n_th number of the fibonacci sequence
# with the help of dynamic programming to avoid recursive recomputations
def dyn_fib(n, memo):
if n < 1:
return 0
if n == 1:
return 1
if n in memo:
return memo[n]
memo[n] = dyn_fib(n-1, memo) + dyn_fib(n-2, memo)
return memo[n]
def test():
num = 500
assert dyn_fib(num, {}) == 139423224561697880139724382870407283950070256587697307264108962948325571622863290691557658876222521294125
if __name__ == "__main__":
test()
|
|
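For contrast with the memoized recursion above: the same convention (F(1) = 1, F(10) = 55) can be computed iteratively in constant space, which also sidesteps Python's recursion limit for very large n. A sketch:

def iter_fib(n):
    # Pairwise update; after n steps, a holds the n-th Fibonacci number.
    a, b = 0, 1
    for _ in range(max(n, 0)):
        a, b = b, a + b
    return a

assert iter_fib(1) == 1
assert iter_fib(10) == 55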
a03a639ccef6d837a05e22489fe324572b303cd4
|
zephyr/management/commands/colorize_streams.py
|
zephyr/management/commands/colorize_streams.py
|
from optparse import make_option
from django.core.management.base import BaseCommand
from django.db.models import Count
from zephyr.models import Realm, StreamColor, Stream, UserProfile, Subscription, \
Message, Recipient
class Command(BaseCommand):
help = """Colorize streams in a realm for people who have not already colored their streams."""
option_list = BaseCommand.option_list + (
make_option('-d', '--domain',
dest='domain',
type='str',
help='The name of the realm in which you are colorizing streams.'),
)
def handle(self, **options):
if options["domain"] is None:
self.print_help("python manage.py", "colorize_streams")
exit(1)
realm = Realm.objects.get(domain=options["domain"])
user_profiles = UserProfile.objects.filter(realm=realm)
users_who_need_colors = filter(lambda profile: StreamColor.objects.filter(
subscription__user_profile=profile).count() == 0, user_profiles)
# Hand-selected colors from the current swatch options,
# providing reasonable contrast for 1 - 7 streams.
colors = [
"#76ce90", # light forest green
"#f5ce6e", # goldenrod
"#a6c7e5", # light blue
"#b0a5fd", # volet
"#e79ab5", # pink
"#bfd56f", # greenish-yellow
"#f4ae55", # orange
]
print "Setting stream colors for:"
for user_profile in users_who_need_colors:
print " ", user_profile.full_name
stream_ids = [result['recipient__type_id'] for result in Message.objects.filter(
sender__realm=realm).values('recipient__type_id').annotate(
count=Count('recipient__type_id')).order_by('-count')]
print "Setting color for:"
for stream_id, color in zip(stream_ids, colors):
# Give everyone the same color for a stream.
print " ", Stream.objects.get(id=stream_id).name
# If this realm has more streams than preselected colors,
# only color the N most popular.
recipient = Recipient.objects.get(type=Recipient.STREAM, type_id=stream_id)
for user_profile in users_who_need_colors:
try:
subscription = Subscription.objects.get(user_profile=user_profile,
recipient=recipient)
except Subscription.DoesNotExist:
# Not subscribed
continue
StreamColor(subscription=subscription, color=color).save()
|
Add a management script to set stream colors for a domain.
|
Add a management script to set stream colors for a domain.
(imported from commit 186e8226b57d385bbbed756615c0c63315c9d463)
|
Python
|
apache-2.0
|
Gabriel0402/zulip,fw1121/zulip,punchagan/zulip,easyfmxu/zulip,brainwane/zulip,zofuthan/zulip,PaulPetring/zulip,swinghu/zulip,jonesgithub/zulip,shubhamdhama/zulip,susansls/zulip,yuvipanda/zulip,wangdeshui/zulip,lfranchi/zulip,DazWorrall/zulip,he15his/zulip,atomic-labs/zulip,atomic-labs/zulip,hayderimran7/zulip,eeshangarg/zulip,mohsenSy/zulip,praveenaki/zulip,sonali0901/zulip,littledogboy/zulip,hustlzp/zulip,gkotian/zulip,umkay/zulip,voidException/zulip,ApsOps/zulip,jeffcao/zulip,tdr130/zulip,dnmfarrell/zulip,hackerkid/zulip,isht3/zulip,hj3938/zulip,qq1012803704/zulip,brainwane/zulip,punchagan/zulip,yuvipanda/zulip,wavelets/zulip,wangdeshui/zulip,lfranchi/zulip,DazWorrall/zulip,he15his/zulip,atomic-labs/zulip,hayderimran7/zulip,eeshangarg/zulip,mohsenSy/zulip,praveenaki/zulip,sonali0901/zulip,littledogboy/zulip,hustlzp/zulip,gkotian/zulip,umkay/zulip,voidException/zulip,ApsOps/zulip,jeffcao/zulip,tdr130/zulip,dnmfarrell/zulip,hackerkid/zulip,isht3/zulip,hj3938/zulip,qq1012803704/zulip,showell/zulip,akuseru/zulip,punchagan/zulip,Gabriel0402/zulip,brockwhittaker/zulip,hj3938/zulip,bastianh/zulip,eastlhu/zulip,voidException/zulip,kou/zulip,noroot/zulip,paxapy/zulip,hj3938/zulip,levixie/zulip,wavelets/zulip,armooo/zulip,dattatreya303/zulip,littledogboy/zulip,ryansnowboarder/zulip,Batterfii/zulip,tommyip/zulip,suxinde2009/zulip,gkotian/zulip,ericzhou2008/zulip,bluesea/zulip,amyliu345/zulip,udxxabp/zulip,levixie/zulip,ean13/zulip,free/zulip,karamcnair/zulip,mohsenSy/zulip,thomasboyt/zulip,dattatreya303/zulip,joshisa/zulip,guiquanz/zulip,voidException/zulip,PaulPetring/zulip,print/zulip,rht/zulip,dwrpayne/zulip,bssrdf/zulip,johnnygaddarr/zulip,bowlofstew/zulip,so0k/zulip,proliming/zulip,kaiyuanheshang/zulip,armooo/zulip,synicalsyntax/zulip,guiquanz/zulip,guiquanz/zulip,firstblade/zulip,ikasumiwt/zulip,ipernet/zulip,zofuthan/zulip,technicalpickles/zulip,hafeez3000/zulip,arpith/zulip,levixie/zulip,Qgap/zulip,proliming/zulip,Diptanshu8/zulip,zhaoweigg/zulip,tdr130/zulip,jphilipsen05/zulip,wangdeshui/zulip,j831/zulip,bastianh/zulip,dnmfarrell/zulip,sup95/zulip,aliceriot/zulip,praveenaki/zulip,fw1121/zulip,dxq-git/zulip,nicholasbs/zulip,ryansnowboarder/zulip,themass/zulip,m1ssou/zulip,levixie/zulip,hustlzp/zulip,jessedhillon/zulip,Diptanshu8/zulip,wangdeshui/zulip,bowlofstew/zulip,JPJPJPOPOP/zulip,adnanh/zulip,fw1121/zulip,zachallaun/zulip,reyha/zulip,SmartPeople/zulip,verma-varsha/zulip,Suninus/zulip,he15his/zulip,seapasulli/zulip,eastlhu/zulip,zorojean/zulip,samatdav/zulip,codeKonami/zulip,luyifan/zulip,Jianchun1/zulip,zwily/zulip,dotcool/zulip,umkay/zulip,mansilladev/zulip,xuxiao/zulip,babbage/zulip,synicalsyntax/zulip,PaulPetring/zulip,calvinleenyc/zulip,shaunstanislaus/zulip,zwily/zulip,dxq-git/zulip,qq1012803704/zulip,johnny9/zulip,jessedhillon/zulip,tommyip/zulip,joshisa/zulip,arpitpanwar/zulip,eeshangarg/zulip,wdaher/zulip,joyhchen/zulip,cosmicAsymmetry/zulip,kokoar/zulip,brockwhittaker/zulip,deer-hope/zulip,sup95/zulip,christi3k/zulip,KJin99/zulip,vaidap/zulip,aliceriot/zulip,synicalsyntax/zulip,xuanhan863/zulip,jonesgithub/zulip,Drooids/zulip,zachallaun/zulip,suxinde2009/zulip,RobotCaleb/zulip,KJin99/zulip,verma-varsha/zulip,grave-w-grave/zulip,firstblade/zulip,jerryge/zulip,tiansiyuan/zulip,johnnygaddarr/zulip,babbage/zulip,ApsOps/zulip,Diptanshu8/zulip,souravbadami/zulip,huangkebo/zulip,jerryge/zulip,zwily/zulip,littledogboy/zulip,hengqujushi/zulip,susansls/zulip,xuxiao/zulip,JPJPJPOPOP/zulip,babbage/zulip,karamcnair/zulip,calvinleenyc/zulip,pradiptad/zulip,Frouk/zulip,blaze225/zulip,AZtheAsian/zulip,alliejones/zulip,tbutter/zulip,huangkebo/zulip,mansilladev/zulip,alliejones/zulip,noroot/zulip,bowlofstew/zulip,bitemyapp/zulip,ryanbackman/zulip,adnanh/zulip,zofuthan/zulip,aakash-cr7/zulip,vaidap/zulip,bastianh/zulip,vabs22/zulip,johnnygaddarr/zulip,Frouk/zulip,qq1012803704/zulip,yocome/zulip,jonesgithub/zulip,amyliu345/zulip,PaulPetring/zulip,developerfm/zulip,stamhe/zulip,Vallher/zulip,deer-hope/zulip,noroot/zulip,dawran6/zulip,jeffcao/zulip,tommyip/zulip,mahim97/zulip,dwrpayne/zulip,dhcrzf/zulip,shrikrishnaholla/zulip,Jianchun1/zulip,dotcool/zulip,tommyip/zulip,joyhchen/zulip,moria/zulip,kou/zulip,jrowan/zulip,suxinde2009/zulip,punchagan/zulip,Cheppers/zulip,tdr130/zulip,dnmfarrell/zulip,wdaher/zulip,Suninus/zulip,AZtheAsian/zulip,codeKonami/zulip,proliming/zulip,shubhamdhama/zulip,seapasulli/zulip,voidException/zulip,m1ssou/zulip,technicalpickles/zulip,jimmy54/zulip,dxq-git/zulip,bitemyapp/zulip,AZtheAsian/zulip,vakila/zulip,SmartPeople/zulip,mdavid/zulip,jonesgithub/zulip,jphilipsen05/zulip,dnmfarrell/zulip,jackrzhang/zulip,adnanh/zulip,akuseru/zulip,blaze225/zulip,PhilSk/zulip,zachallaun/zulip,PaulPetring/zulip,thomasboyt/zulip,jerryge/zulip,wdaher/zulip,mahim97/zulip,brockwhittaker/zulip,nicholasbs/zulip,xuxiao/zulip,brainwane/zulip,brainwane/zulip,zachallaun/zulip,armooo/zulip,willingc/zulip,karamcnair/zulip,schatt/zulip,eeshangarg/zulip,ApsOps/zulip,dhcrzf/zulip,huangkebo/zulip,hafeez3000/zulip,timabbott/zulip,ryansnowboarder/zulip,shrikrishnaholla/zulip,alliejones/zulip,samatdav/zulip,tommyip/zulip,hayderimran7/zulip,ApsOps/zulip,hj3938/zulip,luyifan/zulip,jerryge/zulip,xuanhan863/zulip,eeshangarg/zulip,zacps/zulip,peguin40/zulip,zhaoweigg/zulip,verma-varsha/zulip,Batterfii/zulip,vikas-parashar/zulip,joshisa/zulip,mdavid/zulip,zulip/zulip,KingxBanana/zulip,ashwinirudrappa/zulip,jainayush975/zulip,stamhe/zulip,LeeRisk/zulip,mansilladev/zulip,alliejones/zulip,proliming/zulip,vakila/zulip,Cheppers/zulip,developerfm/zulip,LAndreas/zulip,so0k/zulip,rht/zulip,synicalsyntax/zulip,xuanhan863/zulip,vabs22/zulip,bssrdf/zulip,itnihao/zulip,natanovia/zulip,dxq-git/zulip,yocome/zulip,ahmadassaf/zulip,huangkebo/zulip,eastlhu/zulip,isht3/zulip,amanharitsh123/zulip,hackerkid/zulip,ahmadassaf/zulip,MariaFaBella85/zulip,tommyip/zulip,jessedhillon/zulip,timabbott/zulip,dotcool/zulip,arpitpanwar/zulip,bluesea/zulip,developerfm/zulip,zulip/zulip,Qgap/zulip,seapasulli/zulip,Galexrt/zulip,Juanvulcano/zulip,jessedhillon/zulip,JanzTam/zulip,praveenaki/zulip,jackrzhang/zulip,sup95/zulip,LAndreas/zulip,avastu/zulip,Vallher/zulip,saitodisse/zulip,MayB/zulip,glovebx/zulip,hackerkid/zulip,he15his/zulip,souravbadami/zulip,KingxBanana/zulip,moria/zulip,kaiyuanheshang/zulip,ahmadassaf/zulip,jonesgithub/zulip,kokoar/zulip,MariaFaBella85/zulip,grave-w-grave/zulip,thomasboyt/zulip,MariaFaBella85/zulip,jackrzhang/zulip,rishig/zulip,aliceriot/zulip,verma-varsha/zulip,jeffcao/zulip,aps-sids/zulip,christi3k/zulip,deer-hope/zulip,susansls/zulip,eastlhu/zulip,voidException/zulip,amyliu345/zulip,ryanbackman/zulip,johnny9/zulip,punchagan/zulip,paxapy/zulip,esander91/zulip,PhilSk/zulip,andersk/zulip,jimmy54/zulip,yocome/zulip,tiansiyuan/zulip,nicholasbs/zulip,arpith/zulip,karamcnair/zulip,stamhe/zulip,schatt/zulip,timabbott/zulip,vaidap/zulip,deer-hope/zulip,vaidap/zulip,adnanh/zulip,huangkebo/zulip,zacps/zulip,niftynei/zulip,aps-sids/zulip,shaunstanislaus/zulip,dattatreya303/zulip,synicalsyntax/zulip,shubhamdhama/zulip,shrikrishnaholla/zulip,zhaoweigg/zulip,arpitpanwar/zulip,grave-w-grave/zulip,jessedhillon/zulip,he15his/zulip,ericzhou2008/zulip,luyifan/zulip,ericzhou2008/zulip,peguin40/zulip,christi3k/zulip,andersk/zulip,bowlofstew/zulip,Jianchun1/zulip,zorojean/zulip,guiquanz/zulip,rishig/zulip,JanzTam/zulip,moria/zulip,samatdav/zulip,umkay/zulip,paxapy/zulip,stamhe/zulip,natanovia/zulip,schatt/zulip,wweiradio/zulip,hackerkid/zulip,zorojean/zulip,technicalpickles/zulip,natanovia/zulip,bssrdf/zulip,andersk/zulip,zachallaun/zulip,mansilladev/zulip,hustlzp/zulip,vabs22/zulip,jeffcao/zulip,aakash-cr7/zulip,hengqujushi/zulip,Drooids/zulip,jrowan/zulip,reyha/zulip,kou/zulip,rht/zulip,Qgap/zulip,dwrpayne/zulip,JanzTam/zulip,dhcrzf/zulip,xuanhan863/zulip,gigawhitlocks/zulip,susansls/zulip,TigorC/zulip,aliceriot/zulip,JPJPJPOPOP/zulip,PaulPetring/zulip,noroot/zulip,glovebx/zulip,Cheppers/zulip,Galexrt/zulip,karamcnair/zulip,hustlzp/zulip,johnny9/zulip,jerryge/zulip,LAndreas/zulip,proliming/zulip,reyha/zulip,SmartPeople/zulip,dhcrzf/zulip,pradiptad/zulip,dnmfarrell/zulip,arpitpanwar/zulip,aakash-cr7/zulip,jeffcao/zulip,ashwinirudrappa/zulip,arpitpanwar/zulip,rishig/zulip,udxxabp/zulip,luyifan/zulip,hengqujushi/zulip,natanovia/zulip,susansls/zulip,firstblade/zulip,dawran6/zulip,m1ssou/zulip,gkotian/zulip,Frouk/zulip,krtkmj/zulip,huangkebo/zulip,praveenaki/zulip,sonali0901/zulip,ufosky-server/zulip,niftynei/zulip,shaunstanislaus/zulip,blaze225/zulip,johnnygaddarr/zulip,vabs22/zulip,themass/zulip,andersk/zulip,brainwane/zulip,natanovia/zulip,luyifan/zulip,DazWorrall/zulip,so0k/zulip,wdaher/zulip,kokoar/zulip,wavelets/zulip,punchagan/zulip,zofuthan/zulip,RobotCaleb/zulip,thomasboyt/zulip,blaze225/zulip,mdavid/zulip,Suninus/zulip,bluesea/zulip,zorojean/zulip,jackrzhang/zulip,yuvipanda/zulip,ericzhou2008/zulip,joyhchen/zulip,itnihao/zulip,Gabriel0402/zulip,mahim97/zulip,arpith/zulip,Vallher/zulip,peiwei/zulip,calvinleenyc/zulip,ufosky-server/zulip,EasonYi/zulip,vaidap/zulip,niftynei/zulip,littledogboy/zulip,shaunstanislaus/zulip,ikasumiwt/zulip,udxxabp/zulip,sharmaeklavya2/zulip,fw1121/zulip,johnnygaddarr/zulip,timabbott/zulip,ryanbackman/zulip,shaunstanislaus/zulip,sharmaeklavya2/zulip,MariaFaBella85/zulip,EasonYi/zulip,bastianh/zulip,wdaher/zulip,DazWorrall/zulip,Drooids/zulip,Frouk/zulip,Suninus/zulip,levixie/zulip,EasonYi/zulip,Cheppers/zulip,deer-hope/zulip,glovebx/zulip,bowlofstew/zulip,bluesea/zulip,zwily/zulip,ipernet/zulip,showell/zulip,shubhamdhama/zulip,Vallher/zulip,shrikrishnaholla/zulip,zachallaun/zulip,yuvipanda/zulip,avastu/zulip,EasonYi/zulip,mahim97/zulip,ashwinirudrappa/zulip,krtkmj/zulip,Frouk/zulip,amallia/zulip,hafeez3000/zulip,tiansiyuan/zulip,karamcnair/zulip,bluesea/zulip,babbage/zulip,atomic-labs/zulip,wweiradio/zulip,suxinde2009/zulip,ryanbackman/zulip,adnanh/zulip,atomic-labs/zulip,natanovia/zulip,easyfmxu/zulip,Gabriel0402/zulip,praveenaki/zulip,willingc/zulip,MayB/zulip,swinghu/zulip,KingxBanana/zulip,PhilSk/zulip,fw1121/zulip,zofuthan/zulip,tiansiyuan/zulip,xuxiao/zulip,jainayush975/zulip,christi3k/zulip,ApsOps/zulip,hayderimran7/zulip,codeKonami/zulip,blaze225/zulip,codeKonami/zulip,zwily/zulip,so0k/zulip,PaulPetring/zulip,DazWorrall/zulip,bitemyapp/zulip,pradiptad/zulip,amallia/zulip,sonali0901/zulip,johnny9/zulip,noroot/zulip,babbage/zulip,Diptanshu8/zulip,ufosky-server/zulip,cosmicAsymmetry/zulip,hengqujushi/zulip,TigorC/zulip,JPJPJPOPOP/zulip,grave-w-grave/zulip,Jianchun1/zulip,calvinleenyc/zulip,jainayush975/zulip,firstblade/zulip,j831/zulip,hafeez3000/zulip,paxapy/zulip,amallia/zulip,littledogboy/zulip,tiansiyuan/zulip,johnny9/zulip,luyifan/zulip,Qgap/zulip,dhcrzf/zulip,joshisa/zulip,he15his/zulip,jonesgithub/zulip,esander91/zulip,KJin99/zulip,rht/zulip,ufosky-server/zulip,Galexrt/zulip,bastianh/zulip,levixie/zulip,sup95/zulip,RobotCaleb/zulip,LAndreas/zulip,glovebx/zulip,christi3k/zulip,babbage/zulip,mdavid/zulip,RobotCaleb/zulip,aps-sids/zulip,souravbadami/zulip,aliceriot/zulip,deer-hope/zulip,arpith/zulip,TigorC/zulip,ahmadassaf/zulip,joyhchen/zulip,eeshangarg/zulip,peiwei/zulip,stamhe/zulip,codeKonami/zulip,Vallher/zulip,kaiyuanheshang/zulip,ipernet/zulip,RobotCaleb/zulip,swinghu/zulip,armooo/zulip,aps-sids/zulip,gigawhitlocks/zulip,ApsOps/zulip,sonali0901/zulip,vikas-parashar/zulip,huangkebo/zulip,Cheppers/zulip,proliming/zulip,souravbadami/zulip,ericzhou2008/zulip,atomic-labs/zulip,zwily/zulip,moria/zulip,peiwei/zulip,jerryge/zulip,zwily/zulip,schatt/zulip,vakila/zulip,zulip/zulip,wdaher/zulip,tdr130/zulip,swinghu/zulip,calvinleenyc/zulip,samatdav/zulip,Batterfii/zulip,ryansnowboarder/zulip,nicholasbs/zulip,Vallher/zulip,itnihao/zulip,ikasumiwt/zulip,aakash-cr7/zulip,umkay/zulip,seapasulli/zulip,dotcool/zulip,schatt/zulip,he15his/zulip,timabbott/zulip,zhaoweigg/zulip,so0k/zulip,johnnygaddarr/zulip,tdr130/zulip,vikas-parashar/zulip,yocome/zulip,tbutter/zulip,aps-sids/zulip,ipernet/zulip,jainayush975/zulip,DazWorrall/zulip,zhaoweigg/zulip,synicalsyntax/zulip,Cheppers/zulip,niftynei/zulip,KJin99/zulip,zorojean/zulip,cosmicAsymmetry/zulip,LeeRisk/zulip,zorojean/zulip,avastu/zulip,PhilSk/zulip,mansilladev/zulip,zhaoweigg/zulip,hackerkid/zulip,lfranchi/zulip,littledogboy/zulip,RobotCaleb/zulip,dxq-git/zulip,TigorC/zulip,proliming/zulip,Frouk/zulip,Vallher/zulip,hengqujushi/zulip,dnmfarrell/zulip,zulip/zulip,ufosky-server/zulip,mohsenSy/zulip,susansls/zulip,swinghu/zulip,peiwei/zulip,KingxBanana/zulip,krtkmj/zulip,vakila/zulip,dotcool/zulip,LAndreas/zulip,amanharitsh123/zulip,cosmicAsymmetry/zulip,dotcool/zulip,DazWorrall/zulip,zacps/zulip,JPJPJPOPOP/zulip,amyliu345/zulip,reyha/zulip,willingc/zulip,DazWorrall/zulip,Cheppers/zulip,jessedhillon/zulip,hj3938/zulip,bluesea/zulip,tbutter/zulip,dattatreya303/zulip,themass/zulip,j831/zulip,brockwhittaker/zulip,sharmaeklavya2/zulip,wangdeshui/zulip,MayB/zulip,adnanh/zulip,jackrzhang/zulip,reyha/zulip,JanzTam/zulip,Diptanshu8/zulip,noroot/zulip,wweiradio/zulip,kou/zulip,andersk/zulip,vakila/zulip,qq1012803704/zulip,zofuthan/zulip,bssrdf/zulip,Frouk/zulip,seapasulli/zulip,sharmaeklavya2/zulip,andersk/zulip,tbutter/zulip,dwrpayne/zulip,j831/zulip,arpith/zulip,showell/zulip,firstblade/zulip,kou/zulip,arpitpanwar/zulip,MariaFaBella85/zulip,ryansnowboarder/zulip,kokoar/zulip,littledogboy/zulip,kaiyuanheshang/zulip,he15his/zulip,johnny9/zulip,souravbadami/zulip,Juanvulcano/zulip,Suninus/zulip,paxapy/zulip,EasonYi/zulip,pradiptad/zulip,christi3k/zulip,saitodisse/zulip,itnihao/zulip,lfranchi/zulip,eeshangarg/zulip,ashwinirudrappa/zulip,peiwei/zulip,kokoar/zulip,eastlhu/zulip,vabs22/zulip,ashwinirudrappa/zulip,esander91/zulip,timabbott/zulip,sup95/zulip,Qgap/zulip,dxq-git/zulip,hustlzp/zulip,yuvipanda/zulip,codeKonami/zulip,firstblade/zulip,developerfm/zulip,hengqujushi/zulip,ashwinirudrappa/zulip,MayB/zulip,esander91/zulip,kou/zulip,dwrpayne/zulip,krtkmj/zulip,willingc/zulip,KJin99/zulip,hengqujushi/zulip,rishig/zulip,AZtheAsian/zulip,yuvipanda/zulip,tbutter/zulip,TigorC/zulip,bowlofstew/zulip,rishig/zulip,hackerkid/zulip,hafeez3000/zulip,vikas-parashar/zulip,jimmy54/zulip,mohsenSy/zulip,rht/zulip,suxinde2009/zulip,zhaoweigg/zulip,ericzhou2008/zulip,zacps/zulip,Juanvulcano/zulip,SmartPeople/zulip,SmartPeople/zulip,isht3/zulip,j831/zulip,ikasumiwt/zulip,jainayush975/zulip,wangdeshui/zulip,ikasumiwt/zulip,hj3938/zulip,shubhamdhama/zulip,aakash-cr7/zulip,xuxiao/zulip,peguin40/zulip,mdavid/zulip,JanzTam/zulip,verma-varsha/zulip,bitemyapp/zulip,m1ssou/zulip,fw1121/zulip
|
Add a management script to set stream colors for a domain.
(imported from commit 186e8226b57d385bbbed756615c0c63315c9d463)
|
from optparse import make_option
from django.core.management.base import BaseCommand
from django.db.models import Count
from zephyr.models import Realm, StreamColor, Stream, UserProfile, Subscription, \
Message, Recipient
class Command(BaseCommand):
help = """Colorize streams in a realm for people who have not already colored their streams."""
option_list = BaseCommand.option_list + (
make_option('-d', '--domain',
dest='domain',
type='str',
help='The name of the realm in which you are colorizing streams.'),
)
def handle(self, **options):
if options["domain"] is None:
self.print_help("python manage.py", "colorize_streams")
exit(1)
realm = Realm.objects.get(domain=options["domain"])
user_profiles = UserProfile.objects.filter(realm=realm)
users_who_need_colors = filter(lambda profile: StreamColor.objects.filter(
subscription__user_profile=profile).count() == 0, user_profiles)
# Hand-selected colors from the current swatch options,
# providing reasonable contrast for 1 - 7 streams.
colors = [
"#76ce90", # light forest green
"#f5ce6e", # goldenrod
"#a6c7e5", # light blue
"#b0a5fd", # volet
"#e79ab5", # pink
"#bfd56f", # greenish-yellow
"#f4ae55", # orange
]
print "Setting stream colors for:"
for user_profile in users_who_need_colors:
print " ", user_profile.full_name
stream_ids = [result['recipient__type_id'] for result in Message.objects.filter(
sender__realm=realm).values('recipient__type_id').annotate(
count=Count('recipient__type_id')).order_by('-count')]
print "Setting color for:"
for stream_id, color in zip(stream_ids, colors):
# Give everyone the same color for a stream.
print " ", Stream.objects.get(id=stream_id).name
# If this realm has more streams than preselected colors,
# only color the N most popular.
recipient = Recipient.objects.get(type=Recipient.STREAM, type_id=stream_id)
for user_profile in users_who_need_colors:
try:
subscription = Subscription.objects.get(user_profile=user_profile,
recipient=recipient)
except Subscription.DoesNotExist:
# Not subscribed
continue
StreamColor(subscription=subscription, color=color).save()
|
<commit_before><commit_msg>Add a management script to set stream colors for a domain.
(imported from commit 186e8226b57d385bbbed756615c0c63315c9d463)<commit_after>
|
from optparse import make_option
from django.core.management.base import BaseCommand
from django.db.models import Count
from zephyr.models import Realm, StreamColor, Stream, UserProfile, Subscription, \
Message, Recipient
class Command(BaseCommand):
help = """Colorize streams in a realm for people who have not already colored their streams."""
option_list = BaseCommand.option_list + (
make_option('-d', '--domain',
dest='domain',
type='str',
help='The name of the realm in which you are colorizing streams.'),
)
def handle(self, **options):
if options["domain"] is None:
self.print_help("python manage.py", "colorize_streams")
exit(1)
realm = Realm.objects.get(domain=options["domain"])
user_profiles = UserProfile.objects.filter(realm=realm)
users_who_need_colors = filter(lambda profile: StreamColor.objects.filter(
subscription__user_profile=profile).count() == 0, user_profiles)
# Hand-selected colors from the current swatch options,
# providing reasonable contrast for 1 - 7 streams.
colors = [
"#76ce90", # light forest green
"#f5ce6e", # goldenrod
"#a6c7e5", # light blue
"#b0a5fd", # volet
"#e79ab5", # pink
"#bfd56f", # greenish-yellow
"#f4ae55", # orange
]
print "Setting stream colors for:"
for user_profile in users_who_need_colors:
print " ", user_profile.full_name
stream_ids = [result['recipient__type_id'] for result in Message.objects.filter(
sender__realm=realm).values('recipient__type_id').annotate(
count=Count('recipient__type_id')).order_by('-count')]
print "Setting color for:"
for stream_id, color in zip(stream_ids, colors):
# Give everyone the same color for a stream.
print " ", Stream.objects.get(id=stream_id).name
# If this realm has more streams than preselected colors,
# only color the N most popular.
recipient = Recipient.objects.get(type=Recipient.STREAM, type_id=stream_id)
for user_profile in users_who_need_colors:
try:
subscription = Subscription.objects.get(user_profile=user_profile,
recipient=recipient)
except Subscription.DoesNotExist:
# Not subscribed
continue
StreamColor(subscription=subscription, color=color).save()
|
Add a management script to set stream colors for a domain.
(imported from commit 186e8226b57d385bbbed756615c0c63315c9d463)from optparse import make_option
from django.core.management.base import BaseCommand
from django.db.models import Count
from zephyr.models import Realm, StreamColor, Stream, UserProfile, Subscription, \
Message, Recipient
class Command(BaseCommand):
help = """Colorize streams in a realm for people who have not already colored their streams."""
option_list = BaseCommand.option_list + (
make_option('-d', '--domain',
dest='domain',
type='str',
help='The name of the realm in which you are colorizing streams.'),
)
def handle(self, **options):
if options["domain"] is None:
self.print_help("python manage.py", "colorize_streams")
exit(1)
realm = Realm.objects.get(domain=options["domain"])
user_profiles = UserProfile.objects.filter(realm=realm)
users_who_need_colors = filter(lambda profile: StreamColor.objects.filter(
subscription__user_profile=profile).count() == 0, user_profiles)
# Hand-selected colors from the current swatch options,
# providing reasonable contrast for 1 - 7 streams.
colors = [
"#76ce90", # light forest green
"#f5ce6e", # goldenrod
"#a6c7e5", # light blue
"#b0a5fd", # volet
"#e79ab5", # pink
"#bfd56f", # greenish-yellow
"#f4ae55", # orange
]
print "Setting stream colors for:"
for user_profile in users_who_need_colors:
print " ", user_profile.full_name
stream_ids = [result['recipient__type_id'] for result in Message.objects.filter(
sender__realm=realm).values('recipient__type_id').annotate(
count=Count('recipient__type_id')).order_by('-count')]
print "Setting color for:"
for stream_id, color in zip(stream_ids, colors):
# Give everyone the same color for a stream.
print " ", Stream.objects.get(id=stream_id).name
# If this realm has more streams than preselected colors,
# only color the N most popular.
recipient = Recipient.objects.get(type=Recipient.STREAM, type_id=stream_id)
for user_profile in users_who_need_colors:
try:
subscription = Subscription.objects.get(user_profile=user_profile,
recipient=recipient)
except Subscription.DoesNotExist:
# Not subscribed
continue
StreamColor(subscription=subscription, color=color).save()
|
<commit_before><commit_msg>Add a management script to set stream colors for a domain.
(imported from commit 186e8226b57d385bbbed756615c0c63315c9d463)<commit_after>from optparse import make_option
from django.core.management.base import BaseCommand
from django.db.models import Count
from zephyr.models import Realm, StreamColor, Stream, UserProfile, Subscription, \
Message, Recipient
class Command(BaseCommand):
help = """Colorize streams in a realm for people who have not already colored their streams."""
option_list = BaseCommand.option_list + (
make_option('-d', '--domain',
dest='domain',
type='str',
help='The name of the realm in which you are colorizing streams.'),
)
def handle(self, **options):
if options["domain"] is None:
self.print_help("python manage.py", "colorize_streams")
exit(1)
realm = Realm.objects.get(domain=options["domain"])
user_profiles = UserProfile.objects.filter(realm=realm)
users_who_need_colors = filter(lambda profile: StreamColor.objects.filter(
subscription__user_profile=profile).count() == 0, user_profiles)
# Hand-selected colors from the current swatch options,
# providing reasonable contrast for 1 - 7 streams.
colors = [
"#76ce90", # light forest green
"#f5ce6e", # goldenrod
"#a6c7e5", # light blue
"#b0a5fd", # volet
"#e79ab5", # pink
"#bfd56f", # greenish-yellow
"#f4ae55", # orange
]
print "Setting stream colors for:"
for user_profile in users_who_need_colors:
print " ", user_profile.full_name
stream_ids = [result['recipient__type_id'] for result in Message.objects.filter(
sender__realm=realm).values('recipient__type_id').annotate(
count=Count('recipient__type_id')).order_by('-count')]
print "Setting color for:"
for stream_id, color in zip(stream_ids, colors):
# Give everyone the same color for a stream.
print " ", Stream.objects.get(id=stream_id).name
# If this realm has more streams than preselected colors,
# only color the N most popular.
recipient = Recipient.objects.get(type=Recipient.STREAM, type_id=stream_id)
for user_profile in users_who_need_colors:
try:
subscription = Subscription.objects.get(user_profile=user_profile,
recipient=recipient)
except Subscription.DoesNotExist:
# Not subscribed
continue
StreamColor(subscription=subscription, color=color).save()
|
|
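One detail worth calling out in the record above: zip() stops at the shorter input, which is exactly how the script limits coloring to the N most popular streams when a realm has more streams than preselected colors. A tiny illustration with made-up ids:

stream_ids = [11, 7, 23, 5, 42, 8, 19, 3]   # ordered by descending message count
colors = ["#76ce90", "#f5ce6e", "#a6c7e5"]
for stream_id, color in zip(stream_ids, colors):
    print(stream_id, color)  # only the three most popular streams get a color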
3452d21aa9e7e427296c05e38a9caf70a62c7bb6
|
server/localfinance/scripts/addincome.py
|
server/localfinance/scripts/addincome.py
|
# -*- coding: utf-8 -*-
import os
import sys
import transaction
import pandas as pd
from sqlalchemy import engine_from_config
from pyramid.paster import (
get_appsettings,
setup_logging,
)
from ..models import (
DBSession,
AdminZone,
AdminZoneFinance,
)
def usage(argv):
cmd = os.path.basename(argv[0])
print('usage: %s <config_uri> <dirpath>\n'
'(example: "%s development.ini")' % (cmd, cmd))
sys.exit(1)
def get_income_per_year(year, dirpath):
short_year = str(year)[-2:]
columns = ['RFPQ1%s'%short_year, 'RFPQ2%s'%short_year, 'RFPQ3%s'%short_year, 'RFPIQ%s'%short_year, 'RFPET%s'%short_year, 'RFPMO%s'%short_year]
xls = pd.ExcelFile(os.path.join(dirpath, 'RFDP%sCOM.xls'%year))
com = xls.parse('D_P', skiprows=6)[['COM'] + columns]
com.set_index('COM', inplace=True)
return com
def main(argv=sys.argv):
if len(argv) < 3:
usage(argv)
config_uri = argv[1]
dirpath = argv[2]
setup_logging(config_uri)
settings = get_appsettings(config_uri)
engine = engine_from_config(settings, 'sqlalchemy.')
DBSession.configure(bind=engine)
az_id = pd.DataFrame(
(DBSession.query(AdminZone.code_insee, AdminZone.id).all()),
columns=['COM', 'AZ_ID'],
)
az_id.set_index('COM', inplace=True)
for year in range(2001, 2012):
joined_data = get_income_per_year(year, dirpath).join(az_id)
joined_data = joined_data[joined_data.AZ_ID.notnull()].reindex()
with transaction.manager:
for _, item in joined_data.iterrows():
dico = item.to_dict()
az_id = dico.pop('AZ_ID')
income = dico.pop('RFPQ2%s'%str(year)[-2:])
store = AdminZoneFinance.data
DBSession.query(AdminZoneFinance)\
.filter(AdminZoneFinance.adminzone_id==az_id)\
.update({store: store + {'revenu-par-personne': str(income)}},
synchronize_session=False)
if __name__ == '__main__':
main()
|
Add script to import income data
|
Add script to import income data
|
Python
|
mit
|
regardscitoyens/nosfinanceslocales,regardscitoyens/nosfinanceslocales,regardscitoyens/nosfinanceslocales
|
Add script to import income data
|
# -*- coding: utf-8 -*-
import os
import sys
import transaction
import pandas as pd
from sqlalchemy import engine_from_config
from pyramid.paster import (
get_appsettings,
setup_logging,
)
from ..models import (
DBSession,
AdminZone,
AdminZoneFinance,
)
def usage(argv):
cmd = os.path.basename(argv[0])
print('usage: %s <config_uri> <dirpath>\n'
'(example: "%s development.ini")' % (cmd, cmd))
sys.exit(1)
def get_income_per_year(year, dirpath):
short_year = str(year)[-2:]
columns = ['RFPQ1%s'%short_year, 'RFPQ2%s'%short_year, 'RFPQ3%s'%short_year, 'RFPIQ%s'%short_year, 'RFPET%s'%short_year, 'RFPMO%s'%short_year]
xls = pd.ExcelFile(os.path.join(dirpath, 'RFDP%sCOM.xls'%year))
com = xls.parse('D_P', skiprows=6)[['COM'] + columns]
com.set_index('COM', inplace=True)
return com
def main(argv=sys.argv):
if len(argv) < 3:
usage(argv)
config_uri = argv[1]
dirpath = argv[2]
setup_logging(config_uri)
settings = get_appsettings(config_uri)
engine = engine_from_config(settings, 'sqlalchemy.')
DBSession.configure(bind=engine)
az_id = pd.DataFrame(
(DBSession.query(AdminZone.code_insee, AdminZone.id).all()),
columns=['COM', 'AZ_ID'],
)
az_id.set_index('COM', inplace=True)
for year in range(2001, 2012):
joined_data = get_income_per_year(year, dirpath).join(az_id)
joined_data = joined_data[joined_data.AZ_ID.notnull()].reindex()
with transaction.manager:
for _, item in joined_data.iterrows():
dico = item.to_dict()
az_id = dico.pop('AZ_ID')
income = dico.pop('RFPQ2%s'%str(year)[-2:])
store = AdminZoneFinance.data
DBSession.query(AdminZoneFinance)\
.filter(AdminZoneFinance.adminzone_id==az_id)\
.update({store: store + {'revenu-par-personne': str(income)}},
synchronize_session=False)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to import income data<commit_after>
|
# -*- coding: utf-8 -*-
import os
import sys
import transaction
import pandas as pd
from sqlalchemy import engine_from_config
from pyramid.paster import (
get_appsettings,
setup_logging,
)
from ..models import (
DBSession,
AdminZone,
AdminZoneFinance,
)
def usage(argv):
cmd = os.path.basename(argv[0])
print('usage: %s <config_uri> <dirpath>\n'
'(example: "%s development.ini")' % (cmd, cmd))
sys.exit(1)
def get_income_per_year(year, dirpath):
short_year = str(year)[-2:]
columns = ['RFPQ1%s'%short_year, 'RFPQ2%s'%short_year, 'RFPQ3%s'%short_year, 'RFPIQ%s'%short_year, 'RFPET%s'%short_year, 'RFPMO%s'%short_year]
xls = pd.ExcelFile(os.path.join(dirpath, 'RFDP%sCOM.xls'%year))
com = xls.parse('D_P', skiprows=6)[['COM'] + columns]
com.set_index('COM', inplace=True)
return com
def main(argv=sys.argv):
if len(argv) < 3:
usage(argv)
config_uri = argv[1]
dirpath = argv[2]
setup_logging(config_uri)
settings = get_appsettings(config_uri)
engine = engine_from_config(settings, 'sqlalchemy.')
DBSession.configure(bind=engine)
az_id = pd.DataFrame(
(DBSession.query(AdminZone.code_insee, AdminZone.id).all()),
columns=['COM', 'AZ_ID'],
)
az_id.set_index('COM', inplace=True)
for year in range(2001, 2012):
joined_data = get_income_per_year(year, dirpath).join(az_id)
joined_data = joined_data[joined_data.AZ_ID.notnull()].reindex()
with transaction.manager:
for _, item in joined_data.iterrows():
dico = item.to_dict()
az_id = dico.pop('AZ_ID')
income = dico.pop('RFPQ2%s'%str(year)[-2:])
store = AdminZoneFinance.data
DBSession.query(AdminZoneFinance)\
.filter(AdminZoneFinance.adminzone_id==az_id)\
.update({store: store + {'revenu-par-personne': str(income)}},
synchronize_session=False)
if __name__ == '__main__':
main()
|
Add script to import income data# -*- coding: utf-8 -*-
import os
import sys
import transaction
import pandas as pd
from sqlalchemy import engine_from_config
from pyramid.paster import (
get_appsettings,
setup_logging,
)
from ..models import (
DBSession,
AdminZone,
AdminZoneFinance,
)
def usage(argv):
cmd = os.path.basename(argv[0])
print('usage: %s <config_uri> <dirpath>\n'
'(example: "%s development.ini")' % (cmd, cmd))
sys.exit(1)
def get_income_per_year(year, dirpath):
short_year = str(year)[-2:]
columns = ['RFPQ1%s'%short_year, 'RFPQ2%s'%short_year, 'RFPQ3%s'%short_year, 'RFPIQ%s'%short_year, 'RFPET%s'%short_year, 'RFPMO%s'%short_year]
xls = pd.ExcelFile(os.path.join(dirpath, 'RFDP%sCOM.xls'%year))
com = xls.parse('D_P', skiprows=6)[['COM'] + columns]
com.set_index('COM', inplace=True)
return com
def main(argv=sys.argv):
if len(argv) < 3:
usage(argv)
config_uri = argv[1]
dirpath = argv[2]
setup_logging(config_uri)
settings = get_appsettings(config_uri)
engine = engine_from_config(settings, 'sqlalchemy.')
DBSession.configure(bind=engine)
az_id = pd.DataFrame(
(DBSession.query(AdminZone.code_insee, AdminZone.id).all()),
columns=['COM', 'AZ_ID'],
)
az_id.set_index('COM', inplace=True)
for year in range(2001, 2012):
joined_data = get_income_per_year(year, dirpath).join(az_id)
joined_data = joined_data[joined_data.AZ_ID.notnull()].reindex()
with transaction.manager:
for _, item in joined_data.iterrows():
dico = item.to_dict()
az_id = dico.pop('AZ_ID')
income = dico.pop('RFPQ2%s'%str(year)[-2:])
store = AdminZoneFinance.data
DBSession.query(AdminZoneFinance)\
.filter(AdminZoneFinance.adminzone_id==az_id)\
.update({store: store + {'revenu-par-personne': str(income)}},
synchronize_session=False)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to import income data<commit_after># -*- coding: utf-8 -*-
import os
import sys
import transaction
import pandas as pd
from sqlalchemy import engine_from_config
from pyramid.paster import (
get_appsettings,
setup_logging,
)
from ..models import (
DBSession,
AdminZone,
AdminZoneFinance,
)
def usage(argv):
cmd = os.path.basename(argv[0])
print('usage: %s <config_uri> <dirpath>\n'
'(example: "%s development.ini")' % (cmd, cmd))
sys.exit(1)
def get_income_per_year(year, dirpath):
short_year = str(year)[-2:]
columns = ['RFPQ1%s'%short_year, 'RFPQ2%s'%short_year, 'RFPQ3%s'%short_year, 'RFPIQ%s'%short_year, 'RFPET%s'%short_year, 'RFPMO%s'%short_year]
xls = pd.ExcelFile(os.path.join(dirpath, 'RFDP%sCOM.xls'%year))
com = xls.parse('D_P', skiprows=6)[['COM'] + columns]
com.set_index('COM', inplace=True)
return com
def main(argv=sys.argv):
if len(argv) < 3:
usage(argv)
config_uri = argv[1]
dirpath = argv[2]
setup_logging(config_uri)
settings = get_appsettings(config_uri)
engine = engine_from_config(settings, 'sqlalchemy.')
DBSession.configure(bind=engine)
az_id = pd.DataFrame(
(DBSession.query(AdminZone.code_insee, AdminZone.id).all()),
columns=['COM', 'AZ_ID'],
)
az_id.set_index('COM', inplace=True)
for year in range(2001, 2012):
joined_data = get_income_per_year(year, dirpath).join(az_id)
joined_data = joined_data[joined_data.AZ_ID.notnull()].reindex()
with transaction.manager:
for _, item in joined_data.iterrows():
dico = item.to_dict()
az_id = dico.pop('AZ_ID')
income = dico.pop('RFPQ2%s'%str(year)[-2:])
store = AdminZoneFinance.data
DBSession.query(AdminZoneFinance)\
.filter(AdminZoneFinance.adminzone_id==az_id)\
.update({store: store + {'revenu-par-personne': str(income)}},
synchronize_session=False)
if __name__ == '__main__':
main()
|
|
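The import above hinges on DataFrame.join, which is a left join on the index: income rows keyed by INSEE code are matched against the AdminZone id lookup, and communes with no id are filtered out. A minimal reproduction with made-up data (requires pandas):

import pandas as pd

income = pd.DataFrame({'RFPQ209': [17000.0, 15500.0, 21000.0]},
                      index=['01001', '01002', '01004'])
az_id = pd.DataFrame({'AZ_ID': [10, 20]}, index=['01001', '01002'])
joined = income.join(az_id)             # '01004' gets AZ_ID = NaN
print(joined[joined.AZ_ID.notnull()])   # rows without an AdminZone drop out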
2ef9c50edb9e7d6488ec2b580c552f4c66fe5209
|
tensorflow/tools/ci_build/builds/check_system_libs.py
|
tensorflow/tools/ci_build/builds/check_system_libs.py
|
#!/usr/bin/env python
# Checks that the options mentioned in syslibs_configure.bzl are consistent with the valid options in workspace.bzl
# Expects the tensorflow source folder as the first argument
import sys
import os
from glob import glob
tf_source_path = sys.argv[1]
if not os.path.isdir(tf_source_path):
raise ValueError('The path to the TensorFlow source must be passed as'
' the first argument')
syslibs_configure_path = os.path.join(tf_source_path, 'third_party',
'systemlibs', 'syslibs_configure.bzl')
workspace_path = os.path.join(tf_source_path, 'tensorflow', 'workspace.bzl')
third_party_path = os.path.join(tf_source_path, 'third_party')
third_party_glob = os.path.join(third_party_path, '*', 'workspace.bzl')
# Stub only
def repository_rule(**kwargs):
del kwargs
# Populates VALID_LIBS
with open(syslibs_configure_path, 'r') as f:
exec(f.read())
syslibs = set(VALID_LIBS)
syslibs_from_workspace = set()
def extract_system_builds(filepath):
current_name = None
with open(filepath, 'r') as f:
for line in f:
line = line.strip()
if line.startswith('name = '):
current_name = line[7:-1].strip('"')
elif line.startswith('system_build_file = '):
syslibs_from_workspace.add(current_name)
for current_path in [workspace_path] + glob(third_party_glob):
extract_system_builds(current_path)
if syslibs != syslibs_from_workspace:
missing_syslibs = syslibs_from_workspace - syslibs
if missing_syslibs:
libs = ', '.join(sorted(missing_syslibs))
print('Libs missing from syslibs_configure: ' + libs)
additional_syslibs = syslibs - syslibs_from_workspace
if additional_syslibs:
libs = ', '.join(sorted(additional_syslibs))
print('Libs missing in workspace (or superfluous in syslibs_configure): '
+ libs)
sys.exit(1)
|
Add script to validate system libs
|
Add script to validate system libs
|
Python
|
apache-2.0
|
karllessard/tensorflow,karllessard/tensorflow,frreiss/tensorflow-fred,Intel-Corporation/tensorflow,tensorflow/tensorflow,paolodedios/tensorflow,frreiss/tensorflow-fred,petewarden/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-tensorflow/tensorflow,annarev/tensorflow,gautam1858/tensorflow,gautam1858/tensorflow,freedomtan/tensorflow,freedomtan/tensorflow,Intel-Corporation/tensorflow,petewarden/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,gautam1858/tensorflow,tensorflow/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer,gautam1858/tensorflow,yongtang/tensorflow,paolodedios/tensorflow,karllessard/tensorflow,cxxgtxy/tensorflow,sarvex/tensorflow,frreiss/tensorflow-fred,karllessard/tensorflow,cxxgtxy/tensorflow,freedomtan/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_tf_optimizer,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-tensorflow/tensorflow,annarev/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,frreiss/tensorflow-fred,Intel-Corporation/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,petewarden/tensorflow,tensorflow/tensorflow,karllessard/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,karllessard/tensorflow,yongtang/tensorflow,petewarden/tensorflow,cxxgtxy/tensorflow,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,petewarden/tensorflow,tensorflow/tensorflow,annarev/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_saved_model,annarev/tensorflow,annarev/tensorflow,sarvex/tensorflow,petewarden/tensorflow,frreiss/tensorflow-fred,yongtang/tensorflow,Intel-Corporation/tensorflow,petewarden/tensorflow,tensorflow/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow-pywrap_tf_optimizer,sarvex/tensorflow,petewarden/tensorflow,annarev/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_saved_model,petewarden/tensorflow,gautam1858/tensorflow,sarvex/tensorflow,sarvex/tensorflow,cxxgtxy/tensorflow,freedomtan/tensorflow,Intel-tensorflow/tensorflow,gautam1858/tensorflow,frreiss/tensorflow-fred,karllessard/tensorflow,tensorflow/tensorflow,freedomtan/tensorflow,paolodedios/tensorflow,cxxgtxy/tensorflow,gautam1858/tensorflow,Intel-tensorflow/tensorflow,yongtang/tensorflow,karllessard/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,sarvex/tensorflow,petewarden/tensorflow,tensorflow/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,freedomtan/tensorflow,tensorflow/tensorflow,annarev/tensorflow,petewarden/tensorflow,tensorflow/tensorflow-pywrap_saved_model,annarev/tensorflow,cxxgtxy/tensorflow,sarvex/tensorflow,cxxgtxy/tensorflow,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-tensorflow/tensorflow,karllessard/tensorflow,Intel-tensorflow/tensorflow,paolodedios/tensorflow,freedomtan/tensorflow,tensorflow/tensorflow-pywrap_saved_model,annarev/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_saved_model,petewarden/tensorflow,annarev/tensorflow,karllessard/tensorflow,frreiss/tensorflow-fred,gautam1858/tensorflow,yongtang/tensorflow,cxxgtxy/tensorflow,Intel-tensorflow/tensorflow,freedomtan/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,freedomtan/tensorflow,sarvex/tensorflow,tensorflow/tensorflow,gautam1858/tensorflow,frreiss/tensorflow-fred,Intel-Corporation/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,freedomtan/tensorflow,tensorflow/tensorflow-pywrap_saved_model,Intel-Corporation/tensorflow,paolodedios/tensorflow,yongtang/tensorflow,yongtang/tensorflow,annarev/tensorflow,frreiss/tensorflow-fred,freedomtan/tensorflow,gautam1858/tensorflow,freedomtan/tensorflow,paolodedios/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once
|
Add script to validate system libs
|
#!/usr/bin/env python
# Checks that the options mentioned in syslibs_configure.bzl are consistent with the valid options in workspace.bzl
# Expects the tensorflow source folder as the first argument
import sys
import os
from glob import glob
tf_source_path = sys.argv[1]
if not os.path.isdir(tf_source_path):
raise ValueError('The path to the TensorFlow source must be passed as'
' the first argument')
syslibs_configure_path = os.path.join(tf_source_path, 'third_party',
'systemlibs', 'syslibs_configure.bzl')
workspace_path = os.path.join(tf_source_path, 'tensorflow', 'workspace.bzl')
third_party_path = os.path.join(tf_source_path, 'third_party')
third_party_glob = os.path.join(third_party_path, '*', 'workspace.bzl')
# Stub only
def repository_rule(**kwargs):
del kwargs
# Populates VALID_LIBS
with open(syslibs_configure_path, 'r') as f:
exec(f.read())
syslibs = set(VALID_LIBS)
syslibs_from_workspace = set()
def extract_system_builds(filepath):
current_name = None
with open(filepath, 'r') as f:
for line in f:
line = line.strip()
if line.startswith('name = '):
current_name = line[7:-1].strip('"')
elif line.startswith('system_build_file = '):
syslibs_from_workspace.add(current_name)
for current_path in [workspace_path] + glob(third_party_glob):
extract_system_builds(current_path)
if syslibs != syslibs_from_workspace:
missing_syslibs = syslibs_from_workspace - syslibs
if missing_syslibs:
libs = ', '.join(sorted(missing_syslibs))
print('Libs missing from syslibs_configure: ' + libs)
additional_syslibs = syslibs - syslibs_from_workspace
if additional_syslibs:
libs = ', '.join(sorted(additional_syslibs))
print('Libs missing in workspace (or superfluous in syslibs_configure): '
+ libs)
sys.exit(1)
|
<commit_before><commit_msg>Add script to validate system libs<commit_after>
|
#!/usr/bin/env python
# Checks that the options mentioned in syslibs_configure.bzl are consistent with the valid options in workspace.bzl
# Expects the tensorflow source folder as the first argument
import sys
import os
from glob import glob
tf_source_path = sys.argv[1]
if not os.path.isdir(tf_source_path):
raise ValueError('The path to the TensorFlow source must be passed as'
' the first argument')
syslibs_configure_path = os.path.join(tf_source_path, 'third_party',
'systemlibs', 'syslibs_configure.bzl')
workspace_path = os.path.join(tf_source_path, 'tensorflow', 'workspace.bzl')
third_party_path = os.path.join(tf_source_path, 'third_party')
third_party_glob = os.path.join(third_party_path, '*', 'workspace.bzl')
# Stub only
def repository_rule(**kwargs):
del kwargs
# Populates VALID_LIBS
with open(syslibs_configure_path, 'r') as f:
exec(f.read())
syslibs = set(VALID_LIBS)
syslibs_from_workspace = set()
def extract_system_builds(filepath):
current_name = None
with open(filepath, 'r') as f:
for line in f:
line = line.strip()
if line.startswith('name = '):
current_name = line[7:-1].strip('"')
elif line.startswith('system_build_file = '):
syslibs_from_workspace.add(current_name)
for current_path in [workspace_path] + glob(third_party_glob):
extract_system_builds(current_path)
if syslibs != syslibs_from_workspace:
missing_syslibs = syslibs_from_workspace - syslibs
if missing_syslibs:
libs = ', '.join(sorted(missing_syslibs))
print('Libs missing from syslibs_configure: ' + libs)
additional_syslibs = syslibs - syslibs_from_workspace
if additional_syslibs:
libs = ', '.join(sorted(additional_syslibs))
print('Libs missing in workspace (or superfluous in syslibs_configure): '
+ libs)
sys.exit(1)
|
Add script to validate system libs#!/usr/bin/env python
# Checks that the options mentioned in syslibs_configure.bzl are consistent with the valid options in workspace.bzl
# Expects the tensorflow source folder as the first argument
import sys
import os
from glob import glob
tf_source_path = sys.argv[1]
if not os.path.isdir(tf_source_path):
raise ValueError('The path to the TensorFlow source must be passed as'
' the first argument')
syslibs_configure_path = os.path.join(tf_source_path, 'third_party',
'systemlibs', 'syslibs_configure.bzl')
workspace_path = os.path.join(tf_source_path, 'tensorflow', 'workspace.bzl')
third_party_path = os.path.join(tf_source_path, 'third_party')
third_party_glob = os.path.join(third_party_path, '*', 'workspace.bzl')
# Stub only
def repository_rule(**kwargs):
del kwargs
# Populates VALID_LIBS
with open(syslibs_configure_path, 'r') as f:
exec(f.read())
syslibs = set(VALID_LIBS)
syslibs_from_workspace = set()
def extract_system_builds(filepath):
current_name = None
with open(filepath, 'r') as f:
for line in f:
line = line.strip()
if line.startswith('name = '):
current_name = line[7:-1].strip('"')
elif line.startswith('system_build_file = '):
syslibs_from_workspace.add(current_name)
for current_path in [workspace_path] + glob(third_party_glob):
extract_system_builds(current_path)
if syslibs != syslibs_from_workspace:
missing_syslibs = syslibs_from_workspace - syslibs
if missing_syslibs:
libs = ', '.join(sorted(missing_syslibs))
print('Libs missing from syslibs_configure: ' + libs)
additional_syslibs = syslibs - syslibs_from_workspace
if additional_syslibs:
libs = ', '.join(sorted(additional_syslibs))
print('Libs missing in workspace (or superfluous in syslibs_configure): '
+ libs)
sys.exit(1)
|
<commit_before><commit_msg>Add script to validate system libs<commit_after>#!/usr/bin/env python
# Checks that the options mentioned in syslibs_configure.bzl are consistent with the valid options in workspace.bzl
# Expects the tensorflow source folder as the first argument
import sys
import os
from glob import glob
tf_source_path = sys.argv[1]
if not os.path.isdir(tf_source_path):
raise ValueError('The path to the TensorFlow source must be passed as'
' the first argument')
syslibs_configure_path = os.path.join(tf_source_path, 'third_party',
'systemlibs', 'syslibs_configure.bzl')
workspace_path = os.path.join(tf_source_path, 'tensorflow', 'workspace.bzl')
third_party_path = os.path.join(tf_source_path, 'third_party')
third_party_glob = os.path.join(third_party_path, '*', 'workspace.bzl')
# Stub only
def repository_rule(**kwargs):
del kwargs
# Populates VALID_LIBS
with open(syslibs_configure_path, 'r') as f:
exec(f.read())
syslibs = set(VALID_LIBS)
syslibs_from_workspace = set()
def extract_system_builds(filepath):
current_name = None
with open(filepath, 'r') as f:
for line in f:
line = line.strip()
if line.startswith('name = '):
current_name = line[7:-1].strip('"')
elif line.startswith('system_build_file = '):
syslibs_from_workspace.add(current_name)
for current_path in [workspace_path] + glob(third_party_glob):
extract_system_builds(current_path)
if syslibs != syslibs_from_workspace:
missing_syslibs = syslibs_from_workspace - syslibs
if missing_syslibs:
libs = ', '.join(sorted(missing_syslibs))
print('Libs missing from syslibs_configure: ' + libs)
additional_syslibs = syslibs - syslibs_from_workspace
if additional_syslibs:
libs = ', '.join(sorted(additional_syslibs))
print('Libs missing in workspace (or superfluous in syslibs_configure): '
+ libs)
sys.exit(1)
|
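The script's central trick is evaluating a Starlark file as Python so its module-level constant becomes reachable, with repository_rule stubbed out. A sketch of the same idea using an explicit namespace instead of module globals (the file path is an assumption):

# Sketch: exec a .bzl-style file into a throwaway dict so VALID_LIBS can be
# read back without polluting this module's globals.
namespace = {'repository_rule': lambda **kwargs: None}
with open('third_party/systemlibs/syslibs_configure.bzl') as f:
    exec(f.read(), namespace)
valid_libs = set(namespace['VALID_LIBS'])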
|
c0d7fe0548bb8b00fd612e494ee8eb38a24f927d
|
tests/parser_db.py
|
tests/parser_db.py
|
from compiler import error, parse
class ParserDB():
"""A class for parsing with memoized parsers."""
parsers = {}
@classmethod
def _parse(cls, data, start='program'):
mock = error.LoggerMock()
try:
parser = cls.parsers[start]
except KeyError:
parser = cls.parsers[start] = parse.Parser(
logger=mock,
start=start
)
tree = parser.parse(data=data)
return tree
|
Add class to abstract parser memoization.
|
Tests: Add class to abstract parser memoization.
|
Python
|
mit
|
Renelvon/llama,dionyziz/llama,Renelvon/llama,dionyziz/llama
|
Tests: Add class to abstract parser memoization.
|
from compiler import error, parse
class ParserDB():
"""A class for parsing with memoized parsers."""
parsers = {}
@classmethod
def _parse(cls, data, start='program'):
mock = error.LoggerMock()
try:
parser = cls.parsers[start]
except KeyError:
parser = cls.parsers[start] = parse.Parser(
logger=mock,
start=start
)
tree = parser.parse(data=data)
return tree
|
<commit_before><commit_msg>Tests: Add class to abstract parser memoization.<commit_after>
|
from compiler import error, parse
class ParserDB():
"""A class for parsing with memoized parsers."""
parsers = {}
@classmethod
def _parse(cls, data, start='program'):
mock = error.LoggerMock()
try:
parser = cls.parsers[start]
except KeyError:
parser = cls.parsers[start] = parse.Parser(
logger=mock,
start=start
)
tree = parser.parse(data=data)
return tree
|
Tests: Add class to abstract parser memoization.from compiler import error, parse
class ParserDB():
"""A class for parsing with memoized parsers."""
parsers = {}
@classmethod
def _parse(cls, data, start='program'):
mock = error.LoggerMock()
try:
parser = cls.parsers[start]
except KeyError:
parser = cls.parsers[start] = parse.Parser(
logger=mock,
start=start
)
tree = parser.parse(data=data)
return tree
|
<commit_before><commit_msg>Tests: Add class to abstract parser memoization.<commit_after>from compiler import error, parse
class ParserDB():
"""A class for parsing with memoized parsers."""
parsers = {}
@classmethod
def _parse(cls, data, start='program'):
mock = error.LoggerMock()
try:
parser = cls.parsers[start]
except KeyError:
parser = cls.parsers[start] = parse.Parser(
logger=mock,
start=start
)
tree = parser.parse(data=data)
return tree
|
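The class above is a keyed-singleton cache: one Parser per start symbol, built on first use. The same shape, stripped of the compiler dependency so it runs standalone:

class SolverDB:
    """Memoize one expensive object per key, as ParserDB does per start symbol."""
    _cache = {}

    @classmethod
    def get(cls, key):
        try:
            return cls._cache[key]
        except KeyError:
            obj = cls._cache[key] = object()  # stand-in for parse.Parser(...)
            return obj

assert SolverDB.get('program') is SolverDB.get('program')  # cached instance reused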
|
670f08e73404c219df707a7a90b3c7b658086d1e
|
scripts/update_user_location.py
|
scripts/update_user_location.py
|
from google.cloud import firestore
import argparse
import datetime
import helpers
import random
def updateLocation(db, user, ref_lat, ref_lon, range):
doc_ref = db.collection(u'users').document(user.id)
lat = ref_lat + random.uniform(-range, range)
lon = ref_lon + random.uniform(-range, range)
doc_ref.update({
u'timestamp': datetime.datetime.utcnow(),
u'location': [lat, lon]
})
doc = doc_ref.get()
helpers.printSnapshot(doc)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--all")
parser.add_argument("-c", "--count", type=int, default=0)
parser.add_argument("--lat", type=float, default=37.0)
parser.add_argument("--lon", type=float, default=-122.0)
parser.add_argument("--range", type=float, default=-0.000001)
args = parser.parse_args()
db = firestore.Client()
users = helpers.queryUsers(db)
index = 0
for user in users:
userDict = user.to_dict()
if args.all is not None or (u'createdByScript' in userDict and userDict.get(u'createdByScript')):
updateLocation(db, user, args.lat, args.lon, args.range)
index = index + 1
if index >= args.count and args.count != 0:
break
|
Add script to update user location
|
Add script to update user location
|
Python
|
mit
|
frinder/frinder-app,frinder/frinder-app,frinder/frinder-app
|
Add script to update user location
|
from google.cloud import firestore
import argparse
import datetime
import helpers
import random
def updateLocation(db, user, ref_lat, ref_lon, range):
doc_ref = db.collection(u'users').document(user.id)
lat = ref_lat + random.uniform(-range, range)
lon = ref_lon + random.uniform(-range, range)
doc_ref.update({
u'timestamp': datetime.datetime.utcnow(),
u'location': [lat, lon]
})
doc = doc_ref.get()
helpers.printSnapshot(doc)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--all")
parser.add_argument("-c", "--count", type=int, default=0)
parser.add_argument("--lat", type=float, default=37.0)
parser.add_argument("--lon", type=float, default=-122.0)
parser.add_argument("--range", type=float, default=-0.000001)
args = parser.parse_args()
db = firestore.Client()
users = helpers.queryUsers(db)
index = 0
for user in users:
userDict = user.to_dict()
if args.all is not None or (u'createdByScript' in userDict and userDict.get(u'createdByScript')):
updateLocation(db, user, args.lat, args.lon, args.range)
index = index + 1
if index >= args.count and args.count != 0:
break
|
<commit_before><commit_msg>Add script to update user location<commit_after>
|
from google.cloud import firestore
import argparse
import datetime
import helpers
import random
def updateLocation(db, user, ref_lat, ref_lon, range):
doc_ref = db.collection(u'users').document(user.id)
lat = ref_lat + random.uniform(-range, range)
lon = ref_lon + random.uniform(-range, range)
doc_ref.update({
u'timestamp': datetime.datetime.utcnow(),
u'location': [lat, lon]
})
doc = doc_ref.get()
helpers.printSnapshot(doc)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--all")
parser.add_argument("-c", "--count", type=int, default=0)
parser.add_argument("--lat", type=float, default=37.0)
parser.add_argument("--lon", type=float, default=-122.0)
parser.add_argument("--range", type=float, default=-0.000001)
args = parser.parse_args()
db = firestore.Client()
users = helpers.queryUsers(db)
index = 0
for user in users:
userDict = user.to_dict()
if args.all is not None or (u'createdByScript' in userDict and userDict.get(u'createdByScript')):
updateLocation(db, user, args.lat, args.lon, args.range)
index = index + 1
if index >= args.count and args.count != 0:
break
|
Add script to update user locationfrom google.cloud import firestore
import argparse
import datetime
import helpers
import random
def updateLocation(db, user, ref_lat, ref_lon, range):
doc_ref = db.collection(u'users').document(user.id)
lat = ref_lat + random.uniform(-range, range)
lon = ref_lon + random.uniform(-range, range)
doc_ref.update({
u'timestamp': datetime.datetime.utcnow(),
u'location': [lat, lon]
})
doc = doc_ref.get()
helpers.printSnapshot(doc)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--all")
parser.add_argument("-c", "--count", type=int, default=0)
parser.add_argument("--lat", type=float, default=37.0)
parser.add_argument("--lon", type=float, default=-122.0)
parser.add_argument("--range", type=float, default=-0.000001)
args = parser.parse_args()
db = firestore.Client()
users = helpers.queryUsers(db)
index = 0
for user in users:
userDict = user.to_dict()
if args.all is not None or (u'createdByScript' in userDict and userDict.get(u'createdByScript')):
updateLocation(db, user, args.lat, args.lon, args.range)
index = index + 1
if index >= args.count and args.count != 0:
break
|
<commit_before><commit_msg>Add script to update user location<commit_after>from google.cloud import firestore
import argparse
import datetime
import helpers
import random
def updateLocation(db, user, ref_lat, ref_lon, range):
doc_ref = db.collection(u'users').document(user.id)
lat = ref_lat + random.uniform(-range, range)
lon = ref_lon + random.uniform(-range, range)
doc_ref.update({
u'timestamp': datetime.datetime.utcnow(),
u'location': [lat, lon]
})
doc = doc_ref.get()
helpers.printSnapshot(doc)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--all")
parser.add_argument("-c", "--count", type=int, default=0)
parser.add_argument("--lat", type=float, default=37.0)
parser.add_argument("--lon", type=float, default=-122.0)
parser.add_argument("--range", type=float, default=-0.000001)
args = parser.parse_args()
db = firestore.Client()
users = helpers.queryUsers(db)
index = 0
for user in users:
userDict = user.to_dict()
if args.all is not None or (u'createdByScript' in userDict and userDict.get(u'createdByScript')):
updateLocation(db, user, args.lat, args.lon, args.range)
index = index + 1
if index >= args.count and args.count != 0:
break
|
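The location update boils down to jittering a reference coordinate. A standalone sketch of that step; note that random.uniform accepts its bounds in either order, which is why the script's negative --range default still produces valid offsets:

import random

def jitter(lat, lon, spread):
    # Offsets are drawn independently for each axis.
    return (lat + random.uniform(-spread, spread),
            lon + random.uniform(-spread, spread))

print(jitter(37.0, -122.0, 0.01))  # e.g. (37.0063..., -122.0041...)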
|
788700899de2b6576af6b7230b2d68e6dff278a5
|
numba/tests/test_func_lifetime.py
|
numba/tests/test_func_lifetime.py
|
from __future__ import print_function, absolute_import
import gc
import weakref
from numba import unittest_support as unittest
from numba.utils import IS_PY3
from numba import jit, types
from .support import TestCase
def global_func(x):
return x + 1
class TestFuncLifetime(TestCase):
"""
Test the lifetime of compiled function objects.
"""
# NOTE: there's a test for closure lifetime in test_closure
def check_local_func_lifetime(self, **jitargs):
def f(x):
return x + 1
c_f = jit(**jitargs)(f)
self.assertPreciseEqual(c_f(1), 2)
refs = [weakref.ref(obj) for obj in (f, c_f)]
obj = f = c_f = None
gc.collect()
self.assertEqual([wr() for wr in refs], [None] * len(refs))
def test_local_func_lifetime(self):
self.check_local_func_lifetime(forceobj=True)
def test_local_func_lifetime_npm(self):
self.check_local_func_lifetime(nopython=True)
def check_global_func_lifetime(self, **jitargs):
c_f = jit(**jitargs)(global_func)
self.assertPreciseEqual(c_f(1), 2)
wr = weakref.ref(c_f)
c_f = None
gc.collect()
self.assertIs(wr(), None)
def test_global_func_lifetime(self):
self.check_global_func_lifetime(forceobj=True)
def test_global_func_lifetime_npm(self):
self.check_global_func_lifetime(nopython=True)
if __name__ == '__main__':
unittest.main()
|
Add a simple test for function lifetime
|
Add a simple test for function lifetime
|
Python
|
bsd-2-clause
|
cpcloud/numba,gmarkall/numba,stuartarchibald/numba,pitrou/numba,gmarkall/numba,pombredanne/numba,IntelLabs/numba,IntelLabs/numba,stuartarchibald/numba,gdementen/numba,ssarangi/numba,GaZ3ll3/numba,IntelLabs/numba,jriehl/numba,GaZ3ll3/numba,GaZ3ll3/numba,numba/numba,sklam/numba,gmarkall/numba,ssarangi/numba,stonebig/numba,seibert/numba,numba/numba,stonebig/numba,stonebig/numba,cpcloud/numba,gdementen/numba,pitrou/numba,cpcloud/numba,gdementen/numba,stefanseefeld/numba,stefanseefeld/numba,jriehl/numba,gmarkall/numba,gdementen/numba,jriehl/numba,pombredanne/numba,seibert/numba,numba/numba,sklam/numba,gdementen/numba,sklam/numba,jriehl/numba,seibert/numba,cpcloud/numba,stefanseefeld/numba,stonebig/numba,pitrou/numba,ssarangi/numba,pitrou/numba,jriehl/numba,sklam/numba,sklam/numba,stefanseefeld/numba,stonebig/numba,gmarkall/numba,pombredanne/numba,IntelLabs/numba,seibert/numba,cpcloud/numba,stuartarchibald/numba,numba/numba,pitrou/numba,ssarangi/numba,IntelLabs/numba,numba/numba,stefanseefeld/numba,pombredanne/numba,stuartarchibald/numba,seibert/numba,stuartarchibald/numba,GaZ3ll3/numba,GaZ3ll3/numba,pombredanne/numba,ssarangi/numba
|
Add a simple test for function lifetime
|
from __future__ import print_function, absolute_import
import gc
import weakref
from numba import unittest_support as unittest
from numba.utils import IS_PY3
from numba import jit, types
from .support import TestCase
def global_func(x):
return x + 1
class TestFuncLifetime(TestCase):
"""
Test the lifetime of compiled function objects.
"""
# NOTE: there's a test for closure lifetime in test_closure
def check_local_func_lifetime(self, **jitargs):
def f(x):
return x + 1
c_f = jit(**jitargs)(f)
self.assertPreciseEqual(c_f(1), 2)
refs = [weakref.ref(obj) for obj in (f, c_f)]
obj = f = c_f = None
gc.collect()
self.assertEqual([wr() for wr in refs], [None] * len(refs))
def test_local_func_lifetime(self):
self.check_local_func_lifetime(forceobj=True)
def test_local_func_lifetime_npm(self):
self.check_local_func_lifetime(nopython=True)
def check_global_func_lifetime(self, **jitargs):
c_f = jit(**jitargs)(global_func)
self.assertPreciseEqual(c_f(1), 2)
wr = weakref.ref(c_f)
c_f = None
gc.collect()
self.assertIs(wr(), None)
def test_global_func_lifetime(self):
self.check_global_func_lifetime(forceobj=True)
def test_global_func_lifetime_npm(self):
self.check_global_func_lifetime(nopython=True)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add a simple test for function lifetime<commit_after>
|
from __future__ import print_function, absolute_import
import gc
import weakref
from numba import unittest_support as unittest
from numba.utils import IS_PY3
from numba import jit, types
from .support import TestCase
def global_func(x):
return x + 1
class TestFuncLifetime(TestCase):
"""
Test the lifetime of compiled function objects.
"""
# NOTE: there's a test for closure lifetime in test_closure
def check_local_func_lifetime(self, **jitargs):
def f(x):
return x + 1
c_f = jit(**jitargs)(f)
self.assertPreciseEqual(c_f(1), 2)
refs = [weakref.ref(obj) for obj in (f, c_f)]
obj = f = c_f = None
gc.collect()
self.assertEqual([wr() for wr in refs], [None] * len(refs))
def test_local_func_lifetime(self):
self.check_local_func_lifetime(forceobj=True)
def test_local_func_lifetime_npm(self):
self.check_local_func_lifetime(nopython=True)
def check_global_func_lifetime(self, **jitargs):
c_f = jit(**jitargs)(global_func)
self.assertPreciseEqual(c_f(1), 2)
wr = weakref.ref(c_f)
c_f = None
gc.collect()
self.assertIs(wr(), None)
def test_global_func_lifetime(self):
self.check_global_func_lifetime(forceobj=True)
def test_global_func_lifetime_npm(self):
self.check_global_func_lifetime(nopython=True)
if __name__ == '__main__':
unittest.main()
|
Add a simple test for function lifetime
from __future__ import print_function, absolute_import
import gc
import weakref
from numba import unittest_support as unittest
from numba.utils import IS_PY3
from numba import jit, types
from .support import TestCase
def global_func(x):
return x + 1
class TestFuncLifetime(TestCase):
"""
Test the lifetime of compiled function objects.
"""
# NOTE: there's a test for closure lifetime in test_closure
def check_local_func_lifetime(self, **jitargs):
def f(x):
return x + 1
c_f = jit(**jitargs)(f)
self.assertPreciseEqual(c_f(1), 2)
refs = [weakref.ref(obj) for obj in (f, c_f)]
obj = f = c_f = None
gc.collect()
self.assertEqual([wr() for wr in refs], [None] * len(refs))
def test_local_func_lifetime(self):
self.check_local_func_lifetime(forceobj=True)
def test_local_func_lifetime_npm(self):
self.check_local_func_lifetime(nopython=True)
def check_global_func_lifetime(self, **jitargs):
c_f = jit(**jitargs)(global_func)
self.assertPreciseEqual(c_f(1), 2)
wr = weakref.ref(c_f)
c_f = None
gc.collect()
self.assertIs(wr(), None)
def test_global_func_lifetime(self):
self.check_global_func_lifetime(forceobj=True)
def test_global_func_lifetime_npm(self):
self.check_global_func_lifetime(nopython=True)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add a simple test for function lifetime<commit_after>
from __future__ import print_function, absolute_import
import gc
import weakref
from numba import unittest_support as unittest
from numba.utils import IS_PY3
from numba import jit, types
from .support import TestCase
def global_func(x):
return x + 1
class TestFuncLifetime(TestCase):
"""
Test the lifetime of compiled function objects.
"""
# NOTE: there's a test for closure lifetime in test_closure
def check_local_func_lifetime(self, **jitargs):
def f(x):
return x + 1
c_f = jit(**jitargs)(f)
self.assertPreciseEqual(c_f(1), 2)
refs = [weakref.ref(obj) for obj in (f, c_f)]
obj = f = c_f = None
gc.collect()
self.assertEqual([wr() for wr in refs], [None] * len(refs))
def test_local_func_lifetime(self):
self.check_local_func_lifetime(forceobj=True)
def test_local_func_lifetime_npm(self):
self.check_local_func_lifetime(nopython=True)
def check_global_func_lifetime(self, **jitargs):
c_f = jit(**jitargs)(global_func)
self.assertPreciseEqual(c_f(1), 2)
wr = weakref.ref(c_f)
c_f = None
gc.collect()
self.assertIs(wr(), None)
def test_global_func_lifetime(self):
self.check_global_func_lifetime(forceobj=True)
def test_global_func_lifetime_npm(self):
self.check_global_func_lifetime(nopython=True)
if __name__ == '__main__':
unittest.main()
|
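The tests hinge on one pattern: take a weak reference, drop every strong reference, force a collection, and assert the referent is gone. In isolation:

import gc
import weakref

def f(x):
    return x + 1

wr = weakref.ref(f)
f = None          # drop the only strong reference
gc.collect()      # break any cycles the object may participate in
assert wr() is None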
|
48c7e914357199759321250341f54fa2d06c7c45
|
support/patchapply.py
|
support/patchapply.py
|
#!/usr/bin/env python
##
# Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# DRI: Cyrus Daboo, cdaboo@apple.com
##
import os
import sys
#
# Apply patches to dependent projects.
#
#projects = ("Twisted", "vobject", "dateutil", "xattr")
projects = ("Twisted", "vobject",)
cwd = os.getcwd()
libpatches = os.path.join(cwd, "lib-patches")
cmd = "/usr/bin/patch"
def applypatch(project, patch):
stat = os.system("%s -s -d ../%s/ -p0 --forward --dry-run -i %s > /dev/null" % (cmd, project, patch, ))
if stat == 0:
print "+++ Patching %s with %s" % (project, patch[len(cwd) + 1:],)
os.system("%s -s -d ../%s/ -p0 --forward -i %s" % (cmd, project, patch, ))
else:
print "*** Failed to patch %s with %s" % (project, patch[len(cwd) + 1:],)
def applypatches(project):
# Iterate over each patch file in the patches directory
path = os.path.join(libpatches, project)
for file in os.listdir(path):
fpath = os.path.join(path, file)
if os.path.isfile(fpath) and fpath.endswith(".patch"):
applypatch(project, fpath)
if __name__ == "__main__":
try:
for project in projects:
applypatches(project)
except Exception, e:
sys.exit(str(e))
|
Patch applier. This is useful when you need to revert or switch a branch on a dependent project and need to re-apply patches without going through the ./run script's full update/patch cycle for all projects.
|
Patch applier. This is useful when you need to revert or switch a branch on a dependent project and need to re-apply
patches without going through the ./run script's full update/patch cycle for all projects.
git-svn-id: 81e381228600e5752b80483efd2b45b26c451ea2@513 e27351fd-9f3e-4f54-a53b-843176b1656c
|
Python
|
apache-2.0
|
trevor/calendarserver,trevor/calendarserver,trevor/calendarserver
|
Patch applier. This is useful when you need to revert or switch a branch on a dependent project and need to re-apply
patches without going through the ./run script's full update/patch cycle for all projects.
git-svn-id: 81e381228600e5752b80483efd2b45b26c451ea2@513 e27351fd-9f3e-4f54-a53b-843176b1656c
|
#!/usr/bin/env python
##
# Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# DRI: Cyrus Daboo, cdaboo@apple.com
##
import os
import sys
#
# Apply patches to dependent projects.
#
#projects = ("Twisted", "vobject", "dateutil", "xattr")
projects = ("Twisted", "vobject",)
cwd = os.getcwd()
libpatches = os.path.join(cwd, "lib-patches")
cmd = "/usr/bin/patch"
def applypatch(project, patch):
stat = os.system("%s -s -d ../%s/ -p0 --forward --dry-run -i %s > /dev/null" % (cmd, project, patch, ))
if stat == 0:
print "+++ Patching %s with %s" % (project, patch[len(cwd) + 1:],)
os.system("%s -s -d ../%s/ -p0 --forward -i %s" % (cmd, project, patch, ))
else:
print "*** Failed to patch %s with %s" % (project, patch[len(cwd) + 1:],)
def applypatches(project):
# Iterate over each patch file in the patches directory
path = os.path.join(libpatches, project)
for file in os.listdir(path):
fpath = os.path.join(path, file)
if os.path.isfile(fpath) and fpath.endswith(".patch"):
applypatch(project, fpath)
if __name__ == "__main__":
try:
for project in projects:
applypatches(project)
except Exception, e:
sys.exit(str(e))
|
<commit_before><commit_msg>Patch applier. This is useful when you need to revert or switch a branch on a dependent project and need to re-apply
patches without going through the ./run script's full update/patch cycle for all projects.
git-svn-id: 81e381228600e5752b80483efd2b45b26c451ea2@513 e27351fd-9f3e-4f54-a53b-843176b1656c<commit_after>
|
#!/usr/bin/env python
##
# Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# DRI: Cyrus Daboo, cdaboo@apple.com
##
import os
import sys
#
# Apply patches to dependent projects.
#
#projects = ("Twisted", "vobject", "dateutil", "xattr")
projects = ("Twisted", "vobject",)
cwd = os.getcwd()
libpatches = os.path.join(cwd, "lib-patches")
cmd = "/usr/bin/patch"
def applypatch(project, patch):
stat = os.system("%s -s -d ../%s/ -p0 --forward --dry-run -i %s > /dev/null" % (cmd, project, patch, ))
if stat == 0:
print "+++ Patching %s with %s" % (project, patch[len(cwd) + 1:],)
os.system("%s -s -d ../%s/ -p0 --forward -i %s" % (cmd, project, patch, ))
else:
print "*** Failed to patch %s with %s" % (project, patch[len(cwd) + 1:],)
def applypatches(project):
# Iterate over each patch file in the patches directory
path = os.path.join(libpatches, project)
for file in os.listdir(path):
fpath = os.path.join(path, file)
if os.path.isfile(fpath) and fpath.endswith(".patch"):
applypatch(project, fpath)
if __name__ == "__main__":
try:
for project in projects:
applypatches(project)
except Exception, e:
sys.exit(str(e))
|
Patch applier. This is useful when you need to revert or switch a branch on a dependent project and need to re-apply
patches without going through the ./run script's full update/patch cycle for all projects.
git-svn-id: 81e381228600e5752b80483efd2b45b26c451ea2@513 e27351fd-9f3e-4f54-a53b-843176b1656c#!/usr/bin/env python
##
# Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# DRI: Cyrus Daboo, cdaboo@apple.com
##
import os
import sys
#
# Apply patches to dependent projects.
#
#projects = ("Twisted", "vobject", "dateutil", "xattr")
projects = ("Twisted", "vobject",)
cwd = os.getcwd()
libpatches = os.path.join(cwd, "lib-patches")
cmd = "/usr/bin/patch"
def applypatch(project, patch):
stat = os.system("%s -s -d ../%s/ -p0 --forward --dry-run -i %s > /dev/null" % (cmd, project, patch, ))
if stat == 0:
print "+++ Patching %s with %s" % (project, patch[len(cwd) + 1:],)
os.system("%s -s -d ../%s/ -p0 --forward -i %s" % (cmd, project, patch, ))
else:
print "*** Failed to patch %s with %s" % (project, patch[len(cwd) + 1:],)
def applypatches(project):
# Iterate over each patch file in the patches directory
path = os.path.join(libpatches, project)
for file in os.listdir(path):
fpath = os.path.join(path, file)
if os.path.isfile(fpath) and fpath.endswith(".patch"):
applypatch(project, fpath)
if __name__ == "__main__":
try:
for project in projects:
applypatches(project)
except Exception, e:
sys.exit(str(e))
|
<commit_before><commit_msg>Patch applier. This is useful when you need to revert or switch a branch on a dependent project and need to re-apply
patches without going through the ./run script's full update/patch cycle for all projects.
git-svn-id: 81e381228600e5752b80483efd2b45b26c451ea2@513 e27351fd-9f3e-4f54-a53b-843176b1656c<commit_after>#!/usr/bin/env python
##
# Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# DRI: Cyrus Daboo, cdaboo@apple.com
##
import os
import sys
#
# Apply patches to dependent projects.
#
#projects = ("Twisted", "vobject", "dateutil", "xattr")
projects = ("Twisted", "vobject",)
cwd = os.getcwd()
libpatches = os.path.join(cwd, "lib-patches")
cmd = "/usr/bin/patch"
def applypatch(project, patch):
stat = os.system("%s -s -d ../%s/ -p0 --forward --dry-run -i %s > /dev/null" % (cmd, project, patch, ))
if stat == 0:
print "+++ Patching %s with %s" % (project, patch[len(cwd) + 1:],)
os.system("%s -s -d ../%s/ -p0 --forward -i %s" % (cmd, project, patch, ))
else:
print "*** Failed to patch %s with %s" % (project, patch[len(cwd) + 1:],)
def applypatches(project):
# Iterate over each patch file in the patches directory
path = os.path.join(libpatches, project)
for file in os.listdir(path):
fpath = os.path.join(path, file)
if os.path.isfile(fpath) and fpath.endswith(".patch"):
applypatch(project, fpath)
if __name__ == "__main__":
try:
for project in projects:
applypatches(project)
except Exception, e:
sys.exit(str(e))
|
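The script is Python 2 (print statements, "except Exception, e"). A rough Python 3 rendering of its dry-run-then-apply step, assuming the same patch(1) flags; subprocess replaces os.system so the exit status is explicit:

import subprocess

def applypatch(project, patch, cmd='/usr/bin/patch'):
    base = [cmd, '-s', '-d', '../%s/' % project, '-p0', '--forward']
    # Dry run first; only apply for real if patch(1) reports success.
    dry = subprocess.run(base + ['--dry-run', '-i', patch],
                         stdout=subprocess.DEVNULL)
    if dry.returncode == 0:
        print("+++ Patching %s with %s" % (project, patch))
        subprocess.run(base + ['-i', patch])
    else:
        print("*** Failed to patch %s with %s" % (project, patch))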
|
9bcd3e71f86d4a3b74c57276e65420cc8dc01fa1
|
test/test_decorate.py
|
test/test_decorate.py
|
import py.test
from tiddlyweb.model.policy import UserRequiredError
from tiddlywebplugins.utils import (entitle, do_html, require_role,
require_any_user)
STATUS = ''
HEADERS = []
def start_responser(status, headers, exc_info=None):
global STATUS
global HEADERS
STATUS = status
HEADERS = headers
def setup_module(module):
module.environ = {}
def test_entitle():
@entitle('monkey')
def wsgi_app(environ, start_response):
pass
assert 'tiddlyweb.title' not in environ
wsgi_app(environ, start_responser)
assert 'tiddlyweb.title' in environ
assert environ['tiddlyweb.title'] == 'monkey'
def test_do_html():
@do_html()
def wsgi_app(environ, start_response):
pass
assert STATUS == ''
wsgi_app(environ, start_responser)
assert STATUS == '200 OK'
assert ('Content-Type', 'text/html; charset=UTF-8') in HEADERS
def test_require_role():
@require_role('ADMIN')
def wsgi_app(environ, start_response):
return 1
with py.test.raises(UserRequiredError):
wsgi_app(environ, start_responser)
environ['tiddlyweb.usersign'] = {'roles': []}
with py.test.raises(UserRequiredError):
wsgi_app(environ, start_responser)
environ['tiddlyweb.usersign'] = {'roles': ['fan']}
with py.test.raises(UserRequiredError):
wsgi_app(environ, start_responser)
environ['tiddlyweb.usersign'] = {'roles': ['ADMIN']}
output = wsgi_app(environ, start_responser)
assert output == 1
def test_require_any_user():
@require_any_user()
def wsgi_app(environ, start_response):
return 1
with py.test.raises(UserRequiredError):
wsgi_app(environ, start_responser)
environ['tiddlyweb.usersign'] = {'name': 'GUEST'}
with py.test.raises(UserRequiredError):
wsgi_app(environ, start_responser)
environ['tiddlyweb.usersign'] = {'name': 'monkey!'}
output = wsgi_app(environ, start_responser)
assert output == 1
|
Add a test file which tests the various decorator functions.
|
Add a test file which tests the various decorator functions.
|
Python
|
bsd-3-clause
|
tiddlyweb/tiddlywebplugins.utils
|
Add a test file which tests the various decorator functions.
|
import py.test
from tiddlyweb.model.policy import UserRequiredError
from tiddlywebplugins.utils import (entitle, do_html, require_role,
require_any_user)
STATUS = ''
HEADERS = []
def start_responser(status, headers, exc_info=None):
global STATUS
global HEADERS
STATUS = status
HEADERS = headers
def setup_module(module):
module.environ = {}
def test_entitle():
@entitle('monkey')
def wsgi_app(environ, start_response):
pass
assert 'tiddlyweb.title' not in environ
wsgi_app(environ, start_responser)
assert 'tiddlyweb.title' in environ
assert environ['tiddlyweb.title'] == 'monkey'
def test_do_html():
@do_html()
def wsgi_app(environ, start_response):
pass
assert STATUS == ''
wsgi_app(environ, start_responser)
assert STATUS == '200 OK'
assert ('Content-Type', 'text/html; charset=UTF-8') in HEADERS
def test_require_role():
@require_role('ADMIN')
def wsgi_app(environ, start_response):
return 1
with py.test.raises(UserRequiredError):
wsgi_app(environ, start_responser)
environ['tiddlyweb.usersign'] = {'roles': []}
with py.test.raises(UserRequiredError):
wsgi_app(environ, start_responser)
environ['tiddlyweb.usersign'] = {'roles': ['fan']}
with py.test.raises(UserRequiredError):
wsgi_app(environ, start_responser)
environ['tiddlyweb.usersign'] = {'roles': ['ADMIN']}
output = wsgi_app(environ, start_responser)
assert output == 1
def test_require_any_user():
@require_any_user()
def wsgi_app(environ, start_response):
return 1
with py.test.raises(UserRequiredError):
wsgi_app(environ, start_responser)
environ['tiddlyweb.usersign'] = {'name': 'GUEST'}
with py.test.raises(UserRequiredError):
wsgi_app(environ, start_responser)
environ['tiddlyweb.usersign'] = {'name': 'monkey!'}
output = wsgi_app(environ, start_responser)
assert output == 1
|
<commit_before><commit_msg>Add a test file which tests the various decorator functions.<commit_after>
|
import py.test
from tiddlyweb.model.policy import UserRequiredError
from tiddlywebplugins.utils import (entitle, do_html, require_role,
require_any_user)
STATUS = ''
HEADERS = []
def start_responser(status, headers, exc_info=None):
global STATUS
global HEADERS
STATUS = status
HEADERS = headers
def setup_module(module):
module.environ = {}
def test_entitle():
@entitle('monkey')
def wsgi_app(environ, start_response):
pass
assert 'tiddlyweb.title' not in environ
wsgi_app(environ, start_responser)
assert 'tiddlyweb.title' in environ
assert environ['tiddlyweb.title'] == 'monkey'
def test_do_html():
@do_html()
def wsgi_app(environ, start_response):
pass
assert STATUS == ''
wsgi_app(environ, start_responser)
assert STATUS == '200 OK'
assert ('Content-Type', 'text/html; charset=UTF-8') in HEADERS
def test_require_role():
@require_role('ADMIN')
def wsgi_app(environ, start_response):
return 1
with py.test.raises(UserRequiredError):
wsgi_app(environ, start_responser)
environ['tiddlyweb.usersign'] = {'roles': []}
with py.test.raises(UserRequiredError):
wsgi_app(environ, start_responser)
environ['tiddlyweb.usersign'] = {'roles': ['fan']}
with py.test.raises(UserRequiredError):
wsgi_app(environ, start_responser)
environ['tiddlyweb.usersign'] = {'roles': ['ADMIN']}
output = wsgi_app(environ, start_responser)
assert output == 1
def test_require_any_user():
@require_any_user()
def wsgi_app(environ, start_response):
return 1
with py.test.raises(UserRequiredError):
wsgi_app(environ, start_responser)
environ['tiddlyweb.usersign'] = {'name': 'GUEST'}
with py.test.raises(UserRequiredError):
wsgi_app(environ, start_responser)
environ['tiddlyweb.usersign'] = {'name': 'monkey!'}
output = wsgi_app(environ, start_responser)
assert output == 1
|
Add a test file which tests the various decorator functions.
import py.test
from tiddlyweb.model.policy import UserRequiredError
from tiddlywebplugins.utils import (entitle, do_html, require_role,
require_any_user)
STATUS = ''
HEADERS = []
def start_responser(status, headers, exc_info=None):
global STATUS
global HEADERS
STATUS = status
HEADERS = headers
def setup_module(module):
module.environ = {}
def test_entitle():
@entitle('monkey')
def wsgi_app(environ, start_response):
pass
assert 'tiddlyweb.title' not in environ
wsgi_app(environ, start_responser)
assert 'tiddlyweb.title' in environ
assert environ['tiddlyweb.title'] == 'monkey'
def test_do_html():
@do_html()
def wsgi_app(environ, start_response):
pass
assert STATUS == ''
wsgi_app(environ, start_responser)
assert STATUS == '200 OK'
assert ('Content-Type', 'text/html; charset=UTF-8') in HEADERS
def test_require_role():
@require_role('ADMIN')
def wsgi_app(environ, start_response):
return 1
with py.test.raises(UserRequiredError):
wsgi_app(environ, start_responser)
environ['tiddlyweb.usersign'] = {'roles': []}
with py.test.raises(UserRequiredError):
wsgi_app(environ, start_responser)
environ['tiddlyweb.usersign'] = {'roles': ['fan']}
with py.test.raises(UserRequiredError):
wsgi_app(environ, start_responser)
environ['tiddlyweb.usersign'] = {'roles': ['ADMIN']}
output = wsgi_app(environ, start_responser)
assert output == 1
def test_require_any_user():
@require_any_user()
def wsgi_app(environ, start_response):
return 1
with py.test.raises(UserRequiredError):
wsgi_app(environ, start_responser)
environ['tiddlyweb.usersign'] = {'name': 'GUEST'}
with py.test.raises(UserRequiredError):
wsgi_app(environ, start_responser)
environ['tiddlyweb.usersign'] = {'name': 'monkey!'}
output = wsgi_app(environ, start_responser)
assert output == 1
|
<commit_before><commit_msg>Add a test file which tests the various decorator functions.<commit_after>
import py.test
from tiddlyweb.model.policy import UserRequiredError
from tiddlywebplugins.utils import (entitle, do_html, require_role,
require_any_user)
STATUS = ''
HEADERS = []
def start_responser(status, headers, exc_info=None):
global STATUS
global HEADERS
STATUS = status
HEADERS = headers
def setup_module(module):
module.environ = {}
def test_entitle():
@entitle('monkey')
def wsgi_app(environ, start_response):
pass
assert 'tiddlyweb.title' not in environ
wsgi_app(environ, start_responser)
assert 'tiddlyweb.title' in environ
assert environ['tiddlyweb.title'] == 'monkey'
def test_do_html():
@do_html()
def wsgi_app(environ, start_response):
pass
assert STATUS == ''
wsgi_app(environ, start_responser)
assert STATUS == '200 OK'
assert ('Content-Type', 'text/html; charset=UTF-8') in HEADERS
def test_require_role():
@require_role('ADMIN')
def wsgi_app(environ, start_response):
return 1
with py.test.raises(UserRequiredError):
wsgi_app(environ, start_responser)
environ['tiddlyweb.usersign'] = {'roles': []}
with py.test.raises(UserRequiredError):
wsgi_app(environ, start_responser)
environ['tiddlyweb.usersign'] = {'roles': ['fan']}
with py.test.raises(UserRequiredError):
wsgi_app(environ, start_responser)
environ['tiddlyweb.usersign'] = {'roles': ['ADMIN']}
output = wsgi_app(environ, start_responser)
assert output == 1
def test_require_any_user():
@require_any_user()
def wsgi_app(environ, start_response):
return 1
with py.test.raises(UserRequiredError):
wsgi_app(environ, start_responser)
environ['tiddlyweb.usersign'] = {'name': 'GUEST'}
with py.test.raises(UserRequiredError):
wsgi_app(environ, start_responser)
environ['tiddlyweb.usersign'] = {'name': 'monkey!'}
output = wsgi_app(environ, start_responser)
assert output == 1
|
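For orientation, a hypothetical sketch of the entitle decorator these tests exercise — the real one lives in tiddlywebplugins.utils; this version only captures the behavior the first test asserts (the title lands in environ before the handler runs):

def entitle(title):
    def decorate(handler):
        def wrapper(environ, start_response):
            environ['tiddlyweb.title'] = title  # set before calling through
            return handler(environ, start_response)
        return wrapper
    return decorate

@entitle('monkey')
def app(environ, start_response):
    return []

env = {}
app(env, lambda status, headers, exc_info=None: None)
assert env['tiddlyweb.title'] == 'monkey'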
|
2b7954302c5238bfde09ff8aed655263d3b238bb
|
maxwellbloch/mb_solve.py
|
maxwellbloch/mb_solve.py
|
# -*- coding: utf-8 -*-
import sys
from numpy import linspace, insert
from maxwellbloch import ob_solve
class MBSolve(ob_solve.OBSolve):
def __init__(self, ob_atom={}, t_min=0.0, t_max=1.0, t_steps=100,
method='mesolve', opts={}, savefile=None, z_min=0.0,
z_max=1.0, z_steps=5, z_steps_inner=2,
num_density_z_func=None, num_density_z_args={},
velocity_classes={}):
super().__init__(ob_atom, t_min, t_max, t_steps,
method, opts, savefile)
self.build_zlist(z_min, z_max, z_steps, z_steps_inner)
self.num_density_z_func = num_density_z_func
self.num_density_z_args = num_density_z_args
self.build_velocity_classes(velocity_classes)
def __repr__(self):
return ("MBSolve(ob_atom={0}, " +
"t_min={1}, " +
"t_max={2}, " +
"t_steps={3}, " +
"method={4}, " +
"opts={5}, " +
"savefile={6}, " +
"z_min={7}, " +
"z_max={8}, " +
"z_steps={9}, " +
"z_steps_inner={10}, " +
"num_density_z_func={11}, " +
"velocity_classes={12})").format(self.ob_atom,
self.t_min,
self.t_max,
self.t_steps,
self.method,
self.opts,
self.z_min,
self.z_max,
self.z_steps,
self.z_steps_inner,
self.num_density_z_func,
self.num_density_z_args,
self.velocity_classes)
## TODO: move opts and savefile to end
def build_zlist(self, z_min, z_max, z_steps, z_steps_inner):
self.z_min = z_min
self.z_max = z_max
self.z_steps = z_steps
self.z_steps_inner = z_steps_inner
# TODO: does this even work? Did it ever? Where are inner steps?
z_inner_stepsize = (z_max - z_min)/(z_steps*z_steps_inner + 1)
zlist = linspace(z_min + z_inner_stepsize, z_max, z_steps + 1)
zlist = insert(zlist, 0, z_min) # One more for first step
print(zlist)
return zlist
def build_velocity_classes(self, velocity_classes):
self.velocity_classes = velocity_classes
# TODO: Build it here
def main():
print(MBSolve())
if __name__ == '__main__':
status = main()
sys.exit(status)
|
Make MBSolve inherit from OBSolve
|
Make MBSolve inherit from OBSolve
|
Python
|
mit
|
tommyogden/maxwellbloch,tommyogden/maxwellbloch
|
Make MBSolve inherit from OBSolve
|
# -*- coding: utf-8 -*-
import sys
from numpy import linspace, insert
from maxwellbloch import ob_solve
class MBSolve(ob_solve.OBSolve):
def __init__(self, ob_atom={}, t_min=0.0, t_max=1.0, t_steps=100,
method='mesolve', opts={}, savefile=None, z_min=0.0,
z_max=1.0, z_steps=5, z_steps_inner=2,
num_density_z_func=None, num_density_z_args={},
velocity_classes={}):
super().__init__(ob_atom, t_min, t_max, t_steps,
method, opts, savefile)
self.build_zlist(z_min, z_max, z_steps, z_steps_inner)
self.num_density_z_func = num_density_z_func
self.num_density_z_args = num_density_z_args
self.build_velocity_classes(velocity_classes)
def __repr__(self):
return ("MBSolve(ob_atom={0}, " +
"t_min={1}, " +
"t_max={2}, " +
"t_steps={3}, " +
"method={4}, " +
"opts={5}, " +
"savefile={6}, " +
"z_min={7}, " +
"z_max={8}, " +
"z_steps={9}, " +
"z_steps_inner={10}, " +
"num_density_z_func={11}, " +
"velocity_classes={12})").format(self.ob_atom,
self.t_min,
self.t_max,
self.t_steps,
self.method,
self.opts,
self.z_min,
self.z_max,
self.z_steps,
self.z_steps_inner,
self.num_density_z_func,
self.num_density_z_args,
self.velocity_classes)
## TODO: move opts and savefile to end
def build_zlist(self, z_min, z_max, z_steps, z_steps_inner):
self.z_min = z_min
self.z_max = z_max
self.z_steps = z_steps
self.z_steps_inner = z_steps_inner
# TODO: does this even work? Did it ever? Where are inner steps?
z_inner_stepsize = (z_max - z_min)/(z_steps*z_steps_inner + 1)
zlist = linspace(z_min + z_inner_stepsize, z_max, z_steps + 1)
zlist = insert(zlist, 0, z_min) # One more for first step
print(zlist)
return zlist
def build_velocity_classes(self, velocity_classes):
self.velocity_classes = velocity_classes
# TODO: Build it here
def main():
print(MBSolve())
if __name__ == '__main__':
status = main()
sys.exit(status)
|
<commit_before><commit_msg>Make MBSolve inherit from OBSolve<commit_after>
|
# -*- coding: utf-8 -*-
import sys
from numpy import linspace, insert
from maxwellbloch import ob_solve
class MBSolve(ob_solve.OBSolve):
def __init__(self, ob_atom={}, t_min=0.0, t_max=1.0, t_steps=100,
method='mesolve', opts={}, savefile=None, z_min=0.0,
z_max=1.0, z_steps=5, z_steps_inner=2,
num_density_z_func=None, num_density_z_args={},
velocity_classes={}):
super().__init__(ob_atom, t_min, t_max, t_steps,
method, opts, savefile)
self.build_zlist(z_min, z_max, z_steps, z_steps_inner)
self.num_density_z_func = num_density_z_func
self.num_density_z_args = num_density_z_args
self.build_velocity_classes(velocity_classes)
def __repr__(self):
return ("MBSolve(ob_atom={0}, " +
"t_min={1}, " +
"t_max={2}, " +
"t_steps={3}, " +
"method={4}, " +
"opts={5}, " +
"savefile={6}, " +
"z_min={7}, " +
"z_max={8}, " +
"z_steps={9}, " +
"z_steps_inner={10}, " +
"num_density_z_func={11}, " +
"velocity_classes={12})").format(self.ob_atom,
self.t_min,
self.t_max,
self.t_steps,
self.method,
self.opts,
self.z_min,
self.z_max,
self.z_steps,
self.z_steps_inner,
self.num_density_z_func,
self.num_density_z_args,
self.velocity_classes)
## TODO: move opts and savefile to end
def build_zlist(self, z_min, z_max, z_steps, z_steps_inner):
self.z_min = z_min
self.z_max = z_max
self.z_steps = z_steps
self.z_steps_inner = z_steps_inner
# TODO: does this even work? Did it ever? Where are inner steps?
z_inner_stepsize = (z_max - z_min)/(z_steps*z_steps_inner + 1)
zlist = linspace(z_min + z_inner_stepsize, z_max, z_steps + 1)
zlist = insert(zlist, 0, z_min) # One more for first step
print(zlist)
return zlist
def build_velocity_classes(self, velocity_classes):
self.velocity_classes = velocity_classes
# TODO: Build it here
def main():
print(MBSolve())
if __name__ == '__main__':
status = main()
sys.exit(status)
|
Make MBSolve inherit from OBSolve# -*- coding: utf-8 -*-
import sys
from numpy import linspace, insert
from maxwellbloch import ob_solve
class MBSolve(ob_solve.OBSolve):
def __init__(self, ob_atom={}, t_min=0.0, t_max=1.0, t_steps=100,
method='mesolve', opts={}, savefile=None, z_min=0.0,
z_max=1.0, z_steps=5, z_steps_inner=2,
num_density_z_func=None, num_density_z_args={},
velocity_classes={}):
super().__init__(ob_atom, t_min, t_max, t_steps,
method, opts, savefile)
self.build_zlist(z_min, z_max, z_steps, z_steps_inner)
self.num_density_z_func = num_density_z_func
self.num_density_z_args = num_density_z_args
self.build_velocity_classes(velocity_classes)
def __repr__(self):
return ("MBSolve(ob_atom={0}, " +
"t_min={1}, " +
"t_max={2}, " +
"t_steps={3}, " +
"method={4}, " +
"opts={5}, " +
"savefile={6}, " +
"z_min={7}, " +
"z_max={8}, " +
"z_steps={9}, " +
"z_steps_inner={10}, " +
"num_density_z_func={11}, " +
"velocity_classes={12})").format(self.ob_atom,
self.t_min,
self.t_max,
self.t_steps,
self.method,
self.opts,
self.z_min,
self.z_max,
self.z_steps,
self.z_steps_inner,
self.num_density_z_func,
self.num_density_z_args,
self.velocity_classes)
## TODO: move opts and savefile to end
def build_zlist(self, z_min, z_max, z_steps, z_steps_inner):
self.z_min = z_min
self.z_max = z_max
self.z_steps = z_steps
self.z_steps_inner = z_steps_inner
# TODO: does this even work? Did it ever? Where are inner steps?
z_inner_stepsize = (z_max - z_min)/(z_steps*z_steps_inner + 1)
zlist = linspace(z_min + z_inner_stepsize, z_max, z_steps + 1)
zlist = insert(zlist, 0, z_min) # One more for first step
print(zlist)
return zlist
def build_velocity_classes(self, velocity_classes):
self.velocity_classes = velocity_classes
# TODO: Build it here
def main():
print(MBSolve())
if __name__ == '__main__':
status = main()
sys.exit(status)
|
<commit_before><commit_msg>Make MBSolve inherit from OBSolve<commit_after># -*- coding: utf-8 -*-
import sys
from numpy import linspace, insert
from maxwellbloch import ob_solve
class MBSolve(ob_solve.OBSolve):
def __init__(self, ob_atom={}, t_min=0.0, t_max=1.0, t_steps=100,
method='mesolve', opts={}, savefile=None, z_min=0.0,
z_max=1.0, z_steps=5, z_steps_inner=2,
num_density_z_func=None, num_density_z_args={},
velocity_classes={}):
super().__init__(ob_atom, t_min, t_max, t_steps,
method, opts, savefile)
self.build_zlist(z_min, z_max, z_steps, z_steps_inner)
self.num_density_z_func = num_density_z_func
self.num_density_z_args = num_density_z_args
self.build_velocity_classes(velocity_classes)
def __repr__(self):
return ("MBSolve(ob_atom={0}, " +
"t_min={1}, " +
"t_max={2}, " +
"t_steps={3}, " +
"method={4}, " +
"opts={5}, " +
"savefile={6}, " +
"z_min={7}, " +
"z_max={8}, " +
"z_steps={9}, " +
"z_steps_inner={10}, " +
"num_density_z_func={11}, " +
"velocity_classes={12})").format(self.ob_atom,
self.t_min,
self.t_max,
self.t_steps,
self.method,
self.opts,
self.z_min,
self.z_max,
self.z_steps,
self.z_steps_inner,
self.num_density_z_func,
self.num_density_z_args,
self.velocity_classes)
## TODO: move opts and savefile to end
def build_zlist(self, z_min, z_max, z_steps, z_steps_inner):
self.z_min = z_min
self.z_max = z_max
self.z_steps = z_steps
self.z_steps_inner = z_steps_inner
# TODO: does this even work? Did it ever? Where are inner steps?
z_inner_stepsize = (z_max - z_min)/(z_steps*z_steps_inner + 1)
zlist = linspace(z_min + z_inner_stepsize, z_max, z_steps + 1)
zlist = insert(zlist, 0, z_min) # One more for first step
print(zlist)
return zlist
def build_velocity_classes(self, velocity_classes):
self.velocity_classes = velocity_classes
# TODO: Build it here
def main():
print(MBSolve())
if __name__ == '__main__':
status = main()
sys.exit(status)
|
|
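For reference, here is the grid that build_zlist above produces with the record's default arguments, restated standalone so it runs with numpy alone (no maxwellbloch install needed); the values in the final comment are approximate:

from numpy import linspace, insert

# Defaults from the record: z_min=0.0, z_max=1.0, z_steps=5, z_steps_inner=2
z_min, z_max, z_steps, z_steps_inner = 0.0, 1.0, 5, 2
z_inner_stepsize = (z_max - z_min) / (z_steps * z_steps_inner + 1)  # 1/11
zlist = linspace(z_min + z_inner_stepsize, z_max, z_steps + 1)      # 6 outer points
zlist = insert(zlist, 0, z_min)                                     # prepend z_min as the first step
print(zlist)  # [0.0, ~0.091, ~0.273, ~0.455, ~0.636, ~0.818, 1.0]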
72e7fc3d7fe284ea7a8b47606708a35992576fdc
|
pdsspect/pds_image_view_canvas.py
|
pdsspect/pds_image_view_canvas.py
|
from ginga.qtw.ImageViewCanvasQt import ImageViewCanvas
class PDSImageViewCanvas(ImageViewCanvas):
def __init__(self):
super(PDSImageViewCanvas, self).__init__(render='widget')
self._subviews = []
self.set_autocut_params('zscale')
self.enable_autozoom('override')
self.enable_autocuts('override')
self.set_bg(0, 0, 0)
self.ui_setActive(True)
def add_subview(self, subview):
self._subviews.append(subview)
def cut_levels(self, cut_low, cut_high):
super(PDSImageViewCanvas, self).cut_levels(cut_low, cut_high)
for subview in self._subviews:
subview.cut_levels(cut_low, cut_high)
def transform(self, flip_x, flip_y, swap_xy):
super(PDSImageViewCanvas, self).transform(flip_x, flip_y, swap_xy)
for subview in self._subviews:
subview.transform(flip_x, flip_y, swap_xy)
|
Create subclass of ImageViewCanvas for pdsspect
|
Create subclass of ImageViewCanvas for pdsspect
|
Python
|
bsd-3-clause
|
planetarypy/pdsspect
|
Create subclass of ImageViewCanvas for pdsspect
|
from ginga.qtw.ImageViewCanvasQt import ImageViewCanvas
class PDSImageViewCanvas(ImageViewCanvas):
def __init__(self):
super(PDSImageViewCanvas, self).__init__(render='widget')
self._subviews = []
self.set_autocut_params('zscale')
self.enable_autozoom('override')
self.enable_autocuts('override')
self.set_bg(0, 0, 0)
self.ui_setActive(True)
def add_subview(self, subview):
self._subviews.append(subview)
def cut_levels(self, cut_low, cut_high):
super(PDSImageViewCanvas, self).cut_levels(cut_low, cut_high)
for subview in self._subviews:
subview.cut_levels(cut_low, cut_high)
def transform(self, flip_x, flip_y, swap_xy):
super(PDSImageViewCanvas, self).transform(flip_x, flip_y, swap_xy)
for subview in self._subviews:
subview.transform(flip_x, flip_y, swap_xy)
|
<commit_before><commit_msg>Create subclass of ImageViewCanvas for pdsspect<commit_after>
|
from ginga.qtw.ImageViewCanvasQt import ImageViewCanvas
class PDSImageViewCanvas(ImageViewCanvas):
def __init__(self):
super(PDSImageViewCanvas, self).__init__(render='widget')
self._subviews = []
self.set_autocut_params('zscale')
self.enable_autozoom('override')
self.enable_autocuts('override')
self.set_bg(0, 0, 0)
self.ui_setActive(True)
def add_subview(self, subview):
self._subviews.append(subview)
def cut_levels(self, cut_low, cut_high):
super(PDSImageViewCanvas, self).cut_levels(cut_low, cut_high)
for subview in self._subviews:
subview.cut_levels(cut_low, cut_high)
def transform(self, flip_x, flip_y, swap_xy):
super(PDSImageViewCanvas, self).transform(flip_x, flip_y, swap_xy)
for subview in self._subviews:
subview.transform(flip_x, flip_y, swap_xy)
|
Create subclass of ImageViewCanvas for pdsspectfrom ginga.qtw.ImageViewCanvasQt import ImageViewCanvas
class PDSImageViewCanvas(ImageViewCanvas):
def __init__(self):
super(PDSImageViewCanvas, self).__init__(render='widget')
self._subviews = []
self.set_autocut_params('zscale')
self.enable_autozoom('override')
self.enable_autocuts('override')
self.set_bg(0, 0, 0)
self.ui_setActive(True)
def add_subview(self, subview):
self._subviews.append(subview)
def cut_levels(self, cut_low, cut_high):
super(PDSImageViewCanvas, self).cut_levels(cut_low, cut_high)
for subview in self._subviews:
subview.cut_levels(cut_low, cut_high)
def transform(self, flip_x, flip_y, swap_xy):
super(PDSImageViewCanvas, self).transform(flip_x, flip_y, swap_xy)
for subview in self._subviews:
subview.transform(flip_x, flip_y, swap_xy)
|
<commit_before><commit_msg>Create subclass of ImageViewCanvas for pdsspect<commit_after>from ginga.qtw.ImageViewCanvasQt import ImageViewCanvas
class PDSImageViewCanvas(ImageViewCanvas):
def __init__(self):
super(PDSImageViewCanvas, self).__init__(render='widget')
self._subviews = []
self.set_autocut_params('zscale')
self.enable_autozoom('override')
self.enable_autocuts('override')
self.set_bg(0, 0, 0)
self.ui_setActive(True)
def add_subview(self, subview):
self._subviews.append(subview)
def cut_levels(self, cut_low, cut_high):
super(PDSImageViewCanvas, self).cut_levels(cut_low, cut_high)
for subview in self._subviews:
subview.cut_levels(cut_low, cut_high)
def transform(self, flip_x, flip_y, swap_xy):
super(PDSImageViewCanvas, self).transform(flip_x, flip_y, swap_xy)
for subview in self._subviews:
subview.transform(flip_x, flip_y, swap_xy)
|
|
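The fan-out pattern in the record above, restated without ginga so the behaviour is easy to verify anywhere (the class and variable names here are illustrative, not from the commit); the real PDSImageViewCanvas does the same for cut_levels and transform:

class View:
    """Toy stand-in for PDSImageViewCanvas's subview propagation."""
    def __init__(self):
        self._subviews = []
        self.cuts = None
    def add_subview(self, subview):
        self._subviews.append(subview)
    def cut_levels(self, cut_low, cut_high):
        self.cuts = (cut_low, cut_high)
        for subview in self._subviews:  # mirror the call to every subview
            subview.cut_levels(cut_low, cut_high)

main_view, pan_view = View(), View()
main_view.add_subview(pan_view)
main_view.cut_levels(0, 255)
print(pan_view.cuts)  # (0, 255): the subview tracks the main canvas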
b91009404f0ad4b5450259a70cfa480cc1d8e6f5
|
powerline/ext/terminal/segments.py
|
powerline/ext/terminal/segments.py
|
# -*- coding: utf-8 -*-
import os
import re
import socket
from powerline.lib.vcs import guess
def hostname():
if not os.environ.get('SSH_CLIENT'):
return None
return socket.gethostname()
def user():
user = os.environ.get('USER')
euid = os.geteuid()
return {
'contents': user,
'highlight': 'user' if euid != 0 else ['superuser', 'user'],
}
def branch():
repo = guess(os.path.abspath(os.getcwd()))
if repo:
return repo.branch()
return None
def cwd(dir_shorten_len=None, dir_limit_depth=None):
cwd = os.getcwdu()
home = os.environ.get('HOME')
if home:
cwd = re.sub('^' + re.escape(home), '~', cwd, 1)
cwd_split = cwd.split(os.sep)
cwd_split_len = len(cwd_split)
if cwd_split_len > dir_limit_depth + 1:
del(cwd_split[0:-dir_limit_depth])
cwd_split.insert(0, u'…')
cwd = [i[0:dir_shorten_len] if dir_shorten_len and i else i for i in cwd_split[:-1]] + [cwd_split[-1]]
cwd = os.path.join(*cwd)
return cwd
|
# -*- coding: utf-8 -*-
import os
import re
import socket
from powerline.lib.vcs import guess
def hostname():
if not os.environ.get('SSH_CLIENT'):
return None
return socket.gethostname()
def user():
user = os.environ.get('USER')
euid = os.geteuid()
return {
'contents': user,
'highlight': 'user' if euid != 0 else ['superuser', 'user'],
}
def branch():
repo = guess(os.path.abspath(os.getcwd()))
if repo:
return repo.branch()
return None
def cwd(dir_shorten_len=None, dir_limit_depth=None):
cwd = os.getcwdu()
home = os.environ.get('HOME')
if home:
cwd = re.sub('^' + re.escape(home), '~', cwd, 1)
cwd_split = cwd.split(os.sep)
cwd_split_len = len(cwd_split)
if cwd_split_len > dir_limit_depth + 1:
del(cwd_split[0:-dir_limit_depth])
cwd_split.insert(0, u'⋯')
cwd = [i[0:dir_shorten_len] if dir_shorten_len and i else i for i in cwd_split[:-1]] + [cwd_split[-1]]
cwd = os.path.join(*cwd)
return cwd
|
Use midline ellipsis for dir shortening
|
Use midline ellipsis for dir shortening
|
Python
|
mit
|
junix/powerline,QuLogic/powerline,EricSB/powerline,areteix/powerline,russellb/powerline,xfumihiro/powerline,cyrixhero/powerline,S0lll0s/powerline,keelerm84/powerline,firebitsbr/powerline,IvanAli/powerline,xfumihiro/powerline,blindFS/powerline,Liangjianghao/powerline,s0undt3ch/powerline,xxxhycl2010/powerline,S0lll0s/powerline,kenrachynski/powerline,dragon788/powerline,blindFS/powerline,russellb/powerline,firebitsbr/powerline,cyrixhero/powerline,russellb/powerline,magus424/powerline,lukw00/powerline,seanfisk/powerline,firebitsbr/powerline,kenrachynski/powerline,IvanAli/powerline,wfscheper/powerline,lukw00/powerline,Luffin/powerline,prvnkumar/powerline,QuLogic/powerline,seanfisk/powerline,bezhermoso/powerline,bartvm/powerline,magus424/powerline,areteix/powerline,xxxhycl2010/powerline,wfscheper/powerline,bezhermoso/powerline,areteix/powerline,QuLogic/powerline,prvnkumar/powerline,s0undt3ch/powerline,cyrixhero/powerline,DoctorJellyface/powerline,S0lll0s/powerline,kenrachynski/powerline,keelerm84/powerline,Luffin/powerline,Luffin/powerline,darac/powerline,Liangjianghao/powerline,xfumihiro/powerline,bartvm/powerline,IvanAli/powerline,lukw00/powerline,junix/powerline,dragon788/powerline,xxxhycl2010/powerline,EricSB/powerline,Liangjianghao/powerline,seanfisk/powerline,wfscheper/powerline,dragon788/powerline,bartvm/powerline,junix/powerline,prvnkumar/powerline,magus424/powerline,EricSB/powerline,blindFS/powerline,darac/powerline,bezhermoso/powerline,darac/powerline,s0undt3ch/powerline,DoctorJellyface/powerline,DoctorJellyface/powerline
|
# -*- coding: utf-8 -*-
import os
import re
import socket
from powerline.lib.vcs import guess
def hostname():
if not os.environ.get('SSH_CLIENT'):
return None
return socket.gethostname()
def user():
user = os.environ.get('USER')
euid = os.geteuid()
return {
'contents': user,
'highlight': 'user' if euid != 0 else ['superuser', 'user'],
}
def branch():
repo = guess(os.path.abspath(os.getcwd()))
if repo:
return repo.branch()
return None
def cwd(dir_shorten_len=None, dir_limit_depth=None):
cwd = os.getcwdu()
home = os.environ.get('HOME')
if home:
cwd = re.sub('^' + re.escape(home), '~', cwd, 1)
cwd_split = cwd.split(os.sep)
cwd_split_len = len(cwd_split)
if cwd_split_len > dir_limit_depth + 1:
del(cwd_split[0:-dir_limit_depth])
cwd_split.insert(0, u'…')
cwd = [i[0:dir_shorten_len] if dir_shorten_len and i else i for i in cwd_split[:-1]] + [cwd_split[-1]]
cwd = os.path.join(*cwd)
return cwd
Use midline ellipsis for dir shortening
|
# -*- coding: utf-8 -*-
import os
import re
import socket
from powerline.lib.vcs import guess
def hostname():
if not os.environ.get('SSH_CLIENT'):
return None
return socket.gethostname()
def user():
user = os.environ.get('USER')
euid = os.geteuid()
return {
'contents': user,
'highlight': 'user' if euid != 0 else ['superuser', 'user'],
}
def branch():
repo = guess(os.path.abspath(os.getcwd()))
if repo:
return repo.branch()
return None
def cwd(dir_shorten_len=None, dir_limit_depth=None):
cwd = os.getcwdu()
home = os.environ.get('HOME')
if home:
cwd = re.sub('^' + re.escape(home), '~', cwd, 1)
cwd_split = cwd.split(os.sep)
cwd_split_len = len(cwd_split)
if cwd_split_len > dir_limit_depth + 1:
del(cwd_split[0:-dir_limit_depth])
cwd_split.insert(0, u'⋯')
cwd = [i[0:dir_shorten_len] if dir_shorten_len and i else i for i in cwd_split[:-1]] + [cwd_split[-1]]
cwd = os.path.join(*cwd)
return cwd
|
<commit_before># -*- coding: utf-8 -*-
import os
import re
import socket
from powerline.lib.vcs import guess
def hostname():
if not os.environ.get('SSH_CLIENT'):
return None
return socket.gethostname()
def user():
user = os.environ.get('USER')
euid = os.geteuid()
return {
'contents': user,
'highlight': 'user' if euid != 0 else ['superuser', 'user'],
}
def branch():
repo = guess(os.path.abspath(os.getcwd()))
if repo:
return repo.branch()
return None
def cwd(dir_shorten_len=None, dir_limit_depth=None):
cwd = os.getcwdu()
home = os.environ.get('HOME')
if home:
cwd = re.sub('^' + re.escape(home), '~', cwd, 1)
cwd_split = cwd.split(os.sep)
cwd_split_len = len(cwd_split)
if cwd_split_len > dir_limit_depth + 1:
del(cwd_split[0:-dir_limit_depth])
cwd_split.insert(0, u'…')
cwd = [i[0:dir_shorten_len] if dir_shorten_len and i else i for i in cwd_split[:-1]] + [cwd_split[-1]]
cwd = os.path.join(*cwd)
return cwd
<commit_msg>Use midline ellipsis for dir shortening<commit_after>
|
# -*- coding: utf-8 -*-
import os
import re
import socket
from powerline.lib.vcs import guess
def hostname():
if not os.environ.get('SSH_CLIENT'):
return None
return socket.gethostname()
def user():
user = os.environ.get('USER')
euid = os.geteuid()
return {
'contents': user,
'highlight': 'user' if euid != 0 else ['superuser', 'user'],
}
def branch():
repo = guess(os.path.abspath(os.getcwd()))
if repo:
return repo.branch()
return None
def cwd(dir_shorten_len=None, dir_limit_depth=None):
cwd = os.getcwdu()
home = os.environ.get('HOME')
if home:
cwd = re.sub('^' + re.escape(home), '~', cwd, 1)
cwd_split = cwd.split(os.sep)
cwd_split_len = len(cwd_split)
if cwd_split_len > dir_limit_depth + 1:
del(cwd_split[0:-dir_limit_depth])
cwd_split.insert(0, u'⋯')
cwd = [i[0:dir_shorten_len] if dir_shorten_len and i else i for i in cwd_split[:-1]] + [cwd_split[-1]]
cwd = os.path.join(*cwd)
return cwd
|
# -*- coding: utf-8 -*-
import os
import re
import socket
from powerline.lib.vcs import guess
def hostname():
if not os.environ.get('SSH_CLIENT'):
return None
return socket.gethostname()
def user():
user = os.environ.get('USER')
euid = os.geteuid()
return {
'contents': user,
'highlight': 'user' if euid != 0 else ['superuser', 'user'],
}
def branch():
repo = guess(os.path.abspath(os.getcwd()))
if repo:
return repo.branch()
return None
def cwd(dir_shorten_len=None, dir_limit_depth=None):
cwd = os.getcwdu()
home = os.environ.get('HOME')
if home:
cwd = re.sub('^' + re.escape(home), '~', cwd, 1)
cwd_split = cwd.split(os.sep)
cwd_split_len = len(cwd_split)
if cwd_split_len > dir_limit_depth + 1:
del(cwd_split[0:-dir_limit_depth])
cwd_split.insert(0, u'…')
cwd = [i[0:dir_shorten_len] if dir_shorten_len and i else i for i in cwd_split[:-1]] + [cwd_split[-1]]
cwd = os.path.join(*cwd)
return cwd
Use midline ellipsis for dir shortening# -*- coding: utf-8 -*-
import os
import re
import socket
from powerline.lib.vcs import guess
def hostname():
if not os.environ.get('SSH_CLIENT'):
return None
return socket.gethostname()
def user():
user = os.environ.get('USER')
euid = os.geteuid()
return {
'contents': user,
'highlight': 'user' if euid != 0 else ['superuser', 'user'],
}
def branch():
repo = guess(os.path.abspath(os.getcwd()))
if repo:
return repo.branch()
return None
def cwd(dir_shorten_len=None, dir_limit_depth=None):
cwd = os.getcwdu()
home = os.environ.get('HOME')
if home:
cwd = re.sub('^' + re.escape(home), '~', cwd, 1)
cwd_split = cwd.split(os.sep)
cwd_split_len = len(cwd_split)
if cwd_split_len > dir_limit_depth + 1:
del(cwd_split[0:-dir_limit_depth])
cwd_split.insert(0, u'⋯')
cwd = [i[0:dir_shorten_len] if dir_shorten_len and i else i for i in cwd_split[:-1]] + [cwd_split[-1]]
cwd = os.path.join(*cwd)
return cwd
|
<commit_before># -*- coding: utf-8 -*-
import os
import re
import socket
from powerline.lib.vcs import guess
def hostname():
if not os.environ.get('SSH_CLIENT'):
return None
return socket.gethostname()
def user():
user = os.environ.get('USER')
euid = os.geteuid()
return {
'contents': user,
'highlight': 'user' if euid != 0 else ['superuser', 'user'],
}
def branch():
repo = guess(os.path.abspath(os.getcwd()))
if repo:
return repo.branch()
return None
def cwd(dir_shorten_len=None, dir_limit_depth=None):
cwd = os.getcwdu()
home = os.environ.get('HOME')
if home:
cwd = re.sub('^' + re.escape(home), '~', cwd, 1)
cwd_split = cwd.split(os.sep)
cwd_split_len = len(cwd_split)
if cwd_split_len > dir_limit_depth + 1:
del(cwd_split[0:-dir_limit_depth])
cwd_split.insert(0, u'…')
cwd = [i[0:dir_shorten_len] if dir_shorten_len and i else i for i in cwd_split[:-1]] + [cwd_split[-1]]
cwd = os.path.join(*cwd)
return cwd
<commit_msg>Use midline ellipsis for dir shortening<commit_after># -*- coding: utf-8 -*-
import os
import re
import socket
from powerline.lib.vcs import guess
def hostname():
if not os.environ.get('SSH_CLIENT'):
return None
return socket.gethostname()
def user():
user = os.environ.get('USER')
euid = os.geteuid()
return {
'contents': user,
'highlight': 'user' if euid != 0 else ['superuser', 'user'],
}
def branch():
repo = guess(os.path.abspath(os.getcwd()))
if repo:
return repo.branch()
return None
def cwd(dir_shorten_len=None, dir_limit_depth=None):
cwd = os.getcwdu()
home = os.environ.get('HOME')
if home:
cwd = re.sub('^' + re.escape(home), '~', cwd, 1)
cwd_split = cwd.split(os.sep)
cwd_split_len = len(cwd_split)
if cwd_split_len > dir_limit_depth + 1:
del(cwd_split[0:-dir_limit_depth])
cwd_split.insert(0, u'⋯')
cwd = [i[0:dir_shorten_len] if dir_shorten_len and i else i for i in cwd_split[:-1]] + [cwd_split[-1]]
cwd = os.path.join(*cwd)
return cwd
|
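To see the commit's effect in isolation, a small Python 3 restatement of the shortening logic (the helper name and sample path are illustrative; the original runs under Python 2 with os.getcwdu):

import os

def shorten(path, dir_shorten_len=1, dir_limit_depth=2):
    parts = path.split(os.sep)
    if len(parts) > dir_limit_depth + 1:
        del parts[0:-dir_limit_depth]
        parts.insert(0, u'⋯')  # the midline ellipsis this commit introduces
    head = [p[0:dir_shorten_len] if dir_shorten_len and p else p for p in parts[:-1]]
    return os.path.join(*(head + [parts[-1]]))

print(shorten('~/projects/powerline/powerline/ext'))  # ⋯/p/ext on POSIX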
ebb5a2f56c691456b5b65b9448d11b113c4efa46
|
fedmsg/meta/announce.py
|
fedmsg/meta/announce.py
|
# This file is part of fedmsg.
# Copyright (C) 2012 Red Hat, Inc.
#
# fedmsg is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# fedmsg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with fedmsg; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Authors: Ralph Bean <rbean@redhat.com>
#
from fedmsg.meta.base import BaseProcessor
class AnnounceProcessor(BaseProcessor):
__name__ = "announce"
__description__ = "Official Fedora Announcements"
__link__ = "http://fedoraproject.org/"
__docs__ = "http://fedoraproject.org/"
__obj__ = "Announcements"
def subtitle(self, msg, **config):
return msg['msg']['message']
def link(self, msg, **config):
return msg['msg']['link']
def usernames(self, msg, **config):
return set([msg['username']])
|
# This file is part of fedmsg.
# Copyright (C) 2012 Red Hat, Inc.
#
# fedmsg is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# fedmsg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with fedmsg; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Authors: Ralph Bean <rbean@redhat.com>
#
from fedmsg.meta.base import BaseProcessor
class AnnounceProcessor(BaseProcessor):
__name__ = "announce"
__description__ = "Official Fedora Announcements"
__link__ = "http://fedoraproject.org/"
__docs__ = "http://fedoraproject.org/"
__obj__ = "Announcements"
def subtitle(self, msg, **config):
return msg['msg']['message']
def link(self, msg, **config):
return msg['msg']['link']
def usernames(self, msg, **config):
users = set()
if 'username' in msg:
users.update(set([msg['username']]))
return users
|
Handle the situation where the 'username' key does not exist in an old message
|
Handle the situation where the 'username' key does not exist in an old message
With this commit, processing an old message with fedmsg_meta will not break
if that old message does not have the 'username' key.
|
Python
|
lgpl-2.1
|
chaiku/fedmsg,vivekanand1101/fedmsg,vivekanand1101/fedmsg,cicku/fedmsg,mathstuf/fedmsg,mathstuf/fedmsg,maxamillion/fedmsg,mathstuf/fedmsg,chaiku/fedmsg,fedora-infra/fedmsg,fedora-infra/fedmsg,pombredanne/fedmsg,pombredanne/fedmsg,cicku/fedmsg,maxamillion/fedmsg,chaiku/fedmsg,vivekanand1101/fedmsg,pombredanne/fedmsg,cicku/fedmsg,maxamillion/fedmsg,fedora-infra/fedmsg
|
# This file is part of fedmsg.
# Copyright (C) 2012 Red Hat, Inc.
#
# fedmsg is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# fedmsg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with fedmsg; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Authors: Ralph Bean <rbean@redhat.com>
#
from fedmsg.meta.base import BaseProcessor
class AnnounceProcessor(BaseProcessor):
__name__ = "announce"
__description__ = "Official Fedora Announcements"
__link__ = "http://fedoraproject.org/"
__docs__ = "http://fedoraproject.org/"
__obj__ = "Announcements"
def subtitle(self, msg, **config):
return msg['msg']['message']
def link(self, msg, **config):
return msg['msg']['link']
def usernames(self, msg, **config):
return set([msg['username']])
Handle the situation where the 'username' key does not exist in an old message
With this commit, processing an old message with fedmsg_meta will not break
if that old message does not have the 'username' key.
|
# This file is part of fedmsg.
# Copyright (C) 2012 Red Hat, Inc.
#
# fedmsg is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# fedmsg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with fedmsg; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Authors: Ralph Bean <rbean@redhat.com>
#
from fedmsg.meta.base import BaseProcessor
class AnnounceProcessor(BaseProcessor):
__name__ = "announce"
__description__ = "Official Fedora Announcements"
__link__ = "http://fedoraproject.org/"
__docs__ = "http://fedoraproject.org/"
__obj__ = "Announcements"
def subtitle(self, msg, **config):
return msg['msg']['message']
def link(self, msg, **config):
return msg['msg']['link']
def usernames(self, msg, **config):
users = set()
if 'username' in msg:
users.update(set([msg['username']]))
return users
|
<commit_before># This file is part of fedmsg.
# Copyright (C) 2012 Red Hat, Inc.
#
# fedmsg is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# fedmsg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with fedmsg; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Authors: Ralph Bean <rbean@redhat.com>
#
from fedmsg.meta.base import BaseProcessor
class AnnounceProcessor(BaseProcessor):
__name__ = "announce"
__description__ = "Official Fedora Announcements"
__link__ = "http://fedoraproject.org/"
__docs__ = "http://fedoraproject.org/"
__obj__ = "Announcements"
def subtitle(self, msg, **config):
return msg['msg']['message']
def link(self, msg, **config):
return msg['msg']['link']
def usernames(self, msg, **config):
return set([msg['username']])
<commit_msg>Handle the situation where the 'username' key does not exist in an old message
With this commit, processing an old message with fedmsg_meta will not break
if that old message does not have the 'username' key.<commit_after>
|
# This file is part of fedmsg.
# Copyright (C) 2012 Red Hat, Inc.
#
# fedmsg is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# fedmsg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with fedmsg; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Authors: Ralph Bean <rbean@redhat.com>
#
from fedmsg.meta.base import BaseProcessor
class AnnounceProcessor(BaseProcessor):
__name__ = "announce"
__description__ = "Official Fedora Announcements"
__link__ = "http://fedoraproject.org/"
__docs__ = "http://fedoraproject.org/"
__obj__ = "Announcements"
def subtitle(self, msg, **config):
return msg['msg']['message']
def link(self, msg, **config):
return msg['msg']['link']
def usernames(self, msg, **config):
users = set()
if 'username' in msg:
users.update(set([msg['username']]))
return users
|
# This file is part of fedmsg.
# Copyright (C) 2012 Red Hat, Inc.
#
# fedmsg is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# fedmsg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with fedmsg; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Authors: Ralph Bean <rbean@redhat.com>
#
from fedmsg.meta.base import BaseProcessor
class AnnounceProcessor(BaseProcessor):
__name__ = "announce"
__description__ = "Official Fedora Announcements"
__link__ = "http://fedoraproject.org/"
__docs__ = "http://fedoraproject.org/"
__obj__ = "Announcements"
def subtitle(self, msg, **config):
return msg['msg']['message']
def link(self, msg, **config):
return msg['msg']['link']
def usernames(self, msg, **config):
return set([msg['username']])
Handle the situation where the 'username' key does not exist in an old message
With this commit, processing an old message with fedmsg_meta will not break
if that old message does not have the 'username' key.# This file is part of fedmsg.
# Copyright (C) 2012 Red Hat, Inc.
#
# fedmsg is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# fedmsg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with fedmsg; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Authors: Ralph Bean <rbean@redhat.com>
#
from fedmsg.meta.base import BaseProcessor
class AnnounceProcessor(BaseProcessor):
__name__ = "announce"
__description__ = "Official Fedora Announcements"
__link__ = "http://fedoraproject.org/"
__docs__ = "http://fedoraproject.org/"
__obj__ = "Announcements"
def subtitle(self, msg, **config):
return msg['msg']['message']
def link(self, msg, **config):
return msg['msg']['link']
def usernames(self, msg, **config):
users = set()
if 'username' in msg:
users.update(set([msg['username']]))
return users
|
<commit_before># This file is part of fedmsg.
# Copyright (C) 2012 Red Hat, Inc.
#
# fedmsg is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# fedmsg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with fedmsg; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Authors: Ralph Bean <rbean@redhat.com>
#
from fedmsg.meta.base import BaseProcessor
class AnnounceProcessor(BaseProcessor):
__name__ = "announce"
__description__ = "Official Fedora Announcements"
__link__ = "http://fedoraproject.org/"
__docs__ = "http://fedoraproject.org/"
__obj__ = "Announcements"
def subtitle(self, msg, **config):
return msg['msg']['message']
def link(self, msg, **config):
return msg['msg']['link']
def usernames(self, msg, **config):
return set([msg['username']])
<commit_msg>Handle the situation where the 'username' key does not exist in an old message
With this commit, processing an old message with fedmsg_meta will not break
if that old message does not have the 'username' key.<commit_after># This file is part of fedmsg.
# Copyright (C) 2012 Red Hat, Inc.
#
# fedmsg is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# fedmsg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with fedmsg; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Authors: Ralph Bean <rbean@redhat.com>
#
from fedmsg.meta.base import BaseProcessor
class AnnounceProcessor(BaseProcessor):
__name__ = "announce"
__description__ = "Official Fedora Announcements"
__link__ = "http://fedoraproject.org/"
__docs__ = "http://fedoraproject.org/"
__obj__ = "Announcements"
def subtitle(self, msg, **config):
return msg['msg']['message']
def link(self, msg, **config):
return msg['msg']['link']
def usernames(self, msg, **config):
users = set()
if 'username' in msg:
users.update(set([msg['username']]))
return users
|
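The effect of the guard added above, sketched with bare dicts (stand-ins, not real fedmsg payloads):

def usernames(msg):
    # mirrors the patched method: only read 'username' when the key exists
    users = set()
    if 'username' in msg:
        users.update(set([msg['username']]))
    return users

print(usernames({'username': 'rbean'}))  # {'rbean'}
print(usernames({}))                     # set(), where the old code raised KeyError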
23aac06f1b06ee3839023152f1f6fd420be1c13a
|
VehicleDetectionTracking/template_match.py
|
VehicleDetectionTracking/template_match.py
|
# Code given by Udacity, completed by Andres Guijarro
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
image = mpimg.imread('bbox-example-image.jpg')
#image = mpimg.imread('temp-matching-example-2.jpg')
templist = ['cutout1.jpg']
#templist = ['cutout1.jpg', 'cutout2.jpg', 'cutout3.jpg',
# 'cutout4.jpg', 'cutout5.jpg', 'cutout6.jpg']
# Here is your draw_boxes function from the previous exercise
def draw_boxes(img, bboxes, color=(0, 0, 255), thick=6):
# Make a copy of the image
imcopy = np.copy(img)
# Iterate through the bounding boxes
for bbox in bboxes:
# Draw a rectangle given bbox coordinates
cv2.rectangle(imcopy, bbox[0], bbox[1], color, thick)
# Return the image copy with boxes drawn
return imcopy
# Define a function that takes an image and a list of templates as inputs
# then searches the image and returns a list of bounding boxes
# for matched templates
def find_matches(img, template_list):
# Make a copy of the image to draw on
# Define an empty list to take bbox coords
bbox_list = []
# Iterate through template list
# Read in templates one by one
# Use cv2.matchTemplate() to search the image
# using whichever of the OpenCV search methods you prefer
# Use cv2.minMaxLoc() to extract the location of the best match
# Determine bounding box corners for the match
# Return the list of bounding boxes
method = cv2.TM_CCOEFF_NORMED
    for temp in template_list:
        tmp = mpimg.imread(temp)
        # Apply template matching
        res = cv2.matchTemplate(img, tmp, method)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
w, h = (tmp.shape[1], tmp.shape[0])
# If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
top_left = min_loc
else:
top_left = max_loc
bottom_right = (top_left[0] + w, top_left[1] + h)
bbox_list.append((top_left, bottom_right))
return bbox_list
def main():
bboxes = find_matches(image, templist)
result = draw_boxes(image, bboxes)
plt.imshow(result)
plt.show()
if __name__ == '__main__':
main()
|
Add scripts which define a function that takes an image and a list of templates as inputs, then searches the image and returns a list of bounding boxes for matched templates
|
feat: Add scripts which define a function that takes an image and a list of templates as inputs, then searches the image and returns a list of bounding boxes for matched templates
|
Python
|
mit
|
aguijarro/SelfDrivingCar
|
feat: Add scripts which define a function that takes an image and a list of templates as inputs, then searches the image and returns a list of bounding boxes for matched templates
|
# Code given by Udacity, completed by Andres Guijarro
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
image = mpimg.imread('bbox-example-image.jpg')
#image = mpimg.imread('temp-matching-example-2.jpg')
templist = ['cutout1.jpg']
#templist = ['cutout1.jpg', 'cutout2.jpg', 'cutout3.jpg',
# 'cutout4.jpg', 'cutout5.jpg', 'cutout6.jpg']
# Here is your draw_boxes function from the previous exercise
def draw_boxes(img, bboxes, color=(0, 0, 255), thick=6):
# Make a copy of the image
imcopy = np.copy(img)
# Iterate through the bounding boxes
for bbox in bboxes:
# Draw a rectangle given bbox coordinates
cv2.rectangle(imcopy, bbox[0], bbox[1], color, thick)
# Return the image copy with boxes drawn
return imcopy
# Define a function that takes an image and a list of templates as inputs
# then searches the image and returns a list of bounding boxes
# for matched templates
def find_matches(img, template_list):
# Make a copy of the image to draw on
# Define an empty list to take bbox coords
bbox_list = []
# Iterate through template list
# Read in templates one by one
# Use cv2.matchTemplate() to search the image
# using whichever of the OpenCV search methods you prefer
# Use cv2.minMaxLoc() to extract the location of the best match
# Determine bounding box corners for the match
# Return the list of bounding boxes
method = cv2.TM_CCOEFF_NORMED
    for temp in template_list:
        tmp = mpimg.imread(temp)
        # Apply template matching
        res = cv2.matchTemplate(img, tmp, method)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
w, h = (tmp.shape[1], tmp.shape[0])
# If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
top_left = min_loc
else:
top_left = max_loc
bottom_right = (top_left[0] + w, top_left[1] + h)
bbox_list.append((top_left, bottom_right))
return bbox_list
def main():
bboxes = find_matches(image, templist)
result = draw_boxes(image, bboxes)
plt.imshow(result)
plt.show()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>feat: Add scripts which Define a function that takes an image and a list of templates as inputs then searches the image and returns the a list of bounding boxes for matched templates<commit_after>
|
# Code given by Udacity, completed by Andres Guijarro
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
image = mpimg.imread('bbox-example-image.jpg')
#image = mpimg.imread('temp-matching-example-2.jpg')
templist = ['cutout1.jpg']
#templist = ['cutout1.jpg', 'cutout2.jpg', 'cutout3.jpg',
# 'cutout4.jpg', 'cutout5.jpg', 'cutout6.jpg']
# Here is your draw_boxes function from the previous exercise
def draw_boxes(img, bboxes, color=(0, 0, 255), thick=6):
# Make a copy of the image
imcopy = np.copy(img)
# Iterate through the bounding boxes
for bbox in bboxes:
# Draw a rectangle given bbox coordinates
cv2.rectangle(imcopy, bbox[0], bbox[1], color, thick)
# Return the image copy with boxes drawn
return imcopy
# Define a function that takes an image and a list of templates as inputs
# then searches the image and returns a list of bounding boxes
# for matched templates
def find_matches(img, template_list):
# Make a copy of the image to draw on
# Define an empty list to take bbox coords
bbox_list = []
# Iterate through template list
# Read in templates one by one
# Use cv2.matchTemplate() to search the image
# using whichever of the OpenCV search methods you prefer
# Use cv2.minMaxLoc() to extract the location of the best match
# Determine bounding box corners for the match
# Return the list of bounding boxes
method = cv2.TM_CCOEFF_NORMED
    for temp in template_list:
        tmp = mpimg.imread(temp)
        # Apply template matching
        res = cv2.matchTemplate(img, tmp, method)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
w, h = (tmp.shape[1], tmp.shape[0])
# If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
top_left = min_loc
else:
top_left = max_loc
bottom_right = (top_left[0] + w, top_left[1] + h)
bbox_list.append((top_left, bottom_right))
return bbox_list
def main():
bboxes = find_matches(image, templist)
result = draw_boxes(image, bboxes)
plt.imshow(result)
plt.show()
if __name__ == '__main__':
main()
|
feat: Add scripts which define a function that takes an image and a list of templates as inputs, then searches the image and returns a list of bounding boxes for matched templates# Code given by Udacity, completed by Andres Guijarro
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
image = mpimg.imread('bbox-example-image.jpg')
#image = mpimg.imread('temp-matching-example-2.jpg')
templist = ['cutout1.jpg']
#templist = ['cutout1.jpg', 'cutout2.jpg', 'cutout3.jpg',
# 'cutout4.jpg', 'cutout5.jpg', 'cutout6.jpg']
# Here is your draw_boxes function from the previous exercise
def draw_boxes(img, bboxes, color=(0, 0, 255), thick=6):
# Make a copy of the image
imcopy = np.copy(img)
# Iterate through the bounding boxes
for bbox in bboxes:
# Draw a rectangle given bbox coordinates
cv2.rectangle(imcopy, bbox[0], bbox[1], color, thick)
# Return the image copy with boxes drawn
return imcopy
# Define a function that takes an image and a list of templates as inputs
# then searches the image and returns a list of bounding boxes
# for matched templates
def find_matches(img, template_list):
# Make a copy of the image to draw on
# Define an empty list to take bbox coords
bbox_list = []
# Iterate through template list
# Read in templates one by one
# Use cv2.matchTemplate() to search the image
# using whichever of the OpenCV search methods you prefer
# Use cv2.minMaxLoc() to extract the location of the best match
# Determine bounding box corners for the match
# Return the list of bounding boxes
method = cv2.TM_CCOEFF_NORMED
    for temp in template_list:
        tmp = mpimg.imread(temp)
        # Apply template matching
        res = cv2.matchTemplate(img, tmp, method)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
w, h = (tmp.shape[1], tmp.shape[0])
# If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
top_left = min_loc
else:
top_left = max_loc
bottom_right = (top_left[0] + w, top_left[1] + h)
bbox_list.append((top_left, bottom_right))
return bbox_list
def main():
bboxes = find_matches(image, templist)
result = draw_boxes(image, bboxes)
plt.imshow(result)
plt.show()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>feat: Add scripts which define a function that takes an image and a list of templates as inputs, then searches the image and returns a list of bounding boxes for matched templates<commit_after># Code given by Udacity, completed by Andres Guijarro
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
image = mpimg.imread('bbox-example-image.jpg')
#image = mpimg.imread('temp-matching-example-2.jpg')
templist = ['cutout1.jpg']
#templist = ['cutout1.jpg', 'cutout2.jpg', 'cutout3.jpg',
# 'cutout4.jpg', 'cutout5.jpg', 'cutout6.jpg']
# Here is your draw_boxes function from the previous exercise
def draw_boxes(img, bboxes, color=(0, 0, 255), thick=6):
# Make a copy of the image
imcopy = np.copy(img)
# Iterate through the bounding boxes
for bbox in bboxes:
# Draw a rectangle given bbox coordinates
cv2.rectangle(imcopy, bbox[0], bbox[1], color, thick)
# Return the image copy with boxes drawn
return imcopy
# Define a function that takes an image and a list of templates as inputs
# then searches the image and returns a list of bounding boxes
# for matched templates
def find_matches(img, template_list):
# Make a copy of the image to draw on
# Define an empty list to take bbox coords
bbox_list = []
# Iterate through template list
# Read in templates one by one
# Use cv2.matchTemplate() to search the image
# using whichever of the OpenCV search methods you prefer
# Use cv2.minMaxLoc() to extract the location of the best match
# Determine bounding box corners for the match
# Return the list of bounding boxes
method = cv2.TM_CCOEFF_NORMED
    for temp in template_list:
        tmp = mpimg.imread(temp)
        # Apply template matching
        res = cv2.matchTemplate(img, tmp, method)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
w, h = (tmp.shape[1], tmp.shape[0])
# If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
top_left = min_loc
else:
top_left = max_loc
bottom_right = (top_left[0] + w, top_left[1] + h)
bbox_list.append((top_left, bottom_right))
return bbox_list
def main():
bboxes = find_matches(image, templist)
result = draw_boxes(image, bboxes)
plt.imshow(result)
plt.show()
if __name__ == '__main__':
main()
|
|
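A self-contained check of the matchTemplate / minMaxLoc / bounding-box flow from the record, run on a synthetic image so none of the cutout files are required (numpy and OpenCV assumed installed):

import numpy as np
import cv2

img = np.zeros((100, 100), dtype=np.uint8)
img[40:60, 30:50] = 255                # bright square to locate
template = img[35:65, 25:55].copy()    # patch containing the square
res = cv2.matchTemplate(img, template, cv2.TM_CCOEFF_NORMED)
_, _, _, max_loc = cv2.minMaxLoc(res)  # top-left corner of the best match
h, w = template.shape
print(max_loc, (max_loc[0] + w, max_loc[1] + h))  # (25, 35) (55, 65)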
10b407b632b2aad1c8fc85e8f242ee4139b5b2a1
|
test/integration/generate_partitions.py
|
test/integration/generate_partitions.py
|
import sys
import random
if len(sys.argv) != 3:
print >> sys.stderr, "USAGE: python generate_partitions.py nodes partitions_per_node"
sys.exit()
FORMAT_WIDTH = 10
nodes = int(sys.argv[1])
partitions = int(sys.argv[2])
ids = range(nodes * partitions)
# use known seed so this is repeatable
random.seed(92873498274)
random.shuffle(ids)
for i in xrange(nodes):
print
print 'node', i
print '<partitions>'
print ' ',
node_ids = sorted(ids[i*partitions:(i+1)*partitions])
for j in xrange(len(node_ids)):
print str(node_ids[j]) + ',',
if j % FORMAT_WIDTH == FORMAT_WIDTH - 1:
print
print ' ',
print '</partitions>'
|
Add script to generate partition ids.
|
Add script to generate partition ids.
|
Python
|
apache-2.0
|
nassim-git/project-voldemort,nassim-git/project-voldemort,nassim-git/project-voldemort,nassim-git/project-voldemort
|
Add script to generate partition ids.
|
import sys
import random
if len(sys.argv) != 3:
print >> sys.stderr, "USAGE: python generate_partitions.py nodes partitions_per_node"
sys.exit()
FORMAT_WIDTH = 10
nodes = int(sys.argv[1])
partitions = int(sys.argv[2])
ids = range(nodes * partitions)
# use known seed so this is repeatable
random.seed(92873498274)
random.shuffle(ids)
for i in xrange(nodes):
print
print 'node', i
print '<partitions>'
print ' ',
node_ids = sorted(ids[i*partitions:(i+1)*partitions])
for j in xrange(len(node_ids)):
print str(node_ids[j]) + ',',
if j % FORMAT_WIDTH == FORMAT_WIDTH - 1:
print
print ' ',
print '</partitions>'
|
<commit_before><commit_msg>Add script to generate partition ids.<commit_after>
|
import sys
import random
if len(sys.argv) != 3:
print >> sys.stderr, "USAGE: python generate_partitions.py nodes partitions_per_node"
sys.exit()
FORMAT_WIDTH = 10
nodes = int(sys.argv[1])
partitions = int(sys.argv[2])
ids = range(nodes * partitions)
# use known seed so this is repeatable
random.seed(92873498274)
random.shuffle(ids)
for i in xrange(nodes):
print
print 'node', i
print '<partitions>'
print ' ',
node_ids = sorted(ids[i*partitions:(i+1)*partitions])
for j in xrange(len(node_ids)):
print str(node_ids[j]) + ',',
if j % FORMAT_WIDTH == FORMAT_WIDTH - 1:
print
print ' ',
print '</partitions>'
|
Add script to generate partition ids.import sys
import random
if len(sys.argv) != 3:
print >> sys.stderr, "USAGE: python generate_partitions.py nodes partitions_per_node"
sys.exit()
FORMAT_WIDTH = 10
nodes = int(sys.argv[1])
partitions = int(sys.argv[2])
ids = range(nodes * partitions)
# use known seed so this is repeatable
random.seed(92873498274)
random.shuffle(ids)
for i in xrange(nodes):
print
print 'node', i
print '<partitions>'
print ' ',
node_ids = sorted(ids[i*partitions:(i+1)*partitions])
for j in xrange(len(node_ids)):
print str(node_ids[j]) + ',',
if j % FORMAT_WIDTH == FORMAT_WIDTH - 1:
print
print ' ',
print '</partitions>'
|
<commit_before><commit_msg>Add script to generate partition ids.<commit_after>import sys
import random
if len(sys.argv) != 3:
print >> sys.stderr, "USAGE: python generate_partitions.py nodes partitions_per_node"
sys.exit()
FORMAT_WIDTH = 10
nodes = int(sys.argv[1])
partitions = int(sys.argv[2])
ids = range(nodes * partitions)
# use known seed so this is repeatable
random.seed(92873498274)
random.shuffle(ids)
for i in xrange(nodes):
print
print 'node', i
print '<partitions>'
print ' ',
node_ids = sorted(ids[i*partitions:(i+1)*partitions])
for j in xrange(len(node_ids)):
print str(node_ids[j]) + ',',
if j % FORMAT_WIDTH == FORMAT_WIDTH - 1:
print
print ' ',
print '</partitions>'
|
|
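The same shuffle-and-slice idea in Python 3 syntax, with small numbers so the node-to-partition assignment is easy to eyeball (the seed is the one fixed in the record):

import random

nodes, partitions = 3, 4
ids = list(range(nodes * partitions))
random.seed(92873498274)   # known seed, so the layout is repeatable
random.shuffle(ids)
for i in range(nodes):
    print('node', i, sorted(ids[i * partitions:(i + 1) * partitions]))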
5578122c42c328d41ac258b4b411eb67125ad2f0
|
benchexec/tools/ulcseq.py
|
benchexec/tools/ulcseq.py
|
#!/usr/bin/env python
"""
BenchExec is a framework for reliable benchmarking.
This file is part of BenchExec.
Copyright (C) 2007-2015 Dirk Beyer
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import benchexec.util as util
import benchexec.tools.template
import benchexec.result as result
class Tool(benchexec.tools.template.BaseTool):
"""
Tool wrapper for UL-CSeq (http://users.ecs.soton.ac.uk/gp4/cseq/cseq.html).
"""
def executable(self):
return util.find_executable('ul-cseq.py')
def name(self):
return 'UL-CSeq'
def version(self, executable):
return self._version_from_tool(executable)
def cmdline(self, executable, options, tasks, propertyfile=None, rlimits={}):
"""
Compose the command line to execute from the name of the executable,
the user-specified options, and the inputfile to analyze.
This method can get overridden, if, for example, some options should
be enabled or if the order of arguments must be changed.
All paths passed to this method (executable, tasks, and propertyfile)
are either absolute or have been made relative to the designated working directory.
@param executable: the path to the executable of the tool (typically the result of executable())
@param options: a list of options, in the same order as given in the XML-file.
        @param tasks: a list of tasks that should be analysed with the tool in one run.
                      In most cases we have only _one_ inputfile.
@param propertyfile: contains a specification for the verifier.
@param rlimits: This dictionary contains resource-limits for a run,
for example: time-limit, soft-time-limit, hard-time-limit, memory-limit, cpu-core-limit.
All entries in rlimits are optional, so check for existence before usage!
"""
assert len(tasks) == 1, "only one inputfile supported"
inputfile = ["--input", tasks[0]]
spec = ["--spec", propertyfile] if propertyfile is not None else []
return [executable] + options + spec + inputfile
def determine_result(self, returncode, returnsignal, output, isTimeout):
output = '\n'.join(output)
status = result.RESULT_UNKNOWN
if "FALSE" in output:
status = result.RESULT_FALSE_REACH
elif "TRUE" in output:
status = result.RESULT_TRUE_PROP
else:
status = result.RESULT_UNKNOWN
return status
|
Add wrapper script for UL-CSeq tool
|
Add wrapper script for UL-CSeq tool
|
Python
|
apache-2.0
|
ultimate-pa/benchexec,martin-neuhaeusser/benchexec,ultimate-pa/benchexec,martin-neuhaeusser/benchexec,martin-neuhaeusser/benchexec,IljaZakharov/benchexec,ultimate-pa/benchexec,sosy-lab/benchexec,martin-neuhaeusser/benchexec,IljaZakharov/benchexec,dbeyer/benchexec,ultimate-pa/benchexec,sosy-lab/benchexec,IljaZakharov/benchexec,sosy-lab/benchexec,sosy-lab/benchexec,sosy-lab/benchexec,dbeyer/benchexec,dbeyer/benchexec,sosy-lab/benchexec,IljaZakharov/benchexec,ultimate-pa/benchexec,ultimate-pa/benchexec,dbeyer/benchexec
|
Add wrapper script for UL-CSeq tool
|
#!/usr/bin/env python
"""
BenchExec is a framework for reliable benchmarking.
This file is part of BenchExec.
Copyright (C) 2007-2015 Dirk Beyer
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import benchexec.util as util
import benchexec.tools.template
import benchexec.result as result
class Tool(benchexec.tools.template.BaseTool):
"""
Tool wrapper for UL-CSeq (http://users.ecs.soton.ac.uk/gp4/cseq/cseq.html).
"""
def executable(self):
return util.find_executable('ul-cseq.py')
def name(self):
return 'UL-CSeq'
def version(self, executable):
return self._version_from_tool(executable)
def cmdline(self, executable, options, tasks, propertyfile=None, rlimits={}):
"""
Compose the command line to execute from the name of the executable,
the user-specified options, and the inputfile to analyze.
This method can get overridden, if, for example, some options should
be enabled or if the order of arguments must be changed.
All paths passed to this method (executable, tasks, and propertyfile)
are either absolute or have been made relative to the designated working directory.
@param executable: the path to the executable of the tool (typically the result of executable())
@param options: a list of options, in the same order as given in the XML-file.
        @param tasks: a list of tasks that should be analysed with the tool in one run.
                      In most cases we have only _one_ inputfile.
@param propertyfile: contains a specification for the verifier.
@param rlimits: This dictionary contains resource-limits for a run,
for example: time-limit, soft-time-limit, hard-time-limit, memory-limit, cpu-core-limit.
All entries in rlimits are optional, so check for existence before usage!
"""
assert len(tasks) == 1, "only one inputfile supported"
inputfile = ["--input", tasks[0]]
spec = ["--spec", propertyfile] if propertyfile is not None else []
return [executable] + options + spec + inputfile
def determine_result(self, returncode, returnsignal, output, isTimeout):
output = '\n'.join(output)
status = result.RESULT_UNKNOWN
if "FALSE" in output:
status = result.RESULT_FALSE_REACH
elif "TRUE" in output:
status = result.RESULT_TRUE_PROP
else:
status = result.RESULT_UNKNOWN
return status
|
<commit_before><commit_msg>Add wrapper script for UL-CSeq tool<commit_after>
|
#!/usr/bin/env python
"""
BenchExec is a framework for reliable benchmarking.
This file is part of BenchExec.
Copyright (C) 2007-2015 Dirk Beyer
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import benchexec.util as util
import benchexec.tools.template
import benchexec.result as result
class Tool(benchexec.tools.template.BaseTool):
"""
Tool wrapper for UL-CSeq (http://users.ecs.soton.ac.uk/gp4/cseq/cseq.html).
"""
def executable(self):
return util.find_executable('ul-cseq.py')
def name(self):
return 'UL-CSeq'
def version(self, executable):
return self._version_from_tool(executable)
def cmdline(self, executable, options, tasks, propertyfile=None, rlimits={}):
"""
Compose the command line to execute from the name of the executable,
the user-specified options, and the inputfile to analyze.
This method can get overridden, if, for example, some options should
be enabled or if the order of arguments must be changed.
All paths passed to this method (executable, tasks, and propertyfile)
are either absolute or have been made relative to the designated working directory.
@param executable: the path to the executable of the tool (typically the result of executable())
@param options: a list of options, in the same order as given in the XML-file.
        @param tasks: a list of tasks that should be analysed with the tool in one run.
                      In most cases we have only _one_ inputfile.
@param propertyfile: contains a specification for the verifier.
@param rlimits: This dictionary contains resource-limits for a run,
for example: time-limit, soft-time-limit, hard-time-limit, memory-limit, cpu-core-limit.
All entries in rlimits are optional, so check for existence before usage!
"""
assert len(tasks) == 1, "only one inputfile supported"
inputfile = ["--input", tasks[0]]
spec = ["--spec", propertyfile] if propertyfile is not None else []
return [executable] + options + spec + inputfile
def determine_result(self, returncode, returnsignal, output, isTimeout):
output = '\n'.join(output)
status = result.RESULT_UNKNOWN
if "FALSE" in output:
status = result.RESULT_FALSE_REACH
elif "TRUE" in output:
status = result.RESULT_TRUE_PROP
else:
status = result.RESULT_UNKNOWN
return status
|
Add wrapper script for UL-CSeq tool#!/usr/bin/env python
"""
BenchExec is a framework for reliable benchmarking.
This file is part of BenchExec.
Copyright (C) 2007-2015 Dirk Beyer
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import benchexec.util as util
import benchexec.tools.template
import benchexec.result as result
class Tool(benchexec.tools.template.BaseTool):
"""
Tool wrapper for UL-CSeq (http://users.ecs.soton.ac.uk/gp4/cseq/cseq.html).
"""
def executable(self):
return util.find_executable('ul-cseq.py')
def name(self):
return 'UL-CSeq'
def version(self, executable):
return self._version_from_tool(executable)
def cmdline(self, executable, options, tasks, propertyfile=None, rlimits={}):
"""
Compose the command line to execute from the name of the executable,
the user-specified options, and the inputfile to analyze.
This method can get overridden, if, for example, some options should
be enabled or if the order of arguments must be changed.
All paths passed to this method (executable, tasks, and propertyfile)
are either absolute or have been made relative to the designated working directory.
@param executable: the path to the executable of the tool (typically the result of executable())
@param options: a list of options, in the same order as given in the XML-file.
        @param tasks: a list of tasks that should be analysed with the tool in one run.
                      In most cases we have only _one_ inputfile.
@param propertyfile: contains a specification for the verifier.
@param rlimits: This dictionary contains resource-limits for a run,
for example: time-limit, soft-time-limit, hard-time-limit, memory-limit, cpu-core-limit.
All entries in rlimits are optional, so check for existence before usage!
"""
assert len(tasks) == 1, "only one inputfile supported"
inputfile = ["--input", tasks[0]]
spec = ["--spec", propertyfile] if propertyfile is not None else []
return [executable] + options + spec + inputfile
def determine_result(self, returncode, returnsignal, output, isTimeout):
output = '\n'.join(output)
status = result.RESULT_UNKNOWN
if "FALSE" in output:
status = result.RESULT_FALSE_REACH
elif "TRUE" in output:
status = result.RESULT_TRUE_PROP
else:
status = result.RESULT_UNKNOWN
return status
|
<commit_before><commit_msg>Add wrapper script for UL-CSeq tool<commit_after>#!/usr/bin/env python
"""
BenchExec is a framework for reliable benchmarking.
This file is part of BenchExec.
Copyright (C) 2007-2015 Dirk Beyer
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import benchexec.util as util
import benchexec.tools.template
import benchexec.result as result
class Tool(benchexec.tools.template.BaseTool):
"""
Tool wrapper for UL-CSeq (http://users.ecs.soton.ac.uk/gp4/cseq/cseq.html).
"""
def executable(self):
return util.find_executable('ul-cseq.py')
def name(self):
return 'UL-CSeq'
def version(self, executable):
return self._version_from_tool(executable)
def cmdline(self, executable, options, tasks, propertyfile=None, rlimits={}):
"""
Compose the command line to execute from the name of the executable,
the user-specified options, and the inputfile to analyze.
This method can get overridden, if, for example, some options should
be enabled or if the order of arguments must be changed.
All paths passed to this method (executable, tasks, and propertyfile)
are either absolute or have been made relative to the designated working directory.
@param executable: the path to the executable of the tool (typically the result of executable())
@param options: a list of options, in the same order as given in the XML-file.
@param tasks: a list of tasks that should be analysed with the tool in one run.
In most cases we have only _one_ inputfile.
@param propertyfile: contains a specification for the verifier.
@param rlimits: This dictionary contains resource-limits for a run,
for example: time-limit, soft-time-limit, hard-time-limit, memory-limit, cpu-core-limit.
All entries in rlimits are optional, so check for existence before usage!
"""
assert len(tasks) == 1, "only one inputfile supported"
inputfile = ["--input", tasks[0]]
spec = ["--spec", propertyfile] if propertyfile is not None else []
return [executable] + options + spec + inputfile
def determine_result(self, returncode, returnsignal, output, isTimeout):
output = '\n'.join(output)
status = result.RESULT_UNKNOWN
if "FALSE" in output:
status = result.RESULT_FALSE_REACH
elif "TRUE" in output:
status = result.RESULT_TRUE_PROP
else:
status = result.RESULT_UNKNOWN
return status
|
|
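The cmdline()/determine_result() pair above is essentially the whole contract a BenchExec tool-info module has to fulfil. A standalone sketch of the command line it composes for one input file; the executable path, options, and property-file name below are made-up example values, not taken from the commit:

executable = '/opt/ulcseq/ul-cseq.py'   # hypothetical install location
options = ['--unwind', '1']             # hypothetical benchmark options
tasks = ['bitvector_test.c']            # hypothetical input file
propertyfile = 'unreach-call.prp'       # hypothetical specification file

spec = ['--spec', propertyfile] if propertyfile is not None else []
cmd = [executable] + options + spec + ['--input', tasks[0]]
print(cmd)
# ['/opt/ulcseq/ul-cseq.py', '--unwind', '1', '--spec', 'unreach-call.prp', '--input', 'bitvector_test.c']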
f37798800c82a483faab875f7c1081bb8ffab84a
|
corehq/apps/hqcase/management/commands/delete_cases.py
|
corehq/apps/hqcase/management/commands/delete_cases.py
|
from optparse import make_option
from django.core.management.base import NoArgsCommand, BaseCommand
from couchdbkit import ResourceNotFound
from casexml.apps.case.models import CommCareCase
from dimagi.utils.decorators.memoized import memoized
from dimagi.utils.couch.database import iter_bulk_delete
from corehq.apps.users.models import CouchUser
class Command(BaseCommand):
help = "Hard delete all cases owned by a given user. (ID or username)"
args = '<user>'
option_list = NoArgsCommand.option_list + (
make_option('--no-prompt',
action='store_true',
dest='no_prompt',
help='Delete cases without prompting for confirmation'),
make_option('--by-last-submitter',
action='store_true',
dest='last_submitter',
help='delete cases last updated by the specified user'),
)
@property
@memoized
def db(self):
return CommCareCase.get_db()
def case_query(self, reduce=False):
if self.last_submitter:
view_name = 'case/by_user'
else:
view_name = 'case/by_owner'
return self.db.view(
view_name,
startkey=[self.user.user_id],
endkey=[self.user.user_id, {}],
reduce=reduce,
)
def get_case_count(self):
res = self.case_query(reduce=True).one()
return res['value'] if res else 0
def delete_all(self):
case_ids = [r["id"] for r in self.case_query(reduce=False)]
iter_bulk_delete(self.db, case_ids)
def handle(self, *args, **options):
self.last_submitter = options.get('last_submitter', False)
if not len(args):
print "Usage: ./manage.py delete_cases <user>"
return
try:
self.user = CouchUser.get_by_username(args[0])
if not self.user:
self.user = CouchUser.get(args[0])
except ResourceNotFound:
print "Could not find user {}".format(args[0])
return
if not options.get('no_prompt'):
msg = "Delete all {} cases {} by {}? (y/n)\n".format(
self.get_case_count(),
"submitted" if self.last_submitter else "owned",
self.user.username,
)
if not raw_input(msg) == 'y':
print "cancelling"
return
self.delete_all()
print "Cases successfully deleted, you monster!"
|
Add management command to delete all cases for a user
|
Add management command to delete all cases for a user
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq
|
Add management command to delete all cases for a user
|
from optparse import make_option
from django.core.management.base import NoArgsCommand, BaseCommand
from couchdbkit import ResourceNotFound
from casexml.apps.case.models import CommCareCase
from dimagi.utils.decorators.memoized import memoized
from dimagi.utils.couch.database import iter_bulk_delete
from corehq.apps.users.models import CouchUser
class Command(BaseCommand):
help = "Hard delete all cases owned by a given user. (ID or username)"
args = '<user>'
option_list = NoArgsCommand.option_list + (
make_option('--no-prompt',
action='store_true',
dest='no_prompt',
help='Delete cases without prompting for confirmation'),
make_option('--by-last-submitter',
action='store_true',
dest='last_submitter',
help='delete cases last updated by the specified user'),
)
@property
@memoized
def db(self):
return CommCareCase.get_db()
def case_query(self, reduce=False):
if self.last_submitter:
view_name = 'case/by_user'
else:
view_name = 'case/by_owner'
return self.db.view(
view_name,
startkey=[self.user.user_id],
endkey=[self.user.user_id, {}],
reduce=reduce,
)
def get_case_count(self):
res = self.case_query(reduce=True).one()
return res['value'] if res else 0
def delete_all(self):
case_ids = [r["id"] for r in self.case_query(reduce=False)]
iter_bulk_delete(self.db, case_ids)
def handle(self, *args, **options):
self.last_submitter = options.get('last_submitter', False)
if not len(args):
print "Usage: ./manage.py delete_cases <user>"
return
try:
self.user = CouchUser.get_by_username(args[0])
if not self.user:
self.user = CouchUser.get(args[0])
except ResourceNotFound:
print "Could not find user {}".format(args[0])
return
if not options.get('no_prompt'):
msg = "Delete all {} cases {} by {}? (y/n)\n".format(
self.get_case_count(),
"submitted" if self.last_submitter else "owned",
self.user.username,
)
if not raw_input(msg) == 'y':
print "cancelling"
return
self.delete_all()
print "Cases successfully deleted, you monster!"
|
<commit_before><commit_msg>Add management command to delete all cases for a user<commit_after>
|
from optparse import make_option
from django.core.management.base import NoArgsCommand, BaseCommand
from couchdbkit import ResourceNotFound
from casexml.apps.case.models import CommCareCase
from dimagi.utils.decorators.memoized import memoized
from dimagi.utils.couch.database import iter_bulk_delete
from corehq.apps.users.models import CouchUser
class Command(BaseCommand):
help = "Hard delete all cases owned by a given user. (ID or username)"
args = '<user>'
option_list = NoArgsCommand.option_list + (
make_option('--no-prompt',
action='store_true',
dest='no_prompt',
help='Delete cases without prompting for confirmation'),
make_option('--by-last-submitter',
action='store_true',
dest='last_submitter',
help='delete cases last updated by the specified user'),
)
@property
@memoized
def db(self):
return CommCareCase.get_db()
def case_query(self, reduce=False):
if self.last_submitter:
view_name = 'case/by_user'
else:
view_name = 'case/by_owner'
return self.db.view(
view_name,
startkey=[self.user.user_id],
endkey=[self.user.user_id, {}],
reduce=reduce,
)
def get_case_count(self):
res = self.case_query(reduce=True).one()
return res['value'] if res else 0
def delete_all(self):
case_ids = [r["id"] for r in self.case_query(reduce=False)]
iter_bulk_delete(self.db, case_ids)
def handle(self, *args, **options):
self.last_submitter = options.get('last_submitter', False)
if not len(args):
print "Usage: ./manage.py delete_cases <user>"
return
try:
self.user = CouchUser.get_by_username(args[0])
if not self.user:
self.user = CouchUser.get(args[0])
except ResourceNotFound:
print "Could not find user {}".format(args[0])
return
if not options.get('no_prompt'):
msg = "Delete all {} cases {} by {}? (y/n)\n".format(
self.get_case_count(),
"submitted" if self.last_submitter else "owned",
self.user.username,
)
if not raw_input(msg) == 'y':
print "cancelling"
return
self.delete_all()
print "Cases successfully deleted, you monster!"
|
Add management command to delete all cases for a userfrom optparse import make_option
from django.core.management.base import NoArgsCommand, BaseCommand
from couchdbkit import ResourceNotFound
from casexml.apps.case.models import CommCareCase
from dimagi.utils.decorators.memoized import memoized
from dimagi.utils.couch.database import iter_bulk_delete
from corehq.apps.users.models import CouchUser
class Command(BaseCommand):
help = "Hard delete all cases owned by a given user. (ID or username)"
args = '<user>'
option_list = NoArgsCommand.option_list + (
make_option('--no-prompt',
action='store_true',
dest='no_prompt',
help='Delete cases without prompting for confirmation'),
make_option('--by-last-submitter',
action='store_true',
dest='last_submitter',
help='delete cases last updated by the specified user'),
)
@property
@memoized
def db(self):
return CommCareCase.get_db()
def case_query(self, reduce=False):
if self.last_submitter:
view_name = 'case/by_user'
else:
view_name = 'case/by_owner'
return self.db.view(
view_name,
startkey=[self.user.user_id],
endkey=[self.user.user_id, {}],
reduce=reduce,
)
def get_case_count(self):
res = self.case_query(reduce=True).one()
return res['value'] if res else 0
def delete_all(self):
case_ids = [r["id"] for r in self.case_query(reduce=False)]
iter_bulk_delete(self.db, case_ids)
def handle(self, *args, **options):
self.last_submitter = options.get('last_submitter', False)
if not len(args):
print "Usage: ./manage.py delete_cases <user>"
return
try:
self.user = CouchUser.get_by_username(args[0])
if not self.user:
self.user = CouchUser.get(args[0])
except ResourceNotFound:
print "Could not find user {}".format(args[0])
return
if not options.get('no_prompt'):
msg = "Delete all {} cases {} by {}? (y/n)\n".format(
self.get_case_count(),
"submitted" if self.last_submitter else "owned",
self.user.username,
)
if not raw_input(msg) == 'y':
print "cancelling"
return
self.delete_all()
print "Cases successfully deleted, you monster!"
|
<commit_before><commit_msg>Add mngmnt command to delete all cases for a user<commit_after>from optparse import make_option
from django.core.management.base import NoArgsCommand, BaseCommand
from couchdbkit import ResourceNotFound
from casexml.apps.case.models import CommCareCase
from dimagi.utils.decorators.memoized import memoized
from dimagi.utils.couch.database import iter_bulk_delete
from corehq.apps.users.models import CouchUser
class Command(BaseCommand):
help = "Hard delete all cases owned by a given user. (ID or username)"
args = '<user>'
option_list = NoArgsCommand.option_list + (
make_option('--no-prompt',
action='store_true',
dest='no_prompt',
help='Delete cases without prompting for confirmation'),
make_option('--by-last-submitter',
action='store_true',
dest='last_submitter',
help='delete cases last updated by the specified user'),
)
@property
@memoized
def db(self):
return CommCareCase.get_db()
def case_query(self, reduce=False):
if self.last_submitter:
view_name = 'case/by_user'
else:
view_name = 'case/by_owner'
return self.db.view(
view_name,
startkey=[self.user.user_id],
endkey=[self.user.user_id, {}],
reduce=reduce,
)
def get_case_count(self):
res = self.case_query(reduce=True).one()
return res['value'] if res else 0
def delete_all(self):
case_ids = [r["id"] for r in self.case_query(reduce=False)]
iter_bulk_delete(self.db, case_ids)
def handle(self, *args, **options):
self.last_submitter = options.get('last_submitter', False)
if not len(args):
print "Usage: ./manage.py delete_cases <user>"
return
try:
self.user = CouchUser.get_by_username(args[0])
if not self.user:
self.user = CouchUser.get(args[0])
except ResourceNotFound:
print "Could not find user {}".format(args[0])
return
if not options.get('no_prompt'):
msg = "Delete all {} cases {} by {}? (y/n)\n".format(
self.get_case_count(),
"submitted" if self.last_submitter else "owned",
self.user.username,
)
if not raw_input(msg) == 'y':
print "cancelling"
return
self.delete_all()
print "Cases successfully deleted, you monster!"
|
|
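The startkey/endkey pair in case_query() is the usual CouchDB idiom for selecting every row whose composite key starts with a given user id: CouchDB's collation sorts objects like {} after all strings, so [uid] .. [uid, {}] brackets exactly that user's rows. A pure-Python sketch of the same range logic; the sentinel below stands in for Couch's {} high key, and the row data is invented:

# Rows are keyed (user_id, case_id); the range query keeps only one user's rows.
HIGH = '\uffff'  # stand-in for CouchDB's {} sentinel when sorting plain tuples
rows = sorted([
    (('alice', 'case-1'), 'doc-1'),
    (('bob',   'case-3'), 'doc-3'),
    (('alice', 'case-2'), 'doc-2'),
])
startkey, endkey = ('alice',), ('alice', HIGH)
hits = [doc for key, doc in rows if startkey <= key <= endkey]
print(hits)  # ['doc-1', 'doc-2']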
9d981457a8d6d1bf785eea65b1217d0d521ece72
|
ir/importer.py
|
ir/importer.py
|
from urllib.request import urlopen
from anki.notes import Note
from aqt import mw
from bs4 import BeautifulSoup
from ir.util import getInput, setField
class Importer:
def importWebpage(self):
model = mw.col.models.byName(self.settings['modelName'])
newNote = Note(mw.col, model)
url = getInput('Import Webpage', 'URL')
title = getInput('Import Webpage', 'Title')
html = urlopen(url).read().decode('utf-8')
soup = BeautifulSoup(html, 'html.parser')
for iframe in soup.find_all('iframe'):
iframe.decompose()
setField(newNote, self.settings['titleField'], title)
setField(newNote, self.settings['textField'], str(soup))
setField(newNote, self.settings['sourceField'], url)
did = mw.col.decks.byName(self.settings['importDeck'])['id']
newNote.model()['did'] = did
mw.col.addNote(newNote)
mw.deckBrowser.refresh()
|
Move importing code into dedicated class
|
Move importing code into dedicated class
|
Python
|
isc
|
luoliyan/incremental-reading-for-anki,luoliyan/incremental-reading-for-anki
|
Move importing code into dedicated class
|
from urllib.request import urlopen
from anki.notes import Note
from aqt import mw
from bs4 import BeautifulSoup
from ir.util import getInput, setField
class Importer:
def importWebpage(self):
model = mw.col.models.byName(self.settings['modelName'])
newNote = Note(mw.col, model)
url = getInput('Import Webpage', 'URL')
title = getInput('Import Webpage', 'Title')
html = urlopen(url).read().decode('utf-8')
soup = BeautifulSoup(html, 'html.parser')
for iframe in soup.find_all('iframe'):
iframe.decompose()
setField(newNote, self.settings['titleField'], title)
setField(newNote, self.settings['textField'], str(soup))
setField(newNote, self.settings['sourceField'], url)
did = mw.col.decks.byName(self.settings['importDeck'])['id']
newNote.model()['did'] = did
mw.col.addNote(newNote)
mw.deckBrowser.refresh()
|
<commit_before><commit_msg>Move importing code into dedicated class<commit_after>
|
from urllib.request import urlopen
from anki.notes import Note
from aqt import mw
from bs4 import BeautifulSoup
from ir.util import getInput, setField
class Importer:
def importWebpage(self):
model = mw.col.models.byName(self.settings['modelName'])
newNote = Note(mw.col, model)
url = getInput('Import Webpage', 'URL')
title = getInput('Import Webpage', 'Title')
html = urlopen(url).read().decode('utf-8')
soup = BeautifulSoup(html, 'html.parser')
for iframe in soup.find_all('iframe'):
iframe.decompose()
setField(newNote, self.settings['titleField'], title)
setField(newNote, self.settings['textField'], str(soup))
setField(newNote, self.settings['sourceField'], url)
did = mw.col.decks.byName(self.settings['importDeck'])['id']
newNote.model()['did'] = did
mw.col.addNote(newNote)
mw.deckBrowser.refresh()
|
Move importing code into dedicated classfrom urllib.request import urlopen
from anki.notes import Note
from aqt import mw
from bs4 import BeautifulSoup
from ir.util import getInput, setField
class Importer:
def importWebpage(self):
model = mw.col.models.byName(self.settings['modelName'])
newNote = Note(mw.col, model)
url = getInput('Import Webpage', 'URL')
title = getInput('Import Webpage', 'Title')
html = urlopen(url).read().decode('utf-8')
soup = BeautifulSoup(html, 'html.parser')
for iframe in soup.find_all('iframe'):
iframe.decompose()
setField(newNote, self.settings['titleField'], title)
setField(newNote, self.settings['textField'], str(soup))
setField(newNote, self.settings['sourceField'], url)
did = mw.col.decks.byName(self.settings['importDeck'])['id']
newNote.model()['did'] = did
mw.col.addNote(newNote)
mw.deckBrowser.refresh()
|
<commit_before><commit_msg>Move importing code into dedicated class<commit_after>from urllib.request import urlopen
from anki.notes import Note
from aqt import mw
from bs4 import BeautifulSoup
from ir.util import getInput, setField
class Importer:
def importWebpage(self):
model = mw.col.models.byName(self.settings['modelName'])
newNote = Note(mw.col, model)
url = getInput('Import Webpage', 'URL')
title = getInput('Import Webpage', 'Title')
html = urlopen(url).read().decode('utf-8')
soup = BeautifulSoup(html, 'html.parser')
for iframe in soup.find_all('iframe'):
iframe.decompose()
setField(newNote, self.settings['titleField'], title)
setField(newNote, self.settings['textField'], str(soup))
setField(newNote, self.settings['sourceField'], url)
did = mw.col.decks.byName(self.settings['importDeck'])['id']
newNote.model()['did'] = did
mw.col.addNote(newNote)
mw.deckBrowser.refresh()
|
|
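The sanitising step inside importWebpage() is worth seeing in isolation: decompose() removes a tag and everything inside it, in place. A minimal sketch run on a literal HTML string instead of a fetched page (requires beautifulsoup4; the markup is invented):

from bs4 import BeautifulSoup

html = '<p>keep me</p><iframe src="https://example.com/embed"></iframe>'
soup = BeautifulSoup(html, 'html.parser')
for iframe in soup.find_all('iframe'):
    iframe.decompose()  # strip the iframe and its contents in place
print(str(soup))  # <p>keep me</p>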
0a5cbbe8e59e867843e2ee39d3972051c53fbc88
|
bmi_tester/tests/utils.py
|
bmi_tester/tests/utils.py
|
import os
import tempfile
import shutil
from scripting.contexts import cd
from . import Bmi, INPUT_FILE
def setup_func():
# globals().update(bmi=Bmi())
# bmi.initialize(INPUT_FILE)
starting_dir = os.path.abspath(os.getcwd())
tmp_dir = os.path.abspath(tempfile.mkdtemp())
os.chdir(tmp_dir)
with open('.ROOT_DIR', 'w') as fp:
fp.write(starting_dir)
# shutil.copy2(os.path.join(starting_dir, INPUT_FILE), tmp_dir)
def teardown_func():
# del globals()['bmi']
tmp_dir = os.path.abspath(os.getcwd())
with open('.ROOT_DIR', 'r') as fp:
starting_dir = fp.read()
os.chdir(starting_dir)
shutil.rmtree(tmp_dir)
def all_names(bmi):
return set(bmi.get_input_var_names() + bmi.get_output_var_names())
def all_grids(bmi, gtype=None):
grids = [bmi.get_var_grid(name) for name in all_names(bmi)]
if gtype:
grids = [gid for gid in grids if bmi.get_grid_type(gid) == gtype]
return set(grids)
def new_bmi(infile=None):
try:
with open('.ROOT_DIR', 'r') as fp:
root_dir = fp.read()
except IOError:
root_dir = '.'
bmi = Bmi()
with cd(root_dir):
bmi.initialize(infile or INPUT_FILE)
return bmi
|
Add utilities for testing bmi.
|
Add utilities for testing bmi.
|
Python
|
mit
|
csdms/bmi-tester
|
Add utilities for testing bmi.
|
import os
import tempfile
import shutil
from scripting.contexts import cd
from . import Bmi, INPUT_FILE
def setup_func():
# globals().update(bmi=Bmi())
# bmi.initialize(INPUT_FILE)
starting_dir = os.path.abspath(os.getcwd())
tmp_dir = os.path.abspath(tempfile.mkdtemp())
os.chdir(tmp_dir)
with open('.ROOT_DIR', 'w') as fp:
fp.write(starting_dir)
# shutil.copy2(os.path.join(starting_dir, INPUT_FILE), tmp_dir)
def teardown_func():
# del globals()['bmi']
tmp_dir = os.path.abspath(os.getcwd())
with open('.ROOT_DIR', 'r') as fp:
starting_dir = fp.read()
os.chdir(starting_dir)
shutil.rmtree(tmp_dir)
def all_names(bmi):
return set(bmi.get_input_var_names() + bmi.get_output_var_names())
def all_grids(bmi, gtype=None):
grids = [bmi.get_var_grid(name) for name in all_names(bmi)]
if gtype:
grids = [gid for gid in grids if bmi.get_grid_type(gid) == gtype]
return set(grids)
def new_bmi(infile=None):
try:
with open('.ROOT_DIR', 'r') as fp:
root_dir = fp.read()
except IOError:
root_dir = '.'
bmi = Bmi()
with cd(root_dir):
bmi.initialize(infile or INPUT_FILE)
return bmi
|
<commit_before><commit_msg>Add utilities for testing bmi.<commit_after>
|
import os
import tempfile
import shutil
from scripting.contexts import cd
from . import Bmi, INPUT_FILE
def setup_func():
# globals().update(bmi=Bmi())
# bmi.initialize(INPUT_FILE)
starting_dir = os.path.abspath(os.getcwd())
tmp_dir = os.path.abspath(tempfile.mkdtemp())
os.chdir(tmp_dir)
with open('.ROOT_DIR', 'w') as fp:
fp.write(starting_dir)
# shutil.copy2(os.path.join(starting_dir, INPUT_FILE), tmp_dir)
def teardown_func():
# del globals()['bmi']
tmp_dir = os.path.abspath(os.getcwd())
with open('.ROOT_DIR', 'r') as fp:
starting_dir = fp.read()
os.chdir(starting_dir)
shutil.rmtree(tmp_dir)
def all_names(bmi):
return set(bmi.get_input_var_names() + bmi.get_output_var_names())
def all_grids(bmi, gtype=None):
grids = [bmi.get_var_grid(name) for name in all_names(bmi)]
if gtype:
grids = [gid for gid in grids if bmi.get_grid_type(gid) == gtype]
return set(grids)
def new_bmi(infile=None):
try:
with open('.ROOT_DIR', 'r') as fp:
root_dir = fp.read()
except IOError:
root_dir = '.'
bmi = Bmi()
with cd(root_dir):
bmi.initialize(infile or INPUT_FILE)
return bmi
|
Add utilities for testing bmi.import os
import tempfile
import shutil
from scripting.contexts import cd
from . import Bmi, INPUT_FILE
def setup_func():
# globals().update(bmi=Bmi())
# bmi.initialize(INPUT_FILE)
starting_dir = os.path.abspath(os.getcwd())
tmp_dir = os.path.abspath(tempfile.mkdtemp())
os.chdir(tmp_dir)
with open('.ROOT_DIR', 'w') as fp:
fp.write(starting_dir)
# shutil.copy2(os.path.join(starting_dir, INPUT_FILE), tmp_dir)
def teardown_func():
# del globals()['bmi']
tmp_dir = os.path.abspath(os.getcwd())
with open('.ROOT_DIR', 'r') as fp:
starting_dir = fp.read()
os.chdir(starting_dir)
shutil.rmtree(tmp_dir)
def all_names(bmi):
return set(bmi.get_input_var_names() + bmi.get_output_var_names())
def all_grids(bmi, gtype=None):
grids = [bmi.get_var_grid(name) for name in all_names(bmi)]
if gtype:
grids = [gid for gid in grids if bmi.get_grid_type(gid) == gtype]
return set(grids)
def new_bmi(infile=None):
try:
with open('.ROOT_DIR', 'r') as fp:
root_dir = fp.read()
except IOError:
root_dir = '.'
bmi = Bmi()
with cd(root_dir):
bmi.initialize(infile or INPUT_FILE)
return bmi
|
<commit_before><commit_msg>Add utilities for testing bmi.<commit_after>import os
import tempfile
import shutil
from scripting.contexts import cd
from . import Bmi, INPUT_FILE
def setup_func():
# globals().update(bmi=Bmi())
# bmi.initialize(INPUT_FILE)
starting_dir = os.path.abspath(os.getcwd())
tmp_dir = os.path.abspath(tempfile.mkdtemp())
os.chdir(tmp_dir)
with open('.ROOT_DIR', 'w') as fp:
fp.write(starting_dir)
# shutil.copy2(os.path.join(starting_dir, INPUT_FILE), tmp_dir)
def teardown_func():
# del globals()['bmi']
tmp_dir = os.path.abspath(os.getcwd())
with open('.ROOT_DIR', 'r') as fp:
starting_dir = fp.read()
os.chdir(starting_dir)
shutil.rmtree(tmp_dir)
def all_names(bmi):
return set(bmi.get_input_var_names() + bmi.get_output_var_names())
def all_grids(bmi, gtype=None):
grids = [bmi.get_var_grid(name) for name in all_names(bmi)]
if gtype:
grids = [gid for gid in grids if bmi.get_grid_type(gid) == gtype]
return set(grids)
def new_bmi(infile=None):
try:
with open('.ROOT_DIR', 'r') as fp:
root_dir = fp.read()
except IOError:
root_dir = '.'
bmi = Bmi()
with cd(root_dir):
bmi.initialize(infile or INPUT_FILE)
return bmi
|
|
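The .ROOT_DIR file is the thread that ties setup_func(), teardown_func(), and new_bmi() together: the starting directory is written down before chdir-ing into a temp dir, then read back later to find input files and to clean up. A standalone round trip of that bookkeeping, with the Bmi-specific parts omitted:

import os, shutil, tempfile

start = os.path.abspath(os.getcwd())
tmp = os.path.abspath(tempfile.mkdtemp())
os.chdir(tmp)
with open('.ROOT_DIR', 'w') as fp:   # remember where we came from
    fp.write(start)

with open('.ROOT_DIR') as fp:        # ...later, find our way back
    os.chdir(fp.read())
shutil.rmtree(tmp)
assert os.getcwd() == start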
99be8919a0bc274dc311ebe3201dfc490a1d0d07
|
setup.py
|
setup.py
|
import os
from distutils.core import setup, find_packages
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "DataShape",
version = "0.1.0",
author = "Continuum Analytics",
author_email = "blaze-dev@continuum.io",
description = ("A data description language."),
license = "BSD",
keywords = "data language",
url = "http://packages.python.org/datashape",
packages = ["datashape", "datashape.test"],
long_description = read('README.md'),
classifiers = [
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Topic :: Software Development",
"License :: OSI Approved :: BSD License",
],
)
|
import os
from distutils.core import setup
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "DataShape",
version = "0.1.0",
author = "Continuum Analytics",
author_email = "blaze-dev@continuum.io",
description = ("A data description language."),
license = "BSD",
keywords = "data language",
url = "http://packages.python.org/datashape",
packages = ["datashape", "datashape.tests"],
long_description = read('README.md'),
classifiers = [
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Topic :: Software Development",
"License :: OSI Approved :: BSD License",
],
)
|
Remove find_packages import, it's not in distutils
|
Remove find_packages import, it's not in distutils
|
Python
|
bsd-2-clause
|
blaze/datashape,cowlicks/datashape,ContinuumIO/datashape,cpcloud/datashape,aterrel/datashape,quantopian/datashape,FrancescAlted/datashape,quantopian/datashape,aterrel/datashape,cowlicks/datashape,markflorisson/datashape,ContinuumIO/datashape,cpcloud/datashape,blaze/datashape,llllllllll/datashape,markflorisson/datashape,FrancescAlted/datashape,llllllllll/datashape
|
import os
from distutils.core import setup, find_packages
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "DataShape",
version = "0.1.0",
author = "Continuum Analytics",
author_email = "blaze-dev@continuum.io",
description = ("A data description language."),
license = "BSD",
keywords = "data language",
url = "http://packages.python.org/datashape",
packages = ["datashape", "datashape.test"],
long_description = read('README.md'),
classifiers = [
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Topic :: Software Development",
"License :: OSI Approved :: BSD License",
],
)
Remove find_packages import, it's not in distutils
|
import os
from distutils.core import setup
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "DataShape",
version = "0.1.0",
author = "Continuum Analytics",
author_email = "blaze-dev@continuum.io",
description = ("A data description language."),
license = "BSD",
keywords = "data language",
url = "http://packages.python.org/datashape",
packages = ["datashape", "datashape.tests"],
long_description = read('README.md'),
classifiers = [
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Topic :: Software Development",
"License :: OSI Approved :: BSD License",
],
)
|
<commit_before>import os
from distutils.core import setup, find_packages
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "DataShape",
version = "0.1.0",
author = "Continuum Analytics",
author_email = "blaze-dev@continuum.io",
description = ("A data description language."),
license = "BSD",
keywords = "data language",
url = "http://packages.python.org/datashape",
packages = ["datashape", "datashape.test"],
long_description = read('README.md'),
classifiers = [
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Topic :: Software Development",
"License :: OSI Approved :: BSD License",
],
)
<commit_msg>Remove find_packages import, it's not in distutils<commit_after>
|
import os
from distutils.core import setup
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "DataShape",
version = "0.1.0",
author = "Continuum Analytics",
author_email = "blaze-dev@continuum.io",
description = ("A data description language."),
license = "BSD",
keywords = "data language",
url = "http://packages.python.org/datashape",
packages = ["datashape", "datashape.tests"],
long_description = read('README.md'),
classifiers = [
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Topic :: Software Development",
"License :: OSI Approved :: BSD License",
],
)
|
import os
from distutils.core import setup, find_packages
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "DataShape",
version = "0.1.0",
author = "Continuum Analytics",
author_email = "blaze-dev@continuum.io",
description = ("A data description language."),
license = "BSD",
keywords = "data language",
url = "http://packages.python.org/datashape",
packages = ["datashape", "datashape.test"],
long_description = read('README.md'),
classifiers = [
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Topic :: Software Development",
"License :: OSI Approved :: BSD License",
],
)
Remove find_packages import, it's not in distutilsimport os
from distutils.core import setup
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "DataShape",
version = "0.1.0",
author = "Continuum Analytics",
author_email = "blaze-dev@continuum.io",
description = ("A data description language."),
license = "BSD",
keywords = "data language",
url = "http://packages.python.org/datashape",
packages = ["datashape", "datashape.tests"],
long_description = read('README.md'),
classifiers = [
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Topic :: Software Development",
"License :: OSI Approved :: BSD License",
],
)
|
<commit_before>import os
from distutils.core import setup, find_packages
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "DataShape",
version = "0.1.0",
author = "Continuum Analytics",
author_email = "blaze-dev@continuum.io",
description = ("A data description language."),
license = "BSD",
keywords = "data language",
url = "http://packages.python.org/datashape",
packages = ["datashape", "datashape.test"],
long_description = read('README.md'),
classifiers = [
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Topic :: Software Development",
"License :: OSI Approved :: BSD License",
],
)
<commit_msg>Remove find_packages import, it's not in distutils<commit_after>import os
from distutils.core import setup
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "DataShape",
version = "0.1.0",
author = "Continuum Analytics",
author_email = "blaze-dev@continuum.io",
description = ("A data description language."),
license = "BSD",
keywords = "data language",
url = "http://packages.python.org/datashape",
packages = ["datashape", "datashape.tests"],
long_description = read('README.md'),
classifiers = [
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Topic :: Software Development",
"License :: OSI Approved :: BSD License",
],
)
|
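The one-line import change above is the entire fix: find_packages has never been part of distutils, it comes from setuptools. A quick way to confirm this from an interpreter (assumes setuptools is installed, which it almost always is):

try:
    from distutils.core import find_packages  # the removed import: raises ImportError
except ImportError:
    from setuptools import find_packages      # where find_packages actually lives
print(find_packages())  # e.g. ['datashape', 'datashape.tests'] when run from the repo root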
429d671502e44268b57fc4846ecefa15f8bf5a62
|
tests/cupy_tests/cuda_tests/test_profile.py
|
tests/cupy_tests/cuda_tests/test_profile.py
|
import unittest
import mock
from cupy import cuda
class TestProfile(unittest.TestCase):
def test_profile(self):
start_patch = mock.patch('cupy.cuda.profiler.start')
stop_patch = mock.patch('cupy.cuda.profiler.stop')
with start_patch as start, stop_patch as stop:
with cuda.profile():
pass
start.assert_called_once_with()
stop.assert_called_once_with()
|
Write test case for profiler
|
Write test case for profiler
|
Python
|
mit
|
ktnyt/chainer,wkentaro/chainer,pfnet/chainer,jnishi/chainer,delta2323/chainer,rezoo/chainer,cupy/cupy,chainer/chainer,jnishi/chainer,kashif/chainer,keisuke-umezawa/chainer,kiyukuta/chainer,niboshi/chainer,cupy/cupy,hvy/chainer,jnishi/chainer,anaruse/chainer,keisuke-umezawa/chainer,cupy/cupy,benob/chainer,hvy/chainer,ysekky/chainer,keisuke-umezawa/chainer,wkentaro/chainer,aonotas/chainer,tkerola/chainer,niboshi/chainer,hvy/chainer,niboshi/chainer,ktnyt/chainer,okuta/chainer,cupy/cupy,chainer/chainer,ktnyt/chainer,okuta/chainer,ronekko/chainer,keisuke-umezawa/chainer,hvy/chainer,niboshi/chainer,wkentaro/chainer,kikusu/chainer,kikusu/chainer,okuta/chainer,benob/chainer,jnishi/chainer,chainer/chainer,chainer/chainer,okuta/chainer,ktnyt/chainer,wkentaro/chainer
|
Write test case for profiler
|
import unittest
import mock
from cupy import cuda
class TestProfile(unittest.TestCase):
def test_profile(self):
start_patch = mock.patch('cupy.cuda.profiler.start')
stop_patch = mock.patch('cupy.cuda.profiler.stop')
with start_patch as start, stop_patch as stop:
with cuda.profile():
pass
start.assert_called_once_with()
stop.assert_called_once_with()
|
<commit_before><commit_msg>Write test case for profiler<commit_after>
|
import unittest
import mock
from cupy import cuda
class TestProfile(unittest.TestCase):
def test_profile(self):
start_patch = mock.patch('cupy.cuda.profiler.start')
stop_patch = mock.patch('cupy.cuda.profiler.stop')
with start_patch as start, stop_patch as stop:
with cuda.profile():
pass
start.assert_called_once_with()
stop.assert_called_once_with()
|
Write test case for profilerimport unittest
import mock
from cupy import cuda
class TestProfile(unittest.TestCase):
def test_profile(self):
start_patch = mock.patch('cupy.cuda.profiler.start')
stop_patch = mock.patch('cupy.cuda.profiler.stop')
with start_patch as start, stop_patch as stop:
with cuda.profile():
pass
start.assert_called_once_with()
stop.assert_called_once_with()
|
<commit_before><commit_msg>Write test case for profiler<commit_after>import unittest
import mock
from cupy import cuda
class TestProfile(unittest.TestCase):
def test_profile(self):
start_patch = mock.patch('cupy.cuda.profiler.start')
stop_patch = mock.patch('cupy.cuda.profiler.stop')
with start_patch as start, stop_patch as stop:
with cuda.profile():
pass
start.assert_called_once_with()
stop.assert_called_once_with()
|
|
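The patch-and-assert shape of that test generalises beyond CUDA. The same pattern against a standard-library function, runnable without cupy (using unittest.mock rather than the standalone mock backport imported above):

from unittest import mock
import os

with mock.patch('os.getcwd') as getcwd:
    getcwd.return_value = '/nowhere'
    assert os.getcwd() == '/nowhere'   # the patch is in effect inside the block
getcwd.assert_called_once_with()       # the call record survives after unpatching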
6ea2029fd85a90b256144dee7524fa7885a009bd
|
pdc/apps/package/migrations/0011_auto_20160219_0915.py
|
pdc/apps/package/migrations/0011_auto_20160219_0915.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def add_new_image_type_and_format(apps, schema_editor):
formats = [
'tar.gz',
'tar.xz'
]
ImageFormat = apps.get_model('package', 'ImageFormat')
for format in formats:
ImageFormat.objects.get_or_create(name=format)
ImageType = apps.get_model('package', 'ImageType')
ImageType.objects.get_or_create(name='docker')
class Migration(migrations.Migration):
dependencies = [
('package', '0010_auto_20160218_1339'),
]
operations = [
migrations.RunPython(add_new_image_type_and_format)
]
|
Add 2 image formats and 1 image type.
|
Add 2 image formats and 1 image type.
Add 2 image formats 'tar.gz' and 'tar.xz'.
Add 1 image type 'docker'.
JIRA: PDC-1341
|
Python
|
mit
|
product-definition-center/product-definition-center,release-engineering/product-definition-center,pombredanne/product-definition-center,release-engineering/product-definition-center,product-definition-center/product-definition-center,product-definition-center/product-definition-center,pombredanne/product-definition-center,release-engineering/product-definition-center,product-definition-center/product-definition-center,pombredanne/product-definition-center,pombredanne/product-definition-center,release-engineering/product-definition-center
|
Add 2 image formats and 1 image type.
Add 2 image formats 'tar.gz' and 'tar.xz'.
Add 1 image type 'docker'.
JIRA: PDC-1341
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def add_new_image_type_and_format(apps, schema_editor):
formats = [
'tar.gz',
'tar.xz'
]
ImageFormat = apps.get_model('package', 'ImageFormat')
for format in formats:
ImageFormat.objects.get_or_create(name=format)
ImageType = apps.get_model('package', 'ImageType')
ImageType.objects.get_or_create(name='docker')
class Migration(migrations.Migration):
dependencies = [
('package', '0010_auto_20160218_1339'),
]
operations = [
migrations.RunPython(add_new_image_type_and_format)
]
|
<commit_before><commit_msg>Add 2 image formats and 1 image type.
Add 2 image formats 'tar.gz' and 'tar.xz'.
Add 1 image type 'docker'.
JIRA: PDC-1341<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def add_new_image_type_and_format(apps, schema_editor):
formats = [
'tar.gz',
'tar.xz'
]
ImageFormat = apps.get_model('package', 'ImageFormat')
for format in formats:
ImageFormat.objects.get_or_create(name=format)
ImageType = apps.get_model('package', 'ImageType')
ImageType.objects.get_or_create(name='docker')
class Migration(migrations.Migration):
dependencies = [
('package', '0010_auto_20160218_1339'),
]
operations = [
migrations.RunPython(add_new_image_type_and_format)
]
|
Add 2 image formats and 1 image type.
Add 2 image formats 'tar.gz' and 'tar.xz'.
Add 1 image type 'docker'.
JIRA: PDC-1341# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def add_new_image_type_and_format(apps, schema_editor):
formats = [
'tar.gz',
'tar.xz'
]
ImageFormat = apps.get_model('package', 'ImageFormat')
for format in formats:
ImageFormat.objects.get_or_create(name=format)
ImageType = apps.get_model('package', 'ImageType')
ImageType.objects.get_or_create(name='docker')
class Migration(migrations.Migration):
dependencies = [
('package', '0010_auto_20160218_1339'),
]
operations = [
migrations.RunPython(add_new_image_type_and_format)
]
|
<commit_before><commit_msg>Add 2 image formats and 1 image type.
Add 2 image formats 'tar.gz' and 'tar.xz'.
Add 1 image type 'docker'.
JIRA: PDC-1341<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def add_new_image_type_and_format(apps, schema_editor):
formats = [
'tar.gz',
'tar.xz'
]
ImageFormat = apps.get_model('package', 'ImageFormat')
for format in formats:
ImageFormat.objects.get_or_create(name=format)
ImageType = apps.get_model('package', 'ImageType')
ImageType.objects.get_or_create(name='docker')
class Migration(migrations.Migration):
dependencies = [
('package', '0010_auto_20160218_1339'),
]
operations = [
migrations.RunPython(add_new_image_type_and_format)
]
|
|
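get_or_create keeps the data migration idempotent, so re-running it cannot duplicate rows. One thing the commit does not include is a reverse function; a hypothetical sketch of what one could look like so the migration becomes reversible (the function name mirrors the forward one but is not part of the original commit):

def remove_new_image_type_and_format(apps, schema_editor):
    # Mirror of the forward migration; note this also deletes rows that
    # happened to exist before the migration ran.
    ImageFormat = apps.get_model('package', 'ImageFormat')
    ImageFormat.objects.filter(name__in=['tar.gz', 'tar.xz']).delete()
    ImageType = apps.get_model('package', 'ImageType')
    ImageType.objects.filter(name='docker').delete()

# operations = [
#     migrations.RunPython(add_new_image_type_and_format,
#                          remove_new_image_type_and_format),
# ]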
af8641f1ae8e03508c66954774ac0eac41bf1de8
|
soja_find.py
|
soja_find.py
|
import fnmatch
import os
def find_files(path, types):
"""
Find files under path that match the given patterns.
:param path: Directory tree to search.
:param types: fnmatch-style glob patterns to match against filenames.
:return: List of matching file paths.
"""
matches = []
for root, dirnames, filenames in os.walk(path):
for extensions in types:
for filename in fnmatch.filter(filenames, extensions):
matches.append(os.path.join(root, filename))
return matches
if __name__ == '__main__':
my_python_files = find_files('./', ['*.py'])
print(my_python_files)
|
Add soja find, an easy file-finding tool.
|
Add soja find, an easy file-finding tool.
|
Python
|
mit
|
iTaa/soja_box
|
Add soja find, an easy file-finding tool.
|
import fnmatch
import os
def find_files(path, types):
"""
Find files under path that match the given patterns.
:param path: Directory tree to search.
:param types: fnmatch-style glob patterns to match against filenames.
:return: List of matching file paths.
"""
matches = []
for root, dirnames, filenames in os.walk(path):
for extensions in types:
for filename in fnmatch.filter(filenames, extensions):
matches.append(os.path.join(root, filename))
return matches
if __name__ == '__main__':
my_python_files = find_files('./', ['*.py'])
print(my_python_files)
|
<commit_before><commit_msg>Add soja find, an easy file-finding tool.<commit_after>
|
import fnmatch
import os
def find_files(path, types):
"""
Find files under path that match the given patterns.
:param path: Directory tree to search.
:param types: fnmatch-style glob patterns to match against filenames.
:return: List of matching file paths.
"""
matches = []
for root, dirnames, filenames in os.walk(path):
for extensions in types:
for filename in fnmatch.filter(filenames, extensions):
matches.append(os.path.join(root, filename))
return matches
if __name__ == '__main__':
my_python_files = find_files('./', ['*.py'])
print(my_python_files)
|
Add soja find, an easy file-finding tool.import fnmatch
import os
def find_files(path, types):
"""
Find files under path that match the given patterns.
:param path: Directory tree to search.
:param types: fnmatch-style glob patterns to match against filenames.
:return: List of matching file paths.
"""
matches = []
for root, dirnames, filenames in os.walk(path):
for extensions in types:
for filename in fnmatch.filter(filenames, extensions):
matches.append(os.path.join(root, filename))
return matches
if __name__ == '__main__':
my_python_files = find_files('./', ['*.py'])
print(my_python_files)
|
<commit_before><commit_msg>Add soja find, an easy file-finding tool.<commit_after>import fnmatch
import os
def find_files(path, types):
"""
Find files under path that match the given patterns.
:param path: Directory tree to search.
:param types: fnmatch-style glob patterns to match against filenames.
:return: List of matching file paths.
"""
matches = []
for root, dirnames, filenames in os.walk(path):
for extensions in types:
for filename in fnmatch.filter(filenames, extensions):
matches.append(os.path.join(root, filename))
return matches
if __name__ == '__main__':
my_python_files = find_files('./', ['*.py'])
print(my_python_files)
|
|
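A usage sketch for the helper above; it assumes soja_find.py is importable from the working directory, and the glob patterns are arbitrary examples:

from soja_find import find_files

# Collect Python and Markdown files anywhere under the current directory.
for match in find_files('.', ['*.py', '*.md']):
    print(match)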
80e43fe71922137dd9760cebc8e15dd82cfb04b6
|
tests/providers/test_godaddy.py
|
tests/providers/test_godaddy.py
|
# Test for one implementation of the interface
from unittest import TestCase
from lexicon.providers.godaddy import Provider
from integration_tests import IntegrationTests
import pytest
# Hook into testing framework by inheriting unittest.TestCase and reuse
# the tests which *each and every* implementation of the interface must
# pass, by inheritance from integration_tests.IntegrationTests
class GoDaddyProviderTests(TestCase, IntegrationTests):
Provider = Provider
provider_name = 'godaddy'
# Domain existing in the GoDaddy OTE server at the time of the test (17/06/2017)
domain = '000.biz'
def _filter_headers(self):
return ['Authorization']
def _test_engine_overrides(self):
overrides = super(GoDaddyProviderTests, self)._test_engine_overrides()
# Use the OTE server, which allows tests without account
overrides.update({'api_endpoint': 'https://api.ote-godaddy.com/v1'})
return overrides
def _test_options(self):
cmd_options = super(GoDaddyProviderTests, self)._test_options()
# This token is public,
# and used on https://developer.godaddy.com to test against OTE server
cmd_options.update({
'auth_key': 'UzQxLikm_46KxDFnbjN7cQjmw6wocia',
'auth_secret': '46L26ydpkwMaKZV6uVdDWe'
})
return cmd_options
@pytest.mark.skip(reason="GoDaddy does not use id in their DNS records")
def test_Provider_when_calling_delete_record_by_identifier_should_remove_record(self):
return
|
Configure integration tests for GoDaddy provider
|
Configure integration tests for GoDaddy provider
|
Python
|
mit
|
AnalogJ/lexicon,tnwhitwell/lexicon,tnwhitwell/lexicon,AnalogJ/lexicon
|
Configure integration tests for GoDaddy provider
|
# Test for one implementation of the interface
from unittest import TestCase
from lexicon.providers.godaddy import Provider
from integration_tests import IntegrationTests
import pytest
# Hook into testing framework by inheriting unittest.TestCase and reuse
# the tests which *each and every* implementation of the interface must
# pass, by inheritance from integration_tests.IntegrationTests
class GoDaddyProviderTests(TestCase, IntegrationTests):
Provider = Provider
provider_name = 'godaddy'
# Domain existing in the GoDaddy OTE server at the time of the test (17/06/2017)
domain = '000.biz'
def _filter_headers(self):
return ['Authorization']
def _test_engine_overrides(self):
overrides = super(GoDaddyProviderTests, self)._test_engine_overrides()
# Use the OTE server, which allows tests without account
overrides.update({'api_endpoint': 'https://api.ote-godaddy.com/v1'})
return overrides
def _test_options(self):
cmd_options = super(GoDaddyProviderTests, self)._test_options()
# This token is public,
# and used on https://developer.godaddy.com to test against OTE server
cmd_options.update({
'auth_key': 'UzQxLikm_46KxDFnbjN7cQjmw6wocia',
'auth_secret': '46L26ydpkwMaKZV6uVdDWe'
})
return cmd_options
@pytest.mark.skip(reason="GoDaddy does not use id in their DNS records")
def test_Provider_when_calling_delete_record_by_identifier_should_remove_record(self):
return
|
<commit_before><commit_msg>Configure integration tests for GoDaddy provider<commit_after>
|
# Test for one implementation of the interface
from unittest import TestCase
from lexicon.providers.godaddy import Provider
from integration_tests import IntegrationTests
import pytest
# Hook into testing framework by inheriting unittest.TestCase and reuse
# the tests which *each and every* implementation of the interface must
# pass, by inheritance from integration_tests.IntegrationTests
class GoDaddyProviderTests(TestCase, IntegrationTests):
Provider = Provider
provider_name = 'godaddy'
# Domain existing in the GoDaddy OTE server at the time of the test (17/06/2017)
domain = '000.biz'
def _filter_headers(self):
return ['Authorization']
def _test_engine_overrides(self):
overrides = super(GoDaddyProviderTests, self)._test_engine_overrides()
# Use the OTE server, which allows tests without account
overrides.update({'api_endpoint': 'https://api.ote-godaddy.com/v1'})
return overrides
def _test_options(self):
cmd_options = super(GoDaddyProviderTests, self)._test_options()
# This token is public,
# and used on https://developer.godaddy.com to test against OTE server
cmd_options.update({
'auth_key': 'UzQxLikm_46KxDFnbjN7cQjmw6wocia',
'auth_secret': '46L26ydpkwMaKZV6uVdDWe'
})
return cmd_options
@pytest.mark.skip(reason="GoDaddy does not use id in their DNS records")
def test_Provider_when_calling_delete_record_by_identifier_should_remove_record(self):
return
|
Configure integration tests for GoDaddy provider# Test for one implementation of the interface
from unittest import TestCase
from lexicon.providers.godaddy import Provider
from integration_tests import IntegrationTests
import pytest
# Hook into testing framework by inheriting unittest.TestCase and reuse
# the tests which *each and every* implementation of the interface must
# pass, by inheritance from integration_tests.IntegrationTests
class GoDaddyProviderTests(TestCase, IntegrationTests):
Provider = Provider
provider_name = 'godaddy'
# Domain existing in the GoDaddy OTE server at the time of the test (17/06/2017)
domain = '000.biz'
def _filter_headers(self):
return ['Authorization']
def _test_engine_overrides(self):
overrides = super(GoDaddyProviderTests, self)._test_engine_overrides()
# Use the OTE server, which allows tests without account
overrides.update({'api_endpoint': 'https://api.ote-godaddy.com/v1'})
return overrides
def _test_options(self):
cmd_options = super(GoDaddyProviderTests, self)._test_options()
# This token is public,
# and used on https://developer.godaddy.com to test against OTE server
cmd_options.update({
'auth_key': 'UzQxLikm_46KxDFnbjN7cQjmw6wocia',
'auth_secret': '46L26ydpkwMaKZV6uVdDWe'
})
return cmd_options
@pytest.mark.skip(reason="GoDaddy does not use id in their DNS records")
def test_Provider_when_calling_delete_record_by_identifier_should_remove_record(self):
return
|
<commit_before><commit_msg>Configure integration tests for GoDaddy provider<commit_after># Test for one implementation of the interface
from unittest import TestCase
from lexicon.providers.godaddy import Provider
from integration_tests import IntegrationTests
import pytest
# Hook into testing framework by inheriting unittest.TestCase and reuse
# the tests which *each and every* implementation of the interface must
# pass, by inheritance from integration_tests.IntegrationTests
class GoDaddyProviderTests(TestCase, IntegrationTests):
Provider = Provider
provider_name = 'godaddy'
# Domain existing in the GoDaddy OTE server at the time of the test (17/06/2017)
domain = '000.biz'
def _filter_headers(self):
return ['Authorization']
def _test_engine_overrides(self):
overrides = super(GoDaddyProviderTests, self)._test_engine_overrides()
# Use the OTE server, which allows tests without account
overrides.update({'api_endpoint': 'https://api.ote-godaddy.com/v1'})
return overrides
def _test_options(self):
cmd_options = super(GoDaddyProviderTests, self)._test_options()
# This token is public,
# and used on https://developer.godaddy.com to test against OTE server
cmd_options.update({
'auth_key': 'UzQxLikm_46KxDFnbjN7cQjmw6wocia',
'auth_secret': '46L26ydpkwMaKZV6uVdDWe'
})
return cmd_options
@pytest.mark.skip(reason="GoDaddy does not use id in their DNS records")
def test_Provider_when_calling_delete_record_by_identifier_should_remove_record(self):
return
|
|
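The skip marker used at the bottom of the test class works on any pytest-collected test, not just this suite. A minimal standalone example (save as a test file and run with pytest; the reason string is illustrative):

import pytest

@pytest.mark.skip(reason="provider exposes no per-record identifiers")
def test_delete_record_by_identifier():
    raise AssertionError  # never reached; pytest reports the test as skipped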
07fdaf648ac290438c91413a69fc77235bf691de
|
py/predict-the-winner.py
|
py/predict-the-winner.py
|
class Solution(object):
def PredictTheWinner(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
dp = dict()
def top_down(start, end):
if start == end:
dp[start, end] = 0
elif (start, end) not in dp:
dp[start, end] = max(nums[start] - top_down(start + 1, end), nums[end - 1] - top_down(start, end - 1))
return dp[start, end]
return top_down(0, len(nums)) >= 0
|
Add py solution for 486. Predict the Winner
|
Add py solution for 486. Predict the Winner
486. Predict the Winner: https://leetcode.com/problems/predict-the-winner/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 486. Predict the Winner
486. Predict the Winner: https://leetcode.com/problems/predict-the-winner/
|
class Solution(object):
def PredictTheWinner(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
dp = dict()
def top_down(start, end):
if start == end:
dp[start, end] = 0
elif (start, end) not in dp:
dp[start, end] = max(nums[start] - top_down(start + 1, end), nums[end - 1] - top_down(start, end - 1))
return dp[start, end]
return top_down(0, len(nums)) >= 0
|
<commit_before><commit_msg>Add py solution for 486. Predict the Winner
486. Predict the Winner: https://leetcode.com/problems/predict-the-winner/<commit_after>
|
class Solution(object):
def PredictTheWinner(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
dp = dict()
def top_down(start, end):
if start == end:
dp[start, end] = 0
elif (start, end) not in dp:
dp[start, end] = max(nums[start] - top_down(start + 1, end), nums[end - 1] - top_down(start, end - 1))
return dp[start, end]
return top_down(0, len(nums)) >= 0
|
Add py solution for 486. Predict the Winner
486. Predict the Winner: https://leetcode.com/problems/predict-the-winner/class Solution(object):
def PredictTheWinner(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
dp = dict()
def top_down(start, end):
if start == end:
dp[start, end] = 0
elif (start, end) not in dp:
dp[start, end] = max(nums[start] - top_down(start + 1, end), nums[end - 1] - top_down(start, end - 1))
return dp[start, end]
return top_down(0, len(nums)) >= 0
|
<commit_before><commit_msg>Add py solution for 486. Predict the Winner
486. Predict the Winner: https://leetcode.com/problems/predict-the-winner/<commit_after>class Solution(object):
def PredictTheWinner(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
dp = dict()
def top_down(start, end):
if start == end:
dp[start, end] = 0
elif (start, end) not in dp:
dp[start, end] = max(nums[start] - top_down(start + 1, end), nums[end - 1] - top_down(start, end - 1))
return dp[start, end]
return top_down(0, len(nums)) >= 0
|
|
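A quick check of the minimax DP above on the two examples from the problem statement; top_down returns the score margin the current player can guarantee, so a non-negative margin means player 1 wins. This assumes the Solution class as defined in the commit is in scope:

s = Solution()
print(s.PredictTheWinner([1, 5, 2]))       # False: player 1 ends 2 points behind
print(s.PredictTheWinner([1, 5, 233, 7]))  # True: taking 1 first guarantees 233 later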
5a431f7b0cc9eba3f8a68650aa41c3f1e31520c8
|
s3stash/stash_single_mediajson.py
|
s3stash/stash_single_mediajson.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys, os
import argparse
import logging
import json
from s3stash.nxstash_mediajson import NuxeoStashMediaJson
def main(argv=None):
parser = argparse.ArgumentParser(
description='Create and stash media.json file for a nuxeo object')
parser.add_argument('path', help="Nuxeo document path")
parser.add_argument(
'--bucket',
default='static.ucldc.cdlib.org/media_json',
help="S3 bucket name")
parser.add_argument('--region', default='us-east-1', help='AWS region')
parser.add_argument(
'--pynuxrc', default='~/.pynuxrc', help="rc file for use by pynux")
if argv is None:
argv = parser.parse_args()
# logging
# FIXME would like to name log with nuxeo UID
filename = os.path.basename(argv.path)
logfile = "logs/mediajson-{}.log".format(filename)
print "LOG:\t{}".format(logfile)
logging.basicConfig(
filename=logfile,
level=logging.INFO,
format='%(asctime)s (%(name)s) [%(levelname)s]: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p')
logger = logging.getLogger(__name__)
# create and stash media.json
nxstash = NuxeoStashMediaJson(
argv.path,
argv.bucket,
argv.region,
argv.pynuxrc,
True)
report = nxstash.nxstashref()
# output report to json file
reportfile = "reports/mediajson-{}.json".format(filename)
with open(reportfile, 'w') as f:
json.dump(report, f, sort_keys=True, indent=4)
# parse report to give basic stats
print "REPORT:\t{}".format(reportfile)
print "SUMMARY:"
print "stashed:\t{}".format(report['stashed'])
print "\nDone."
if __name__ == "__main__":
sys.exit(main())
|
Add script to create and stash a single media.json file
|
Add script to create and stash a single media.json file
|
Python
|
bsd-3-clause
|
barbarahui/nuxeo-calisphere,barbarahui/nuxeo-calisphere
|
Add script to create and stash a single media.json file
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys, os
import argparse
import logging
import json
from s3stash.nxstash_mediajson import NuxeoStashMediaJson
def main(argv=None):
parser = argparse.ArgumentParser(
description='Create and stash media.json file for a nuxeo object')
parser.add_argument('path', help="Nuxeo document path")
parser.add_argument(
'--bucket',
default='static.ucldc.cdlib.org/media_json',
help="S3 bucket name")
parser.add_argument('--region', default='us-east-1', help='AWS region')
parser.add_argument(
'--pynuxrc', default='~/.pynuxrc', help="rc file for use by pynux")
if argv is None:
argv = parser.parse_args()
# logging
# FIXME would like to name log with nuxeo UID
filename = os.path.basename(argv.path)
logfile = "logs/mediajson-{}.log".format(filename)
print "LOG:\t{}".format(logfile)
logging.basicConfig(
filename=logfile,
level=logging.INFO,
format='%(asctime)s (%(name)s) [%(levelname)s]: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p')
logger = logging.getLogger(__name__)
# create and stash media.json
nxstash = NuxeoStashMediaJson(
argv.path,
argv.bucket,
argv.region,
argv.pynuxrc,
True)
report = nxstash.nxstashref()
# output report to json file
reportfile = "reports/mediajson-{}.json".format(filename)
with open(reportfile, 'w') as f:
json.dump(report, f, sort_keys=True, indent=4)
# parse report to give basic stats
print "REPORT:\t{}".format(reportfile)
print "SUMMARY:"
print "stashed:\t{}".format(report['stashed'])
print "\nDone."
if __name__ == "__main__":
sys.exit(main())
|
<commit_before><commit_msg>Add script to create and stash a single media.json file<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys, os
import argparse
import logging
import json
from s3stash.nxstash_mediajson import NuxeoStashMediaJson
def main(argv=None):
parser = argparse.ArgumentParser(
description='Create and stash media.json file for a nuxeo object')
parser.add_argument('path', help="Nuxeo document path")
parser.add_argument(
'--bucket',
default='static.ucldc.cdlib.org/media_json',
help="S3 bucket name")
parser.add_argument('--region', default='us-east-1', help='AWS region')
parser.add_argument(
'--pynuxrc', default='~/.pynuxrc', help="rc file for use by pynux")
if argv is None:
argv = parser.parse_args()
# logging
# FIXME would like to name log with nuxeo UID
filename = os.path.basename(argv.path)
logfile = "logs/mediajson-{}.log".format(filename)
print "LOG:\t{}".format(logfile)
logging.basicConfig(
filename=logfile,
level=logging.INFO,
format='%(asctime)s (%(name)s) [%(levelname)s]: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p')
logger = logging.getLogger(__name__)
# create and stash media.json
nxstash = NuxeoStashMediaJson(
argv.path,
argv.bucket,
argv.region,
argv.pynuxrc,
True)
report = nxstash.nxstashref()
# output report to json file
reportfile = "reports/mediajson-{}.json".format(filename)
with open(reportfile, 'w') as f:
json.dump(report, f, sort_keys=True, indent=4)
# parse report to give basic stats
print "REPORT:\t{}".format(reportfile)
print "SUMMARY:"
print "stashed:\t{}".format(report['stashed'])
print "\nDone."
if __name__ == "__main__":
sys.exit(main())
|
Add script to create and stash a single media.json file#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys, os
import argparse
import logging
import json
from s3stash.nxstash_mediajson import NuxeoStashMediaJson
def main(argv=None):
parser = argparse.ArgumentParser(
description='Create and stash media.json file for a nuxeo object')
parser.add_argument('path', help="Nuxeo document path")
parser.add_argument(
'--bucket',
default='static.ucldc.cdlib.org/media_json',
help="S3 bucket name")
parser.add_argument('--region', default='us-east-1', help='AWS region')
parser.add_argument(
'--pynuxrc', default='~/.pynuxrc', help="rc file for use by pynux")
if argv is None:
argv = parser.parse_args()
# logging
# FIXME would like to name log with nuxeo UID
filename = os.path.basename(argv.path)
logfile = "logs/mediajson-{}.log".format(filename)
print "LOG:\t{}".format(logfile)
logging.basicConfig(
filename=logfile,
level=logging.INFO,
format='%(asctime)s (%(name)s) [%(levelname)s]: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p')
logger = logging.getLogger(__name__)
# create and stash media.json
nxstash = NuxeoStashMediaJson(
argv.path,
argv.bucket,
argv.region,
argv.pynuxrc,
True)
report = nxstash.nxstashref()
# output report to json file
reportfile = "reports/mediajson-{}.json".format(filename)
with open(reportfile, 'w') as f:
json.dump(report, f, sort_keys=True, indent=4)
# parse report to give basic stats
print "REPORT:\t{}".format(reportfile)
print "SUMMARY:"
print "stashed:\t{}".format(report['stashed'])
print "\nDone."
if __name__ == "__main__":
sys.exit(main())
|
<commit_before><commit_msg>Add script to create and stash a single media.json file<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys, os
import argparse
import logging
import json
from s3stash.nxstash_mediajson import NuxeoStashMediaJson
def main(argv=None):
parser = argparse.ArgumentParser(
description='Create and stash media.json file for a nuxeo object')
parser.add_argument('path', help="Nuxeo document path")
parser.add_argument(
'--bucket',
default='static.ucldc.cdlib.org/media_json',
help="S3 bucket name")
parser.add_argument('--region', default='us-east-1', help='AWS region')
parser.add_argument(
'--pynuxrc', default='~/.pynuxrc', help="rc file for use by pynux")
if argv is None:
argv = parser.parse_args()
# logging
# FIXME would like to name log with nuxeo UID
filename = os.path.basename(argv.path)
logfile = "logs/mediajson-{}.log".format(filename)
print "LOG:\t{}".format(logfile)
logging.basicConfig(
filename=logfile,
level=logging.INFO,
format='%(asctime)s (%(name)s) [%(levelname)s]: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p')
logger = logging.getLogger(__name__)
# create and stash media.json
nxstash = NuxeoStashMediaJson(
argv.path,
argv.bucket,
argv.region,
argv.pynuxrc,
True)
report = nxstash.nxstashref()
# output report to json file
reportfile = "reports/mediajson-{}.json".format(filename)
with open(reportfile, 'w') as f:
json.dump(report, f, sort_keys=True, indent=4)
# parse report to give basic stats
print "REPORT:\t{}".format(reportfile)
print "SUMMARY:"
print "stashed:\t{}".format(report['stashed'])
print "\nDone."
if __name__ == "__main__":
sys.exit(main())
|
|
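Worth noting about the script above: it is Python 2 code (bare print statements), and only the Nuxeo path is required, with every other option falling back to a default. A small Python 3 sketch of how those argparse defaults resolve; the positional path value here is made up for illustration:

import argparse

# Mirrors the record's CLI surface.
parser = argparse.ArgumentParser(
    description='Create and stash media.json file for a nuxeo object')
parser.add_argument('path')
parser.add_argument('--bucket', default='static.ucldc.cdlib.org/media_json')
parser.add_argument('--region', default='us-east-1')
parser.add_argument('--pynuxrc', default='~/.pynuxrc')

args = parser.parse_args(['asset-library/UCX/example-object'])  # hypothetical path
print(args.bucket)  # static.ucldc.cdlib.org/media_json (default applied)
print(args.path)    # asset-library/UCX/example-object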
cc21429b99c8dc6a92487081dc8422b16abad85f
|
zerver/management/commands/dump_messages.py
|
zerver/management/commands/dump_messages.py
|
from optparse import make_option
from django.core.management.base import BaseCommand
from zerver.models import Message, Realm, Stream, Recipient
import datetime
import time
class Command(BaseCommand):
default_cutoff = time.time() - 60 * 60 * 24 * 30 # 30 days.
option_list = BaseCommand.option_list + (
make_option('--domain',
dest='domain',
type='str',
help='The domain whose public streams you want to dump.'),
make_option('--since',
dest='since',
type='int',
default=default_cutoff,
help='The time in epoch since from which to start the dump.')
)
def handle(self, *args, **options):
realm = Realm.objects.get(domain=options["domain"])
streams = Stream.objects.filter(realm=realm, invite_only=False)
recipients = Recipient.objects.filter(
type=Recipient.STREAM, type_id__in=[stream.id for stream in streams])
cutoff = datetime.datetime.fromtimestamp(options["since"])
messages = Message.objects.filter(pub_date__gt=cutoff, recipient__in=recipients)
for message in messages:
print message.to_dict(False)
|
Add a management command to dump all messages on public streams for a realm.
|
Add a management command to dump all messages on public streams for a realm.
(imported from commit f4f8bfece408b466af4db93b2da15cf69b68e0a3)
|
Python
|
apache-2.0
|
hengqujushi/zulip,stamhe/zulip,wweiradio/zulip,dattatreya303/zulip,ashwinirudrappa/zulip,ikasumiwt/zulip,DazWorrall/zulip,hj3938/zulip,praveenaki/zulip,hackerkid/zulip,mahim97/zulip,so0k/zulip,zofuthan/zulip,babbage/zulip,saitodisse/zulip,joyhchen/zulip,jackrzhang/zulip,Suninus/zulip,dattatreya303/zulip,mohsenSy/zulip,rht/zulip,calvinleenyc/zulip,proliming/zulip,synicalsyntax/zulip,dnmfarrell/zulip,arpitpanwar/zulip,Suninus/zulip,TigorC/zulip,hackerkid/zulip,ipernet/zulip,hj3938/zulip,praveenaki/zulip,hengqujushi/zulip,ufosky-server/zulip,Gabriel0402/zulip,Qgap/zulip,tbutter/zulip,grave-w-grave/zulip,bitemyapp/zulip,xuxiao/zulip,JPJPJPOPOP/zulip,samatdav/zulip,krtkmj/zulip,arpith/zulip,dxq-git/zulip,seapasulli/zulip,eeshangarg/zulip,mansilladev/zulip,kou/zulip,joshisa/zulip,Cheppers/zulip,easyfmxu/zulip,MayB/zulip,jimmy54/zulip,seapasulli/zulip,reyha/zulip,samatdav/zulip,akuseru/zulip,lfranchi/zulip,Batterfii/zulip,he15his/zulip,so0k/zulip,LAndreas/zulip,technicalpickles/zulip,hayderimran7/zulip,dnmfarrell/zulip,willingc/zulip,SmartPeople/zulip,ryansnowboarder/zulip,mahim97/zulip,JanzTam/zulip,voidException/zulip,kaiyuanheshang/zulip,gigawhitlocks/zulip,amallia/zulip,tommyip/zulip,Batterfii/zulip,zachallaun/zulip,tommyip/zulip,yuvipanda/zulip,hj3938/zulip,tdr130/zulip,aakash-cr7/zulip,showell/zulip,jainayush975/zulip,johnny9/zulip,themass/zulip,jackrzhang/zulip,babbage/zulip,LeeRisk/zulip,Drooids/zulip,zacps/zulip,tdr130/zulip,zacps/zulip,huangkebo/zulip,zulip/zulip,seapasulli/zulip,xuxiao/zulip,lfranchi/zulip,kokoar/zulip,peiwei/zulip,isht3/zulip,hayderimran7/zulip,dnmfarrell/zulip,willingc/zulip,SmartPeople/zulip,ryansnowboarder/zulip,mahim97/zulip,JanzTam/zulip,voidException/zulip,kaiyuanheshang/zulip,gigawhitlocks/zulip,amallia/zulip,tommyip/zulip,Batterfii/zulip,zachallaun/zulip,tommyip/zulip,isht3/zulip,babbage/zulip,bluesea/zulip,tiansiyuan/zulip,levixie/zulip,so0k/zulip,souravbadami/zulip,peguin40/zulip,hustlzp/zulip,schatt/zulip,levixie/zulip,xuxiao/zulip,susansls/zulip,christi3k/zulip,tiansiyuan/zulip,so0k/zulip,zhaoweigg/zulip,seapasulli/zulip,mansilladev/zulip,aliceriot/zulip,rishig/zulip,dawran6/zulip,huangkebo/zulip,lfranchi/zulip,kaiyuanheshang/zulip,willingc/zulip,Batterfii/zulip,johnny9/zulip,ahmadassaf/zulip,technicalpickles/zulip,ufosky-server/zulip,aakash-cr7/zulip,blaze225/zulip,eastlhu/zulip,bssrdf/zulip,wavelets/zulip,bssrdf/zulip,amanharitsh123/zulip,EasonYi/zulip,souravbadami/zulip,Galexrt/zulip,jackrzhang/zulip,itnihao/zulip,aakash-cr7/zulip,synicalsyntax/zulip,Suninus/zulip,suxinde2009/zulip,lfranchi/zulip,PhilSk/zulip,suxinde2009/zulip,isht3/zulip,PaulPetring/zulip,kokoar/zulip,ashwinirudrappa/zulip,timabbott/zulip,he15his/zulip,glovebx/zulip,littledogboy/zulip,ApsOps/zulip,jphilipsen05/zulip,hj3938/zulip,jrowan/zulip,Jianchun1/zulip,luyifan/zulip,ryansnowboarder/zulip,qq1012803704/zulip,eastlhu/zulip,ahmadassaf/zulip,KingxBanana/zulip,themass/zulip,kou/zulip,wangdeshui/zulip,itnihao/zulip,aps-sids/zulip,akuseru/zulip,saitodisse/zulip,JanzTam/zulip,umkay/zulip,tbutter/zulip,Suninus/zulip,Drooids/zulip,KingxBanana/zulip,itnihao/zulip,joshisa/zulip,adnanh/zulip,proliming/zulip,TigorC/zulip,swinghu/zulip,easyfmxu/zulip,ericzhou2008/zulip,qq1012803704/zulip,vakila/zulip,zofuthan/zulip,stamhe/zulip,shubhamdhama/zulip,arpitpanwar/zulip,andersk/zulip,AZtheAsian/zulip,zacps/zulip,Juanvulcano/zulip,dwrpayne/zulip,peguin40/zulip,hayderimran7/zulip,atomic-labs/zulip,Jianchun1/zulip,ahmadassaf/zulip,dattatreya303/zulip,rishig/zulip,kaiyuanheshang/zulip,sup95/zulip,paxapy/zulip,bssrdf/zulip,hayderimran7/zulip,kaiyuanheshang/zulip,gigawhitlocks/zulip,ahmadassaf/zulip,swinghu/zulip,bitemyapp/zulip,krtkmj/zulip,sonali0901/zulip,bastianh/zulip,suxinde2009/zulip,jeffcao/zulip,dhcrzf/zulip,jeffcao/zulip,vakila/zulip,nicholasbs/zulip,calvinleenyc/zulip,LAndreas/zulip,johnnygaddarr/zulip,KJin99/zulip,rht/zulip,eeshangarg/zulip,alliejones/zulip,christi3k/zulip,karamcnair/zulip,tbutter/zulip,atomic-labs/zulip,brainwane/zulip,willingc/zulip
|
Add a management command to dump all messages on public streams for a realm.
(imported from commit f4f8bfece408b466af4db93b2da15cf69b68e0a3)
|
from optparse import make_option
from django.core.management.base import BaseCommand
from zerver.models import Message, Realm, Stream, Recipient
import datetime
import time
class Command(BaseCommand):
default_cutoff = time.time() - 60 * 60 * 24 * 30 # 30 days.
option_list = BaseCommand.option_list + (
make_option('--domain',
dest='domain',
type='str',
help='The domain whose public streams you want to dump.'),
make_option('--since',
dest='since',
type='int',
default=default_cutoff,
help='The time in epoch since from which to start the dump.')
)
def handle(self, *args, **options):
realm = Realm.objects.get(domain=options["domain"])
streams = Stream.objects.filter(realm=realm, invite_only=False)
recipients = Recipient.objects.filter(
type=Recipient.STREAM, type_id__in=[stream.id for stream in streams])
cutoff = datetime.datetime.fromtimestamp(options["since"])
messages = Message.objects.filter(pub_date__gt=cutoff, recipient__in=recipients)
for message in messages:
print message.to_dict(False)
|
<commit_before><commit_msg>Add a management command to dump all messages on public streams for a realm.
(imported from commit f4f8bfece408b466af4db93b2da15cf69b68e0a3)<commit_after>
|
from optparse import make_option
from django.core.management.base import BaseCommand
from zerver.models import Message, Realm, Stream, Recipient
import datetime
import time
class Command(BaseCommand):
default_cutoff = time.time() - 60 * 60 * 24 * 30 # 30 days.
option_list = BaseCommand.option_list + (
make_option('--domain',
dest='domain',
type='str',
help='The domain whose public streams you want to dump.'),
make_option('--since',
dest='since',
type='int',
default=default_cutoff,
help='The time in epoch since from which to start the dump.')
)
def handle(self, *args, **options):
realm = Realm.objects.get(domain=options["domain"])
streams = Stream.objects.filter(realm=realm, invite_only=False)
recipients = Recipient.objects.filter(
type=Recipient.STREAM, type_id__in=[stream.id for stream in streams])
cutoff = datetime.datetime.fromtimestamp(options["since"])
messages = Message.objects.filter(pub_date__gt=cutoff, recipient__in=recipients)
for message in messages:
print message.to_dict(False)
|
Add a management command to dump all messages on public streams for a realm.
(imported from commit f4f8bfece408b466af4db93b2da15cf69b68e0a3)from optparse import make_option
from django.core.management.base import BaseCommand
from zerver.models import Message, Realm, Stream, Recipient
import datetime
import time
class Command(BaseCommand):
default_cutoff = time.time() - 60 * 60 * 24 * 30 # 30 days.
option_list = BaseCommand.option_list + (
make_option('--domain',
dest='domain',
type='str',
help='The domain whose public streams you want to dump.'),
make_option('--since',
dest='since',
type='int',
default=default_cutoff,
help='The time in epoch since from which to start the dump.')
)
def handle(self, *args, **options):
realm = Realm.objects.get(domain=options["domain"])
streams = Stream.objects.filter(realm=realm, invite_only=False)
recipients = Recipient.objects.filter(
type=Recipient.STREAM, type_id__in=[stream.id for stream in streams])
cutoff = datetime.datetime.fromtimestamp(options["since"])
messages = Message.objects.filter(pub_date__gt=cutoff, recipient__in=recipients)
for message in messages:
print message.to_dict(False)
|
<commit_before><commit_msg>Add a management command to dump all messages on public streams for a realm.
(imported from commit f4f8bfece408b466af4db93b2da15cf69b68e0a3)<commit_after>from optparse import make_option
from django.core.management.base import BaseCommand
from zerver.models import Message, Realm, Stream, Recipient
import datetime
import time
class Command(BaseCommand):
default_cutoff = time.time() - 60 * 60 * 24 * 30 # 30 days.
option_list = BaseCommand.option_list + (
make_option('--domain',
dest='domain',
type='str',
help='The domain whose public streams you want to dump.'),
make_option('--since',
dest='since',
type='int',
default=default_cutoff,
help='The time in epoch since from which to start the dump.')
)
def handle(self, *args, **options):
realm = Realm.objects.get(domain=options["domain"])
streams = Stream.objects.filter(realm=realm, invite_only=False)
recipients = Recipient.objects.filter(
type=Recipient.STREAM, type_id__in=[stream.id for stream in streams])
cutoff = datetime.datetime.fromtimestamp(options["since"])
messages = Message.objects.filter(pub_date__gt=cutoff, recipient__in=recipients)
for message in messages:
print message.to_dict(False)
|
|
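One detail in the record above: default_cutoff is evaluated once, when the class body is executed at import time, so a long-lived process would keep a stale default. A minimal sketch of the cutoff arithmetic and the epoch-to-datetime conversion the command performs:

import datetime
import time

# Same arithmetic as the record's default_cutoff: now minus 30 days in seconds.
default_cutoff = time.time() - 60 * 60 * 24 * 30

# The handler converts the epoch value back to a naive local datetime
# before filtering Message.pub_date against it.
cutoff = datetime.datetime.fromtimestamp(default_cutoff)
print(cutoff)  # a datetime roughly 30 days in the past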
15408424fc6c2e8a10a2739427cbdfd51867b49a
|
setup.py
|
setup.py
|
from setuptools import setup
import re
import upsidedown
VERSION = str(upsidedown.__version__)
(AUTHOR, EMAIL) = re.match('^(.*?)\s*<(.*)>$', upsidedown.__author__).groups()
URL = upsidedown.__url__
LICENSE = upsidedown.__license__
setup(name='upsidedown',
version=VERSION,
author=AUTHOR,
author_email=EMAIL,
description='"Flip" characters in a string to create an "upside-down" impression.',
long_description=open('README').read().decode('utf8'),
url=URL,
download_url='http://github.com/cburgmer/upsidedown/downloads',
py_modules=['upsidedown'],
entry_points={
'console_scripts': [
'upsidedown = upsidedown:main',
],
},
license=LICENSE,
classifiers=[
'Environment :: Console',
'Intended Audience :: Developers',
'Development Status :: 5 - Production/Stable',
'Operating System :: OS Independent',
'Programming Language :: Python',
'License :: OSI Approved :: MIT License',
'Topic :: Text Processing',
])
|
import re
import codecs
from setuptools import setup
import upsidedown
VERSION = str(upsidedown.__version__)
(AUTHOR, EMAIL) = re.match('^(.*?)\s*<(.*)>$', upsidedown.__author__).groups()
URL = upsidedown.__url__
LICENSE = upsidedown.__license__
with codecs.open('README', encoding='utf-8') as readme:
long_description = readme.read()
setup(name='upsidedown',
version=VERSION,
author=AUTHOR,
author_email=EMAIL,
description='"Flip" characters in a string to create an "upside-down" impression.',
long_description=long_description,
url=URL,
download_url='http://github.com/cburgmer/upsidedown/downloads',
py_modules=['upsidedown'],
entry_points={
'console_scripts': [
'upsidedown = upsidedown:main',
],
},
license=LICENSE,
classifiers=[
'Environment :: Console',
'Intended Audience :: Developers',
'Development Status :: 5 - Production/Stable',
'Operating System :: OS Independent',
'Programming Language :: Python',
'License :: OSI Approved :: MIT License',
'Topic :: Text Processing',
])
|
Use codecs to open the readme
|
Use codecs to open the readme
|
Python
|
mit
|
jaraco/upsidedown,cburgmer/upsidedown
|
from setuptools import setup
import re
import upsidedown
VERSION = str(upsidedown.__version__)
(AUTHOR, EMAIL) = re.match('^(.*?)\s*<(.*)>$', upsidedown.__author__).groups()
URL = upsidedown.__url__
LICENSE = upsidedown.__license__
setup(name='upsidedown',
version=VERSION,
author=AUTHOR,
author_email=EMAIL,
description='"Flip" characters in a string to create an "upside-down" impression.',
long_description=open('README').read().decode('utf8'),
url=URL,
download_url='http://github.com/cburgmer/upsidedown/downloads',
py_modules=['upsidedown'],
entry_points={
'console_scripts': [
'upsidedown = upsidedown:main',
],
},
license=LICENSE,
classifiers=[
'Environment :: Console',
'Intended Audience :: Developers',
'Development Status :: 5 - Production/Stable',
'Operating System :: OS Independent',
'Programming Language :: Python',
'License :: OSI Approved :: MIT License',
'Topic :: Text Processing',
])
Use codecs to open the readme
|
import re
import codecs
from setuptools import setup
import upsidedown
VERSION = str(upsidedown.__version__)
(AUTHOR, EMAIL) = re.match('^(.*?)\s*<(.*)>$', upsidedown.__author__).groups()
URL = upsidedown.__url__
LICENSE = upsidedown.__license__
with codecs.open('README', encoding='utf-8') as readme:
long_description = readme.read()
setup(name='upsidedown',
version=VERSION,
author=AUTHOR,
author_email=EMAIL,
description='"Flip" characters in a string to create an "upside-down" impression.',
long_description=long_description,
url=URL,
download_url='http://github.com/cburgmer/upsidedown/downloads',
py_modules=['upsidedown'],
entry_points={
'console_scripts': [
'upsidedown = upsidedown:main',
],
},
license=LICENSE,
classifiers=[
'Environment :: Console',
'Intended Audience :: Developers',
'Development Status :: 5 - Production/Stable',
'Operating System :: OS Independent',
'Programming Language :: Python',
'License :: OSI Approved :: MIT License',
'Topic :: Text Processing',
])
|
<commit_before>from setuptools import setup
import re
import upsidedown
VERSION = str(upsidedown.__version__)
(AUTHOR, EMAIL) = re.match('^(.*?)\s*<(.*)>$', upsidedown.__author__).groups()
URL = upsidedown.__url__
LICENSE = upsidedown.__license__
setup(name='upsidedown',
version=VERSION,
author=AUTHOR,
author_email=EMAIL,
description='"Flip" characters in a string to create an "upside-down" impression.',
long_description=open('README').read().decode('utf8'),
url=URL,
download_url='http://github.com/cburgmer/upsidedown/downloads',
py_modules=['upsidedown'],
entry_points={
'console_scripts': [
'upsidedown = upsidedown:main',
],
},
license=LICENSE,
classifiers=[
'Environment :: Console',
'Intended Audience :: Developers',
'Development Status :: 5 - Production/Stable',
'Operating System :: OS Independent',
'Programming Language :: Python',
'License :: OSI Approved :: MIT License',
'Topic :: Text Processing',
])
<commit_msg>Use codecs to open the readme<commit_after>
|
import re
import codecs
from setuptools import setup
import upsidedown
VERSION = str(upsidedown.__version__)
(AUTHOR, EMAIL) = re.match('^(.*?)\s*<(.*)>$', upsidedown.__author__).groups()
URL = upsidedown.__url__
LICENSE = upsidedown.__license__
with codecs.open('README', encoding='utf-8') as readme:
long_description = readme.read()
setup(name='upsidedown',
version=VERSION,
author=AUTHOR,
author_email=EMAIL,
description='"Flip" characters in a string to create an "upside-down" impression.',
long_description=long_description,
url=URL,
download_url='http://github.com/cburgmer/upsidedown/downloads',
py_modules=['upsidedown'],
entry_points={
'console_scripts': [
'upsidedown = upsidedown:main',
],
},
license=LICENSE,
classifiers=[
'Environment :: Console',
'Intended Audience :: Developers',
'Development Status :: 5 - Production/Stable',
'Operating System :: OS Independent',
'Programming Language :: Python',
'License :: OSI Approved :: MIT License',
'Topic :: Text Processing',
])
|
from setuptools import setup
import re
import upsidedown
VERSION = str(upsidedown.__version__)
(AUTHOR, EMAIL) = re.match('^(.*?)\s*<(.*)>$', upsidedown.__author__).groups()
URL = upsidedown.__url__
LICENSE = upsidedown.__license__
setup(name='upsidedown',
version=VERSION,
author=AUTHOR,
author_email=EMAIL,
description='"Flip" characters in a string to create an "upside-down" impression.',
long_description=open('README').read().decode('utf8'),
url=URL,
download_url='http://github.com/cburgmer/upsidedown/downloads',
py_modules=['upsidedown'],
entry_points={
'console_scripts': [
'upsidedown = upsidedown:main',
],
},
license=LICENSE,
classifiers=[
'Environment :: Console',
'Intended Audience :: Developers',
'Development Status :: 5 - Production/Stable',
'Operating System :: OS Independent',
'Programming Language :: Python',
'License :: OSI Approved :: MIT License',
'Topic :: Text Processing',
])
Use codecs to open the readmeimport re
import codecs
from setuptools import setup
import upsidedown
VERSION = str(upsidedown.__version__)
(AUTHOR, EMAIL) = re.match('^(.*?)\s*<(.*)>$', upsidedown.__author__).groups()
URL = upsidedown.__url__
LICENSE = upsidedown.__license__
with codecs.open('README', encoding='utf-8') as readme:
long_description = readme.read()
setup(name='upsidedown',
version=VERSION,
author=AUTHOR,
author_email=EMAIL,
description='"Flip" characters in a string to create an "upside-down" impression.',
long_description=long_description,
url=URL,
download_url='http://github.com/cburgmer/upsidedown/downloads',
py_modules=['upsidedown'],
entry_points={
'console_scripts': [
'upsidedown = upsidedown:main',
],
},
license=LICENSE,
classifiers=[
'Environment :: Console',
'Intended Audience :: Developers',
'Development Status :: 5 - Production/Stable',
'Operating System :: OS Independent',
'Programming Language :: Python',
'License :: OSI Approved :: MIT License',
'Topic :: Text Processing',
])
|
<commit_before>from setuptools import setup
import re
import upsidedown
VERSION = str(upsidedown.__version__)
(AUTHOR, EMAIL) = re.match('^(.*?)\s*<(.*)>$', upsidedown.__author__).groups()
URL = upsidedown.__url__
LICENSE = upsidedown.__license__
setup(name='upsidedown',
version=VERSION,
author=AUTHOR,
author_email=EMAIL,
description='"Flip" characters in a string to create an "upside-down" impression.',
long_description=open('README').read().decode('utf8'),
url=URL,
download_url='http://github.com/cburgmer/upsidedown/downloads',
py_modules=['upsidedown'],
entry_points={
'console_scripts': [
'upsidedown = upsidedown:main',
],
},
license=LICENSE,
classifiers=[
'Environment :: Console',
'Intended Audience :: Developers',
'Development Status :: 5 - Production/Stable',
'Operating System :: OS Independent',
'Programming Language :: Python',
'License :: OSI Approved :: MIT License',
'Topic :: Text Processing',
])
<commit_msg>Use codecs to open the readme<commit_after>import re
import codecs
from setuptools import setup
import upsidedown
VERSION = str(upsidedown.__version__)
(AUTHOR, EMAIL) = re.match('^(.*?)\s*<(.*)>$', upsidedown.__author__).groups()
URL = upsidedown.__url__
LICENSE = upsidedown.__license__
with codecs.open('README', encoding='utf-8') as readme:
long_description = readme.read()
setup(name='upsidedown',
version=VERSION,
author=AUTHOR,
author_email=EMAIL,
description='"Flip" characters in a string to create an "upside-down" impression.',
long_description=long_description,
url=URL,
download_url='http://github.com/cburgmer/upsidedown/downloads',
py_modules=['upsidedown'],
entry_points={
'console_scripts': [
'upsidedown = upsidedown:main',
],
},
license=LICENSE,
classifiers=[
'Environment :: Console',
'Intended Audience :: Developers',
'Development Status :: 5 - Production/Stable',
'Operating System :: OS Independent',
'Programming Language :: Python',
'License :: OSI Approved :: MIT License',
'Topic :: Text Processing',
])
|
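The motivation for the diff above, sketched for context: open('README').read().decode('utf8') only works on Python 2, where read() returns a byte string; on Python 3 read() already returns str, which has no .decode() method. codecs.open yields decoded text on both interpreters (this sketch assumes a README file exists in the working directory):

import codecs

# Decodes the file as UTF-8 on read, on Python 2 and Python 3 alike.
with codecs.open('README', encoding='utf-8') as readme:
    long_description = readme.read()

# The replaced pattern breaks on Python 3:
#   open('README').read().decode('utf8')
# because read() already returns str, and str has no .decode() method.
print(len(long_description))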
afbaa30e4bf96be16ee8728c29abbbdcbe33d7b0
|
contrib_bots/bots/helloworld/test_helloworld.py
|
contrib_bots/bots/helloworld/test_helloworld.py
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
our_dir = os.path.dirname(os.path.abspath(__file__))
# For dev setups, we can find the API in the repo itself.
if os.path.exists(os.path.join(our_dir, '..')):
sys.path.insert(0, '..')
from bots_test_lib import BotTestCase
class TestHelloWorldBot(BotTestCase):
bot_name = "helloworld"
def test_bot(self):
self.assert_bot_output(
{'content': "foo", 'type': "private", 'sender_email': "foo"},
"beep boop"
)
self.assert_bot_output(
{'content': "Hi, my name is abc", 'type': "stream", 'display_recipient': "foo", 'subject': "foo"},
"beep boop"
)
self.assert_bot_output(
{'content': "", 'type': "stream", 'display_recipient': "foo", 'subject': "foo"},
"beep boop"
)
|
Add tests for helloworld bot in contrib_bots.
|
testsuite: Add tests for helloworld bot in contrib_bots.
Add test file 'test_helloworld.py'.
|
Python
|
apache-2.0
|
jrowan/zulip,timabbott/zulip,shubhamdhama/zulip,jackrzhang/zulip,rishig/zulip,zulip/zulip,amanharitsh123/zulip,shubhamdhama/zulip,verma-varsha/zulip,punchagan/zulip,timabbott/zulip,vabs22/zulip,zulip/zulip,hackerkid/zulip,synicalsyntax/zulip,mahim97/zulip,mahim97/zulip,punchagan/zulip,zulip/zulip,jackrzhang/zulip,brainwane/zulip,amanharitsh123/zulip,rishig/zulip,tommyip/zulip,timabbott/zulip,amanharitsh123/zulip,eeshangarg/zulip,kou/zulip,hackerkid/zulip,eeshangarg/zulip,zulip/zulip,brockwhittaker/zulip,rht/zulip,eeshangarg/zulip,kou/zulip,jrowan/zulip,dhcrzf/zulip,shubhamdhama/zulip,brainwane/zulip,shubhamdhama/zulip,Galexrt/zulip,tommyip/zulip,tommyip/zulip,tommyip/zulip,jackrzhang/zulip,jrowan/zulip,vaidap/zulip,kou/zulip,vabs22/zulip,amanharitsh123/zulip,andersk/zulip,dhcrzf/zulip,punchagan/zulip,Galexrt/zulip,verma-varsha/zulip,dhcrzf/zulip,amanharitsh123/zulip,dhcrzf/zulip,showell/zulip,kou/zulip,jackrzhang/zulip,timabbott/zulip,jrowan/zulip,tommyip/zulip,Galexrt/zulip,mahim97/zulip,zulip/zulip,kou/zulip,zulip/zulip,brainwane/zulip,brainwane/zulip,verma-varsha/zulip,andersk/zulip,brockwhittaker/zulip,Galexrt/zulip,mahim97/zulip,showell/zulip,showell/zulip,vaidap/zulip,brainwane/zulip,andersk/zulip,vaidap/zulip,Galexrt/zulip,vabs22/zulip,jrowan/zulip,tommyip/zulip,hackerkid/zulip,dhcrzf/zulip,andersk/zulip,kou/zulip,punchagan/zulip,jackrzhang/zulip,rishig/zulip,timabbott/zulip,shubhamdhama/zulip,brockwhittaker/zulip,synicalsyntax/zulip,vaidap/zulip,andersk/zulip,verma-varsha/zulip,dhcrzf/zulip,hackerkid/zulip,andersk/zulip,Galexrt/zulip,hackerkid/zulip,timabbott/zulip,rht/zulip,hackerkid/zulip,tommyip/zulip,rht/zulip,rishig/zulip,rht/zulip,verma-varsha/zulip,rht/zulip,amanharitsh123/zulip,showell/zulip,jackrzhang/zulip,vabs22/zulip,Galexrt/zulip,brockwhittaker/zulip,showell/zulip,eeshangarg/zulip,brockwhittaker/zulip,andersk/zulip,mahim97/zulip,synicalsyntax/zulip,rishig/zulip,rishig/zulip,brockwhittaker/zulip,shubhamdhama/zulip,showell/zulip,vabs22/zulip,kou/zulip,hackerkid/zulip,synicalsyntax/zulip,vabs22/zulip,verma-varsha/zulip,rht/zulip,zulip/zulip,jrowan/zulip,punchagan/zulip,synicalsyntax/zulip,punchagan/zulip,brainwane/zulip,timabbott/zulip,mahim97/zulip,showell/zulip,synicalsyntax/zulip,eeshangarg/zulip,rishig/zulip,eeshangarg/zulip,dhcrzf/zulip,brainwane/zulip,vaidap/zulip,shubhamdhama/zulip,eeshangarg/zulip,punchagan/zulip,jackrzhang/zulip,synicalsyntax/zulip,vaidap/zulip,rht/zulip
|
testsuite: Add tests for helloworld bot in contrib_bots.
Add test file 'test_helloworld.py'.
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
our_dir = os.path.dirname(os.path.abspath(__file__))
# For dev setups, we can find the API in the repo itself.
if os.path.exists(os.path.join(our_dir, '..')):
sys.path.insert(0, '..')
from bots_test_lib import BotTestCase
class TestHelloWorldBot(BotTestCase):
bot_name = "helloworld"
def test_bot(self):
self.assert_bot_output(
{'content': "foo", 'type': "private", 'sender_email': "foo"},
"beep boop"
)
self.assert_bot_output(
{'content': "Hi, my name is abc", 'type': "stream", 'display_recipient': "foo", 'subject': "foo"},
"beep boop"
)
self.assert_bot_output(
{'content': "", 'type': "stream", 'display_recipient': "foo", 'subject': "foo"},
"beep boop"
)
|
<commit_before><commit_msg>testsuite: Add tests for helloworld bot in contrib_bots.
Add test file 'test_helloworld.py'.<commit_after>
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
our_dir = os.path.dirname(os.path.abspath(__file__))
# For dev setups, we can find the API in the repo itself.
if os.path.exists(os.path.join(our_dir, '..')):
sys.path.insert(0, '..')
from bots_test_lib import BotTestCase
class TestHelloWorldBot(BotTestCase):
bot_name = "helloworld"
def test_bot(self):
self.assert_bot_output(
{'content': "foo", 'type': "private", 'sender_email': "foo"},
"beep boop"
)
self.assert_bot_output(
{'content': "Hi, my name is abc", 'type': "stream", 'display_recipient': "foo", 'subject': "foo"},
"beep boop"
)
self.assert_bot_output(
{'content': "", 'type': "stream", 'display_recipient': "foo", 'subject': "foo"},
"beep boop"
)
|
testsuite: Add tests for helloworld bot in contrib_bots.
Add test file 'test_helloworld.py'.#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
our_dir = os.path.dirname(os.path.abspath(__file__))
# For dev setups, we can find the API in the repo itself.
if os.path.exists(os.path.join(our_dir, '..')):
sys.path.insert(0, '..')
from bots_test_lib import BotTestCase
class TestHelloWorldBot(BotTestCase):
bot_name = "helloworld"
def test_bot(self):
self.assert_bot_output(
{'content': "foo", 'type': "private", 'sender_email': "foo"},
"beep boop"
)
self.assert_bot_output(
{'content': "Hi, my name is abc", 'type': "stream", 'display_recipient': "foo", 'subject': "foo"},
"beep boop"
)
self.assert_bot_output(
{'content': "", 'type': "stream", 'display_recipient': "foo", 'subject': "foo"},
"beep boop"
)
|
<commit_before><commit_msg>testsuite: Add tests for helloworld bot in contrib_bots.
Add test file 'test_helloworld.py'.<commit_after>#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
our_dir = os.path.dirname(os.path.abspath(__file__))
# For dev setups, we can find the API in the repo itself.
if os.path.exists(os.path.join(our_dir, '..')):
sys.path.insert(0, '..')
from bots_test_lib import BotTestCase
class TestHelloWorldBot(BotTestCase):
bot_name = "helloworld"
def test_bot(self):
self.assert_bot_output(
{'content': "foo", 'type': "private", 'sender_email': "foo"},
"beep boop"
)
self.assert_bot_output(
{'content': "Hi, my name is abc", 'type': "stream", 'display_recipient': "foo", 'subject': "foo"},
"beep boop"
)
self.assert_bot_output(
{'content': "", 'type': "stream", 'display_recipient': "foo", 'subject': "foo"},
"beep boop"
)
|
|
e88b03898a1aef8bdb4a99e18e40cb20c8f77ba2
|
setup.py
|
setup.py
|
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='regulations-configs',
version='0.1.0',
description='CFPB-specific configuration for the eRegulations parser',
author='CFPB',
author_email='tech@cfpb.gov',
packages=find_packages(),
)
|
Make it a Python package
|
Make it a Python package
|
Python
|
cc0-1.0
|
ascott1/regulations-configs,grapesmoker/regulations-configs,cfpb/regulations-configs,willbarton/regulations-configs
|
Make it a Python package
|
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='regulations-configs',
version='0.1.0',
description='CFPB-specific configuration for the eRegulations parser',
author='CFPB',
author_email='tech@cfpb.gov',
packages=find_packages(),
)
|
<commit_before><commit_msg>Make it a Python package<commit_after>
|
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='regulations-configs',
version='0.1.0',
description='CFPB-specific configuration for the eRegulations parser',
author='CFPB',
author_email='tech@cfpb.gov',
packages=find_packages(),
)
|
Make it a Python package#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='regulations-configs',
version='0.1.0',
description='CFPB-specific configuration for the eRegulations parser',
author='CFPB',
author_email='tech@cfpb.gov',
packages=find_packages(),
)
|
<commit_before><commit_msg>Make it a Python package<commit_after>#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='regulations-configs',
version='0.1.0',
description='CFPB-specific configuration for the eRegulations parser',
author='CFPB',
author_email='tech@cfpb.gov',
packages=find_packages(),
)
|
|
8a7e88f95d14c2d24f505113162543ebb45c9cbf
|
tests/write_cb_bogus_test.py
|
tests/write_cb_bogus_test.py
|
#! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
# vi:ts=4:et
import os.path
import pycurl
import sys
import unittest
class WriteAbortTest(unittest.TestCase):
def setUp(self):
self.curl = pycurl.Curl()
def tearDown(self):
self.curl.close()
def write_cb_returning_string(self, data):
return 'foo'
def write_cb_returning_float(self, data):
return 0.5
def test_write_cb_returning_string(self):
self.check(self.write_cb_returning_string)
def test_write_cb_returning_float(self):
self.check(self.write_cb_returning_float)
def check(self, write_cb):
# download the script itself through the file:// protocol into write_cb
c = pycurl.Curl()
self.curl.setopt(pycurl.URL, 'file://' + os.path.abspath(sys.argv[0]))
self.curl.setopt(pycurl.WRITEFUNCTION, write_cb)
try:
self.curl.perform()
except pycurl.error, (err, msg):
# we expect pycurl.E_WRITE_ERROR as the response
assert pycurl.E_WRITE_ERROR == err
# actual error
assert hasattr(sys, 'last_type')
self.assertEqual(pycurl.error, sys.last_type)
assert hasattr(sys, 'last_value')
self.assertEqual('write callback must return int or None', str(sys.last_value))
|
Check that bogus return values from write callback are correctly handled (still)
|
Check that bogus return values from write callback are correctly handled (still)
|
Python
|
lgpl-2.1
|
p/pycurl-archived,p/pycurl-archived,pycurl/pycurl,pycurl/pycurl,pycurl/pycurl,p/pycurl-archived
|
Check that bogus return values from write callback are correctly handled (still)
|
#! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
# vi:ts=4:et
import os.path
import pycurl
import sys
import unittest
class WriteAbortTest(unittest.TestCase):
def setUp(self):
self.curl = pycurl.Curl()
def tearDown(self):
self.curl.close()
def write_cb_returning_string(self, data):
return 'foo'
def write_cb_returning_float(self, data):
return 0.5
def test_write_cb_returning_string(self):
self.check(self.write_cb_returning_string)
def test_write_cb_returning_float(self):
self.check(self.write_cb_returning_float)
def check(self, write_cb):
# download the script itself through the file:// protocol into write_cb
c = pycurl.Curl()
self.curl.setopt(pycurl.URL, 'file://' + os.path.abspath(sys.argv[0]))
self.curl.setopt(pycurl.WRITEFUNCTION, write_cb)
try:
self.curl.perform()
except pycurl.error, (err, msg):
# we expect pycurl.E_WRITE_ERROR as the response
assert pycurl.E_WRITE_ERROR == err
# actual error
assert hasattr(sys, 'last_type')
self.assertEqual(pycurl.error, sys.last_type)
assert hasattr(sys, 'last_value')
self.assertEqual('write callback must return int or None', str(sys.last_value))
|
<commit_before><commit_msg>Check that bogus return values from write callback are correctly handled (still)<commit_after>
|
#! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
# vi:ts=4:et
import os.path
import pycurl
import sys
import unittest
class WriteAbortTest(unittest.TestCase):
def setUp(self):
self.curl = pycurl.Curl()
def tearDown(self):
self.curl.close()
def write_cb_returning_string(self, data):
return 'foo'
def write_cb_returning_float(self, data):
return 0.5
def test_write_cb_returning_string(self):
self.check(self.write_cb_returning_string)
def test_write_cb_returning_float(self):
self.check(self.write_cb_returning_float)
def check(self, write_cb):
# download the script itself through the file:// protocol into write_cb
c = pycurl.Curl()
self.curl.setopt(pycurl.URL, 'file://' + os.path.abspath(sys.argv[0]))
self.curl.setopt(pycurl.WRITEFUNCTION, write_cb)
try:
self.curl.perform()
except pycurl.error, (err, msg):
# we expect pycurl.E_WRITE_ERROR as the response
assert pycurl.E_WRITE_ERROR == err
# actual error
assert hasattr(sys, 'last_type')
self.assertEqual(pycurl.error, sys.last_type)
assert hasattr(sys, 'last_value')
self.assertEqual('write callback must return int or None', str(sys.last_value))
|
Check that bogus return values from write callback are correctly handled (still)#! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
# vi:ts=4:et
import os.path
import pycurl
import sys
import unittest
class WriteAbortTest(unittest.TestCase):
def setUp(self):
self.curl = pycurl.Curl()
def tearDown(self):
self.curl.close()
def write_cb_returning_string(self, data):
return 'foo'
def write_cb_returning_float(self, data):
return 0.5
def test_write_cb_returning_string(self):
self.check(self.write_cb_returning_string)
def test_write_cb_returning_float(self):
self.check(self.write_cb_returning_float)
def check(self, write_cb):
# download the script itself through the file:// protocol into write_cb
c = pycurl.Curl()
self.curl.setopt(pycurl.URL, 'file://' + os.path.abspath(sys.argv[0]))
self.curl.setopt(pycurl.WRITEFUNCTION, write_cb)
try:
self.curl.perform()
except pycurl.error, (err, msg):
# we expect pycurl.E_WRITE_ERROR as the response
assert pycurl.E_WRITE_ERROR == err
# actual error
assert hasattr(sys, 'last_type')
self.assertEqual(pycurl.error, sys.last_type)
assert hasattr(sys, 'last_value')
self.assertEqual('write callback must return int or None', str(sys.last_value))
|
<commit_before><commit_msg>Check that bogus return values from write callback are correctly handled (still)<commit_after>#! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
# vi:ts=4:et
import os.path
import pycurl
import sys
import unittest
class WriteAbortTest(unittest.TestCase):
def setUp(self):
self.curl = pycurl.Curl()
def tearDown(self):
self.curl.close()
def write_cb_returning_string(self, data):
return 'foo'
def write_cb_returning_float(self, data):
return 0.5
def test_write_cb_returning_string(self):
self.check(self.write_cb_returning_string)
def test_write_cb_returning_float(self):
self.check(self.write_cb_returning_float)
def check(self, write_cb):
# download the script itself through the file:// protocol into write_cb
c = pycurl.Curl()
self.curl.setopt(pycurl.URL, 'file://' + os.path.abspath(sys.argv[0]))
self.curl.setopt(pycurl.WRITEFUNCTION, write_cb)
try:
self.curl.perform()
except pycurl.error, (err, msg):
# we expect pycurl.E_WRITE_ERROR as the response
assert pycurl.E_WRITE_ERROR == err
# actual error
assert hasattr(sys, 'last_type')
self.assertEqual(pycurl.error, sys.last_type)
assert hasattr(sys, 'last_value')
self.assertEqual('write callback must return int or None', str(sys.last_value))
|
|
e2b44e953d959ef3281de7e90a10a5bc5900efca
|
setup.py
|
setup.py
|
#!/usr/bin/env python2.7
import glob, re, subprocess, sys
from distutils.core import setup
def run_setup():
try:
version = subprocess.check_output(['git', 'describe', '--tags'], stderr=open('/dev/null', 'w')).strip()
except:
print 'cannot determine version: no tags detected'
sys.exit(1)
setup(
author='Max Kalika',
author_email='max@topsy.com',
name='snmpy',
version=re.search(r'(?P<version>[0-9]+(?:\.[0-9]*)*)$', version).group('version'),
scripts=['snmpy.py'],
py_modules=[mod[:-3] for mod in glob.glob('snmpy/*.py')],
data_files=['snmpy.cfg.example']
)
if __name__ == '__main__':
run_setup()
|
Add basic package management support.
|
Add basic package management support.
|
Python
|
mit
|
mk23/snmpy,mk23/snmpy
|
Add basic package management support.
|
#!/usr/bin/env python2.7
import glob, re, subprocess, sys
from distutils.core import setup
def run_setup():
try:
version = subprocess.check_output(['git', 'describe', '--tags'], stderr=open('/dev/null', 'w')).strip()
except:
print 'cannot determine version: no tags detected'
sys.exit(1)
setup(
author='Max Kalika',
author_email='max@topsy.com',
name='snmpy',
version=re.search(r'(?P<version>[0-9]+(?:\.[0-9]*)*)$', version).group('version'),
scripts=['snmpy.py'],
py_modules=[mod[:-3] for mod in glob.glob('snmpy/*.py')],
data_files=['snmpy.cfg.example']
)
if __name__ == '__main__':
run_setup()
|
<commit_before><commit_msg>Add basic package management support.<commit_after>
|
#!/usr/bin/env python2.7
import glob, re, subprocess, sys
from distutils.core import setup
def run_setup():
try:
version = subprocess.check_output(['git', 'describe', '--tags'], stderr=open('/dev/null', 'w')).strip()
except:
print 'cannot determine version: no tags detected'
sys.exit(1)
setup(
author='Max Kalika',
author_email='max@topsy.com',
name='snmpy',
version=re.search(r'(?P<version>[0-9]+(?:\.[0-9]*)*)$', version).group('version'),
scripts=['snmpy.py'],
py_modules=[mod[:-3] for mod in glob.glob('snmpy/*.py')],
data_files=['snmpy.cfg.example']
)
if __name__ == '__main__':
run_setup()
|
Add basic package management support.#!/usr/bin/env python2.7
import glob, re, subprocess, sys
from distutils.core import setup
def run_setup():
try:
version = subprocess.check_output(['git', 'describe', '--tags'], stderr=open('/dev/null', 'w')).strip()
except:
print 'cannot determine version: no tags detected'
sys.exit(1)
setup(
author='Max Kalika',
author_email='max@topsy.com',
name='snmpy',
version=re.search(r'(?P<version>[0-9]+(?:\.[0-9]*)*)$', version).group('version'),
scripts=['snmpy.py'],
py_modules=[mod[:-3] for mod in glob.glob('snmpy/*.py')],
data_files=['snmpy.cfg.example']
)
if __name__ == '__main__':
run_setup()
|
<commit_before><commit_msg>Add basic package management support.<commit_after>#!/usr/bin/env python2.7
import glob, re, subprocess, sys
from distutils.core import setup
def run_setup():
try:
version = subprocess.check_output(['git', 'describe', '--tags'], stderr=open('/dev/null', 'w')).strip()
except:
print 'cannot determine version: no tags detected'
sys.exit(1)
setup(
author='Max Kalika',
author_email='max@topsy.com',
name='snmpy',
version=re.search(r'(?P<version>[0-9]+(?:\.[0-9]*)*)$', version).group('version'),
scripts=['snmpy.py'],
py_modules=[mod[:-3] for mod in glob.glob('snmpy/*.py')],
data_files=['snmpy.cfg.example']
)
if __name__ == '__main__':
run_setup()
|
|
6f46fe06a4d4666cd518f5db5ae54924e317578f
|
sremailer.py
|
sremailer.py
|
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
import bottle
import stoneridge
@bottle.post('/email')
def email():
r = bottle.request
to = r.forms.get('to')
subject = r.forms.get('subject')
msg = r.forms.get('message')
stoneridge.sendmail(to, subject, msg)
def daemon():
stoneridge.StreamLogger.bottle_inject()
bottle.run('0.0.0.0', port=2255)
@stoneridge.main
def main():
parser = stoneridge.DaemonArgumentParser()
parser.parse_args()
parser.start_daemon(daemon)
|
Add web form submit emailer
|
Add web form submit emailer
|
Python
|
mpl-2.0
|
mozilla/stoneridge,mozilla/stoneridge,mozilla/stoneridge,mozilla/stoneridge,mozilla/stoneridge,mozilla/stoneridge,mozilla/stoneridge,mozilla/stoneridge
|
Add web form submit emailer
|
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
import bottle
import stoneridge
@bottle.post('/email')
def email():
r = bottle.request
to = r.forms.get('to')
subject = r.forms.get('subject')
msg = r.forms.get('message')
stoneridge.sendmail(to, subject, msg)
def daemon():
stoneridge.StreamLogger.bottle_inject()
bottle.run('0.0.0.0', port=2255)
@stoneridge.main
def main():
parser = stoneridge.DaemonArgumentParser()
parser.parse_args()
parser.start_daemon(daemon)
|
<commit_before><commit_msg>Add web form submit emailer<commit_after>
|
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
import bottle
import stoneridge
@bottle.post('/email')
def email():
r = bottle.request
to = r.forms.get('to')
subject = r.forms.get('subject')
msg = r.forms.get('message')
stoneridge.sendmail(to, subject, msg)
def daemon():
stoneridge.StreamLogger.bottle_inject()
bottle.run('0.0.0.0', port=2255)
@stoneridge.main
def main():
parser = stoneridge.DaemonArgumentParser()
parser.parse_args()
parser.start_daemon(daemon)
|
Add web form submit emailer#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
import bottle
import stoneridge
@bottle.post('/email')
def email():
r = bottle.request
to = r.forms.get('to')
subject = r.forms.get('subject')
msg = r.forms.get('message')
stoneridge.sendmail(to, subject, msg)
def daemon():
stoneridge.StreamLogger.bottle_inject()
bottle.run('0.0.0.0', port=2255)
@stoneridge.main
def main():
parser = stoneridge.DaemonArgumentParser()
parser.parse_args()
parser.start_daemon(daemon)
|
<commit_before><commit_msg>Add web form submit emailer<commit_after>#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
import bottle
import stoneridge
@bottle.post('/email')
def email():
r = bottle.request
to = r.forms.get('to')
subject = r.forms.get('subject')
msg = r.forms.get('message')
stoneridge.sendmail(to, subject, msg)
def daemon():
stoneridge.StreamLogger.bottle_inject()
bottle.run('0.0.0.0', port=2255)
@stoneridge.main
def main():
parser = stoneridge.DaemonArgumentParser()
parser.parse_args()
parser.start_daemon(daemon)
|