| commit (stringlengths 40-40) | old_file (stringlengths 4-118) | new_file (stringlengths 4-118) | old_contents (stringlengths 0-2.94k) | new_contents (stringlengths 1-4.43k) | subject (stringlengths 15-444) | message (stringlengths 16-3.45k) | lang (stringclasses 1 value) | license (stringclasses 13 values) | repos (stringlengths 5-43.2k) | prompt (stringlengths 17-4.58k) | response (stringlengths 1-4.43k) | prompt_tagged (stringlengths 58-4.62k) | response_tagged (stringlengths 1-4.43k) | text (stringlengths 132-7.29k) | text_tagged (stringlengths 173-7.33k) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
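A minimal sketch of loading and iterating over rows with this schema via the Hugging Face `datasets` library. The dataset identifier `"your-org/commit-message-dataset"` is a placeholder (the real Hub path is not given here); the column names come from the schema above, and the note about `text`/`text_tagged` reflects the rows shown below, where `text` concatenates the commit message with the new file contents and `text_tagged` wraps them in `<commit_before>`/`<commit_msg>`/`<commit_after>` markers.

```python
from datasets import load_dataset

# Placeholder identifier: substitute the actual dataset path on the Hub.
ds = load_dataset("your-org/commit-message-dataset", split="train")

# Inspect a few rows: each one pairs file contents with the commit
# subject/message describing the change.
for row in ds.select(range(3)):
    print(row["commit"], row["old_file"])
    print(row["subject"])
    # 'text_tagged' uses <commit_before>/<commit_msg>/<commit_after> markers
    # around the old contents, commit message, and new contents.
    print(row["text_tagged"][:200])
```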
6a0efd80ab081bc0056bc7ec346320f03fdcc503
|
migrations/versions/0071_add_job_error_state.py
|
migrations/versions/0071_add_job_error_state.py
|
"""empty message
Revision ID: 0071_add_job_error_state
Revises: 0070_fix_notify_user_email
Create Date: 2017-03-10 16:15:22.153948
"""
# revision identifiers, used by Alembic.
revision = '0071_add_job_error_state'
down_revision = '0070_fix_notify_user_email'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.execute("INSERT INTO JOB_STATUS VALUES('error')")
def downgrade():
op.execute("DELETE FROM JOB_STATUS WHERE name = 'error'")
|
Add a new status to the job statuses to handle errors.
|
Add a new status to the job statuses to handle errors.
|
Python
|
mit
|
alphagov/notifications-api,alphagov/notifications-api
|
Add a new status to the job statuses to handle errors.
|
"""empty message
Revision ID: 0071_add_job_error_state
Revises: 0070_fix_notify_user_email
Create Date: 2017-03-10 16:15:22.153948
"""
# revision identifiers, used by Alembic.
revision = '0071_add_job_error_state'
down_revision = '0070_fix_notify_user_email'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.execute("INSERT INTO JOB_STATUS VALUES('error')")
def downgrade():
op.execute("DELETE FROM JOB_STATUS WHERE name = 'error'")
|
<commit_before><commit_msg>Add a new status to the job statuses to handle errors.<commit_after>
|
"""empty message
Revision ID: 0071_add_job_error_state
Revises: 0070_fix_notify_user_email
Create Date: 2017-03-10 16:15:22.153948
"""
# revision identifiers, used by Alembic.
revision = '0071_add_job_error_state'
down_revision = '0070_fix_notify_user_email'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.execute("INSERT INTO JOB_STATUS VALUES('error')")
def downgrade():
op.execute("DELETE FROM JOB_STATUS WHERE name = 'error'")
|
Add a new status to the job statuses to handle errors."""empty message
Revision ID: 0071_add_job_error_state
Revises: 0070_fix_notify_user_email
Create Date: 2017-03-10 16:15:22.153948
"""
# revision identifiers, used by Alembic.
revision = '0071_add_job_error_state'
down_revision = '0070_fix_notify_user_email'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.execute("INSERT INTO JOB_STATUS VALUES('error')")
def downgrade():
op.execute("DELETE FROM JOB_STATUS WHERE name = 'error'")
|
<commit_before><commit_msg>Add a new status to the job statuses to handle errors.<commit_after>"""empty message
Revision ID: 0071_add_job_error_state
Revises: 0070_fix_notify_user_email
Create Date: 2017-03-10 16:15:22.153948
"""
# revision identifiers, used by Alembic.
revision = '0071_add_job_error_state'
down_revision = '0070_fix_notify_user_email'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.execute("INSERT INTO JOB_STATUS VALUES('error')")
def downgrade():
op.execute("DELETE FROM JOB_STATUS WHERE name = 'error'")
|
|
4117649358d34bc6f18868d19f1e0e1f4b38f699
|
sara_flexbe_states/src/sara_flexbe_states/TF_transform.py
|
sara_flexbe_states/src/sara_flexbe_states/TF_transform.py
|
#!/usr/bin/env python
import rospy
import tf
class TF_transformation:
"""
Transformation from a reference to another
--in_ref frame_id first reference
--out_ref frame_id second reference
># in_pos Point point in in_pos
<= done Did all the transformation
<= fail Failed to transform
"""
def __init__(self,in_ref,out_ref):
'''
Constructor
'''
super(TF_transformation,self).__init__(outcomes=['done','fail'], input_keys=['in_pos'], output_keys=['out_pos'])
self.listener = tf.TransformListener()
self.in_ref=in_ref
self.out_ref=out_ref
def execute(self, userdata):
point = geometry_msgs.msg.PointStamped()
point.header.frame_id = self.in_ref
point.point = userdata.in_pos
self.listener.waitForTransform("map", self.out_ref, rospy.Time(0), rospy.Duration(1))
print("Frame : map")
print(" point : "+str(point.point))
try:
point = self.listener.transformPoint(self.out_ref, point)
userdata.out_pos = point.point
return 'done'
except:
return 'fail'
|
Add State to transform between references
|
Add State to transform between references
|
Python
|
bsd-3-clause
|
WalkingMachine/sara_behaviors,WalkingMachine/sara_behaviors
|
Add State to transform between references
|
#!/usr/bin/env python
import rospy
import tf
class TF_transformation:
"""
Transformation from a reference to another
--in_ref frame_id first reference
--out_ref frame_id second reference
># in_pos Point point in in_pos
<= done Did all the transformation
<= fail Failed to transform
"""
def __init__(self,in_ref,out_ref):
'''
Constructor
'''
super(TF_transformation,self).__init__(outcomes=['done','fail'], input_keys=['in_pos'], output_keys=['out_pos'])
self.listener = tf.TransformListener()
self.in_ref=in_ref
self.out_ref=out_ref
def execute(self, userdata):
point = geometry_msgs.msg.PointStamped()
point.header.frame_id = self.in_ref
point.point = userdata.in_pos
self.listener.waitForTransform("map", self.out_ref, rospy.Time(0), rospy.Duration(1))
print("Frame : map")
print(" point : "+str(point.point))
try:
point = self.listener.transformPoint(self.out_ref, point)
userdata.out_pos = point.point
return 'done'
except:
return 'fail'
|
<commit_before><commit_msg>Add State to transform between references<commit_after>
|
#!/usr/bin/env python
import rospy
import tf
class TF_transformation:
"""
Transformation from a reference to another
--in_ref frame_id first reference
--out_ref frame_id second reference
># in_pos Point point in in_pos
<= done Did all the transformation
<= fail Failed to transform
"""
def __init__(self,in_ref,out_ref):
'''
Constructor
'''
super(TF_transformation,self).__init__(outcomes=['done','fail'], input_keys=['in_pos'], output_keys=['out_pos'])
self.listener = tf.TransformListener()
self.in_ref=in_ref
self.out_ref=out_ref
def execute(self, userdata):
point = geometry_msgs.msg.PointStamped()
point.header.frame_id = self.in_ref
point.point = userdata.in_pos
self.listener.waitForTransform("map", self.out_ref, rospy.Time(0), rospy.Duration(1))
print("Frame : map")
print(" point : "+str(point.point))
try:
point = self.listener.transformPoint(self.out_ref, point)
userdata.out_pos = point.point
return 'done'
except:
return 'fail'
|
Add State to transform between references#!/usr/bin/env python
import rospy
import tf
class TF_transformation:
"""
Transformation from a reference to another
--in_ref frame_id first reference
--out_ref frame_id second reference
># in_pos Point point in in_pos
<= done Did all the transformation
<= fail Failed to transform
"""
def __init__(self,in_ref,out_ref):
'''
Constructor
'''
super(TF_transformation,self).__init__(outcomes=['done','fail'], input_keys=['in_pos'], output_keys=['out_pos'])
self.listener = tf.TransformListener()
self.in_ref=in_ref
self.out_ref=out_ref
def execute(self, userdata):
point = geometry_msgs.msg.PointStamped()
point.header.frame_id = self.in_ref
point.point = userdata.in_pos
self.listener.waitForTransform("map", self.out_ref, rospy.Time(0), rospy.Duration(1))
print("Frame : map")
print(" point : "+str(point.point))
try:
point = self.listener.transformPoint(self.out_ref, point)
userdata.out_pos = point.point
return 'done'
except:
return 'fail'
|
<commit_before><commit_msg>Add State to transform between references<commit_after>#!/usr/bin/env python
import rospy
import tf
class TF_transformation:
"""
Transformation from a reference to another
--in_ref frame_id first reference
--out_ref frame_id second reference
># in_pos Point point in in_pos
<= done Did all the transformation
<= fail Failed to transform
"""
def __init__(self,in_ref,out_ref):
'''
Constructor
'''
super(TF_transformation,self).__init__(outcomes=['done','fail'], input_keys=['in_pos'], output_keys=['out_pos'])
self.listener = tf.TransformListener()
self.in_ref=in_ref
self.out_ref=out_ref
def execute(self, userdata):
point = geometry_msgs.msg.PointStamped()
point.header.frame_id = self.in_ref
point.point = userdata.in_pos
self.listener.waitForTransform("map", self.out_ref, rospy.Time(0), rospy.Duration(1))
print("Frame : map")
print(" point : "+str(point.point))
try:
point = self.listener.transformPoint(self.out_ref, point)
userdata.out_pos = point.point
return 'done'
except:
return 'fail'
|
|
90f94d18f6ce4d165ad67ccfdc93c0eefdcb0c69
|
kolibri/core/tasks/validation.py
|
kolibri/core/tasks/validation.py
|
from rest_framework import serializers
class JobValidator(serializers.Serializer):
"""
A serializer class for validating and deserializing job data.
Task is included for completeness of documentation of expected fields.
But we will validate the existence of this before we get to this point.
"""
type = serializers.CharField(required=True)
def validate(self, data):
kwargs = data.copy()
kwargs.pop("type")
return {
"args": (),
"kwargs": kwargs,
"extra_metadata": {},
}
def run_validation(self, data):
value = super(JobValidator, self).run_validation(data)
if not isinstance(value, dict):
raise TypeError("Validator must return a dict.")
extra_metadata = value.get("extra_metadata", {})
if extra_metadata is not None and not isinstance(extra_metadata, dict):
raise TypeError("'extra_metadata' must be a dict.")
if "user" in self.context and self.context["user"].is_authenticated():
user = self.context["user"]
extra_metadata.update(
{
"started_by": user.id,
"started_by_username": user.username,
}
)
value["extra_metadata"] = extra_metadata
return value
|
Implement a base job validator using DRF serializers.
|
Implement a base job validator using DRF serializers.
|
Python
|
mit
|
learningequality/kolibri,learningequality/kolibri,learningequality/kolibri,learningequality/kolibri
|
Implement a base job validator using DRF serializers.
|
from rest_framework import serializers
class JobValidator(serializers.Serializer):
"""
A serializer class for validating and deserializing job data.
Task is included for completeness of documentation of expected fields.
But we will validate the existence of this before we get to this point.
"""
type = serializers.CharField(required=True)
def validate(self, data):
kwargs = data.copy()
kwargs.pop("type")
return {
"args": (),
"kwargs": kwargs,
"extra_metadata": {},
}
def run_validation(self, data):
value = super(JobValidator, self).run_validation(data)
if not isinstance(value, dict):
raise TypeError("Validator must return a dict.")
extra_metadata = value.get("extra_metadata", {})
if extra_metadata is not None and not isinstance(extra_metadata, dict):
raise TypeError("'extra_metadata' must be a dict.")
if "user" in self.context and self.context["user"].is_authenticated():
user = self.context["user"]
extra_metadata.update(
{
"started_by": user.id,
"started_by_username": user.username,
}
)
value["extra_metadata"] = extra_metadata
return value
|
<commit_before><commit_msg>Implement a base job validator using DRF serializers.<commit_after>
|
from rest_framework import serializers
class JobValidator(serializers.Serializer):
"""
A serializer class for validating and deserializing job data.
Task is included for completeness of documentation of expected fields.
But we will validate the existence of this before we get to this point.
"""
type = serializers.CharField(required=True)
def validate(self, data):
kwargs = data.copy()
kwargs.pop("type")
return {
"args": (),
"kwargs": kwargs,
"extra_metadata": {},
}
def run_validation(self, data):
value = super(JobValidator, self).run_validation(data)
if not isinstance(value, dict):
raise TypeError("Validator must return a dict.")
extra_metadata = value.get("extra_metadata", {})
if extra_metadata is not None and not isinstance(extra_metadata, dict):
raise TypeError("'extra_metadata' must be a dict.")
if "user" in self.context and self.context["user"].is_authenticated():
user = self.context["user"]
extra_metadata.update(
{
"started_by": user.id,
"started_by_username": user.username,
}
)
value["extra_metadata"] = extra_metadata
return value
|
Implement a base job validator using DRF serializers.from rest_framework import serializers
class JobValidator(serializers.Serializer):
"""
A serializer class for validating and deserializing job data.
Task is included for completeness of documentation of expected fields.
But we will validate the existence of this before we get to this point.
"""
type = serializers.CharField(required=True)
def validate(self, data):
kwargs = data.copy()
kwargs.pop("type")
return {
"args": (),
"kwargs": kwargs,
"extra_metadata": {},
}
def run_validation(self, data):
value = super(JobValidator, self).run_validation(data)
if not isinstance(value, dict):
raise TypeError("Validator must return a dict.")
extra_metadata = value.get("extra_metadata", {})
if extra_metadata is not None and not isinstance(extra_metadata, dict):
raise TypeError("'extra_metadata' must be a dict.")
if "user" in self.context and self.context["user"].is_authenticated():
user = self.context["user"]
extra_metadata.update(
{
"started_by": user.id,
"started_by_username": user.username,
}
)
value["extra_metadata"] = extra_metadata
return value
|
<commit_before><commit_msg>Implement a base job validator using DRF serializers.<commit_after>from rest_framework import serializers
class JobValidator(serializers.Serializer):
"""
A serializer class for validating and deserializing job data.
Task is included for completeness of documentation of expected fields.
But we will validate the existence of this before we get to this point.
"""
type = serializers.CharField(required=True)
def validate(self, data):
kwargs = data.copy()
kwargs.pop("type")
return {
"args": (),
"kwargs": kwargs,
"extra_metadata": {},
}
def run_validation(self, data):
value = super(JobValidator, self).run_validation(data)
if not isinstance(value, dict):
raise TypeError("Validator must return a dict.")
extra_metadata = value.get("extra_metadata", {})
if extra_metadata is not None and not isinstance(extra_metadata, dict):
raise TypeError("'extra_metadata' must be a dict.")
if "user" in self.context and self.context["user"].is_authenticated():
user = self.context["user"]
extra_metadata.update(
{
"started_by": user.id,
"started_by_username": user.username,
}
)
value["extra_metadata"] = extra_metadata
return value
|
|
3c613bc1b729904883bca77924d892012b93cdc3
|
powerline/renderers/pango_markup.py
|
powerline/renderers/pango_markup.py
|
# vim:fileencoding=utf-8:noet
from powerline.renderer import Renderer
from powerline.colorscheme import ATTR_BOLD, ATTR_ITALIC, ATTR_UNDERLINE
class PangoMarkupRenderer(Renderer):
'''Powerline Pango markup segment renderer.'''
@staticmethod
def hlstyle(*args, **kwargs):
# We don't need to explicitly reset attributes, so skip those calls
return ''
def hl(self, contents, fg=None, bg=None, attr=None):
'''Highlight a segment.'''
awesome_attr = []
if fg is not None:
if fg is not False and fg[1] is not False:
awesome_attr += ['foreground="#{0:06x}"'.format(fg[1])]
if bg is not None:
if bg is not False and bg[1] is not False:
awesome_attr += ['background="#{0:06x}"'.format(bg[1])]
if attr is not None and attr is not False:
if attr & ATTR_BOLD:
awesome_attr += ['font_weight="bold"']
if attr & ATTR_ITALIC:
awesome_attr += ['font_style="italic"']
if attr & ATTR_UNDERLINE:
awesome_attr += ['underline="single"']
return '<span ' + ' '.join(awesome_attr) + '>' + contents + '</span>'
renderer = PangoMarkupRenderer
|
# vim:fileencoding=utf-8:noet
from powerline.renderer import Renderer
from powerline.colorscheme import ATTR_BOLD, ATTR_ITALIC, ATTR_UNDERLINE
from xmlrpclib import escape as _escape
class PangoMarkupRenderer(Renderer):
'''Powerline Pango markup segment renderer.'''
@staticmethod
def hlstyle(*args, **kwargs):
# We don't need to explicitly reset attributes, so skip those calls
return ''
def hl(self, contents, fg=None, bg=None, attr=None):
'''Highlight a segment.'''
awesome_attr = []
if fg is not None:
if fg is not False and fg[1] is not False:
awesome_attr += ['foreground="#{0:06x}"'.format(fg[1])]
if bg is not None:
if bg is not False and bg[1] is not False:
awesome_attr += ['background="#{0:06x}"'.format(bg[1])]
if attr is not None and attr is not False:
if attr & ATTR_BOLD:
awesome_attr += ['font_weight="bold"']
if attr & ATTR_ITALIC:
awesome_attr += ['font_style="italic"']
if attr & ATTR_UNDERLINE:
awesome_attr += ['underline="single"']
return '<span ' + ' '.join(awesome_attr) + '>' + contents + '</span>'
escape = staticmethod(_escape)
renderer = PangoMarkupRenderer
|
Use xmlrpclib.escape for escaping in PangoMarkupRenderer
|
Use xmlrpclib.escape for escaping in PangoMarkupRenderer
|
Python
|
mit
|
dragon788/powerline,cyrixhero/powerline,blindFS/powerline,QuLogic/powerline,magus424/powerline,areteix/powerline,dragon788/powerline,keelerm84/powerline,lukw00/powerline,xfumihiro/powerline,prvnkumar/powerline,magus424/powerline,s0undt3ch/powerline,dragon788/powerline,bartvm/powerline,bezhermoso/powerline,wfscheper/powerline,DoctorJellyface/powerline,junix/powerline,IvanAli/powerline,DoctorJellyface/powerline,cyrixhero/powerline,keelerm84/powerline,wfscheper/powerline,lukw00/powerline,darac/powerline,seanfisk/powerline,firebitsbr/powerline,Liangjianghao/powerline,Luffin/powerline,s0undt3ch/powerline,bezhermoso/powerline,S0lll0s/powerline,junix/powerline,wfscheper/powerline,xxxhycl2010/powerline,QuLogic/powerline,IvanAli/powerline,areteix/powerline,xfumihiro/powerline,xxxhycl2010/powerline,IvanAli/powerline,xxxhycl2010/powerline,blindFS/powerline,cyrixhero/powerline,prvnkumar/powerline,xfumihiro/powerline,Liangjianghao/powerline,DoctorJellyface/powerline,s0undt3ch/powerline,junix/powerline,blindFS/powerline,S0lll0s/powerline,darac/powerline,kenrachynski/powerline,prvnkumar/powerline,Luffin/powerline,Liangjianghao/powerline,seanfisk/powerline,magus424/powerline,EricSB/powerline,bartvm/powerline,firebitsbr/powerline,seanfisk/powerline,EricSB/powerline,russellb/powerline,QuLogic/powerline,darac/powerline,bartvm/powerline,lukw00/powerline,Luffin/powerline,kenrachynski/powerline,russellb/powerline,bezhermoso/powerline,S0lll0s/powerline,russellb/powerline,EricSB/powerline,firebitsbr/powerline,areteix/powerline,kenrachynski/powerline
|
# vim:fileencoding=utf-8:noet
from powerline.renderer import Renderer
from powerline.colorscheme import ATTR_BOLD, ATTR_ITALIC, ATTR_UNDERLINE
class PangoMarkupRenderer(Renderer):
'''Powerline Pango markup segment renderer.'''
@staticmethod
def hlstyle(*args, **kwargs):
# We don't need to explicitly reset attributes, so skip those calls
return ''
def hl(self, contents, fg=None, bg=None, attr=None):
'''Highlight a segment.'''
awesome_attr = []
if fg is not None:
if fg is not False and fg[1] is not False:
awesome_attr += ['foreground="#{0:06x}"'.format(fg[1])]
if bg is not None:
if bg is not False and bg[1] is not False:
awesome_attr += ['background="#{0:06x}"'.format(bg[1])]
if attr is not None and attr is not False:
if attr & ATTR_BOLD:
awesome_attr += ['font_weight="bold"']
if attr & ATTR_ITALIC:
awesome_attr += ['font_style="italic"']
if attr & ATTR_UNDERLINE:
awesome_attr += ['underline="single"']
return '<span ' + ' '.join(awesome_attr) + '>' + contents + '</span>'
renderer = PangoMarkupRenderer
Use xmlrpclib.escape for escaping in PangoMarkupRenderer
|
# vim:fileencoding=utf-8:noet
from powerline.renderer import Renderer
from powerline.colorscheme import ATTR_BOLD, ATTR_ITALIC, ATTR_UNDERLINE
from xmlrpclib import escape as _escape
class PangoMarkupRenderer(Renderer):
'''Powerline Pango markup segment renderer.'''
@staticmethod
def hlstyle(*args, **kwargs):
# We don't need to explicitly reset attributes, so skip those calls
return ''
def hl(self, contents, fg=None, bg=None, attr=None):
'''Highlight a segment.'''
awesome_attr = []
if fg is not None:
if fg is not False and fg[1] is not False:
awesome_attr += ['foreground="#{0:06x}"'.format(fg[1])]
if bg is not None:
if bg is not False and bg[1] is not False:
awesome_attr += ['background="#{0:06x}"'.format(bg[1])]
if attr is not None and attr is not False:
if attr & ATTR_BOLD:
awesome_attr += ['font_weight="bold"']
if attr & ATTR_ITALIC:
awesome_attr += ['font_style="italic"']
if attr & ATTR_UNDERLINE:
awesome_attr += ['underline="single"']
return '<span ' + ' '.join(awesome_attr) + '>' + contents + '</span>'
escape = staticmethod(_escape)
renderer = PangoMarkupRenderer
|
<commit_before># vim:fileencoding=utf-8:noet
from powerline.renderer import Renderer
from powerline.colorscheme import ATTR_BOLD, ATTR_ITALIC, ATTR_UNDERLINE
class PangoMarkupRenderer(Renderer):
'''Powerline Pango markup segment renderer.'''
@staticmethod
def hlstyle(*args, **kwargs):
# We don't need to explicitly reset attributes, so skip those calls
return ''
def hl(self, contents, fg=None, bg=None, attr=None):
'''Highlight a segment.'''
awesome_attr = []
if fg is not None:
if fg is not False and fg[1] is not False:
awesome_attr += ['foreground="#{0:06x}"'.format(fg[1])]
if bg is not None:
if bg is not False and bg[1] is not False:
awesome_attr += ['background="#{0:06x}"'.format(bg[1])]
if attr is not None and attr is not False:
if attr & ATTR_BOLD:
awesome_attr += ['font_weight="bold"']
if attr & ATTR_ITALIC:
awesome_attr += ['font_style="italic"']
if attr & ATTR_UNDERLINE:
awesome_attr += ['underline="single"']
return '<span ' + ' '.join(awesome_attr) + '>' + contents + '</span>'
renderer = PangoMarkupRenderer
<commit_msg>Use xmlrpclib.escape for escaping in PangoMarkupRenderer<commit_after>
|
# vim:fileencoding=utf-8:noet
from powerline.renderer import Renderer
from powerline.colorscheme import ATTR_BOLD, ATTR_ITALIC, ATTR_UNDERLINE
from xmlrpclib import escape as _escape
class PangoMarkupRenderer(Renderer):
'''Powerline Pango markup segment renderer.'''
@staticmethod
def hlstyle(*args, **kwargs):
# We don't need to explicitly reset attributes, so skip those calls
return ''
def hl(self, contents, fg=None, bg=None, attr=None):
'''Highlight a segment.'''
awesome_attr = []
if fg is not None:
if fg is not False and fg[1] is not False:
awesome_attr += ['foreground="#{0:06x}"'.format(fg[1])]
if bg is not None:
if bg is not False and bg[1] is not False:
awesome_attr += ['background="#{0:06x}"'.format(bg[1])]
if attr is not None and attr is not False:
if attr & ATTR_BOLD:
awesome_attr += ['font_weight="bold"']
if attr & ATTR_ITALIC:
awesome_attr += ['font_style="italic"']
if attr & ATTR_UNDERLINE:
awesome_attr += ['underline="single"']
return '<span ' + ' '.join(awesome_attr) + '>' + contents + '</span>'
escape = staticmethod(_escape)
renderer = PangoMarkupRenderer
|
# vim:fileencoding=utf-8:noet
from powerline.renderer import Renderer
from powerline.colorscheme import ATTR_BOLD, ATTR_ITALIC, ATTR_UNDERLINE
class PangoMarkupRenderer(Renderer):
'''Powerline Pango markup segment renderer.'''
@staticmethod
def hlstyle(*args, **kwargs):
# We don't need to explicitly reset attributes, so skip those calls
return ''
def hl(self, contents, fg=None, bg=None, attr=None):
'''Highlight a segment.'''
awesome_attr = []
if fg is not None:
if fg is not False and fg[1] is not False:
awesome_attr += ['foreground="#{0:06x}"'.format(fg[1])]
if bg is not None:
if bg is not False and bg[1] is not False:
awesome_attr += ['background="#{0:06x}"'.format(bg[1])]
if attr is not None and attr is not False:
if attr & ATTR_BOLD:
awesome_attr += ['font_weight="bold"']
if attr & ATTR_ITALIC:
awesome_attr += ['font_style="italic"']
if attr & ATTR_UNDERLINE:
awesome_attr += ['underline="single"']
return '<span ' + ' '.join(awesome_attr) + '>' + contents + '</span>'
renderer = PangoMarkupRenderer
Use xmlrpclib.escape for escaping in PangoMarkupRenderer# vim:fileencoding=utf-8:noet
from powerline.renderer import Renderer
from powerline.colorscheme import ATTR_BOLD, ATTR_ITALIC, ATTR_UNDERLINE
from xmlrpclib import escape as _escape
class PangoMarkupRenderer(Renderer):
'''Powerline Pango markup segment renderer.'''
@staticmethod
def hlstyle(*args, **kwargs):
# We don't need to explicitly reset attributes, so skip those calls
return ''
def hl(self, contents, fg=None, bg=None, attr=None):
'''Highlight a segment.'''
awesome_attr = []
if fg is not None:
if fg is not False and fg[1] is not False:
awesome_attr += ['foreground="#{0:06x}"'.format(fg[1])]
if bg is not None:
if bg is not False and bg[1] is not False:
awesome_attr += ['background="#{0:06x}"'.format(bg[1])]
if attr is not None and attr is not False:
if attr & ATTR_BOLD:
awesome_attr += ['font_weight="bold"']
if attr & ATTR_ITALIC:
awesome_attr += ['font_style="italic"']
if attr & ATTR_UNDERLINE:
awesome_attr += ['underline="single"']
return '<span ' + ' '.join(awesome_attr) + '>' + contents + '</span>'
escape = staticmethod(_escape)
renderer = PangoMarkupRenderer
|
<commit_before># vim:fileencoding=utf-8:noet
from powerline.renderer import Renderer
from powerline.colorscheme import ATTR_BOLD, ATTR_ITALIC, ATTR_UNDERLINE
class PangoMarkupRenderer(Renderer):
'''Powerline Pango markup segment renderer.'''
@staticmethod
def hlstyle(*args, **kwargs):
# We don't need to explicitly reset attributes, so skip those calls
return ''
def hl(self, contents, fg=None, bg=None, attr=None):
'''Highlight a segment.'''
awesome_attr = []
if fg is not None:
if fg is not False and fg[1] is not False:
awesome_attr += ['foreground="#{0:06x}"'.format(fg[1])]
if bg is not None:
if bg is not False and bg[1] is not False:
awesome_attr += ['background="#{0:06x}"'.format(bg[1])]
if attr is not None and attr is not False:
if attr & ATTR_BOLD:
awesome_attr += ['font_weight="bold"']
if attr & ATTR_ITALIC:
awesome_attr += ['font_style="italic"']
if attr & ATTR_UNDERLINE:
awesome_attr += ['underline="single"']
return '<span ' + ' '.join(awesome_attr) + '>' + contents + '</span>'
renderer = PangoMarkupRenderer
<commit_msg>Use xmlrpclib.escape for escaping in PangoMarkupRenderer<commit_after># vim:fileencoding=utf-8:noet
from powerline.renderer import Renderer
from powerline.colorscheme import ATTR_BOLD, ATTR_ITALIC, ATTR_UNDERLINE
from xmlrpclib import escape as _escape
class PangoMarkupRenderer(Renderer):
'''Powerline Pango markup segment renderer.'''
@staticmethod
def hlstyle(*args, **kwargs):
# We don't need to explicitly reset attributes, so skip those calls
return ''
def hl(self, contents, fg=None, bg=None, attr=None):
'''Highlight a segment.'''
awesome_attr = []
if fg is not None:
if fg is not False and fg[1] is not False:
awesome_attr += ['foreground="#{0:06x}"'.format(fg[1])]
if bg is not None:
if bg is not False and bg[1] is not False:
awesome_attr += ['background="#{0:06x}"'.format(bg[1])]
if attr is not None and attr is not False:
if attr & ATTR_BOLD:
awesome_attr += ['font_weight="bold"']
if attr & ATTR_ITALIC:
awesome_attr += ['font_style="italic"']
if attr & ATTR_UNDERLINE:
awesome_attr += ['underline="single"']
return '<span ' + ' '.join(awesome_attr) + '>' + contents + '</span>'
escape = staticmethod(_escape)
renderer = PangoMarkupRenderer
|
0c537c9cb0d6899557fd0cce125bf35a626493cf
|
migrations/versions/21b7c3b2ce88_index_build_project_.py
|
migrations/versions/21b7c3b2ce88_index_build_project_.py
|
"""Index Build.project_id,patch_id,date_created
Revision ID: 21b7c3b2ce88
Revises: 4134b4818694
Create Date: 2013-12-03 16:19:11.794912
"""
# revision identifiers, used by Alembic.
revision = '21b7c3b2ce88'
down_revision = '4134b4818694'
from alembic import op
def upgrade():
op.create_index('idx_build_project_patch_date', 'build', ['project_id', 'patch_id', 'date_created'])
def downgrade():
pass
|
Index Build (project_id, patch_id, date_created)
|
Index Build (project_id, patch_id, date_created)
|
Python
|
apache-2.0
|
dropbox/changes,bowlofstew/changes,dropbox/changes,wfxiang08/changes,wfxiang08/changes,bowlofstew/changes,wfxiang08/changes,bowlofstew/changes,wfxiang08/changes,dropbox/changes,bowlofstew/changes,dropbox/changes
|
Index Build (project_id, patch_id, date_created)
|
"""Index Build.project_id,patch_id,date_created
Revision ID: 21b7c3b2ce88
Revises: 4134b4818694
Create Date: 2013-12-03 16:19:11.794912
"""
# revision identifiers, used by Alembic.
revision = '21b7c3b2ce88'
down_revision = '4134b4818694'
from alembic import op
def upgrade():
op.create_index('idx_build_project_patch_date', 'build', ['project_id', 'patch_id', 'date_created'])
def downgrade():
pass
|
<commit_before><commit_msg>Index Build (project_id, patch_id, date_created)<commit_after>
|
"""Index Build.project_id,patch_id,date_created
Revision ID: 21b7c3b2ce88
Revises: 4134b4818694
Create Date: 2013-12-03 16:19:11.794912
"""
# revision identifiers, used by Alembic.
revision = '21b7c3b2ce88'
down_revision = '4134b4818694'
from alembic import op
def upgrade():
op.create_index('idx_build_project_patch_date', 'build', ['project_id', 'patch_id', 'date_created'])
def downgrade():
pass
|
Index Build (project_id, patch_id, date_created)"""Index Build.project_id,patch_id,date_created
Revision ID: 21b7c3b2ce88
Revises: 4134b4818694
Create Date: 2013-12-03 16:19:11.794912
"""
# revision identifiers, used by Alembic.
revision = '21b7c3b2ce88'
down_revision = '4134b4818694'
from alembic import op
def upgrade():
op.create_index('idx_build_project_patch_date', 'build', ['project_id', 'patch_id', 'date_created'])
def downgrade():
pass
|
<commit_before><commit_msg>Index Build (project_id, patch_id, date_created)<commit_after>"""Index Build.project_id,patch_id,date_created
Revision ID: 21b7c3b2ce88
Revises: 4134b4818694
Create Date: 2013-12-03 16:19:11.794912
"""
# revision identifiers, used by Alembic.
revision = '21b7c3b2ce88'
down_revision = '4134b4818694'
from alembic import op
def upgrade():
op.create_index('idx_build_project_patch_date', 'build', ['project_id', 'patch_id', 'date_created'])
def downgrade():
pass
|
|
2adff23b1c9f6eef00c4e21449911098107d36e6
|
tests/test_available.py
|
tests/test_available.py
|
from api.search import available
def test_available_term_by_category_invalid():
assert available.available_term_by_category('foo', 'bar') == []
def test_available_term_by_category_taxonomy():
tests = [
(('superkingdom', 'b'), [{'val': 'Bacteria', 'desc': None}]),
(('phylum', 'f'), [{'val': 'Firmicutes', 'desc': None}]),
(('class', 'b'), [{'val': 'Bacilli', 'desc': None}]),
(('order', 'l'), [{'val': 'Lactobacillales', 'desc': None}]),
(('family', 's'), [{'val': 'Streptococcaceae', 'desc': None}, {'val': 'Streptomycetaceae', 'desc': None}]),
(('genus', 'l'), [{'val': 'Lactococcus', 'desc': None}]),
(('species', 'l'), [{'val': 'lactis', 'desc': None}]),
(('strain', 'c'), [{'val': 'CV56', 'desc': None}]),
]
for args, expected in tests:
assert available.available_term_by_category(*args) == expected
def test_available_term_by_category():
tests = [
(('acc', 'nc_01'), [{'val': 'NC_017486', 'desc': None}]),
(('compoundseq', 'a'), [{'val': 'ASFGEGTFTSPSSYAIGTRCPICC', 'desc': None}]),
(('compoundclass', 'c'), [{'val': 'Class-I', 'desc': None}, {'val': 'Class-III', 'desc': None}]),
(('monomer', 'ala'), [{'val': 'ala', 'desc': 'Alanine'}]),
(('type', 'lanti'), [{'val': 'lantipeptide', 'desc': 'Lanthipeptide'}]),
(('profile', 'fabf'), [{'val': 'FabF', 'desc': 'FabF'}]),
(('asdomain', 'PKS_DH2'), [{'val': 'PKS_DH2', 'desc': 'Dehydrogenase-2 domain'}]),
(('clusterblast', 'HM219853'), [{'val': 'HM219853_c1', 'desc': 'Lactococcus lactis subsp. lactis nisin biosynthetic gene clust...'}]),
(('knowncluster', 'kirro'), [{'val': 'BGC0001070_c1', 'desc': 'Kirromycin biosynthetic gene cluster'}]),
(('subcluster', 'novobiocin'), [{'val': 'AF170880_2_c2', 'desc': 'novobiocin noviose deoxysugar'}]),
]
for args, expected in tests:
assert available.available_term_by_category(*args) == expected, args
|
Add tests for 'available' module
|
search: Add tests for 'available' module
Signed-off-by: Kai Blin <ad3597797f6179d503c382b2627cc19939309418@biosustain.dtu.dk>
|
Python
|
agpl-3.0
|
antismash/db-api,antismash/db-api
|
search: Add tests for 'available' module
Signed-off-by: Kai Blin <ad3597797f6179d503c382b2627cc19939309418@biosustain.dtu.dk>
|
from api.search import available
def test_available_term_by_category_invalid():
assert available.available_term_by_category('foo', 'bar') == []
def test_available_term_by_category_taxonomy():
tests = [
(('superkingdom', 'b'), [{'val': 'Bacteria', 'desc': None}]),
(('phylum', 'f'), [{'val': 'Firmicutes', 'desc': None}]),
(('class', 'b'), [{'val': 'Bacilli', 'desc': None}]),
(('order', 'l'), [{'val': 'Lactobacillales', 'desc': None}]),
(('family', 's'), [{'val': 'Streptococcaceae', 'desc': None}, {'val': 'Streptomycetaceae', 'desc': None}]),
(('genus', 'l'), [{'val': 'Lactococcus', 'desc': None}]),
(('species', 'l'), [{'val': 'lactis', 'desc': None}]),
(('strain', 'c'), [{'val': 'CV56', 'desc': None}]),
]
for args, expected in tests:
assert available.available_term_by_category(*args) == expected
def test_available_term_by_category():
tests = [
(('acc', 'nc_01'), [{'val': 'NC_017486', 'desc': None}]),
(('compoundseq', 'a'), [{'val': 'ASFGEGTFTSPSSYAIGTRCPICC', 'desc': None}]),
(('compoundclass', 'c'), [{'val': 'Class-I', 'desc': None}, {'val': 'Class-III', 'desc': None}]),
(('monomer', 'ala'), [{'val': 'ala', 'desc': 'Alanine'}]),
(('type', 'lanti'), [{'val': 'lantipeptide', 'desc': 'Lanthipeptide'}]),
(('profile', 'fabf'), [{'val': 'FabF', 'desc': 'FabF'}]),
(('asdomain', 'PKS_DH2'), [{'val': 'PKS_DH2', 'desc': 'Dehydrogenase-2 domain'}]),
(('clusterblast', 'HM219853'), [{'val': 'HM219853_c1', 'desc': 'Lactococcus lactis subsp. lactis nisin biosynthetic gene clust...'}]),
(('knowncluster', 'kirro'), [{'val': 'BGC0001070_c1', 'desc': 'Kirromycin biosynthetic gene cluster'}]),
(('subcluster', 'novobiocin'), [{'val': 'AF170880_2_c2', 'desc': 'novobiocin noviose deoxysugar'}]),
]
for args, expected in tests:
assert available.available_term_by_category(*args) == expected, args
|
<commit_before><commit_msg>search: Add tests for 'available' module
Signed-off-by: Kai Blin <ad3597797f6179d503c382b2627cc19939309418@biosustain.dtu.dk><commit_after>
|
from api.search import available
def test_available_term_by_category_invalid():
assert available.available_term_by_category('foo', 'bar') == []
def test_available_term_by_category_taxonomy():
tests = [
(('superkingdom', 'b'), [{'val': 'Bacteria', 'desc': None}]),
(('phylum', 'f'), [{'val': 'Firmicutes', 'desc': None}]),
(('class', 'b'), [{'val': 'Bacilli', 'desc': None}]),
(('order', 'l'), [{'val': 'Lactobacillales', 'desc': None}]),
(('family', 's'), [{'val': 'Streptococcaceae', 'desc': None}, {'val': 'Streptomycetaceae', 'desc': None}]),
(('genus', 'l'), [{'val': 'Lactococcus', 'desc': None}]),
(('species', 'l'), [{'val': 'lactis', 'desc': None}]),
(('strain', 'c'), [{'val': 'CV56', 'desc': None}]),
]
for args, expected in tests:
assert available.available_term_by_category(*args) == expected
def test_available_term_by_category():
tests = [
(('acc', 'nc_01'), [{'val': 'NC_017486', 'desc': None}]),
(('compoundseq', 'a'), [{'val': 'ASFGEGTFTSPSSYAIGTRCPICC', 'desc': None}]),
(('compoundclass', 'c'), [{'val': 'Class-I', 'desc': None}, {'val': 'Class-III', 'desc': None}]),
(('monomer', 'ala'), [{'val': 'ala', 'desc': 'Alanine'}]),
(('type', 'lanti'), [{'val': 'lantipeptide', 'desc': 'Lanthipeptide'}]),
(('profile', 'fabf'), [{'val': 'FabF', 'desc': 'FabF'}]),
(('asdomain', 'PKS_DH2'), [{'val': 'PKS_DH2', 'desc': 'Dehydrogenase-2 domain'}]),
(('clusterblast', 'HM219853'), [{'val': 'HM219853_c1', 'desc': 'Lactococcus lactis subsp. lactis nisin biosynthetic gene clust...'}]),
(('knowncluster', 'kirro'), [{'val': 'BGC0001070_c1', 'desc': 'Kirromycin biosynthetic gene cluster'}]),
(('subcluster', 'novobiocin'), [{'val': 'AF170880_2_c2', 'desc': 'novobiocin noviose deoxysugar'}]),
]
for args, expected in tests:
assert available.available_term_by_category(*args) == expected, args
|
search: Add tests for 'available' module
Signed-off-by: Kai Blin <ad3597797f6179d503c382b2627cc19939309418@biosustain.dtu.dk>from api.search import available
def test_available_term_by_category_invalid():
assert available.available_term_by_category('foo', 'bar') == []
def test_available_term_by_category_taxonomy():
tests = [
(('superkingdom', 'b'), [{'val': 'Bacteria', 'desc': None}]),
(('phylum', 'f'), [{'val': 'Firmicutes', 'desc': None}]),
(('class', 'b'), [{'val': 'Bacilli', 'desc': None}]),
(('order', 'l'), [{'val': 'Lactobacillales', 'desc': None}]),
(('family', 's'), [{'val': 'Streptococcaceae', 'desc': None}, {'val': 'Streptomycetaceae', 'desc': None}]),
(('genus', 'l'), [{'val': 'Lactococcus', 'desc': None}]),
(('species', 'l'), [{'val': 'lactis', 'desc': None}]),
(('strain', 'c'), [{'val': 'CV56', 'desc': None}]),
]
for args, expected in tests:
assert available.available_term_by_category(*args) == expected
def test_available_term_by_category():
tests = [
(('acc', 'nc_01'), [{'val': 'NC_017486', 'desc': None}]),
(('compoundseq', 'a'), [{'val': 'ASFGEGTFTSPSSYAIGTRCPICC', 'desc': None}]),
(('compoundclass', 'c'), [{'val': 'Class-I', 'desc': None}, {'val': 'Class-III', 'desc': None}]),
(('monomer', 'ala'), [{'val': 'ala', 'desc': 'Alanine'}]),
(('type', 'lanti'), [{'val': 'lantipeptide', 'desc': 'Lanthipeptide'}]),
(('profile', 'fabf'), [{'val': 'FabF', 'desc': 'FabF'}]),
(('asdomain', 'PKS_DH2'), [{'val': 'PKS_DH2', 'desc': 'Dehydrogenase-2 domain'}]),
(('clusterblast', 'HM219853'), [{'val': 'HM219853_c1', 'desc': 'Lactococcus lactis subsp. lactis nisin biosynthetic gene clust...'}]),
(('knowncluster', 'kirro'), [{'val': 'BGC0001070_c1', 'desc': 'Kirromycin biosynthetic gene cluster'}]),
(('subcluster', 'novobiocin'), [{'val': 'AF170880_2_c2', 'desc': 'novobiocin noviose deoxysugar'}]),
]
for args, expected in tests:
assert available.available_term_by_category(*args) == expected, args
|
<commit_before><commit_msg>search: Add tests for 'available' module
Signed-off-by: Kai Blin <ad3597797f6179d503c382b2627cc19939309418@biosustain.dtu.dk><commit_after>from api.search import available
def test_available_term_by_category_invalid():
assert available.available_term_by_category('foo', 'bar') == []
def test_available_term_by_category_taxonomy():
tests = [
(('superkingdom', 'b'), [{'val': 'Bacteria', 'desc': None}]),
(('phylum', 'f'), [{'val': 'Firmicutes', 'desc': None}]),
(('class', 'b'), [{'val': 'Bacilli', 'desc': None}]),
(('order', 'l'), [{'val': 'Lactobacillales', 'desc': None}]),
(('family', 's'), [{'val': 'Streptococcaceae', 'desc': None}, {'val': 'Streptomycetaceae', 'desc': None}]),
(('genus', 'l'), [{'val': 'Lactococcus', 'desc': None}]),
(('species', 'l'), [{'val': 'lactis', 'desc': None}]),
(('strain', 'c'), [{'val': 'CV56', 'desc': None}]),
]
for args, expected in tests:
assert available.available_term_by_category(*args) == expected
def test_available_term_by_category():
tests = [
(('acc', 'nc_01'), [{'val': 'NC_017486', 'desc': None}]),
(('compoundseq', 'a'), [{'val': 'ASFGEGTFTSPSSYAIGTRCPICC', 'desc': None}]),
(('compoundclass', 'c'), [{'val': 'Class-I', 'desc': None}, {'val': 'Class-III', 'desc': None}]),
(('monomer', 'ala'), [{'val': 'ala', 'desc': 'Alanine'}]),
(('type', 'lanti'), [{'val': 'lantipeptide', 'desc': 'Lanthipeptide'}]),
(('profile', 'fabf'), [{'val': 'FabF', 'desc': 'FabF'}]),
(('asdomain', 'PKS_DH2'), [{'val': 'PKS_DH2', 'desc': 'Dehydrogenase-2 domain'}]),
(('clusterblast', 'HM219853'), [{'val': 'HM219853_c1', 'desc': 'Lactococcus lactis subsp. lactis nisin biosynthetic gene clust...'}]),
(('knowncluster', 'kirro'), [{'val': 'BGC0001070_c1', 'desc': 'Kirromycin biosynthetic gene cluster'}]),
(('subcluster', 'novobiocin'), [{'val': 'AF170880_2_c2', 'desc': 'novobiocin noviose deoxysugar'}]),
]
for args, expected in tests:
assert available.available_term_by_category(*args) == expected, args
|
|
13e07eac9c69ed49210695a8465679c3df480d50
|
build.py
|
build.py
|
"""
Build (freeze) the versionhero program.
"""
import os
def main():
"""
Run this main function if this script is called directly.
:return: None
"""
os.system('pyinstaller --onefile versionhero.py')
if __name__ == "__main__":
main()
|
Add a script that will use pyinstaller to create an exe.
|
Add a script that will use pyinstaller to create an exe.
|
Python
|
apache-2.0
|
chadgra/versionhero
|
Add a script that will use pyinstaller to create an exe.
|
"""
Build (freeze) the versionhero program.
"""
import os
def main():
"""
Run this main function if this script is called directly.
:return: None
"""
os.system('pyinstaller --onefile versionhero.py')
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add a script that will use pyinstaller to create an exe.<commit_after>
|
"""
Build (freeze) the versionhero program.
"""
import os
def main():
"""
Run this main function if this script is called directly.
:return: None
"""
os.system('pyinstaller --onefile versionhero.py')
if __name__ == "__main__":
main()
|
Add a script that will use pyinstaller to create an exe."""
Build (freeze) the versionhero program.
"""
import os
def main():
"""
Run this main function if this script is called directly.
:return: None
"""
os.system('pyinstaller --onefile versionhero.py')
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add a script that will use pyinstaller to create an exe.<commit_after>"""
Build (freeze) the versionhero program.
"""
import os
def main():
"""
Run this main function if this script is called directly.
:return: None
"""
os.system('pyinstaller --onefile versionhero.py')
if __name__ == "__main__":
main()
|
|
318656f44d4f987e912229f83192c2d8adf97142
|
ibis/pandas/tests/test_core.py
|
ibis/pandas/tests/test_core.py
|
import pytest
pytest.importorskip('multipledispatch')
from ibis.pandas.execution import execute, execute_node # noqa: E402
from multipledispatch.conflict import ambiguities # noqa: E402
@pytest.mark.parametrize('func', [execute, execute_node])
def test_no_execute_ambiguities(func):
assert not ambiguities(func.funcs)
|
Test that there are no ambiguous dispatch definitions
|
TST: Test that there are no ambiguous dispatch definitions
Author: Phillip Cloud <cpcloud@gmail.com>
Closes #1026 from cpcloud/test-amb and squashes the following commits:
073d7b8 [Phillip Cloud] TST: Test that there are no ambiguous dispatch definitions
|
Python
|
apache-2.0
|
ibis-project/ibis,deepfield/ibis,cpcloud/ibis,cpcloud/ibis,deepfield/ibis,ibis-project/ibis,deepfield/ibis,cloudera/ibis,cpcloud/ibis,cpcloud/ibis,cloudera/ibis,deepfield/ibis,ibis-project/ibis,ibis-project/ibis,cloudera/ibis
|
TST: Test that there are no ambiguous dispatch definitions
Author: Phillip Cloud <cpcloud@gmail.com>
Closes #1026 from cpcloud/test-amb and squashes the following commits:
073d7b8 [Phillip Cloud] TST: Test that there are no ambiguous dispatch definitions
|
import pytest
pytest.importorskip('multipledispatch')
from ibis.pandas.execution import execute, execute_node # noqa: E402
from multipledispatch.conflict import ambiguities # noqa: E402
@pytest.mark.parametrize('func', [execute, execute_node])
def test_no_execute_ambiguities(func):
assert not ambiguities(func.funcs)
|
<commit_before><commit_msg>TST: Test that there are no ambiguous dispatch definitions
Author: Phillip Cloud <cpcloud@gmail.com>
Closes #1026 from cpcloud/test-amb and squashes the following commits:
073d7b8 [Phillip Cloud] TST: Test that there are no ambiguous dispatch definitions<commit_after>
|
import pytest
pytest.importorskip('multipledispatch')
from ibis.pandas.execution import execute, execute_node # noqa: E402
from multipledispatch.conflict import ambiguities # noqa: E402
@pytest.mark.parametrize('func', [execute, execute_node])
def test_no_execute_ambiguities(func):
assert not ambiguities(func.funcs)
|
TST: Test that there are no ambiguous dispatch definitions
Author: Phillip Cloud <cpcloud@gmail.com>
Closes #1026 from cpcloud/test-amb and squashes the following commits:
073d7b8 [Phillip Cloud] TST: Test that there are no ambiguous dispatch definitionsimport pytest
pytest.importorskip('multipledispatch')
from ibis.pandas.execution import execute, execute_node # noqa: E402
from multipledispatch.conflict import ambiguities # noqa: E402
@pytest.mark.parametrize('func', [execute, execute_node])
def test_no_execute_ambiguities(func):
assert not ambiguities(func.funcs)
|
<commit_before><commit_msg>TST: Test that there are no ambiguous dispatch definitions
Author: Phillip Cloud <cpcloud@gmail.com>
Closes #1026 from cpcloud/test-amb and squashes the following commits:
073d7b8 [Phillip Cloud] TST: Test that there are no ambiguous dispatch definitions<commit_after>import pytest
pytest.importorskip('multipledispatch')
from ibis.pandas.execution import execute, execute_node # noqa: E402
from multipledispatch.conflict import ambiguities # noqa: E402
@pytest.mark.parametrize('func', [execute, execute_node])
def test_no_execute_ambiguities(func):
assert not ambiguities(func.funcs)
|
|
c87e206d524137e9e76166faf20d5b1e10419220
|
senlin/tests/tempest/api/profiles/test_profile_delete.py
|
senlin/tests/tempest/api/profiles/test_profile_delete.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin.tests.tempest.api import base
from senlin.tests.tempest.common import constants
class TestProfileDelete(base.BaseSenlinTest):
@classmethod
def resource_setup(cls):
super(TestProfileDelete, cls).resource_setup()
# Create profile
cls.profile = cls.create_profile(constants.spec_nova_server)
@decorators.idempotent_id('ea3c1b9e-5ed7-4d63-84ce-2032c3bc6d27')
def test_delete_policy(self):
# Verify resp of policy delete API
res = self.client.delete_obj('profiles', self.profile['id'])
self.assertEqual(204, res['status'])
self.assertIsNone(res['body'])
|
Add API tests for profile delete
|
Add API tests for profile delete
Add API tests for profile delete
Change-Id: I47a733ce203ba76fc57080a1b086661720e83930
|
Python
|
apache-2.0
|
openstack/senlin,openstack/senlin,stackforge/senlin,openstack/senlin,stackforge/senlin
|
Add API tests for profile delete
Add API tests for profile delete
Change-Id: I47a733ce203ba76fc57080a1b086661720e83930
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin.tests.tempest.api import base
from senlin.tests.tempest.common import constants
class TestProfileDelete(base.BaseSenlinTest):
@classmethod
def resource_setup(cls):
super(TestProfileDelete, cls).resource_setup()
# Create profile
cls.profile = cls.create_profile(constants.spec_nova_server)
@decorators.idempotent_id('ea3c1b9e-5ed7-4d63-84ce-2032c3bc6d27')
def test_delete_policy(self):
# Verify resp of policy delete API
res = self.client.delete_obj('profiles', self.profile['id'])
self.assertEqual(204, res['status'])
self.assertIsNone(res['body'])
|
<commit_before><commit_msg>Add API tests for profile delete
Add API tests for profile delete
Change-Id: I47a733ce203ba76fc57080a1b086661720e83930<commit_after>
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin.tests.tempest.api import base
from senlin.tests.tempest.common import constants
class TestProfileDelete(base.BaseSenlinTest):
@classmethod
def resource_setup(cls):
super(TestProfileDelete, cls).resource_setup()
# Create profile
cls.profile = cls.create_profile(constants.spec_nova_server)
@decorators.idempotent_id('ea3c1b9e-5ed7-4d63-84ce-2032c3bc6d27')
def test_delete_policy(self):
# Verify resp of policy delete API
res = self.client.delete_obj('profiles', self.profile['id'])
self.assertEqual(204, res['status'])
self.assertIsNone(res['body'])
|
Add API tests for profile delete
Add API tests for profile delete
Change-Id: I47a733ce203ba76fc57080a1b086661720e83930# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin.tests.tempest.api import base
from senlin.tests.tempest.common import constants
class TestProfileDelete(base.BaseSenlinTest):
@classmethod
def resource_setup(cls):
super(TestProfileDelete, cls).resource_setup()
# Create profile
cls.profile = cls.create_profile(constants.spec_nova_server)
@decorators.idempotent_id('ea3c1b9e-5ed7-4d63-84ce-2032c3bc6d27')
def test_delete_policy(self):
# Verify resp of policy delete API
res = self.client.delete_obj('profiles', self.profile['id'])
self.assertEqual(204, res['status'])
self.assertIsNone(res['body'])
|
<commit_before><commit_msg>Add API tests for profile delete
Add API tests for profile delete
Change-Id: I47a733ce203ba76fc57080a1b086661720e83930<commit_after># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin.tests.tempest.api import base
from senlin.tests.tempest.common import constants
class TestProfileDelete(base.BaseSenlinTest):
@classmethod
def resource_setup(cls):
super(TestProfileDelete, cls).resource_setup()
# Create profile
cls.profile = cls.create_profile(constants.spec_nova_server)
@decorators.idempotent_id('ea3c1b9e-5ed7-4d63-84ce-2032c3bc6d27')
def test_delete_policy(self):
# Verify resp of policy delete API
res = self.client.delete_obj('profiles', self.profile['id'])
self.assertEqual(204, res['status'])
self.assertIsNone(res['body'])
|
|
8ef4bb66c77fd0643b5d5a0b99290da05bcdce47
|
senlin/tests/tempest/api/receivers/test_receiver_list.py
|
senlin/tests/tempest/api/receivers/test_receiver_list.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin.tests.tempest.api import base
from senlin.tests.tempest.common import constants
class TestReceiverList(base.BaseSenlinTest):
@classmethod
def resource_setup(cls):
super(TestReceiverList, cls).resource_setup()
# Create profile
cls.profile = cls.create_profile(constants.spec_nova_server)
# Create a test cluster
cls.cluster = cls.create_test_cluster(cls.profile['id'],
0, min_size=0, max_size=-1)
# Create receiver
cls.receiver = cls.create_receiver(cls.cluster['id'],
'CLUSTER_RESIZE', 'webhook')
@classmethod
def resource_cleanup(cls):
# Delete receiver
cls.client.delete_obj('receivers', cls.receiver['id'])
# Delete test cluster
cls.delete_test_cluster(cls.cluster['id'])
# Delete profile
cls.delete_profile(cls.profile['id'])
super(TestReceiverList, cls).resource_cleanup()
@decorators.idempotent_id('e5cedce0-9240-45ea-90d7-692be5058aac')
def test_list_receiver(self):
res = self.client.list_objs('receivers')
self.assertEqual(200, res['status'])
self.assertIsNone(res['location'])
self.assertIsNotNone(res['body'])
receivers = res['body']
ids = []
for receiver in receivers:
for key in ['action', 'actor', 'channel', 'cluster_id',
'created_at', 'domain', 'id', 'name', 'params',
'project', 'type', 'updated_at', 'user']:
self.assertIn(key, receiver)
ids.append(receiver['id'])
self.assertIn(self.receiver['id'], ids)
|
Add API test for receiver list
|
Add API test for receiver list
Add API test for receiver list
Change-Id: I45d63410dc3a8cf361fe2ca446439abd0e9c2cec
|
Python
|
apache-2.0
|
openstack/senlin,openstack/senlin,stackforge/senlin,openstack/senlin,stackforge/senlin
|
Add API test for receiver list
Add API test for receiver list
Change-Id: I45d63410dc3a8cf361fe2ca446439abd0e9c2cec
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin.tests.tempest.api import base
from senlin.tests.tempest.common import constants
class TestReceiverList(base.BaseSenlinTest):
@classmethod
def resource_setup(cls):
super(TestReceiverList, cls).resource_setup()
# Create profile
cls.profile = cls.create_profile(constants.spec_nova_server)
# Create a test cluster
cls.cluster = cls.create_test_cluster(cls.profile['id'],
0, min_size=0, max_size=-1)
# Create receiver
cls.receiver = cls.create_receiver(cls.cluster['id'],
'CLUSTER_RESIZE', 'webhook')
@classmethod
def resource_cleanup(cls):
# Delete receiver
cls.client.delete_obj('receivers', cls.receiver['id'])
# Delete test cluster
cls.delete_test_cluster(cls.cluster['id'])
# Delete profile
cls.delete_profile(cls.profile['id'])
super(TestReceiverList, cls).resource_cleanup()
@decorators.idempotent_id('e5cedce0-9240-45ea-90d7-692be5058aac')
def test_list_receiver(self):
res = self.client.list_objs('receivers')
self.assertEqual(200, res['status'])
self.assertIsNone(res['location'])
self.assertIsNotNone(res['body'])
receivers = res['body']
ids = []
for receiver in receivers:
for key in ['action', 'actor', 'channel', 'cluster_id',
'created_at', 'domain', 'id', 'name', 'params',
'project', 'type', 'updated_at', 'user']:
self.assertIn(key, receiver)
ids.append(receiver['id'])
self.assertIn(self.receiver['id'], ids)
|
<commit_before><commit_msg>Add API test for receiver list
Add API test for receiver list
Change-Id: I45d63410dc3a8cf361fe2ca446439abd0e9c2cec<commit_after>
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin.tests.tempest.api import base
from senlin.tests.tempest.common import constants
class TestReceiverList(base.BaseSenlinTest):
@classmethod
def resource_setup(cls):
super(TestReceiverList, cls).resource_setup()
# Create profile
cls.profile = cls.create_profile(constants.spec_nova_server)
# Create a test cluster
cls.cluster = cls.create_test_cluster(cls.profile['id'],
0, min_size=0, max_size=-1)
# Create receiver
cls.receiver = cls.create_receiver(cls.cluster['id'],
'CLUSTER_RESIZE', 'webhook')
@classmethod
def resource_cleanup(cls):
# Delete receiver
cls.client.delete_obj('receivers', cls.receiver['id'])
# Delete test cluster
cls.delete_test_cluster(cls.cluster['id'])
# Delete profile
cls.delete_profile(cls.profile['id'])
super(TestReceiverList, cls).resource_cleanup()
@decorators.idempotent_id('e5cedce0-9240-45ea-90d7-692be5058aac')
def test_list_receiver(self):
res = self.client.list_objs('receivers')
self.assertEqual(200, res['status'])
self.assertIsNone(res['location'])
self.assertIsNotNone(res['body'])
receivers = res['body']
ids = []
for receiver in receivers:
for key in ['action', 'actor', 'channel', 'cluster_id',
'created_at', 'domain', 'id', 'name', 'params',
'project', 'type', 'updated_at', 'user']:
self.assertIn(key, receiver)
ids.append(receiver['id'])
self.assertIn(self.receiver['id'], ids)
|
Add API test for receiver list
Add API test for receiver list
Change-Id: I45d63410dc3a8cf361fe2ca446439abd0e9c2cec# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin.tests.tempest.api import base
from senlin.tests.tempest.common import constants
class TestReceiverList(base.BaseSenlinTest):
@classmethod
def resource_setup(cls):
super(TestReceiverList, cls).resource_setup()
# Create profile
cls.profile = cls.create_profile(constants.spec_nova_server)
# Create a test cluster
cls.cluster = cls.create_test_cluster(cls.profile['id'],
0, min_size=0, max_size=-1)
# Create receiver
cls.receiver = cls.create_receiver(cls.cluster['id'],
'CLUSTER_RESIZE', 'webhook')
@classmethod
def resource_cleanup(cls):
# Delete receiver
cls.client.delete_obj('receivers', cls.receiver['id'])
# Delete test cluster
cls.delete_test_cluster(cls.cluster['id'])
# Delete profile
cls.delete_profile(cls.profile['id'])
super(TestReceiverList, cls).resource_cleanup()
@decorators.idempotent_id('e5cedce0-9240-45ea-90d7-692be5058aac')
def test_list_receiver(self):
res = self.client.list_objs('receivers')
self.assertEqual(200, res['status'])
self.assertIsNone(res['location'])
self.assertIsNotNone(res['body'])
receivers = res['body']
ids = []
for receiver in receivers:
for key in ['action', 'actor', 'channel', 'cluster_id',
'created_at', 'domain', 'id', 'name', 'params',
'project', 'type', 'updated_at', 'user']:
self.assertIn(key, receiver)
ids.append(receiver['id'])
self.assertIn(self.receiver['id'], ids)
|
<commit_before><commit_msg>Add API test for receiver list
Add API test for receiver list
Change-Id: I45d63410dc3a8cf361fe2ca446439abd0e9c2cec<commit_after># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin.tests.tempest.api import base
from senlin.tests.tempest.common import constants
class TestReceiverList(base.BaseSenlinTest):
@classmethod
def resource_setup(cls):
super(TestReceiverList, cls).resource_setup()
# Create profile
cls.profile = cls.create_profile(constants.spec_nova_server)
# Create a test cluster
cls.cluster = cls.create_test_cluster(cls.profile['id'],
0, min_size=0, max_size=-1)
# Create receiver
cls.receiver = cls.create_receiver(cls.cluster['id'],
'CLUSTER_RESIZE', 'webhook')
@classmethod
def resource_cleanup(cls):
# Delete receiver
cls.client.delete_obj('receivers', cls.receiver['id'])
# Delete test cluster
cls.delete_test_cluster(cls.cluster['id'])
# Delete profile
cls.delete_profile(cls.profile['id'])
super(TestReceiverList, cls).resource_cleanup()
@decorators.idempotent_id('e5cedce0-9240-45ea-90d7-692be5058aac')
def test_list_receiver(self):
res = self.client.list_objs('receivers')
self.assertEqual(200, res['status'])
self.assertIsNone(res['location'])
self.assertIsNotNone(res['body'])
receivers = res['body']
ids = []
for receiver in receivers:
for key in ['action', 'actor', 'channel', 'cluster_id',
'created_at', 'domain', 'id', 'name', 'params',
'project', 'type', 'updated_at', 'user']:
self.assertIn(key, receiver)
ids.append(receiver['id'])
self.assertIn(self.receiver['id'], ids)
|
|
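The per-key loop in the receiver-list test above amounts to a set comparison, which also reports every missing key at once. A minimal stdlib-only sketch, separate from this dataset record; the sample payload is made up for illustration and is not a real Senlin response:
REQUIRED_KEYS = {'action', 'actor', 'channel', 'cluster_id', 'created_at',
                 'domain', 'id', 'name', 'params', 'project', 'type',
                 'updated_at', 'user'}
def missing_keys(receiver):
    # dict.keys() is set-like, so plain set difference works here
    return REQUIRED_KEYS - receiver.keys()
sample = {key: None for key in REQUIRED_KEYS}  # hypothetical receiver payload
assert missing_keys(sample) == set()
assert missing_keys({'id': 'r1'}) == REQUIRED_KEYS - {'id'}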
795d22c113951e45803baadde2c4c95ba05de4c9
|
s2protocol/compat.py
|
s2protocol/compat.py
|
from six import PY3, binary_type
__all__ = 'byte_to_int',
def byte_to_int(x):
if PY3 and isinstance(x, binary_type):
return x
else:
return ord(x)
|
Replace `ord` depending on Python version
|
Replace `ord` depending on Python version
|
Python
|
mit
|
Blizzard/s2protocol
|
Replace `ord` depending on Python version
|
from six import PY3, binary_type
__all__ = 'byte_to_int',
def byte_to_int(x):
if PY3 and isinstance(x, binary_type):
return x
else:
return ord(x)
|
<commit_before><commit_msg>Replace `ord` depending on Python version<commit_after>
|
from six import PY3, binary_type
__all__ = 'byte_to_int',
def byte_to_int(x):
if PY3 and isinstance(x, binary_type):
return x
else:
return ord(x)
|
Replace `ord` depending on Python versionfrom six import PY3, binary_type
__all__ = 'byte_to_int',
def byte_to_int(x):
if PY3 and isinstance(x, binary_type):
return x
else:
return ord(x)
|
<commit_before><commit_msg>Replace `ord` depending on Python version<commit_after>from six import PY3, binary_type
__all__ = 'byte_to_int',
def byte_to_int(x):
if PY3 and isinstance(x, binary_type):
return x
else:
return ord(x)
|
|
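The shim above exists because indexing a byte string yields different types on Python 2 and Python 3. A quick Python 3 illustration of that difference, independent of s2protocol:
data = b'\x05\xff'
assert data[0] == 5              # Python 3: indexing bytes gives an int
assert list(data) == [5, 255]    # iterating bytes gives ints as well
# On Python 2 the same expressions yield one-character strings, which is why a
# compatibility helper falls back to ord() there.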
0086ac0d96e2093fa56b9a6ccc26b38d9fe30497
|
chassis/test/services/data_context_test.py
|
chassis/test/services/data_context_test.py
|
import unittest
from chassis.services import data_context
from chassis.test.services.foo_context import FooContext
class UnextendedDatasourceContextTest(unittest.TestCase):
"""Unit test that an unextended DatasourceContext
methods raise NotImplementedErrors
"""
def test_insert_raises_error(self):
"""insert should raise a NotImplementedError"""
context = data_context.DatasourceContext()
with self.assertRaises(NotImplementedError):
context.insert('query', ['params'])
def test_get_connection_raises_error(self):
"""_get_connection should raise a NotImplementedError"""
context = data_context.DatasourceContext()
with self.assertRaises(NotImplementedError):
context._get_connection()
def test_close_connection_raises_error(self):
"""_close_connection should raise a NotImplementedError"""
context = data_context.DatasourceContext()
with self.assertRaises(NotImplementedError):
context._close_connection()
def test_enter_raises_error(self):
"""__enter__ should raise a NotImplementedError"""
context = data_context.DatasourceContext()
with self.assertRaises(NotImplementedError):
with context as connection:
self.assertFalse('This statement should not be executed')
|
Add unit test for DatasourceContext
|
Add unit test for DatasourceContext
|
Python
|
mit
|
refinery29/chassis,refinery29/chassis
|
Add unit test for DatasourceContext
|
import unittest
from chassis.services import data_context
from chassis.test.services.foo_context import FooContext
class UnextendedDatasourceContextTest(unittest.TestCase):
"""Unit test that an unextended DatasourceContext
methods raise NotImplementedErrors
"""
def test_insert_raises_error(self):
"""insert should raise a NotImplementedError"""
context = data_context.DatasourceContext()
with self.assertRaises(NotImplementedError):
context.insert('query', ['params'])
def test_get_connection_raises_error(self):
"""_get_connection should raise a NotImplementedError"""
context = data_context.DatasourceContext()
with self.assertRaises(NotImplementedError):
context._get_connection()
def test_close_connection_raises_error(self):
"""_close_connection should raise a NotImplementedError"""
context = data_context.DatasourceContext()
with self.assertRaises(NotImplementedError):
context._close_connection()
def test_enter_raises_error(self):
"""__enter__ should raise a NotImplementedError"""
context = data_context.DatasourceContext()
with self.assertRaises(NotImplementedError):
with context as connection:
self.assertFalse('This statement should not be executed')
|
<commit_before><commit_msg>Add unit test for DatasourceContext<commit_after>
|
import unittest
from chassis.services import data_context
from chassis.test.services.foo_context import FooContext
class UnextendedDatasourceContextTest(unittest.TestCase):
"""Unit test that an unextended DatasourceContext
methods raise NotImplementedErrors
"""
def test_insert_raises_error(self):
"""insert should raise a NotImplementedError"""
context = data_context.DatasourceContext()
with self.assertRaises(NotImplementedError):
context.insert('query', ['params'])
def test_get_connection_raises_error(self):
"""_get_connection should raise a NotImplementedError"""
context = data_context.DatasourceContext()
with self.assertRaises(NotImplementedError):
context._get_connection()
def test_close_connection_raises_error(self):
"""_close_connection should raise a NotImplementedError"""
context = data_context.DatasourceContext()
with self.assertRaises(NotImplementedError):
context._close_connection()
def test_enter_raises_error(self):
"""__enter__ should raise a NotImplementedError"""
context = data_context.DatasourceContext()
with self.assertRaises(NotImplementedError):
with context as connection:
self.assertFalse('This statement should not be executed')
|
Add unit test for DatasourceContextimport unittest
from chassis.services import data_context
from chassis.test.services.foo_context import FooContext
class UnextendedDatasourceContextTest(unittest.TestCase):
"""Unit test that an unextended DatasourceContext
methods raise NotImplementedErrors
"""
def test_insert_raises_error(self):
"""insert should raise a NotImplementedError"""
context = data_context.DatasourceContext()
with self.assertRaises(NotImplementedError):
context.insert('query', ['params'])
def test_get_connection_raises_error(self):
"""_get_connection should raise a NotImplementedError"""
context = data_context.DatasourceContext()
with self.assertRaises(NotImplementedError):
context._get_connection()
def test_close_connection_raises_error(self):
"""_close_connection should raise a NotImplementedError"""
context = data_context.DatasourceContext()
with self.assertRaises(NotImplementedError):
context._close_connection()
def test_enter_raises_error(self):
"""__enter__ should raise a NotImplementedError"""
context = data_context.DatasourceContext()
with self.assertRaises(NotImplementedError):
with context as connection:
self.assertFalse('This statement should not be executed')
|
<commit_before><commit_msg>Add unit test for DatasourceContext<commit_after>import unittest
from chassis.services import data_context
from chassis.test.services.foo_context import FooContext
class UnextendedDatasourceContextTest(unittest.TestCase):
"""Unit test that an unextended DatasourceContext
methods raise NotImplementedErrors
"""
def test_insert_raises_error(self):
"""insert should raise a NotImplementedError"""
context = data_context.DatasourceContext()
with self.assertRaises(NotImplementedError):
context.insert('query', ['params'])
def test_get_connection_raises_error(self):
"""_get_connection should raise a NotImplementedError"""
context = data_context.DatasourceContext()
with self.assertRaises(NotImplementedError):
context._get_connection()
def test_close_connection_raises_error(self):
"""_close_connection should raise a NotImplementedError"""
context = data_context.DatasourceContext()
with self.assertRaises(NotImplementedError):
context._close_connection()
def test_enter_raises_error(self):
"""__enter__ should raise a NotImplementedError"""
context = data_context.DatasourceContext()
with self.assertRaises(NotImplementedError):
with context as connection:
self.assertFalse('This statement should not be executed')
|
|
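The tests above exercise a simple pattern: unimplemented hooks raise NotImplementedError. A self-contained stdlib sketch of the same idea, using illustrative names rather than the chassis API:
import unittest
class BaseContext(object):
    def insert(self, query, params):
        raise NotImplementedError
class BaseContextTest(unittest.TestCase):
    def test_insert_raises(self):
        with self.assertRaises(NotImplementedError):
            BaseContext().insert('query', ['params'])
if __name__ == '__main__':
    unittest.main()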
61f93dabbf740572b2e7a415198a68e6a165cdeb
|
src/pybel_tools/assess_completeness.py
|
src/pybel_tools/assess_completeness.py
|
# -*- coding: utf-8 -*-
"""Assess the completeness of a graph.
Run on CONIB with ``python -m pybel_tools.assess_completeness [PMID]``.
"""
import logging
import click
from indra.sources.indra_db_rest.api import get_statements_for_paper
from indra.sources.indra_db_rest.util import logger as indra_logger
from pybel import BELGraph, from_indra_statements
__all__ = [
'assess_completeness',
]
def assess_completeness(ids, graph: BELGraph, verbose_indra_logger: bool = False):
"""Check INDRA if the given document has new interesting content compared to the graph.
:param ids: A CURIE (e.g., pmid:30606258, pmc:PMC6318896),
a pair of database/identifier (e.g. `('pmid', '30606258')`) or a list of pairs.
:param graph: A BEL graph
"""
if not verbose_indra_logger:
indra_logger.setLevel(logging.WARNING)
if isinstance(ids, str):
ids = [ids.split(':')]
elif isinstance(ids, tuple):
ids = [ids]
# Normalize PMC database name as well as stringify all identifiers
ids = [
('pmcid' if db == 'pmc' else db, str(db_id))
for db, db_id in ids
]
stmts = get_statements_for_paper(ids=ids)
indra_graph = from_indra_statements(stmts)
indra_nodes = set(indra_graph)
if not indra_nodes:
print(f'INDRA did not return any results for {ids}')
return False
query_nodes = set(graph)
new_nodes = indra_nodes - query_nodes
print(f"""
Graph had:
Total nodes: {len(query_nodes)}
INDRA found (in {ids}):
Total nodes: {len(indra_nodes)}
New nodes: {len(new_nodes)} ({len(new_nodes) / len(indra_nodes):.2%})
""")
return True
@click.command()
@click.argument('pmid')
@click.option('-v', '--verbose', is_flag=True)
def main(pmid: str, verbose: bool) -> None:
"""Check CONIB for added value of a given article.
Example: 30606258 for paper entitled "A pathogenic tau fragment compromises microtubules,
disrupts insulin signaling and induces the unfolded protein response."
"""
import hbp_knowledge
graph = hbp_knowledge.get_graph()
assess_completeness(('pmid', pmid), graph, verbose_indra_logger=verbose)
if __name__ == '__main__':
main()
|
Add outline of INDRA-based graph completeness checker
|
Add outline of INDRA-based graph completeness checker
This is what we talked about @sgebel. This can be modified to give name-based checking instead of node-based checking as well as relation-level checking between nodes
|
Python
|
mit
|
pybel/pybel-tools,pybel/pybel-tools,pybel/pybel-tools
|
Add outline of INDRA-based graph completeness checker
This is what we talked about @sgebel. This can be modified to give name-based checking instead of node-based checking as well as relation-level checking between nodes
|
# -*- coding: utf-8 -*-
"""Assess the completeness of a graph.
Run on CONIB with ``python -m pybel_tools.assess_completeness [PMID]``.
"""
import logging
import click
from indra.sources.indra_db_rest.api import get_statements_for_paper
from indra.sources.indra_db_rest.util import logger as indra_logger
from pybel import BELGraph, from_indra_statements
__all__ = [
'assess_completeness',
]
def assess_completeness(ids, graph: BELGraph, verbose_indra_logger: bool = False):
"""Check INDRA if the given document has new interesting content compared to the graph.
:param ids: A CURIE (e.g., pmid:30606258, pmc:PMC6318896),
a pair of database/identifier (e.g. `('pmid', '30606258')`) or a list of pairs.
:param graph: A BEL graph
"""
if not verbose_indra_logger:
indra_logger.setLevel(logging.WARNING)
if isinstance(ids, str):
ids = [ids.split(':')]
elif isinstance(ids, tuple):
ids = [ids]
# Normalize PMC database name as well as stringify all identifiers
ids = [
('pmcid' if db == 'pmc' else db, str(db_id))
for db, db_id in ids
]
stmts = get_statements_for_paper(ids=ids)
indra_graph = from_indra_statements(stmts)
indra_nodes = set(indra_graph)
if not indra_nodes:
print(f'INDRA did not return any results for {ids}')
return False
query_nodes = set(graph)
new_nodes = indra_nodes - query_nodes
print(f"""
Graph had:
Total nodes: {len(query_nodes)}
INDRA found (in {ids}):
Total nodes: {len(indra_nodes)}
New nodes: {len(new_nodes)} ({len(new_nodes) / len(indra_nodes):.2%})
""")
return True
@click.command()
@click.argument('pmid')
@click.option('-v', '--verbose', is_flag=True)
def main(pmid: str, verbose: bool) -> None:
"""Check CONIB for added value of a given article.
Example: 30606258 for paper entitled "A pathogenic tau fragment compromises microtubules,
disrupts insulin signaling and induces the unfolded protein response."
"""
import hbp_knowledge
graph = hbp_knowledge.get_graph()
assess_completeness(('pmid', pmid), graph, verbose_indra_logger=verbose)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add outline of INDRA-based graph completeness checker
This is what we talked about @sgebel. This can be modified to give name-based checking instead of node-based checking as well as relation-level checking between nodes<commit_after>
|
# -*- coding: utf-8 -*-
"""Assess the completeness of a graph.
Run on CONIB with ``python -m pybel_tools.assess_completeness [PMID]``.
"""
import logging
import click
from indra.sources.indra_db_rest.api import get_statements_for_paper
from indra.sources.indra_db_rest.util import logger as indra_logger
from pybel import BELGraph, from_indra_statements
__all__ = [
'assess_completeness',
]
def assess_completeness(ids, graph: BELGraph, verbose_indra_logger: bool = False):
"""Check INDRA if the given document has new interesting content compared to the graph.
:param ids: A CURIE (e.g., pmid:30606258, pmc:PMC6318896),
a pair of database/identifier (e.g. `('pmid', '30606258')`) or a list of pairs.
:param graph: A BEL graph
"""
if not verbose_indra_logger:
indra_logger.setLevel(logging.WARNING)
if isinstance(ids, str):
ids = [ids.split(':')]
elif isinstance(ids, tuple):
ids = [ids]
# Normalize PMC database name as well as stringify all identifiers
ids = [
('pmcid' if db == 'pmc' else db, str(db_id))
for db, db_id in ids
]
stmts = get_statements_for_paper(ids=ids)
indra_graph = from_indra_statements(stmts)
indra_nodes = set(indra_graph)
if not indra_nodes:
print(f'INDRA did not return any results for {ids}')
return False
query_nodes = set(graph)
new_nodes = indra_nodes - query_nodes
print(f"""
Graph had:
Total nodes: {len(query_nodes)}
INDRA found (in {ids}):
Total nodes: {len(indra_nodes)}
New nodes: {len(new_nodes)} ({len(new_nodes) / len(indra_nodes):.2%})
""")
return True
@click.command()
@click.argument('pmid')
@click.option('-v', '--verbose', is_flag=True)
def main(pmid: str, verbose: bool) -> None:
"""Check CONIB for added value of a given article.
Example: 30606258 for paper entitled "A pathogenic tau fragment compromises microtubules,
disrupts insulin signaling and induces the unfolded protein response."
"""
import hbp_knowledge
graph = hbp_knowledge.get_graph()
assess_completeness(('pmid', pmid), graph, verbose_indra_logger=verbose)
if __name__ == '__main__':
main()
|
Add outline of INDRA-based graph completeness checker
This is what we talked about @sgebel. This can be modified to give name-based checking instead of node-based checking as well as relation-level checking between nodes# -*- coding: utf-8 -*-
"""Assess the completeness of a graph.
Run on CONIB with ``python -m pybel_tools.assess_completeness [PMID]``.
"""
import logging
import click
from indra.sources.indra_db_rest.api import get_statements_for_paper
from indra.sources.indra_db_rest.util import logger as indra_logger
from pybel import BELGraph, from_indra_statements
__all__ = [
'assess_completeness',
]
def assess_completeness(ids, graph: BELGraph, verbose_indra_logger: bool = False):
"""Check INDRA if the given document has new interesting content compared to the graph.
:param ids: A CURIE (e.g., pmid:30606258, pmc:PMC6318896),
a pair of database/identifier (e.g. `('pmid', '30606258')`) or a list of pairs.
:param graph: A BEL graph
"""
if not verbose_indra_logger:
indra_logger.setLevel(logging.WARNING)
if isinstance(ids, str):
ids = [ids.split(':')]
elif isinstance(ids, tuple):
ids = [ids]
# Normalize PMC database name as well as stringify all identifiers
ids = [
('pmcid' if db == 'pmc' else db, str(db_id))
for db, db_id in ids
]
stmts = get_statements_for_paper(ids=ids)
indra_graph = from_indra_statements(stmts)
indra_nodes = set(indra_graph)
if not indra_nodes:
print(f'INDRA did not return any results for {ids}')
return False
query_nodes = set(graph)
new_nodes = indra_nodes - query_nodes
print(f"""
Graph had:
Total nodes: {len(query_nodes)}
INDRA found (in {ids}):
Total nodes: {len(indra_nodes)}
New nodes: {len(new_nodes)} ({len(new_nodes) / len(indra_nodes):.2%})
""")
return True
@click.command()
@click.argument('pmid')
@click.option('-v', '--verbose', is_flag=True)
def main(pmid: str, verbose: bool) -> None:
"""Check CONIB for added value of a given article.
Example: 30606258 for paper entitled "A pathogenic tau fragment compromises microtubules,
disrupts insulin signaling and induces the unfolded protein response."
"""
import hbp_knowledge
graph = hbp_knowledge.get_graph()
assess_completeness(('pmid', pmid), graph, verbose_indra_logger=verbose)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add outline of INDRA-based graph completeness checker
This is what we talked about @sgebel. This can be modified to give name-based checking instead of node-based checking as well as relation-level checking between nodes<commit_after># -*- coding: utf-8 -*-
"""Assess the completeness of a graph.
Run on CONIB with ``python -m pybel_tools.assess_completeness [PMID]``.
"""
import logging
import click
from indra.sources.indra_db_rest.api import get_statements_for_paper
from indra.sources.indra_db_rest.util import logger as indra_logger
from pybel import BELGraph, from_indra_statements
__all__ = [
'assess_completeness',
]
def assess_completeness(ids, graph: BELGraph, verbose_indra_logger: bool = False):
"""Check INDRA if the given document has new interesting content compared to the graph.
:param ids: A CURIE (e.g., pmid:30606258, pmc:PMC6318896),
a pair of database/identifier (e.g. `('pmid', '30606258')`) or a list of pairs.
:param graph: A BEL graph
"""
if not verbose_indra_logger:
indra_logger.setLevel(logging.WARNING)
if isinstance(ids, str):
ids = [ids.split(':')]
elif isinstance(ids, tuple):
ids = [ids]
# Normalize PMC database name as well as stringify all identifiers
ids = [
('pmcid' if db == 'pmc' else db, str(db_id))
for db, db_id in ids
]
stmts = get_statements_for_paper(ids=ids)
indra_graph = from_indra_statements(stmts)
indra_nodes = set(indra_graph)
if not indra_nodes:
print(f'INDRA did not return any results for {ids}')
return False
query_nodes = set(graph)
new_nodes = indra_nodes - query_nodes
print(f"""
Graph had:
Total nodes: {len(query_nodes)}
INDRA found (in {ids}):
Total nodes: {len(indra_nodes)}
New nodes: {len(new_nodes)} ({len(new_nodes) / len(indra_nodes):.2%})
""")
return True
@click.command()
@click.argument('pmid')
@click.option('-v', '--verbose', is_flag=True)
def main(pmid: str, verbose: bool) -> None:
"""Check CONIB for added value of a given article.
Example: 30606258 for paper entitled "A pathogenic tau fragment compromises microtubules,
disrupts insulin signaling and induces the unfolded protein response."
"""
import hbp_knowledge
graph = hbp_knowledge.get_graph()
assess_completeness(('pmid', pmid), graph, verbose_indra_logger=verbose)
if __name__ == '__main__':
main()
|
|
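assess_completeness above accepts a CURIE string, a single (database, identifier) pair, or a list of pairs. A dependency-free sketch of just that normalization step, kept apart from the INDRA and PyBEL calls; the helper name is illustrative:
def normalize_ids(ids):
    """Coerce ids to a list of (database, identifier) string pairs."""
    if isinstance(ids, str):
        ids = [tuple(ids.split(':', 1))]
    elif isinstance(ids, tuple):
        ids = [ids]
    return [('pmcid' if db == 'pmc' else db, str(db_id)) for db, db_id in ids]
assert normalize_ids('pmid:30606258') == [('pmid', '30606258')]
assert normalize_ids(('pmc', 'PMC6318896')) == [('pmcid', 'PMC6318896')]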
3e6a321ff92c138bf901e7511ecbb166e74b3007
|
apps/blog/license_urls.py
|
apps/blog/license_urls.py
|
"""
URLCONF for the blog app.
"""
from django.conf.urls import url
from . import views, feeds
# URL patterns configuration
urlpatterns = (
# License index page
url(r'^(?P<slug>[-a-zA-Z0-9_]+)/$', views.license_detail, name='license_detail'),
# Related articles feed
url(r'^(?P<slug>[-a-zA-Z0-9_]+)/flux/$', feeds.LatestArticlesFeed(), name='latest_license_articles_rss'),
url(r'^(?P<slug>[-a-zA-Z0-9_]+)/flux/atom/$', feeds.LatestArticlesAtomFeed(), name='latest_license_articles_atom'),
)
|
Add special urls for license (add-on)
|
Add special urls for license (add-on)
|
Python
|
agpl-3.0
|
TamiaLab/carnetdumaker,TamiaLab/carnetdumaker,TamiaLab/carnetdumaker,TamiaLab/carnetdumaker
|
Add special urls for license (add-on)
|
"""
URLCONF for the blog app.
"""
from django.conf.urls import url
from . import views, feeds
# URL patterns configuration
urlpatterns = (
# License index page
url(r'^(?P<slug>[-a-zA-Z0-9_]+)/$', views.license_detail, name='license_detail'),
# Related articles feed
url(r'^(?P<slug>[-a-zA-Z0-9_]+)/flux/$', feeds.LatestArticlesFeed(), name='latest_license_articles_rss'),
url(r'^(?P<slug>[-a-zA-Z0-9_]+)/flux/atom/$', feeds.LatestArticlesAtomFeed(), name='latest_license_articles_atom'),
)
|
<commit_before><commit_msg>Add special urls for license (add-on)<commit_after>
|
"""
URLCONF for the blog app.
"""
from django.conf.urls import url
from . import views, feeds
# URL patterns configuration
urlpatterns = (
# License index page
url(r'^(?P<slug>[-a-zA-Z0-9_]+)/$', views.license_detail, name='license_detail'),
# Related articles feed
url(r'^(?P<slug>[-a-zA-Z0-9_]+)/flux/$', feeds.LatestArticlesFeed(), name='latest_license_articles_rss'),
url(r'^(?P<slug>[-a-zA-Z0-9_]+)/flux/atom/$', feeds.LatestArticlesAtomFeed(), name='latest_license_articles_atom'),
)
|
Add special urls for license (add-on)"""
URLCONF for the blog app.
"""
from django.conf.urls import url
from . import views, feeds
# URL patterns configuration
urlpatterns = (
# License index page
url(r'^(?P<slug>[-a-zA-Z0-9_]+)/$', views.license_detail, name='license_detail'),
# Related articles feed
url(r'^(?P<slug>[-a-zA-Z0-9_]+)/flux/$', feeds.LatestArticlesFeed(), name='latest_license_articles_rss'),
url(r'^(?P<slug>[-a-zA-Z0-9_]+)/flux/atom/$', feeds.LatestArticlesAtomFeed(), name='latest_license_articles_atom'),
)
|
<commit_before><commit_msg>Add special urls for license (add-on)<commit_after>"""
URLCONF for the blog app.
"""
from django.conf.urls import url
from . import views, feeds
# URL patterns configuration
urlpatterns = (
# License index page
url(r'^(?P<slug>[-a-zA-Z0-9_]+)/$', views.license_detail, name='license_detail'),
# Related articles feed
url(r'^(?P<slug>[-a-zA-Z0-9_]+)/flux/$', feeds.LatestArticlesFeed(), name='latest_license_articles_rss'),
url(r'^(?P<slug>[-a-zA-Z0-9_]+)/flux/atom/$', feeds.LatestArticlesAtomFeed(), name='latest_license_articles_atom'),
)
|
|
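The slug capture group above is a plain character class, so it accepts only letters, digits, hyphens and underscores. A stdlib check of what it matches, with illustrative slugs rather than real license names:
import re
slug_pattern = re.compile(r'^(?P<slug>[-a-zA-Z0-9_]+)/$')
assert slug_pattern.match('gpl-3_0/').group('slug') == 'gpl-3_0'
assert slug_pattern.match('creative commons/') is None  # spaces are rejected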
76f037ee6aa4fb54cf483f44f0a8dfa098d1f57c
|
tests/test_grid_sampling.py
|
tests/test_grid_sampling.py
|
from parcels import Grid
import numpy as np
import pytest
@pytest.fixture
def grid(xdim=200, ydim=100):
""" Standard grid spanning the earth's coordinates with U and V
equivalent to longitude and latitude.
"""
lon = np.linspace(-180, 180, xdim, dtype=np.float32)
lat = np.linspace(-90, 90, ydim, dtype=np.float32)
depth = np.zeros(1, dtype=np.float32)
time = np.zeros(1, dtype=np.float64)
U, V = np.meshgrid(lat, lon)
return Grid.from_data(np.array(U, dtype=np.float32), lon, lat,
np.array(V, dtype=np.float32), lon, lat,
depth, time)
def test_grid_sample(grid, xdim=120, ydim=80):
""" Sample the grid using indexing notation. """
lon = np.linspace(-170, 170, xdim, dtype=np.float32)
lat = np.linspace(-80, 80, ydim, dtype=np.float32)
v_s = np.array([grid.V[0, x, 70.] for x in lon])
u_s = np.array([grid.U[0, -45., y] for y in lat])
assert np.allclose(v_s, lon, rtol=1e-12)
assert np.allclose(u_s, lat, rtol=1e-12)
def test_grid_sample_eval(grid, xdim=60, ydim=60):
""" Sample the grid using the explicit eval function. """
lon = np.linspace(-170, 170, xdim, dtype=np.float32)
lat = np.linspace(-80, 80, ydim, dtype=np.float32)
v_s = np.array([grid.V.eval(0, x, 70.) for x in lon])
u_s = np.array([grid.U.eval(0, -45., y) for y in lat])
assert np.allclose(v_s, lon, rtol=1e-12)
assert np.allclose(u_s, lat, rtol=1e-12)
|
Add a simple set of tests for explicit grid sampling
|
Grid: Add a simple set of tests for explicit grid sampling
|
Python
|
mit
|
OceanPARCELS/parcels,OceanPARCELS/parcels
|
Grid: Add a simple set of tests for explicit grid sampling
|
from parcels import Grid
import numpy as np
import pytest
@pytest.fixture
def grid(xdim=200, ydim=100):
""" Standard grid spanning the earth's coordinates with U and V
equivalent to longitude and latitude.
"""
lon = np.linspace(-180, 180, xdim, dtype=np.float32)
lat = np.linspace(-90, 90, ydim, dtype=np.float32)
depth = np.zeros(1, dtype=np.float32)
time = np.zeros(1, dtype=np.float64)
U, V = np.meshgrid(lat, lon)
return Grid.from_data(np.array(U, dtype=np.float32), lon, lat,
np.array(V, dtype=np.float32), lon, lat,
depth, time)
def test_grid_sample(grid, xdim=120, ydim=80):
""" Sample the grid using indexing notation. """
lon = np.linspace(-170, 170, xdim, dtype=np.float32)
lat = np.linspace(-80, 80, ydim, dtype=np.float32)
v_s = np.array([grid.V[0, x, 70.] for x in lon])
u_s = np.array([grid.U[0, -45., y] for y in lat])
assert np.allclose(v_s, lon, rtol=1e-12)
assert np.allclose(u_s, lat, rtol=1e-12)
def test_grid_sample_eval(grid, xdim=60, ydim=60):
""" Sample the grid using the explicit eval function. """
lon = np.linspace(-170, 170, xdim, dtype=np.float32)
lat = np.linspace(-80, 80, ydim, dtype=np.float32)
v_s = np.array([grid.V.eval(0, x, 70.) for x in lon])
u_s = np.array([grid.U.eval(0, -45., y) for y in lat])
assert np.allclose(v_s, lon, rtol=1e-12)
assert np.allclose(u_s, lat, rtol=1e-12)
|
<commit_before><commit_msg>Grid: Add a simple set of tests for explicit grid sampling<commit_after>
|
from parcels import Grid
import numpy as np
import pytest
@pytest.fixture
def grid(xdim=200, ydim=100):
""" Standard grid spanning the earth's coordinates with U and V
equivalent to longitude and latitude.
"""
lon = np.linspace(-180, 180, xdim, dtype=np.float32)
lat = np.linspace(-90, 90, ydim, dtype=np.float32)
depth = np.zeros(1, dtype=np.float32)
time = np.zeros(1, dtype=np.float64)
U, V = np.meshgrid(lat, lon)
return Grid.from_data(np.array(U, dtype=np.float32), lon, lat,
np.array(V, dtype=np.float32), lon, lat,
depth, time)
def test_grid_sample(grid, xdim=120, ydim=80):
""" Sample the grid using indexing notation. """
lon = np.linspace(-170, 170, xdim, dtype=np.float32)
lat = np.linspace(-80, 80, ydim, dtype=np.float32)
v_s = np.array([grid.V[0, x, 70.] for x in lon])
u_s = np.array([grid.U[0, -45., y] for y in lat])
assert np.allclose(v_s, lon, rtol=1e-12)
assert np.allclose(u_s, lat, rtol=1e-12)
def test_grid_sample_eval(grid, xdim=60, ydim=60):
""" Sample the grid using the explicit eval function. """
lon = np.linspace(-170, 170, xdim, dtype=np.float32)
lat = np.linspace(-80, 80, ydim, dtype=np.float32)
v_s = np.array([grid.V.eval(0, x, 70.) for x in lon])
u_s = np.array([grid.U.eval(0, -45., y) for y in lat])
assert np.allclose(v_s, lon, rtol=1e-12)
assert np.allclose(u_s, lat, rtol=1e-12)
|
Grid: Add a simple set of tests for explicit grid samplingfrom parcels import Grid
import numpy as np
import pytest
@pytest.fixture
def grid(xdim=200, ydim=100):
""" Standard grid spanning the earth's coordinates with U and V
equivalent to longitude and latitude.
"""
lon = np.linspace(-180, 180, xdim, dtype=np.float32)
lat = np.linspace(-90, 90, ydim, dtype=np.float32)
depth = np.zeros(1, dtype=np.float32)
time = np.zeros(1, dtype=np.float64)
U, V = np.meshgrid(lat, lon)
return Grid.from_data(np.array(U, dtype=np.float32), lon, lat,
np.array(V, dtype=np.float32), lon, lat,
depth, time)
def test_grid_sample(grid, xdim=120, ydim=80):
""" Sample the grid using indexing notation. """
lon = np.linspace(-170, 170, xdim, dtype=np.float32)
lat = np.linspace(-80, 80, ydim, dtype=np.float32)
v_s = np.array([grid.V[0, x, 70.] for x in lon])
u_s = np.array([grid.U[0, -45., y] for y in lat])
assert np.allclose(v_s, lon, rtol=1e-12)
assert np.allclose(u_s, lat, rtol=1e-12)
def test_grid_sample_eval(grid, xdim=60, ydim=60):
""" Sample the grid using the explicit eval function. """
lon = np.linspace(-170, 170, xdim, dtype=np.float32)
lat = np.linspace(-80, 80, ydim, dtype=np.float32)
v_s = np.array([grid.V.eval(0, x, 70.) for x in lon])
u_s = np.array([grid.U.eval(0, -45., y) for y in lat])
assert np.allclose(v_s, lon, rtol=1e-12)
assert np.allclose(u_s, lat, rtol=1e-12)
|
<commit_before><commit_msg>Grid: Add a simple set of tests for explicit grid sampling<commit_after>from parcels import Grid
import numpy as np
import pytest
@pytest.fixture
def grid(xdim=200, ydim=100):
""" Standard grid spanning the earth's coordinates with U and V
equivalent to longitude and latitude.
"""
lon = np.linspace(-180, 180, xdim, dtype=np.float32)
lat = np.linspace(-90, 90, ydim, dtype=np.float32)
depth = np.zeros(1, dtype=np.float32)
time = np.zeros(1, dtype=np.float64)
U, V = np.meshgrid(lat, lon)
return Grid.from_data(np.array(U, dtype=np.float32), lon, lat,
np.array(V, dtype=np.float32), lon, lat,
depth, time)
def test_grid_sample(grid, xdim=120, ydim=80):
""" Sample the grid using indexing notation. """
lon = np.linspace(-170, 170, xdim, dtype=np.float32)
lat = np.linspace(-80, 80, ydim, dtype=np.float32)
v_s = np.array([grid.V[0, x, 70.] for x in lon])
u_s = np.array([grid.U[0, -45., y] for y in lat])
assert np.allclose(v_s, lon, rtol=1e-12)
assert np.allclose(u_s, lat, rtol=1e-12)
def test_grid_sample_eval(grid, xdim=60, ydim=60):
""" Sample the grid using the explicit eval function. """
lon = np.linspace(-170, 170, xdim, dtype=np.float32)
lat = np.linspace(-80, 80, ydim, dtype=np.float32)
v_s = np.array([grid.V.eval(0, x, 70.) for x in lon])
u_s = np.array([grid.U.eval(0, -45., y) for y in lat])
assert np.allclose(v_s, lon, rtol=1e-12)
assert np.allclose(u_s, lat, rtol=1e-12)
|
|
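The fixture above relies on np.meshgrid(lat, lon) repeating lat along each row of the first returned array and lon down each column of the second. A tiny numpy illustration of that convention with toy values, not the test's grid:
import numpy as np
lat = np.array([0.0, 10.0])
lon = np.array([100.0, 110.0, 120.0])
U, V = np.meshgrid(lat, lon)      # default 'xy' indexing: shape (len(lon), len(lat))
assert U.shape == V.shape == (3, 2)
assert np.allclose(U[0], lat)     # every row of U is the lat vector
assert np.allclose(V[:, 0], lon)  # every column of V is the lon vector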
91e9e5a38a0466721de4027e6578c228e4efe799
|
lily/deals/migrations/0021_auto_20160222_1513.py
|
lily/deals/migrations/0021_auto_20160222_1513.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('deals', '0020_auto_20160211_1039'),
]
operations = [
migrations.AlterField(
model_name='deal',
name='currency',
field=models.CharField(max_length=3, verbose_name='currency', choices=[(b'EUR', 'Euro'), (b'GBP', 'British pound'), (b'USD', 'United States dollar'), (b'ZAR', 'South African rand'), (b'NOR', 'Norwegian krone'), (b'DKK', 'Danish krone'), (b'SEK', 'Swedish krone'), (b'CHF', 'Swiss franc')]),
preserve_default=True,
),
]
|
Add migration to deals to add new currency
|
Add migration to deals to add new currency
|
Python
|
agpl-3.0
|
HelloLily/hellolily,HelloLily/hellolily,HelloLily/hellolily,HelloLily/hellolily
|
Add migration to deals to add new currency
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('deals', '0020_auto_20160211_1039'),
]
operations = [
migrations.AlterField(
model_name='deal',
name='currency',
field=models.CharField(max_length=3, verbose_name='currency', choices=[(b'EUR', 'Euro'), (b'GBP', 'British pound'), (b'USD', 'United States dollar'), (b'ZAR', 'South African rand'), (b'NOR', 'Norwegian krone'), (b'DKK', 'Danish krone'), (b'SEK', 'Swedish krone'), (b'CHF', 'Swiss franc')]),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add migration to deals to add new currency<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('deals', '0020_auto_20160211_1039'),
]
operations = [
migrations.AlterField(
model_name='deal',
name='currency',
field=models.CharField(max_length=3, verbose_name='currency', choices=[(b'EUR', 'Euro'), (b'GBP', 'British pound'), (b'USD', 'United States dollar'), (b'ZAR', 'South African rand'), (b'NOR', 'Norwegian krone'), (b'DKK', 'Danish krone'), (b'SEK', 'Swedish krone'), (b'CHF', 'Swiss franc')]),
preserve_default=True,
),
]
|
Add migration to deals to add new currency# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('deals', '0020_auto_20160211_1039'),
]
operations = [
migrations.AlterField(
model_name='deal',
name='currency',
field=models.CharField(max_length=3, verbose_name='currency', choices=[(b'EUR', 'Euro'), (b'GBP', 'British pound'), (b'USD', 'United States dollar'), (b'ZAR', 'South African rand'), (b'NOR', 'Norwegian krone'), (b'DKK', 'Danish krone'), (b'SEK', 'Swedish krone'), (b'CHF', 'Swiss franc')]),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add migration to deals to add new currency<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('deals', '0020_auto_20160211_1039'),
]
operations = [
migrations.AlterField(
model_name='deal',
name='currency',
field=models.CharField(max_length=3, verbose_name='currency', choices=[(b'EUR', 'Euro'), (b'GBP', 'British pound'), (b'USD', 'United States dollar'), (b'ZAR', 'South African rand'), (b'NOR', 'Norwegian krone'), (b'DKK', 'Danish krone'), (b'SEK', 'Swedish krone'), (b'CHF', 'Swiss franc')]),
preserve_default=True,
),
]
|
|
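In the choices list above, only the first element of each pair is stored in the column (hence max_length=3); the second is the display label. A small stdlib illustration of that pairing, using a shortened copy of the list:
CURRENCY_CHOICES = [('EUR', 'Euro'), ('GBP', 'British pound'), ('CHF', 'Swiss franc')]
labels = dict(CURRENCY_CHOICES)
assert labels['CHF'] == 'Swiss franc'
assert all(len(code) == 3 for code, _ in CURRENCY_CHOICES)  # fits max_length=3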
28407c8761d22c46c302342d2bcd2bf2a1c274b9
|
leetcode/q994/solution.py
|
leetcode/q994/solution.py
|
"""
In a given grid, each cell can have one of three values:
the value 0 representing an empty cell;
the value 1 representing a fresh orange;
the value 2 representing a rotten orange.
Every minute, any fresh orange that is adjacent (4-directionally) to a rotten orange becomes rotten.
Return the minimum number of minutes that must elapse until no cell has a fresh orange. If this is impossible, return -1 instead.
"""
from typing import List
class Solution:
def orangesRotting(self, grid: List[List[int]]) -> int:
counter = -1
lenX, lenY = len(grid), len(grid[0])
rot = True
while rot:
print(grid, rot)
changed = False
for i in range(lenX):
for j in range(lenY):
if grid[i][j] == 2:
changed = rotFreshNeighbours(i, j, grid) or changed
rot = changed
counter += 1
for i in range(lenX):
for j in range(lenY):
if grid[i][j] == 1:
return -1
return counter
def rotFreshNeighbours(x, y: int, grid: List[List[int]]) -> bool:
rotted = False
neighbours = [[x+1, y], [x-1, y], [x, y+1], [x, y-1]]
for coords in neighbours:
nX, nY = coords
if nX < 0 or nX >= len(grid):
continue
if nY < 0 or nY >= len(grid[0]):
continue
if grid[nX][nY] == 1:
rotted = True
grid[nX][nY] = 2
return rotted
|
Add iterative approach that changes too quickly
|
Add iterative approach that changes too quickly
|
Python
|
mit
|
lemming52/white_pawn,lemming52/white_pawn
|
Add iterative approach that changes too quickly
|
"""
In a given grid, each cell can have one of three values:
the value 0 representing an empty cell;
the value 1 representing a fresh orange;
the value 2 representing a rotten orange.
Every minute, any fresh orange that is adjacent (4-directionally) to a rotten orange becomes rotten.
Return the minimum number of minutes that must elapse until no cell has a fresh orange. If this is impossible, return -1 instead.
"""
from typing import List
class Solution:
def orangesRotting(self, grid: List[List[int]]) -> int:
counter = -1
lenX, lenY = len(grid), len(grid[0])
rot = True
while rot:
print(grid, rot)
changed = False
for i in range(lenX):
for j in range(lenY):
if grid[i][j] == 2:
changed = rotFreshNeighbours(i, j, grid) or changed
rot = changed
counter += 1
for i in range(lenX):
for j in range(lenY):
if grid[i][j] == 1:
return -1
return counter
def rotFreshNeighbours(x, y: int, grid: List[List[int]]) -> bool:
rotted = False
neighbours = [[x+1, y], [x-1, y], [x, y+1], [x, y-1]]
for coords in neighbours:
nX, nY = coords
if nX < 0 or nX >= len(grid):
continue
if nY < 0 or nY >= len(grid[0]):
continue
if grid[nX][nY] == 1:
rotted = True
grid[nX][nY] = 2
return rotted
|
<commit_before><commit_msg>Add iterative approach that changes too quickly<commit_after>
|
"""
In a given grid, each cell can have one of three values:
the value 0 representing an empty cell;
the value 1 representing a fresh orange;
the value 2 representing a rotten orange.
Every minute, any fresh orange that is adjacent (4-directionally) to a rotten orange becomes rotten.
Return the minimum number of minutes that must elapse until no cell has a fresh orange. If this is impossible, return -1 instead.
"""
from typing import List
class Solution:
def orangesRotting(self, grid: List[List[int]]) -> int:
counter = -1
lenX, lenY = len(grid), len(grid[0])
rot = True
while rot:
print(grid, rot)
changed = False
for i in range(lenX):
for j in range(lenY):
if grid[i][j] == 2:
changed = rotFreshNeighbours(i, j, grid) or changed
rot = changed
counter += 1
for i in range(lenX):
for j in range(lenY):
if grid[i][j] == 1:
return -1
return counter
def rotFreshNeighbours(x, y: int, grid: List[List[int]]) -> bool:
rotted = False
neighbours = [[x+1, y], [x-1, y], [x, y+1], [x, y-1]]
for coords in neighbours:
nX, nY = coords
if nX < 0 or nX >= len(grid):
continue
if nY < 0 or nY >= len(grid[0]):
continue
if grid[nX][nY] == 1:
rotted = True
grid[nX][nY] = 2
return rotted
|
Add iterative approach that changes too quickly"""
In a given grid, each cell can have one of three values:
the value 0 representing an empty cell;
the value 1 representing a fresh orange;
the value 2 representing a rotten orange.
Every minute, any fresh orange that is adjacent (4-directionally) to a rotten orange becomes rotten.
Return the minimum number of minutes that must elapse until no cell has a fresh orange. If this is impossible, return -1 instead.
"""
from typing import List
class Solution:
def orangesRotting(self, grid: List[List[int]]) -> int:
counter = -1
lenX, lenY = len(grid), len(grid[0])
rot = True
while rot:
print(grid, rot)
changed = False
for i in range(lenX):
for j in range(lenY):
if grid[i][j] == 2:
changed = rotFreshNeighbours(i, j, grid) or changed
rot = changed
counter += 1
for i in range(lenX):
for j in range(lenY):
if grid[i][j] == 1:
return -1
return counter
def rotFreshNeighbours(x, y: int, grid: List[List[int]]) -> bool:
rotted = False
neighbours = [[x+1, y], [x-1, y], [x, y+1], [x, y-1]]
for coords in neighbours:
nX, nY = coords
if nX < 0 or nX >= len(grid):
continue
if nY < 0 or nY >= len(grid[0]):
continue
if grid[nX][nY] == 1:
rotted = True
grid[nX][nY] = 2
return rotted
|
<commit_before><commit_msg>Add iterative approach that changes too quickly<commit_after>"""
In a given grid, each cell can have one of three values:
the value 0 representing an empty cell;
the value 1 representing a fresh orange;
the value 2 representing a rotten orange.
Every minute, any fresh orange that is adjacent (4-directionally) to a rotten orange becomes rotten.
Return the minimum number of minutes that must elapse until no cell has a fresh orange. If this is impossible, return -1 instead.
"""
from typing import List
class Solution:
def orangesRotting(self, grid: List[List[int]]) -> int:
counter = -1
lenX, lenY = len(grid), len(grid[0])
rot = True
while rot:
print(grid, rot)
changed = False
for i in range(lenX):
for j in range(lenY):
if grid[i][j] == 2:
changed = rotFreshNeighbours(i, j, grid) or changed
rot = changed
counter += 1
for i in range(lenX):
for j in range(lenY):
if grid[i][j] == 1:
return -1
return counter
def rotFreshNeighbours(x, y: int, grid: List[List[int]]) -> bool:
rotted = False
neighbours = [[x+1, y], [x-1, y], [x, y+1], [x, y-1]]
for coords in neighbours:
nX, nY = coords
if nX < 0 or nX >= len(grid):
continue
if nY < 0 or nY >= len(grid[0]):
continue
if grid[nX][nY] == 1:
rotted = True
grid[nX][nY] = 2
return rotted
|
|
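As the commit subject admits, mutating the grid while sweeping it lets rot travel several cells within a single "minute". The usual remedy is a level-order BFS that advances exactly one layer per minute; a stdlib-only sketch of that approach, not the dataset's code:
from collections import deque
def oranges_rotting(grid):
    rows, cols = len(grid), len(grid[0])
    queue = deque((r, c) for r in range(rows) for c in range(cols) if grid[r][c] == 2)
    fresh = sum(row.count(1) for row in grid)
    minutes = 0
    while queue and fresh:
        for _ in range(len(queue)):  # one queue layer corresponds to one minute
            r, c = queue.popleft()
            for nr, nc in ((r + 1, c), (r - 1, c), (r, c + 1), (r, c - 1)):
                if 0 <= nr < rows and 0 <= nc < cols and grid[nr][nc] == 1:
                    grid[nr][nc] = 2
                    fresh -= 1
                    queue.append((nr, nc))
        minutes += 1
    return -1 if fresh else minutes
assert oranges_rotting([[2, 1, 1], [1, 1, 0], [0, 1, 1]]) == 4
assert oranges_rotting([[2, 1, 1], [0, 1, 1], [1, 0, 1]]) == -1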
62c24f6edaa91834d4a7b2a3f9b99b8b96322230
|
nova/policies/hide_server_addresses.py
|
nova/policies/hide_server_addresses.py
|
# Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
BASE_POLICY_NAME = 'os_compute_api:os-hide-server-addresses'
hide_server_addresses_policies = [
policy.RuleDefault(
name=BASE_POLICY_NAME,
check_str='is_admin:False'),
]
def list_rules():
return hide_server_addresses_policies
|
# Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.policies import base
BASE_POLICY_NAME = 'os_compute_api:os-hide-server-addresses'
hide_server_addresses_policies = [
base.create_rule_default(
BASE_POLICY_NAME,
'is_admin:False',
"""Hide server's 'addresses' key in the server response.
This sets the 'addresses' key in the server response to an empty dictionary
when the server is in a specific set of states as defined in
CONF.api.hide_server_address_states.
By default 'addresses' is hidden only when the server is in 'BUILDING'
state.""",
[
{
'method': 'GET',
'path': '/servers/{id}'
},
{
'method': 'GET',
'path': '/servers/detail'
}
]),
]
def list_rules():
return hide_server_addresses_policies
|
Add policy description for 'os-hide-server-addresses'
|
Add policy description for 'os-hide-server-addresses'
This commit adds policy doc for 'os-hide-server-addresses' policies.
Partial implement blueprint policy-docs
Change-Id: I98edbd8579f052c74283bde2ec4f85d301a0807a
|
Python
|
apache-2.0
|
rahulunair/nova,gooddata/openstack-nova,mikalstill/nova,mahak/nova,Juniper/nova,mahak/nova,mikalstill/nova,Juniper/nova,rahulunair/nova,gooddata/openstack-nova,vmturbo/nova,openstack/nova,Juniper/nova,jianghuaw/nova,openstack/nova,gooddata/openstack-nova,klmitch/nova,gooddata/openstack-nova,jianghuaw/nova,klmitch/nova,klmitch/nova,mahak/nova,vmturbo/nova,klmitch/nova,jianghuaw/nova,phenoxim/nova,mikalstill/nova,vmturbo/nova,openstack/nova,vmturbo/nova,jianghuaw/nova,rahulunair/nova,phenoxim/nova,Juniper/nova
|
# Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
BASE_POLICY_NAME = 'os_compute_api:os-hide-server-addresses'
hide_server_addresses_policies = [
policy.RuleDefault(
name=BASE_POLICY_NAME,
check_str='is_admin:False'),
]
def list_rules():
return hide_server_addresses_policies
Add policy description for 'os-hide-server-addresses'
This commit adds policy doc for 'os-hide-server-addresses' policies.
Partial implement blueprint policy-docs
Change-Id: I98edbd8579f052c74283bde2ec4f85d301a0807a
|
# Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.policies import base
BASE_POLICY_NAME = 'os_compute_api:os-hide-server-addresses'
hide_server_addresses_policies = [
base.create_rule_default(
BASE_POLICY_NAME,
'is_admin:False',
"""Hide server's 'addresses' key in the server response.
This sets the 'addresses' key in the server response to an empty dictionary
when the server is in a specific set of states as defined in
CONF.api.hide_server_address_states.
By default 'addresses' is hidden only when the server is in 'BUILDING'
state.""",
[
{
'method': 'GET',
'path': '/servers/{id}'
},
{
'method': 'GET',
'path': '/servers/detail'
}
]),
]
def list_rules():
return hide_server_addresses_policies
|
<commit_before># Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
BASE_POLICY_NAME = 'os_compute_api:os-hide-server-addresses'
hide_server_addresses_policies = [
policy.RuleDefault(
name=BASE_POLICY_NAME,
check_str='is_admin:False'),
]
def list_rules():
return hide_server_addresses_policies
<commit_msg>Add policy description for 'os-hide-server-addresses'
This commit adds policy doc for 'os-hide-server-addresses' policies.
Partial implement blueprint policy-docs
Change-Id: I98edbd8579f052c74283bde2ec4f85d301a0807a<commit_after>
|
# Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.policies import base
BASE_POLICY_NAME = 'os_compute_api:os-hide-server-addresses'
hide_server_addresses_policies = [
base.create_rule_default(
BASE_POLICY_NAME,
'is_admin:False',
"""Hide server's 'addresses' key in the server response.
This sets the 'addresses' key in the server response to an empty dictionary
when the server is in a specific set of states as defined in
CONF.api.hide_server_address_states.
By default 'addresses' is hidden only when the server is in 'BUILDING'
state.""",
[
{
'method': 'GET',
'path': '/servers/{id}'
},
{
'method': 'GET',
'path': '/servers/detail'
}
]),
]
def list_rules():
return hide_server_addresses_policies
|
# Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
BASE_POLICY_NAME = 'os_compute_api:os-hide-server-addresses'
hide_server_addresses_policies = [
policy.RuleDefault(
name=BASE_POLICY_NAME,
check_str='is_admin:False'),
]
def list_rules():
return hide_server_addresses_policies
Add policy description for 'os-hide-server-addresses'
This commit adds policy doc for 'os-hide-server-addresses' policies.
Partial implement blueprint policy-docs
Change-Id: I98edbd8579f052c74283bde2ec4f85d301a0807a# Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.policies import base
BASE_POLICY_NAME = 'os_compute_api:os-hide-server-addresses'
hide_server_addresses_policies = [
base.create_rule_default(
BASE_POLICY_NAME,
'is_admin:False',
"""Hide server's 'addresses' key in the server response.
This sets the 'addresses' key in the server response to an empty dictionary
when the server is in a specific set of states as defined in
CONF.api.hide_server_address_states.
By default 'addresses' is hidden only when the server is in 'BUILDING'
state.""",
[
{
'method': 'GET',
'path': '/servers/{id}'
},
{
'method': 'GET',
'path': '/servers/detail'
}
]),
]
def list_rules():
return hide_server_addresses_policies
|
<commit_before># Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
BASE_POLICY_NAME = 'os_compute_api:os-hide-server-addresses'
hide_server_addresses_policies = [
policy.RuleDefault(
name=BASE_POLICY_NAME,
check_str='is_admin:False'),
]
def list_rules():
return hide_server_addresses_policies
<commit_msg>Add policy description for 'os-hide-server-addresses'
This commit adds policy doc for 'os-hide-server-addresses' policies.
Partial implement blueprint policy-docs
Change-Id: I98edbd8579f052c74283bde2ec4f85d301a0807a<commit_after># Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.policies import base
BASE_POLICY_NAME = 'os_compute_api:os-hide-server-addresses'
hide_server_addresses_policies = [
base.create_rule_default(
BASE_POLICY_NAME,
'is_admin:False',
"""Hide server's 'addresses' key in the server response.
This sets the 'addresses' key in the server response to an empty dictionary
when the server is in a specific set of states as defined in
CONF.api.hide_server_address_states.
By default 'addresses' is hidden only when the server is in 'BUILDING'
state.""",
[
{
'method': 'GET',
'path': '/servers/{id}'
},
{
'method': 'GET',
'path': '/servers/detail'
}
]),
]
def list_rules():
return hide_server_addresses_policies
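For context, a minimal sketch of how a rule list such as hide_server_addresses_policies is typically consumed; the enforcer wiring below is an assumption added for illustration and is not part of the commit:
from oslo_config import cfg
from oslo_policy import policy

# Hypothetical consumer: register the defaults, then check the rule for a request context.
enforcer = policy.Enforcer(cfg.CONF)
enforcer.register_defaults(hide_server_addresses_policies)
allowed = enforcer.enforce('os_compute_api:os-hide-server-addresses',
                           target={}, creds={'roles': ['member'], 'is_admin': False})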
|
b846a96e10eaac0bb9e90086198ce1175758adb6
|
cla_backend/apps/cla_eventlog/tests/test_find_and_delete_old_cases.py
|
cla_backend/apps/cla_eventlog/tests/test_find_and_delete_old_cases.py
|
from django.test import TestCase
from django.utils import timezone
from dateutil.relativedelta import relativedelta
from freezegun import freeze_time
from core.tests.mommy_utils import make_recipe
from cla_eventlog.models import Log
from legalaid.models import Case
class FindAndDeleteOldCases(TestCase):
def create_three_year_old_case(self):
case = None
freezer = freeze_time(timezone.now() + relativedelta(years=-3))
freezer.start()
case = make_recipe("legalaid.case")
freezer.stop()
return case
def test_find_cases_viewed_two_years_ago(self):
new_case = make_recipe("legalaid.case")
old_case_1 = self.create_three_year_old_case()
old_case_2 = self.create_three_year_old_case()
self.assertEqual(len(Case.objects.all()), 3)
make_recipe("cla_eventlog.log", case=new_case, code="CASE_VIEWED")
make_recipe("cla_eventlog.log", case=old_case_1, code="CASE_VIEWED")
make_recipe(
"cla_eventlog.log", case=old_case_2, code="CASE_VIEWED", created=timezone.now() + relativedelta(years=-3)
)
self.assertEqual(len(Log.objects.all()), 3)
logs_for_cases_created_two_years_old = Log.objects.filter(
case__created__lte=timezone.now() + relativedelta(years=-2)
)
case_viewed_logs = logs_for_cases_created_two_years_old.filter(code__contains="CASE_VIEWED")
cases_viewed_two_years_ago = case_viewed_logs.filter(created__lte=timezone.now() + relativedelta(years=-2))
self.assertEqual(len(cases_viewed_two_years_ago), 1)
|
Create and find cases that were viewed two years ago
|
Create and find cases that were viewed two years ago
Steps:
1. Create 3 cases: one new case and two cases with their 'created' property set to 3 years ago
2. Create an event log for each case with code property 'CASE_VIEWED'
3. The new case and one of the old cases will have the 'created' property for the event log set to right now
4. The other old case has its 'created' property set to 2 years ago
|
Python
|
mit
|
ministryofjustice/cla_backend,ministryofjustice/cla_backend,ministryofjustice/cla_backend,ministryofjustice/cla_backend
|
Create and find cases that were viewed two years ago
Steps:
1. Create 3 cases: one new case and two cases with their 'created' property set to 3 years ago
2. Create an event log for each case with code property 'CASE_VIEWED'
3. The new case and one of the old cases will have the 'created' property for the event log set to right now
4. The other old case has its 'created' property set to 2 years ago
|
from django.test import TestCase
from django.utils import timezone
from dateutil.relativedelta import relativedelta
from freezegun import freeze_time
from core.tests.mommy_utils import make_recipe
from cla_eventlog.models import Log
from legalaid.models import Case
class FindAndDeleteOldCases(TestCase):
def create_three_year_old_case(self):
case = None
freezer = freeze_time(timezone.now() + relativedelta(years=-3))
freezer.start()
case = make_recipe("legalaid.case")
freezer.stop()
return case
def test_find_cases_viewed_two_years_ago(self):
new_case = make_recipe("legalaid.case")
old_case_1 = self.create_three_year_old_case()
old_case_2 = self.create_three_year_old_case()
self.assertEqual(len(Case.objects.all()), 3)
make_recipe("cla_eventlog.log", case=new_case, code="CASE_VIEWED")
make_recipe("cla_eventlog.log", case=old_case_1, code="CASE_VIEWED")
make_recipe(
"cla_eventlog.log", case=old_case_2, code="CASE_VIEWED", created=timezone.now() + relativedelta(years=-3)
)
self.assertEqual(len(Log.objects.all()), 3)
logs_for_cases_created_two_years_old = Log.objects.filter(
case__created__lte=timezone.now() + relativedelta(years=-2)
)
case_viewed_logs = logs_for_cases_created_two_years_old.filter(code__contains="CASE_VIEWED")
cases_viewed_two_years_ago = case_viewed_logs.filter(created__lte=timezone.now() + relativedelta(years=-2))
self.assertEqual(len(cases_viewed_two_years_ago), 1)
|
<commit_before><commit_msg>Create and find cases that were viewed two years ago
Steps:
1. Create 3 cases: one new case and two cases with their 'created' property set to 3 years ago
2. Create an event log for each case with code property 'CASE_VIEWED'
3. The new case and one of the old cases will have the 'created' property for the event log set to right now
4. The other old case has its 'created' property set to 2 years ago<commit_after>
|
from django.test import TestCase
from django.utils import timezone
from dateutil.relativedelta import relativedelta
from freezegun import freeze_time
from core.tests.mommy_utils import make_recipe
from cla_eventlog.models import Log
from legalaid.models import Case
class FindAndDeleteOldCases(TestCase):
def create_three_year_old_case(self):
case = None
freezer = freeze_time(timezone.now() + relativedelta(years=-3))
freezer.start()
case = make_recipe("legalaid.case")
freezer.stop()
return case
def test_find_cases_viewed_two_years_ago(self):
new_case = make_recipe("legalaid.case")
old_case_1 = self.create_three_year_old_case()
old_case_2 = self.create_three_year_old_case()
self.assertEqual(len(Case.objects.all()), 3)
make_recipe("cla_eventlog.log", case=new_case, code="CASE_VIEWED")
make_recipe("cla_eventlog.log", case=old_case_1, code="CASE_VIEWED")
make_recipe(
"cla_eventlog.log", case=old_case_2, code="CASE_VIEWED", created=timezone.now() + relativedelta(years=-3)
)
self.assertEqual(len(Log.objects.all()), 3)
logs_for_cases_created_two_years_old = Log.objects.filter(
case__created__lte=timezone.now() + relativedelta(years=-2)
)
case_viewed_logs = logs_for_cases_created_two_years_old.filter(code__contains="CASE_VIEWED")
cases_viewed_two_years_ago = case_viewed_logs.filter(created__lte=timezone.now() + relativedelta(years=-2))
self.assertEqual(len(cases_viewed_two_years_ago), 1)
|
Create and find cases that were viewed two years ago
Steps:
1. Create 3 cases: one new case and two cases with their 'created' property set to 3 years ago
2. Create an event log for each case with code property 'CASE_VIEWED'
3. The new case and one of the old cases will have the 'created' property for the event log set to right now
4. The other old case has its 'created' property set to 2 years ago
from django.test import TestCase
from django.utils import timezone
from dateutil.relativedelta import relativedelta
from freezegun import freeze_time
from core.tests.mommy_utils import make_recipe
from cla_eventlog.models import Log
from legalaid.models import Case
class FindAndDeleteOldCases(TestCase):
def create_three_year_old_case(self):
case = None
freezer = freeze_time(timezone.now() + relativedelta(years=-3))
freezer.start()
case = make_recipe("legalaid.case")
freezer.stop()
return case
def test_find_cases_viewed_two_years_ago(self):
new_case = make_recipe("legalaid.case")
old_case_1 = self.create_three_year_old_case()
old_case_2 = self.create_three_year_old_case()
self.assertEqual(len(Case.objects.all()), 3)
make_recipe("cla_eventlog.log", case=new_case, code="CASE_VIEWED")
make_recipe("cla_eventlog.log", case=old_case_1, code="CASE_VIEWED")
make_recipe(
"cla_eventlog.log", case=old_case_2, code="CASE_VIEWED", created=timezone.now() + relativedelta(years=-3)
)
self.assertEqual(len(Log.objects.all()), 3)
logs_for_cases_created_two_years_old = Log.objects.filter(
case__created__lte=timezone.now() + relativedelta(years=-2)
)
case_viewed_logs = logs_for_cases_created_two_years_old.filter(code__contains="CASE_VIEWED")
cases_viewed_two_years_ago = case_viewed_logs.filter(created__lte=timezone.now() + relativedelta(years=-2))
self.assertEqual(len(cases_viewed_two_years_ago), 1)
|
<commit_before><commit_msg>Create and find cases that were viewed two years ago
Steps:
1. Create 3 cases: one new case and two cases with their 'created' property set to 3 years ago
2. Create an event log for each case with code property 'CASE_VIEWED'
3. The new case and one of the old cases will have the 'created' property for the event log set to right now
4. The other old case has its 'created' property set to 2 years ago<commit_after>from django.test import TestCase
from django.utils import timezone
from dateutil.relativedelta import relativedelta
from freezegun import freeze_time
from core.tests.mommy_utils import make_recipe
from cla_eventlog.models import Log
from legalaid.models import Case
class FindAndDeleteOldCases(TestCase):
def create_three_year_old_case(self):
case = None
freezer = freeze_time(timezone.now() + relativedelta(years=-3))
freezer.start()
case = make_recipe("legalaid.case")
freezer.stop()
return case
def test_find_cases_viewed_two_years_ago(self):
new_case = make_recipe("legalaid.case")
old_case_1 = self.create_three_year_old_case()
old_case_2 = self.create_three_year_old_case()
self.assertEqual(len(Case.objects.all()), 3)
make_recipe("cla_eventlog.log", case=new_case, code="CASE_VIEWED")
make_recipe("cla_eventlog.log", case=old_case_1, code="CASE_VIEWED")
make_recipe(
"cla_eventlog.log", case=old_case_2, code="CASE_VIEWED", created=timezone.now() + relativedelta(years=-3)
)
self.assertEqual(len(Log.objects.all()), 3)
logs_for_cases_created_two_years_old = Log.objects.filter(
case__created__lte=timezone.now() + relativedelta(years=-2)
)
case_viewed_logs = logs_for_cases_created_two_years_old.filter(code__contains="CASE_VIEWED")
cases_viewed_two_years_ago = case_viewed_logs.filter(created__lte=timezone.now() + relativedelta(years=-2))
self.assertEqual(len(cases_viewed_two_years_ago), 1)
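The test above only exercises the 'find' half of the class name; purely as an illustration (not part of the commit), the matching delete step could be sketched from the same queryset along these lines:
# Illustrative sketch only: remove the cases behind the logs found above.
for log in cases_viewed_two_years_ago:
    log.case.delete()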
|
|
4e6f834056141667fdbbe4e5a1101f65d5cb61a8
|
66_split_file_in_list.py
|
66_split_file_in_list.py
|
openFile = open("sample.txt")
for line in openFile:
line = line.rstrip()
if not line.startswith("From "):
continue
words = line.split()
#print type(words)
print words[2]
|
Read data from file and split the data and print
|
Read data from file and split the data and print
|
Python
|
mit
|
rahulbohra/Python-Basic
|
Read data from file and split the data and print
|
openFile = open("sample.txt")
for line in openFile:
line = line.rstrip()
if not line.startswith("From "):
continue
words = line.split()
#print type(words)
print words[2]
|
<commit_before><commit_msg>Read data from file and split the data and print<commit_after>
|
openFile = open("sample.txt")
for line in openFile:
line = line.rstrip()
if not line.startswith("From "):
continue
words = line.split()
#print type(words)
print words[2]
|
Read data from file and split the data and print
openFile = open("sample.txt")
for line in openFile:
line = line.rstrip()
if not line.startswith("From "):
continue
words = line.split()
#print type(words)
print words[2]
|
<commit_before><commit_msg>Read data from file and split the data and print<commit_after>openFile = open("sample.txt")
for line in openFile:
line = line.rstrip()
if not line.startswith("From "):
continue
words = line.split()
#print type(words)
print words[2]
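The snippet above uses Python 2 print statements; a rough Python 3 equivalent, assuming the same 'From '-prefixed lines in sample.txt, would be:
# Python 3 sketch of the same logic.
with open("sample.txt") as open_file:
    for line in open_file:
        line = line.rstrip()
        if not line.startswith("From "):
            continue
        words = line.split()
        print(words[2])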
|
|
bf90cd6fd3966eb2d96a9357593f8cfc13daab12
|
hearthstone/hslog/tests/test_main.py
|
hearthstone/hslog/tests/test_main.py
|
import pytest
from io import StringIO
from hearthstone.enums import PowerType
from hearthstone.hslog import LogParser
EMPTY_GAME = """
D 02:59:14.6088620 GameState.DebugPrintPower() - CREATE_GAME
D 02:59:14.6149420 GameState.DebugPrintPower() - GameEntity EntityID=1
D 02:59:14.6446530 GameState.DebugPrintPower() - Player EntityID=2 PlayerID=1 GameAccountId=[hi=1 lo=0]
D 02:59:14.6481950 GameState.DebugPrintPower() - Player EntityID=3 PlayerID=2 GameAccountId=[hi=3 lo=2]
""".strip()
def test_create_empty_game():
parser = LogParser()
parser.read(StringIO(EMPTY_GAME))
# Test resulting game/entities
assert len(parser.games) == 1
game = parser.games[0]
assert len(game.entities) == 3
assert len(game.players) == 2
assert game.entities[0] is game
assert game.entities[1] is game.players[0]
assert game.entities[2] is game.players[1]
# Test player objects
assert game.players[0].id == 2
assert game.players[0].player_id == 1
assert game.players[0].account_hi == 1
assert game.players[0].account_lo == 0
assert game.players[0].is_ai
assert not game.players[0].name
assert game.players[1].id == 3
assert game.players[1].player_id == 2
assert game.players[1].account_hi == 3
assert game.players[1].account_lo == 2
assert not game.players[1].is_ai
assert not game.players[1].name
# Test packet structure
assert len(game.packets) == 1
packet = game.packets[0]
assert packet.type == PowerType.CREATE_GAME
assert packet.entity is game
# Player packet objects are not the same as players
assert packet.players[0].entity is game.players[0]
assert packet.players[0].playerid == game.players[0].player_id
assert packet.players[1].entity is game.players[1]
assert packet.players[1].playerid == game.players[1].player_id
# All tags should be empty (we didn't pass any)
assert not game.tags
assert not game.players[0].tags
assert not game.players[1].tags
# Check some basic logic
assert game.get_player(1) is game.players[0]
assert game.get_player(2) is game.players[1]
|
Add some tests for an empty game
|
hslog: Add some tests for an empty game
|
Python
|
mit
|
HearthSim/python-hearthstone
|
hslog: Add some tests for an empty game
|
import pytest
from io import StringIO
from hearthstone.enums import PowerType
from hearthstone.hslog import LogParser
EMPTY_GAME = """
D 02:59:14.6088620 GameState.DebugPrintPower() - CREATE_GAME
D 02:59:14.6149420 GameState.DebugPrintPower() - GameEntity EntityID=1
D 02:59:14.6446530 GameState.DebugPrintPower() - Player EntityID=2 PlayerID=1 GameAccountId=[hi=1 lo=0]
D 02:59:14.6481950 GameState.DebugPrintPower() - Player EntityID=3 PlayerID=2 GameAccountId=[hi=3 lo=2]
""".strip()
def test_create_empty_game():
parser = LogParser()
parser.read(StringIO(EMPTY_GAME))
# Test resulting game/entities
assert len(parser.games) == 1
game = parser.games[0]
assert len(game.entities) == 3
assert len(game.players) == 2
assert game.entities[0] is game
assert game.entities[1] is game.players[0]
assert game.entities[2] is game.players[1]
# Test player objects
assert game.players[0].id == 2
assert game.players[0].player_id == 1
assert game.players[0].account_hi == 1
assert game.players[0].account_lo == 0
assert game.players[0].is_ai
assert not game.players[0].name
assert game.players[1].id == 3
assert game.players[1].player_id == 2
assert game.players[1].account_hi == 3
assert game.players[1].account_lo == 2
assert not game.players[1].is_ai
assert not game.players[1].name
# Test packet structure
assert len(game.packets) == 1
packet = game.packets[0]
assert packet.type == PowerType.CREATE_GAME
assert packet.entity is game
# Player packet objects are not the same as players
assert packet.players[0].entity is game.players[0]
assert packet.players[0].playerid == game.players[0].player_id
assert packet.players[1].entity is game.players[1]
assert packet.players[1].playerid == game.players[1].player_id
# All tags should be empty (we didn't pass any)
assert not game.tags
assert not game.players[0].tags
assert not game.players[1].tags
# Check some basic logic
assert game.get_player(1) is game.players[0]
assert game.get_player(2) is game.players[1]
|
<commit_before><commit_msg>hslog: Add some tests for an empty game<commit_after>
|
import pytest
from io import StringIO
from hearthstone.enums import PowerType
from hearthstone.hslog import LogParser
EMPTY_GAME = """
D 02:59:14.6088620 GameState.DebugPrintPower() - CREATE_GAME
D 02:59:14.6149420 GameState.DebugPrintPower() - GameEntity EntityID=1
D 02:59:14.6446530 GameState.DebugPrintPower() - Player EntityID=2 PlayerID=1 GameAccountId=[hi=1 lo=0]
D 02:59:14.6481950 GameState.DebugPrintPower() - Player EntityID=3 PlayerID=2 GameAccountId=[hi=3 lo=2]
""".strip()
def test_create_empty_game():
parser = LogParser()
parser.read(StringIO(EMPTY_GAME))
# Test resulting game/entities
assert len(parser.games) == 1
game = parser.games[0]
assert len(game.entities) == 3
assert len(game.players) == 2
assert game.entities[0] is game
assert game.entities[1] is game.players[0]
assert game.entities[2] is game.players[1]
# Test player objects
assert game.players[0].id == 2
assert game.players[0].player_id == 1
assert game.players[0].account_hi == 1
assert game.players[0].account_lo == 0
assert game.players[0].is_ai
assert not game.players[0].name
assert game.players[1].id == 3
assert game.players[1].player_id == 2
assert game.players[1].account_hi == 3
assert game.players[1].account_lo == 2
assert not game.players[1].is_ai
assert not game.players[1].name
# Test packet structure
assert len(game.packets) == 1
packet = game.packets[0]
assert packet.type == PowerType.CREATE_GAME
assert packet.entity is game
# Player packet objects are not the same as players
assert packet.players[0].entity is game.players[0]
assert packet.players[0].playerid == game.players[0].player_id
assert packet.players[1].entity is game.players[1]
assert packet.players[1].playerid == game.players[1].player_id
# All tags should be empty (we didn't pass any)
assert not game.tags
assert not game.players[0].tags
assert not game.players[1].tags
# Check some basic logic
assert game.get_player(1) is game.players[0]
assert game.get_player(2) is game.players[1]
|
hslog: Add some tests for an empty game
import pytest
from io import StringIO
from hearthstone.enums import PowerType
from hearthstone.hslog import LogParser
EMPTY_GAME = """
D 02:59:14.6088620 GameState.DebugPrintPower() - CREATE_GAME
D 02:59:14.6149420 GameState.DebugPrintPower() - GameEntity EntityID=1
D 02:59:14.6446530 GameState.DebugPrintPower() - Player EntityID=2 PlayerID=1 GameAccountId=[hi=1 lo=0]
D 02:59:14.6481950 GameState.DebugPrintPower() - Player EntityID=3 PlayerID=2 GameAccountId=[hi=3 lo=2]
""".strip()
def test_create_empty_game():
parser = LogParser()
parser.read(StringIO(EMPTY_GAME))
# Test resulting game/entities
assert len(parser.games) == 1
game = parser.games[0]
assert len(game.entities) == 3
assert len(game.players) == 2
assert game.entities[0] is game
assert game.entities[1] is game.players[0]
assert game.entities[2] is game.players[1]
# Test player objects
assert game.players[0].id == 2
assert game.players[0].player_id == 1
assert game.players[0].account_hi == 1
assert game.players[0].account_lo == 0
assert game.players[0].is_ai
assert not game.players[0].name
assert game.players[1].id == 3
assert game.players[1].player_id == 2
assert game.players[1].account_hi == 3
assert game.players[1].account_lo == 2
assert not game.players[1].is_ai
assert not game.players[1].name
# Test packet structure
assert len(game.packets) == 1
packet = game.packets[0]
assert packet.type == PowerType.CREATE_GAME
assert packet.entity is game
# Player packet objects are not the same as players
assert packet.players[0].entity is game.players[0]
assert packet.players[0].playerid == game.players[0].player_id
assert packet.players[1].entity is game.players[1]
assert packet.players[1].playerid == game.players[1].player_id
# All tags should be empty (we didn't pass any)
assert not game.tags
assert not game.players[0].tags
assert not game.players[1].tags
# Check some basic logic
assert game.get_player(1) is game.players[0]
assert game.get_player(2) is game.players[1]
|
<commit_before><commit_msg>hslog: Add some tests for an empty game<commit_after>import pytest
from io import StringIO
from hearthstone.enums import PowerType
from hearthstone.hslog import LogParser
EMPTY_GAME = """
D 02:59:14.6088620 GameState.DebugPrintPower() - CREATE_GAME
D 02:59:14.6149420 GameState.DebugPrintPower() - GameEntity EntityID=1
D 02:59:14.6446530 GameState.DebugPrintPower() - Player EntityID=2 PlayerID=1 GameAccountId=[hi=1 lo=0]
D 02:59:14.6481950 GameState.DebugPrintPower() - Player EntityID=3 PlayerID=2 GameAccountId=[hi=3 lo=2]
""".strip()
def test_create_empty_game():
parser = LogParser()
parser.read(StringIO(EMPTY_GAME))
# Test resulting game/entities
assert len(parser.games) == 1
game = parser.games[0]
assert len(game.entities) == 3
assert len(game.players) == 2
assert game.entities[0] is game
assert game.entities[1] is game.players[0]
assert game.entities[2] is game.players[1]
# Test player objects
assert game.players[0].id == 2
assert game.players[0].player_id == 1
assert game.players[0].account_hi == 1
assert game.players[0].account_lo == 0
assert game.players[0].is_ai
assert not game.players[0].name
assert game.players[1].id == 3
assert game.players[1].player_id == 2
assert game.players[1].account_hi == 3
assert game.players[1].account_lo == 2
assert not game.players[1].is_ai
assert not game.players[1].name
# Test packet structure
assert len(game.packets) == 1
packet = game.packets[0]
assert packet.type == PowerType.CREATE_GAME
assert packet.entity is game
# Player packet objects are not the same as players
assert packet.players[0].entity is game.players[0]
assert packet.players[0].playerid == game.players[0].player_id
assert packet.players[1].entity is game.players[1]
assert packet.players[1].playerid == game.players[1].player_id
# All tags should be empty (we didn't pass any)
assert not game.tags
assert not game.players[0].tags
assert not game.players[1].tags
# Check some basic logic
assert game.get_player(1) is game.players[0]
assert game.get_player(2) is game.players[1]
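The test feeds the parser a StringIO fixture, so the same API also accepts any file-like object; a minimal sketch against a log file on disk (the file name is illustrative, not from the commit):
from hearthstone.hslog import LogParser

# Hypothetical: parse a real Power.log instead of the inline fixture.
parser = LogParser()
with open("Power.log") as log_file:
    parser.read(log_file)
first_game = parser.games[0]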
|
|
44e0336db0af70acc0ff890e6dd512bb3ce01065
|
keystone/tests/test_v2_controller.py
|
keystone/tests/test_v2_controller.py
|
# -*- coding: utf-8 -*-
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from keystone.assignment import controllers
from keystone import exception
from keystone import tests
from keystone.tests import default_fixtures
_ADMIN_CONTEXT = {'is_admin': True}
class TenantTestCase(tests.TestCase):
"""Tests for the V2 Tenant controller.
These tests exercise :class:`keystone.assignment.controllers.Tenant`.
"""
def test_get_project_users_no_user(self):
"""get_project_users when user doesn't exist, raises UserNotFound.
When a user that's not known to `identity` has a role on a project,
then `get_project_users` raises
:class:`keystone.exception.UserNotFound`.
"""
self.load_backends()
self.load_fixtures(default_fixtures)
tenant_controller = controllers.Tenant()
role_controller = controllers.Role()
# Assign a role to a user that doesn't exist to the `bar` project.
project_id = self.tenant_bar['id']
user_id = uuid.uuid4().hex
role_controller.add_role_to_user(
_ADMIN_CONTEXT, user_id, self.role_other['id'], project_id)
self.assertRaisesRegexp(exception.UserNotFound,
'Could not find user, %s' % user_id,
tenant_controller.get_project_users,
_ADMIN_CONTEXT, project_id)
|
Add test for list project users when no user
|
Add test for list project users when no user
There was no test that shows what happens when you list project users
(GET /v2.0/tenants/{tenant_id}/users) and a user with a role on the
project doesn't exist. The server returns a 404 Not Found for the user.
bp no-check-id
Change-Id: Iba8836200c22eb5d744b42114cc8c6e864c3e02f
|
Python
|
apache-2.0
|
cernops/keystone,reeshupatel/demo,promptworks/keystone,nuxeh/keystone,idjaw/keystone,mahak/keystone,blueboxgroup/keystone,ilay09/keystone,openstack/keystone,ilay09/keystone,vivekdhayaal/keystone,nuxeh/keystone,reeshupatel/demo,jamielennox/keystone,UTSA-ICS/keystone-kerberos,jumpstarter-io/keystone,ilay09/keystone,klmitch/keystone,blueboxgroup/keystone,JioCloud/keystone,rushiagr/keystone,dstanek/keystone,promptworks/keystone,dims/keystone,ging/keystone,openstack/keystone,dims/keystone,rodrigods/keystone,rushiagr/keystone,jonnary/keystone,rushiagr/keystone,takeshineshiro/keystone,ajayaa/keystone,vivekdhayaal/keystone,rajalokan/keystone,ajayaa/keystone,ging/keystone,openstack/keystone,maestro-hybrid-cloud/keystone,himanshu-setia/keystone,idjaw/keystone,reeshupatel/demo,klmitch/keystone,takeshineshiro/keystone,rajalokan/keystone,jumpstarter-io/keystone,rodrigods/keystone,rajalokan/keystone,mahak/keystone,jumpstarter-io/keystone,jamielennox/keystone,roopali8/keystone,nuxeh/keystone,MaheshIBM/keystone,JioCloud/keystone,dstanek/keystone,vivekdhayaal/keystone,cernops/keystone,maestro-hybrid-cloud/keystone,jonnary/keystone,MaheshIBM/keystone,roopali8/keystone,himanshu-setia/keystone,UTSA-ICS/keystone-kerberos,mahak/keystone,dstanek/keystone,promptworks/keystone
|
Add test for list project users when no user
There was no test that shows what happens when you list project users
(GET /v2.0/tenants/{tenant_id}/users) and a user with a role on the
project doesn't exist. The server returns a 404 Not Found for the user.
bp no-check-id
Change-Id: Iba8836200c22eb5d744b42114cc8c6e864c3e02f
|
# -*- coding: utf-8 -*-
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from keystone.assignment import controllers
from keystone import exception
from keystone import tests
from keystone.tests import default_fixtures
_ADMIN_CONTEXT = {'is_admin': True}
class TenantTestCase(tests.TestCase):
"""Tests for the V2 Tenant controller.
These tests exercise :class:`keystone.assignment.controllers.Tenant`.
"""
def test_get_project_users_no_user(self):
"""get_project_users when user doesn't exist, raises UserNotFound.
When a user that's not known to `identity` has a role on a project,
then `get_project_users` raises
:class:`keystone.exception.UserNotFound`.
"""
self.load_backends()
self.load_fixtures(default_fixtures)
tenant_controller = controllers.Tenant()
role_controller = controllers.Role()
# Assign a role to a user that doesn't exist to the `bar` project.
project_id = self.tenant_bar['id']
user_id = uuid.uuid4().hex
role_controller.add_role_to_user(
_ADMIN_CONTEXT, user_id, self.role_other['id'], project_id)
self.assertRaisesRegexp(exception.UserNotFound,
'Could not find user, %s' % user_id,
tenant_controller.get_project_users,
_ADMIN_CONTEXT, project_id)
|
<commit_before><commit_msg>Add test for list project users when no user
There was no test that shows what happens when you list project users
(GET /v2.0/tenants/{tenant_id}/users) and a user with a role on the
project doesn't exist. The server returns a 404 Not Found for the user.
bp no-check-id
Change-Id: Iba8836200c22eb5d744b42114cc8c6e864c3e02f<commit_after>
|
# -*- coding: utf-8 -*-
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from keystone.assignment import controllers
from keystone import exception
from keystone import tests
from keystone.tests import default_fixtures
_ADMIN_CONTEXT = {'is_admin': True}
class TenantTestCase(tests.TestCase):
"""Tests for the V2 Tenant controller.
These tests exercise :class:`keystone.assignment.controllers.Tenant`.
"""
def test_get_project_users_no_user(self):
"""get_project_users when user doesn't exist, raises UserNotFound.
When a user that's not known to `identity` has a role on a project,
then `get_project_users` raises
:class:`keystone.exception.UserNotFound`.
"""
self.load_backends()
self.load_fixtures(default_fixtures)
tenant_controller = controllers.Tenant()
role_controller = controllers.Role()
# Assign a role to a user that doesn't exist to the `bar` project.
project_id = self.tenant_bar['id']
user_id = uuid.uuid4().hex
role_controller.add_role_to_user(
_ADMIN_CONTEXT, user_id, self.role_other['id'], project_id)
self.assertRaisesRegexp(exception.UserNotFound,
'Could not find user, %s' % user_id,
tenant_controller.get_project_users,
_ADMIN_CONTEXT, project_id)
|
Add test for list project users when no user
There was no test that shows what happens when you list project users
(GET /v2.0/tenants/{tenant_id}/users) and a user with a role on the
project doesn't exist. The server returns a 404 Not Found for the user.
bp no-check-id
Change-Id: Iba8836200c22eb5d744b42114cc8c6e864c3e02f
# -*- coding: utf-8 -*-
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from keystone.assignment import controllers
from keystone import exception
from keystone import tests
from keystone.tests import default_fixtures
_ADMIN_CONTEXT = {'is_admin': True}
class TenantTestCase(tests.TestCase):
"""Tests for the V2 Tenant controller.
These tests exercise :class:`keystone.assignment.controllers.Tenant`.
"""
def test_get_project_users_no_user(self):
"""get_project_users when user doesn't exist, raises UserNotFound.
When a user that's not known to `identity` has a role on a project,
then `get_project_users` raises
:class:`keystone.exception.UserNotFound`.
"""
self.load_backends()
self.load_fixtures(default_fixtures)
tenant_controller = controllers.Tenant()
role_controller = controllers.Role()
# Assign a role to a user that doesn't exist to the `bar` project.
project_id = self.tenant_bar['id']
user_id = uuid.uuid4().hex
role_controller.add_role_to_user(
_ADMIN_CONTEXT, user_id, self.role_other['id'], project_id)
self.assertRaisesRegexp(exception.UserNotFound,
'Could not find user, %s' % user_id,
tenant_controller.get_project_users,
_ADMIN_CONTEXT, project_id)
|
<commit_before><commit_msg>Add test for list project users when no user
There was no test that shows what happens when you list project users
(GET /v2.0/tenants/{tenant_id}/users) and a user with a role on the
project doesn't exist. The server returns a 404 Not Found for the user.
bp no-check-id
Change-Id: Iba8836200c22eb5d744b42114cc8c6e864c3e02f<commit_after># -*- coding: utf-8 -*-
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from keystone.assignment import controllers
from keystone import exception
from keystone import tests
from keystone.tests import default_fixtures
_ADMIN_CONTEXT = {'is_admin': True}
class TenantTestCase(tests.TestCase):
"""Tests for the V2 Tenant controller.
These tests exercise :class:`keystone.assignment.controllers.Tenant`.
"""
def test_get_project_users_no_user(self):
"""get_project_users when user doesn't exist, raises UserNotFound.
When a user that's not known to `identity` has a role on a project,
then `get_project_users` raises
:class:`keystone.exception.UserNotFound`.
"""
self.load_backends()
self.load_fixtures(default_fixtures)
tenant_controller = controllers.Tenant()
role_controller = controllers.Role()
# Assign a role to a user that doesn't exist to the `bar` project.
project_id = self.tenant_bar['id']
user_id = uuid.uuid4().hex
role_controller.add_role_to_user(
_ADMIN_CONTEXT, user_id, self.role_other['id'], project_id)
self.assertRaisesRegexp(exception.UserNotFound,
'Could not find user, %s' % user_id,
tenant_controller.get_project_users,
_ADMIN_CONTEXT, project_id)
|
|
0de96828c959ef2dd3c007618c81e1b8f8a42f77
|
open_humans/management/commands/suspend_users.py
|
open_humans/management/commands/suspend_users.py
|
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand, CommandError
UserModel = get_user_model()
class Command(BaseCommand):
"""
A management command for suspending one or more users.
"""
help = 'Suspend users by username'
def add_arguments(self, parser):
parser.add_argument('-u', '--username',
dest='username',
required=False,
help=('one or more usernames, comma separated'))
parser.add_argument('-i', '--id',
dest='id',
required=False,
help=('one or more ids, comma separated'))
def handle(self, *args, **options):
users_to_suspend = []
if options['username']:
usernames = options['username'].split(',')
for username in usernames:
try:
users_to_suspend.append(
UserModel.objects.get(username=username))
except UserModel.DoesNotExist:
raise CommandError('Username "{}" does not exist!'
.format(username))
if options['id']:
ids = options['id'].split(',')
for id_str in ids:
try:
users_to_suspend.append(
UserModel.objects.get(id=int(id_str)))
except UserModel.DoesNotExist:
raise CommandError('User ID "{}" does not exist!'
.format(id_str))
for user in users_to_suspend:
user.is_active = False
user.save()
print('{} (ID: {}) is suspended.'.format(user.username, user.id))
|
Add management command to suspend users
|
Add management command to suspend users
|
Python
|
mit
|
PersonalGenomesOrg/open-humans,PersonalGenomesOrg/open-humans,OpenHumans/open-humans,PersonalGenomesOrg/open-humans,OpenHumans/open-humans,OpenHumans/open-humans,PersonalGenomesOrg/open-humans,OpenHumans/open-humans
|
Add management command to suspend users
|
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand, CommandError
UserModel = get_user_model()
class Command(BaseCommand):
"""
A management command for suspending one or more users.
"""
help = 'Suspend users by username'
def add_arguments(self, parser):
parser.add_argument('-u', '--username',
dest='username',
required=False,
help=('one or more usernames, comma separated'))
parser.add_argument('-i', '--id',
dest='id',
required=False,
help=('one or more ids, comma separated'))
def handle(self, *args, **options):
users_to_suspend = []
if options['username']:
usernames = options['username'].split(',')
for username in usernames:
try:
users_to_suspend.append(
UserModel.objects.get(username=username))
except UserModel.DoesNotExist:
raise CommandError('Username "{}" does not exist!'
.format(username))
if options['id']:
ids = options['id'].split(',')
for id_str in ids:
try:
users_to_suspend.append(
UserModel.objects.get(id=int(id_str)))
except UserModel.DoesNotExist:
raise CommandError('User ID "{}" does not exist!'
.format(id_str))
for user in users_to_suspend:
user.is_active = False
user.save()
print('{} (ID: {}) is suspended.'.format(user.username, user.id))
|
<commit_before><commit_msg>Add management command to suspend users<commit_after>
|
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand, CommandError
UserModel = get_user_model()
class Command(BaseCommand):
"""
A management command for suspending one or more users.
"""
help = 'Suspend users by username'
def add_arguments(self, parser):
parser.add_argument('-u', '--username',
dest='username',
required=False,
help=('one or more usernames, comma separated'))
parser.add_argument('-i', '--id',
dest='id',
required=False,
help=('one or more ids, comma separated'))
def handle(self, *args, **options):
users_to_suspend = []
if options['username']:
usernames = options['username'].split(',')
for username in usernames:
try:
users_to_suspend.append(
UserModel.objects.get(username=username))
except UserModel.DoesNotExist:
raise CommandError('Username "{}" does not exist!'
.format(username))
if options['id']:
ids = options['id'].split(',')
for id_str in ids:
try:
users_to_suspend.append(
UserModel.objects.get(id=int(id_str)))
except UserModel.DoesNotExist:
raise CommandError('User ID "{}" does not exist!'
.format(id_str))
for user in users_to_suspend:
user.is_active = False
user.save()
print('{} (ID: {}) is suspended.'.format(user.username, user.id))
|
Add management command to suspend users
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand, CommandError
UserModel = get_user_model()
class Command(BaseCommand):
"""
A management command for suspending one or more users.
"""
help = 'Suspend users by username'
def add_arguments(self, parser):
parser.add_argument('-u', '--username',
dest='username',
required=False,
help=('one or more usernames, comma separated'))
parser.add_argument('-i', '--id',
dest='id',
required=False,
help=('one or more ids, comma separated'))
def handle(self, *args, **options):
users_to_suspend = []
if options['username']:
usernames = options['username'].split(',')
for username in usernames:
try:
users_to_suspend.append(
UserModel.objects.get(username=username))
except UserModel.DoesNotExist:
raise CommandError('Username "{}" does not exist!'
.format(username))
if options['id']:
ids = options['id'].split(',')
for id_str in ids:
try:
users_to_suspend.append(
UserModel.objects.get(id=int(id_str)))
except UserModel.DoesNotExist:
raise CommandError('User ID "{}" does not exist!'
.format(id_str))
for user in users_to_suspend:
user.is_active = False
user.save()
print('{} (ID: {}) is suspended.'.format(user.username, user.id))
|
<commit_before><commit_msg>Add management command to suspend users<commit_after>from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand, CommandError
UserModel = get_user_model()
class Command(BaseCommand):
"""
A management command for suspending one or more users.
"""
help = 'Suspend users by username'
def add_arguments(self, parser):
parser.add_argument('-u', '--username',
dest='username',
required=False,
help=('one or more usernames, comma separated'))
parser.add_argument('-i', '--id',
dest='id',
required=False,
help=('one or more ids, comma separated'))
def handle(self, *args, **options):
users_to_suspend = []
if options['username']:
usernames = options['username'].split(',')
for username in usernames:
try:
users_to_suspend.append(
UserModel.objects.get(username=username))
except UserModel.DoesNotExist:
raise CommandError('Username "{}" does not exist!'
.format(username))
if options['id']:
ids = options['id'].split(',')
for id_str in ids:
try:
users_to_suspend.append(
UserModel.objects.get(id=int(id_str)))
except UserModel.DoesNotExist:
raise CommandError('User ID "{}" does not exist!'
.format(id_str))
for user in users_to_suspend:
user.is_active = False
user.save()
print('{} (ID: {}) is suspended.'.format(user.username, user.id))
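A hedged usage sketch for the command above; the usernames and ids are placeholders, not values from the commit:
# Typical invocations from a shell:
#   python manage.py suspend_users --username alice,bob
#   python manage.py suspend_users --id 12,34
# or programmatically via Django's management API:
from django.core.management import call_command
call_command('suspend_users', username='alice,bob')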
|
|
8e1f5fbcb50b31f32b6de5ea76f46222156fda72
|
admin_panel/forms.py
|
admin_panel/forms.py
|
from django import forms
from blog_posting import models
class CreatePost(forms.ModelForm):
class Meta:
model = models.Post
exclude = ("created_at", "modified_at", "published")
|
Create a form object for the create-post request
|
Create a form object for the create-post request
|
Python
|
mpl-2.0
|
Apo11onian/Apollo-Blog,Apo11onian/Apollo-Blog,Apo11onian/Apollo-Blog
|
Create a form object for the create-post request
|
from django import forms
from blog_posting import models
class CreatePost(forms.ModelForm):
class Meta:
model = models.Post
exclude = ("created_at", "modified_at", "published")
|
<commit_before><commit_msg>Create a form object for the create-post request<commit_after>
|
from django import forms
from blog_posting import models
class CreatePost(forms.ModelForm):
class Meta:
model = models.Post
exclude = ("created_at", "modified_at", "published")
|
Create a form object for the create-post request
from django import forms
from blog_posting import models
class CreatePost(forms.ModelForm):
class Meta:
model = models.Post
exclude = ("created_at", "modified_at", "published")
|
<commit_before><commit_msg>Create a form object for the create-post request<commit_after>from django import forms
from blog_posting import models
class CreatePost(forms.ModelForm):
class Meta:
model = models.Post
exclude = ("created_at", "modified_at", "published")
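For illustration only, a sketch of how the ModelForm above would typically be used in a view; the view name, template and URL name are assumptions, not part of the commit:
from django.shortcuts import redirect, render

# Hypothetical view-side usage of the CreatePost ModelForm.
def create_post(request):
    form = CreatePost(request.POST or None)
    if form.is_valid():
        post = form.save()  # excluded fields come from model defaults
        return redirect('post_detail', pk=post.pk)
    return render(request, 'create_post.html', {'form': form})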
|
|
6ff83af42d87ca8b36574b4c71ffeb02d51356ad
|
tests/test_assess_cloud.py
|
tests/test_assess_cloud.py
|
from mock import call, Mock, patch
from assess_cloud import assess_cloud_combined
from deploy_stack import BootstrapManager
from fakejuju import fake_juju_client
from tests import (
FakeHomeTestCase,
observable_temp_file,
)
class TestAssessCloudCombined(FakeHomeTestCase):
def backend_call(self, client, cmd, args, model=None, check=True,
timeout=None, extra_env=None):
return call(cmd, args, client.used_feature_flags,
client.env.juju_home, model, check, timeout, extra_env)
def test_assess_cloud_combined(self):
client = fake_juju_client()
client.env.juju_home = self.juju_home
bs_manager = BootstrapManager(
'foo', client, client, bootstrap_host=None, machines=[],
series=None, agent_url=None, agent_stream=None, region=None,
log_dir=self.juju_home, keep_env=False, permanent=True,
jes_enabled=True)
backend = client._backend
with patch.object(backend, 'juju', wraps=backend.juju):
juju_wrapper = backend.juju
with observable_temp_file() as temp_file:
assess_cloud_combined(bs_manager)
juju_wrapper.assert_has_calls([
self.backend_call(
client, 'bootstrap', (
'--constraints', 'mem=2G', 'foo/bar', 'foo', '--config',
temp_file.name, '--default-model', 'foo',
'--agent-version', client.version)),
self.backend_call(client, 'deploy', 'ubuntu', 'foo:foo'),
self.backend_call(client, 'remove-unit', 'ubuntu/0', 'foo:foo'),
self.backend_call(
client, 'destroy-controller',
('foo', '-y', '--destroy-all-models'),
timeout=600),
], any_order=True)
|
Add initial test of assess_cloud_combined.
|
Add initial test of assess_cloud_combined.
|
Python
|
agpl-3.0
|
mjs/juju,mjs/juju,mjs/juju,mjs/juju,mjs/juju,mjs/juju,mjs/juju
|
Add initial test of assess_cloud_combined.
|
from mock import call, Mock, patch
from assess_cloud import assess_cloud_combined
from deploy_stack import BootstrapManager
from fakejuju import fake_juju_client
from tests import (
FakeHomeTestCase,
observable_temp_file,
)
class TestAssessCloudCombined(FakeHomeTestCase):
def backend_call(self, client, cmd, args, model=None, check=True,
timeout=None, extra_env=None):
return call(cmd, args, client.used_feature_flags,
client.env.juju_home, model, check, timeout, extra_env)
def test_assess_cloud_combined(self):
client = fake_juju_client()
client.env.juju_home = self.juju_home
bs_manager = BootstrapManager(
'foo', client, client, bootstrap_host=None, machines=[],
series=None, agent_url=None, agent_stream=None, region=None,
log_dir=self.juju_home, keep_env=False, permanent=True,
jes_enabled=True)
backend = client._backend
with patch.object(backend, 'juju', wraps=backend.juju):
juju_wrapper = backend.juju
with observable_temp_file() as temp_file:
assess_cloud_combined(bs_manager)
juju_wrapper.assert_has_calls([
self.backend_call(
client, 'bootstrap', (
'--constraints', 'mem=2G', 'foo/bar', 'foo', '--config',
temp_file.name, '--default-model', 'foo',
'--agent-version', client.version)),
self.backend_call(client, 'deploy', 'ubuntu', 'foo:foo'),
self.backend_call(client, 'remove-unit', 'ubuntu/0', 'foo:foo'),
self.backend_call(
client, 'destroy-controller',
('foo', '-y', '--destroy-all-models'),
timeout=600),
], any_order=True)
|
<commit_before><commit_msg>Add initial test of assess_cloud_combined.<commit_after>
|
from mock import call, Mock, patch
from assess_cloud import assess_cloud_combined
from deploy_stack import BootstrapManager
from fakejuju import fake_juju_client
from tests import (
FakeHomeTestCase,
observable_temp_file,
)
class TestAssessCloudCombined(FakeHomeTestCase):
def backend_call(self, client, cmd, args, model=None, check=True,
timeout=None, extra_env=None):
return call(cmd, args, client.used_feature_flags,
client.env.juju_home, model, check, timeout, extra_env)
def test_assess_cloud_combined(self):
client = fake_juju_client()
client.env.juju_home = self.juju_home
bs_manager = BootstrapManager(
'foo', client, client, bootstrap_host=None, machines=[],
series=None, agent_url=None, agent_stream=None, region=None,
log_dir=self.juju_home, keep_env=False, permanent=True,
jes_enabled=True)
backend = client._backend
with patch.object(backend, 'juju', wraps=backend.juju):
juju_wrapper = backend.juju
with observable_temp_file() as temp_file:
assess_cloud_combined(bs_manager)
juju_wrapper.assert_has_calls([
self.backend_call(
client, 'bootstrap', (
'--constraints', 'mem=2G', 'foo/bar', 'foo', '--config',
temp_file.name, '--default-model', 'foo',
'--agent-version', client.version)),
self.backend_call(client, 'deploy', 'ubuntu', 'foo:foo'),
self.backend_call(client, 'remove-unit', 'ubuntu/0', 'foo:foo'),
self.backend_call(
client, 'destroy-controller',
('foo', '-y', '--destroy-all-models'),
timeout=600),
], any_order=True)
|
Add initial test of assess_cloud_combined.
from mock import call, Mock, patch
from assess_cloud import assess_cloud_combined
from deploy_stack import BootstrapManager
from fakejuju import fake_juju_client
from tests import (
FakeHomeTestCase,
observable_temp_file,
)
class TestAssessCloudCombined(FakeHomeTestCase):
def backend_call(self, client, cmd, args, model=None, check=True,
timeout=None, extra_env=None):
return call(cmd, args, client.used_feature_flags,
client.env.juju_home, model, check, timeout, extra_env)
def test_assess_cloud_combined(self):
client = fake_juju_client()
client.env.juju_home = self.juju_home
bs_manager = BootstrapManager(
'foo', client, client, bootstrap_host=None, machines=[],
series=None, agent_url=None, agent_stream=None, region=None,
log_dir=self.juju_home, keep_env=False, permanent=True,
jes_enabled=True)
backend = client._backend
with patch.object(backend, 'juju', wraps=backend.juju):
juju_wrapper = backend.juju
with observable_temp_file() as temp_file:
assess_cloud_combined(bs_manager)
juju_wrapper.assert_has_calls([
self.backend_call(
client, 'bootstrap', (
'--constraints', 'mem=2G', 'foo/bar', 'foo', '--config',
temp_file.name, '--default-model', 'foo',
'--agent-version', client.version)),
self.backend_call(client, 'deploy', 'ubuntu', 'foo:foo'),
self.backend_call(client, 'remove-unit', 'ubuntu/0', 'foo:foo'),
self.backend_call(
client, 'destroy-controller',
('foo', '-y', '--destroy-all-models'),
timeout=600),
], any_order=True)
|
<commit_before><commit_msg>Add initial test of assess_cloud_combined.<commit_after>
from mock import call, Mock, patch
from assess_cloud import assess_cloud_combined
from deploy_stack import BootstrapManager
from fakejuju import fake_juju_client
from tests import (
FakeHomeTestCase,
observable_temp_file,
)
class TestAssessCloudCombined(FakeHomeTestCase):
def backend_call(self, client, cmd, args, model=None, check=True,
timeout=None, extra_env=None):
return call(cmd, args, client.used_feature_flags,
client.env.juju_home, model, check, timeout, extra_env)
def test_assess_cloud_combined(self):
client = fake_juju_client()
client.env.juju_home = self.juju_home
bs_manager = BootstrapManager(
'foo', client, client, bootstrap_host=None, machines=[],
series=None, agent_url=None, agent_stream=None, region=None,
log_dir=self.juju_home, keep_env=False, permanent=True,
jes_enabled=True)
backend = client._backend
with patch.object(backend, 'juju', wraps=backend.juju):
juju_wrapper = backend.juju
with observable_temp_file() as temp_file:
assess_cloud_combined(bs_manager)
juju_wrapper.assert_has_calls([
self.backend_call(
client, 'bootstrap', (
'--constraints', 'mem=2G', 'foo/bar', 'foo', '--config',
temp_file.name, '--default-model', 'foo',
'--agent-version', client.version)),
self.backend_call(client, 'deploy', 'ubuntu', 'foo:foo'),
self.backend_call(client, 'remove-unit', 'ubuntu/0', 'foo:foo'),
self.backend_call(
client, 'destroy-controller',
('foo', '-y', '--destroy-all-models'),
timeout=600),
], any_order=True)
|
|
f328e60bf62e7bee662113f9f2fdb41f69b3ee24
|
tests/test_visualise.py
|
tests/test_visualise.py
|
"""test_visualise.py
Unit testing for the pointcloud.visualise module
"""
import pytest
from simulocloud.visualise import scatter
from simulocloud.exceptions import BadAxes, InvalidAxesDims
from test_pointcloud import pc_las
def test_scatter_rejects_wrong_axes_type(pc_las):
"""Is an error raised when axes argument to scatter is not str?."""
with pytest.raises(BadAxes):
scatter((pc_las,), pc_las)
def test_scatter_rejects_wrong_axes_length(pc_las):
"""Is an error raised when axes argument to scatter is not str?."""
with pytest.raises(InvalidAxesDims):
scatter((pc_las,), 'x')
|
Write basic unit tests for scatter argument checks
|
Write basic unit tests for scatter argument checks
|
Python
|
mit
|
stainbank/simulocloud
|
Write basic unit tests for scatter argument checks
|
"""test_visualise.py
Unit testing for the pointcloud.visualise module
"""
import pytest
from simulocloud.visualise import scatter
from simulocloud.exceptions import BadAxes, InvalidAxesDims
from test_pointcloud import pc_las
def test_scatter_rejects_wrong_axes_type(pc_las):
"""Is an error raised when axes argument to scatter is not str?."""
with pytest.raises(BadAxes):
scatter((pc_las,), pc_las)
def test_scatter_rejects_wrong_axes_length(pc_las):
"""Is an error raised when axes argument to scatter is not str?."""
with pytest.raises(InvalidAxesDims):
scatter((pc_las,), 'x')
|
<commit_before><commit_msg>Write basic unit tests for scatter argument checks<commit_after>
|
"""test_visualise.py
Unit testing for the pointcloud.visualise module
"""
import pytest
from simulocloud.visualise import scatter
from simulocloud.exceptions import BadAxes, InvalidAxesDims
from test_pointcloud import pc_las
def test_scatter_rejects_wrong_axes_type(pc_las):
"""Is an error raised when axes argument to scatter is not str?."""
with pytest.raises(BadAxes):
scatter((pc_las,), pc_las)
def test_scatter_rejects_wrong_axes_length(pc_las):
"""Is an error raised when axes argument to scatter is not str?."""
with pytest.raises(InvalidAxesDims):
scatter((pc_las,), 'x')
|
Write basic unit tests for scatter argument checks
"""test_visualise.py
Unit testing for the pointcloud.visualise module
"""
import pytest
from simulocloud.visualise import scatter
from simulocloud.exceptions import BadAxes, InvalidAxesDims
from test_pointcloud import pc_las
def test_scatter_rejects_wrong_axes_type(pc_las):
"""Is an error raised when axes argument to scatter is not str?."""
with pytest.raises(BadAxes):
scatter((pc_las,), pc_las)
def test_scatter_rejects_wrong_axes_length(pc_las):
"""Is an error raised when axes argument to scatter is not str?."""
with pytest.raises(InvalidAxesDims):
scatter((pc_las,), 'x')
|
<commit_before><commit_msg>Write basic unit tests for scatter argument checks<commit_after>"""test_visualise.py
Unit testing for the pointcloud.visualise module
"""
import pytest
from simulocloud.visualise import scatter
from simulocloud.exceptions import BadAxes, InvalidAxesDims
from test_pointcloud import pc_las
def test_scatter_rejects_wrong_axes_type(pc_las):
"""Is an error raised when axes argument to scatter is not str?."""
with pytest.raises(BadAxes):
scatter((pc_las,), pc_las)
def test_scatter_rejects_wrong_axes_length(pc_las):
"""Is an error raised when axes argument to scatter is not str?."""
with pytest.raises(InvalidAxesDims):
scatter((pc_las,), 'x')
|
|
440675022f5fbdc3dc009cb009e80ceb388990df
|
andalusian/migrations/0005_auto_20190717_1211.py
|
andalusian/migrations/0005_auto_20190717_1211.py
|
# Generated by Django 2.1.7 on 2019-07-17 10:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('andalusian', '0004_auto_20190122_1554'),
]
operations = [
migrations.RemoveField(
model_name='instrument',
name='original_name',
),
migrations.AddField(
model_name='instrument',
name='mbid',
field=models.UUIDField(blank=True, null=True),
),
]
|
Add migration file for the addition of andalusian instruments
|
Add migration file for the addition of andalusian instruments
|
Python
|
agpl-3.0
|
MTG/dunya,MTG/dunya,MTG/dunya,MTG/dunya
|
Add migration file for the addition of andalusian instruments
|
# Generated by Django 2.1.7 on 2019-07-17 10:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('andalusian', '0004_auto_20190122_1554'),
]
operations = [
migrations.RemoveField(
model_name='instrument',
name='original_name',
),
migrations.AddField(
model_name='instrument',
name='mbid',
field=models.UUIDField(blank=True, null=True),
),
]
|
<commit_before><commit_msg>Add migration file for the addition of andalusian instruments<commit_after>
|
# Generated by Django 2.1.7 on 2019-07-17 10:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('andalusian', '0004_auto_20190122_1554'),
]
operations = [
migrations.RemoveField(
model_name='instrument',
name='original_name',
),
migrations.AddField(
model_name='instrument',
name='mbid',
field=models.UUIDField(blank=True, null=True),
),
]
|
Add migration file for the addition of andalusian instruments
# Generated by Django 2.1.7 on 2019-07-17 10:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('andalusian', '0004_auto_20190122_1554'),
]
operations = [
migrations.RemoveField(
model_name='instrument',
name='original_name',
),
migrations.AddField(
model_name='instrument',
name='mbid',
field=models.UUIDField(blank=True, null=True),
),
]
|
<commit_before><commit_msg>Add migration file for the addition of andalusian instruments<commit_after># Generated by Django 2.1.7 on 2019-07-17 10:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('andalusian', '0004_auto_20190122_1554'),
]
operations = [
migrations.RemoveField(
model_name='instrument',
name='original_name',
),
migrations.AddField(
model_name='instrument',
name='mbid',
field=models.UUIDField(blank=True, null=True),
),
]
|
|
a2e16379067b37a12e147ee2b35fbebdadc17451
|
custom/icds/management/commands/copy_icds_app.py
|
custom/icds/management/commands/copy_icds_app.py
|
from __future__ import absolute_import, print_function, unicode_literals
from django.core.management import BaseCommand
from corehq.apps.app_manager.dbaccessors import get_build_doc_by_version, wrap_app
from corehq.apps.app_manager.models import import_app
class Command(BaseCommand):
help = "Make a copy of a specific version of an application on the same domain"
def add_arguments(self, parser):
parser.add_argument('domain')
parser.add_argument('app_id')
parser.add_argument('version')
parser.add_argument('new_name')
def handle(self, domain, app_id, version, new_name, **options):
old_app = get_build_doc_by_version(domain, app_id, version)
if not old_app:
raise Exception("No app found with id '{}' and version '{}', on '{}'"
.format(app_id, version, domain))
old_app = wrap_app(old_app)
old_app.convert_build_to_app()
new_app = import_app(old_app.to_json(), domain, source_properties={'name': new_name})
|
Add basic "copy_app" mgmt cmd
|
Add basic "copy_app" mgmt cmd
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
|
Add basic "copy_app" mgmt cmd
|
from __future__ import absolute_import, print_function, unicode_literals
from django.core.management import BaseCommand
from corehq.apps.app_manager.dbaccessors import get_build_doc_by_version, wrap_app
from corehq.apps.app_manager.models import import_app
class Command(BaseCommand):
help = "Make a copy of a specific version of an application on the same domain"
def add_arguments(self, parser):
parser.add_argument('domain')
parser.add_argument('app_id')
parser.add_argument('version')
parser.add_argument('new_name')
def handle(self, domain, app_id, version, new_name, **options):
old_app = get_build_doc_by_version(domain, app_id, version)
if not old_app:
raise Exception("No app found with id '{}' and version '{}', on '{}'"
.format(app_id, version, domain))
old_app = wrap_app(old_app)
old_app.convert_build_to_app()
new_app = import_app(old_app.to_json(), domain, source_properties={'name': new_name})
|
<commit_before><commit_msg>Add basic "copy_app" mgmt cmd<commit_after>
|
from __future__ import absolute_import, print_function, unicode_literals
from django.core.management import BaseCommand
from corehq.apps.app_manager.dbaccessors import get_build_doc_by_version, wrap_app
from corehq.apps.app_manager.models import import_app
class Command(BaseCommand):
help = "Make a copy of a specific version of an application on the same domain"
def add_arguments(self, parser):
parser.add_argument('domain')
parser.add_argument('app_id')
parser.add_argument('version')
parser.add_argument('new_name')
def handle(self, domain, app_id, version, new_name, **options):
old_app = get_build_doc_by_version(domain, app_id, version)
if not old_app:
raise Exception("No app found with id '{}' and version '{}', on '{}'"
.format(app_id, version, domain))
old_app = wrap_app(old_app)
old_app.convert_build_to_app()
new_app = import_app(old_app.to_json(), domain, source_properties={'name': new_name})
|
Add basic "copy_app" mgmt cmdfrom __future__ import absolute_import, print_function, unicode_literals
from django.core.management import BaseCommand
from corehq.apps.app_manager.dbaccessors import get_build_doc_by_version, wrap_app
from corehq.apps.app_manager.models import import_app
class Command(BaseCommand):
help = "Make a copy of a specific version of an application on the same domain"
def add_arguments(self, parser):
parser.add_argument('domain')
parser.add_argument('app_id')
parser.add_argument('version')
parser.add_argument('new_name')
def handle(self, domain, app_id, version, new_name, **options):
old_app = get_build_doc_by_version(domain, app_id, version)
if not old_app:
raise Exception("No app found with id '{}' and version '{}', on '{}'"
.format(app_id, version, domain))
old_app = wrap_app(old_app)
old_app.convert_build_to_app()
new_app = import_app(old_app.to_json(), domain, source_properties={'name': new_name})
|
<commit_before><commit_msg>Add basic "copy_app" mgmt cmd<commit_after>from __future__ import absolute_import, print_function, unicode_literals
from django.core.management import BaseCommand
from corehq.apps.app_manager.dbaccessors import get_build_doc_by_version, wrap_app
from corehq.apps.app_manager.models import import_app
class Command(BaseCommand):
help = "Make a copy of a specific version of an application on the same domain"
def add_arguments(self, parser):
parser.add_argument('domain')
parser.add_argument('app_id')
parser.add_argument('version')
parser.add_argument('new_name')
def handle(self, domain, app_id, version, new_name, **options):
old_app = get_build_doc_by_version(domain, app_id, version)
if not old_app:
raise Exception("No app found with id '{}' and version '{}', on '{}'"
.format(app_id, version, domain))
old_app = wrap_app(old_app)
old_app.convert_build_to_app()
new_app = import_app(old_app.to_json(), domain, source_properties={'name': new_name})
|
|
86ba77f9aa26ccc70d45596745b6532b5eeeff52
|
tools/security/check_message_owners.py
|
tools/security/check_message_owners.py
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Make sure all of the per-file *_messages.h OWNERS are consistent"""
import os
import re
import sys
def main():
file_path = os.path.dirname(__file__);
root_dir = os.path.abspath(os.path.join(file_path, '..', '..'))
owners = collect_owners(root_dir)
all_owners = get_all_owners(owners)
print_missing_owners(owners, all_owners)
return 0
def collect_owners(root_dir):
result = {}
for root, dirs, files in os.walk(root_dir):
if "OWNERS" in files:
owner_file_path = os.path.join(root, "OWNERS")
owner_set = extract_owners_from_file(owner_file_path)
if owner_set:
result[owner_file_path] = owner_set
return result
def extract_owners_from_file(owner_file_path):
result = set()
regexp = re.compile('^per-file.*_messages[^=]*=\s*(.*)@([^#]*)')
with open(owner_file_path) as f:
for line in f:
match = regexp.match(line)
if match:
result.add(match.group(1).strip())
return result
def get_all_owners(owner_dict):
result = set()
for key in owner_dict:
result = result.union(owner_dict[key])
return result
def print_missing_owners(owner_dict, owner_set):
for key in owner_dict:
for owner in owner_set:
if not owner in owner_dict[key]:
print key + " is missing " + owner
if '__main__' == __name__:
sys.exit(main())
|
Add script to tools/security to check consistency of message file owners.
|
Add script to tools/security to check consistency of message file owners.
Periodic use of this script will show places where the OWNERS files
have become inconsistent. This poses a problem for CL authors when
the reviewer they have selected doesn't cover all message files, so
they would have to add another.
I've stopped short of adding code to correct the files, this would
make a nice follow-up.
R=jschuh@chromium.org
BUG=
Review URL: https://codereview.chromium.org/138033013
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@245320 0039d316-1c4b-4281-b951-d872f2087c98
|
Python
|
bsd-3-clause
|
hgl888/chromium-crosswalk-efl,TheTypoMaster/chromium-crosswalk,ondra-novak/chromium.src,markYoungH/chromium.src,dushu1203/chromium.src,Pluto-tv/chromium-crosswalk,dushu1203/chromium.src,hgl888/chromium-crosswalk,hgl888/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,Fireblend/chromium-crosswalk,chuan9/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,jaruba/chromium.src,ondra-novak/chromium.src,PeterWangIntel/chromium-crosswalk,Chilledheart/chromium,ChromiumWebApps/chromium,PeterWangIntel/chromium-crosswalk,ChromiumWebApps/chromium,Chilledheart/chromium,anirudhSK/chromium,Just-D/chromium-1,ltilve/chromium,jaruba/chromium.src,mohamed--abdel-maksoud/chromium.src,PeterWangIntel/chromium-crosswalk,littlstar/chromium.src,axinging/chromium-crosswalk,hgl888/chromium-crosswalk,dednal/chromium.src,mohamed--abdel-maksoud/chromium.src,krieger-od/nwjs_chromium.src,bright-sparks/chromium-spacewalk,bright-sparks/chromium-spacewalk,markYoungH/chromium.src,fujunwei/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,Pluto-tv/chromium-crosswalk,markYoungH/chromium.src,Jonekee/chromium.src,krieger-od/nwjs_chromium.src,ondra-novak/chromium.src,dushu1203/chromium.src,axinging/chromium-crosswalk,bright-sparks/chromium-spacewalk,bright-sparks/chromium-spacewalk,Fireblend/chromium-crosswalk,Chilledheart/chromium,axinging/chromium-crosswalk,markYoungH/chromium.src,ChromiumWebApps/chromium,dednal/chromium.src,ChromiumWebApps/chromium,anirudhSK/chromium,M4sse/chromium.src,ondra-novak/chromium.src,mohamed--abdel-maksoud/chromium.src,crosswalk-project/chromium-crosswalk-efl,jaruba/chromium.src,Fireblend/chromium-crosswalk,Fireblend/chromium-crosswalk,Fireblend/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,Jonekee/chromium.src,fujunwei/chromium-crosswalk,ondra-novak/chromium.src,hgl888/chromium-crosswalk-efl,markYoungH/chromium.src,Chilledheart/chromium,fujunwei/chromium-crosswalk,Jonekee/chromium.src,anirudhSK/chromium,fujunwei/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,anirudhSK/chromium,Just-D/chromium-1,patrickm/chromium.src,chuan9/chromium-crosswalk,krieger-od/nwjs_chromium.src,Fireblend/chromium-crosswalk,fujunwei/chromium-crosswalk,axinging/chromium-crosswalk,hgl888/chromium-crosswalk,hgl888/chromium-crosswalk-efl,krieger-od/nwjs_chromium.src,crosswalk-project/chromium-crosswalk-efl,markYoungH/chromium.src,dednal/chromium.src,Just-D/chromium-1,TheTypoMaster/chromium-crosswalk,dushu1203/chromium.src,ChromiumWebApps/chromium,bright-sparks/chromium-spacewalk,hgl888/chromium-crosswalk,dushu1203/chromium.src,Chilledheart/chromium,crosswalk-project/chromium-crosswalk-efl,dednal/chromium.src,chuan9/chromium-crosswalk,dednal/chromium.src,axinging/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,ChromiumWebApps/chromium,ltilve/chromium,Jonekee/chromium.src,ChromiumWebApps/chromium,M4sse/chromium.src,ondra-novak/chromium.src,Just-D/chromium-1,hgl888/chromium-crosswalk-efl,ltilve/chromium,markYoungH/chromium.src,jaruba/chromium.src,axinging/chromium-crosswalk,markYoungH/chromium.src,crosswalk-project/chromium-crosswalk-efl,hgl888/chromium-crosswalk,jaruba/chromium.src,chuan9/chromium-crosswalk,hgl888/chromium-crosswalk,Chilledheart/chromium,ChromiumWebApps/chromium,patrickm/chromium.src,hgl888/chromium-crosswalk-efl,anirudhSK/chromium,M4sse/chromium.src,M4sse/chromium.src,ChromiumWebApps/chromium,crosswalk-project/chromium-crosswalk-efl,crosswalk-project/chromium-crosswalk-efl,M4sse/chromium.src,krieger-od/nwjs_chromium.src,littlstar/chromium.src,patrickm/chromium.src,patrickm/chrom
ium.src,littlstar/chromium.src,littlstar/chromium.src,TheTypoMaster/chromium-crosswalk,markYoungH/chromium.src,Pluto-tv/chromium-crosswalk,Chilledheart/chromium,krieger-od/nwjs_chromium.src,ChromiumWebApps/chromium,hgl888/chromium-crosswalk-efl,Pluto-tv/chromium-crosswalk,littlstar/chromium.src,TheTypoMaster/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,axinging/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,krieger-od/nwjs_chromium.src,mohamed--abdel-maksoud/chromium.src,Jonekee/chromium.src,bright-sparks/chromium-spacewalk,jaruba/chromium.src,axinging/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,patrickm/chromium.src,M4sse/chromium.src,Pluto-tv/chromium-crosswalk,anirudhSK/chromium,M4sse/chromium.src,anirudhSK/chromium,Just-D/chromium-1,Pluto-tv/chromium-crosswalk,krieger-od/nwjs_chromium.src,mohamed--abdel-maksoud/chromium.src,ltilve/chromium,markYoungH/chromium.src,chuan9/chromium-crosswalk,dushu1203/chromium.src,hgl888/chromium-crosswalk-efl,dushu1203/chromium.src,anirudhSK/chromium,ltilve/chromium,dednal/chromium.src,PeterWangIntel/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,ondra-novak/chromium.src,jaruba/chromium.src,crosswalk-project/chromium-crosswalk-efl,Fireblend/chromium-crosswalk,Jonekee/chromium.src,dushu1203/chromium.src,dushu1203/chromium.src,Chilledheart/chromium,PeterWangIntel/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,littlstar/chromium.src,ltilve/chromium,ondra-novak/chromium.src,anirudhSK/chromium,jaruba/chromium.src,Pluto-tv/chromium-crosswalk,M4sse/chromium.src,dednal/chromium.src,dednal/chromium.src,chuan9/chromium-crosswalk,Fireblend/chromium-crosswalk,jaruba/chromium.src,Fireblend/chromium-crosswalk,ltilve/chromium,jaruba/chromium.src,axinging/chromium-crosswalk,Just-D/chromium-1,anirudhSK/chromium,krieger-od/nwjs_chromium.src,littlstar/chromium.src,Jonekee/chromium.src,dednal/chromium.src,anirudhSK/chromium,Jonekee/chromium.src,hgl888/chromium-crosswalk-efl,patrickm/chromium.src,fujunwei/chromium-crosswalk,ltilve/chromium,M4sse/chromium.src,chuan9/chromium-crosswalk,patrickm/chromium.src,Jonekee/chromium.src,patrickm/chromium.src,Jonekee/chromium.src,hgl888/chromium-crosswalk-efl,Just-D/chromium-1,bright-sparks/chromium-spacewalk,patrickm/chromium.src,bright-sparks/chromium-spacewalk,dushu1203/chromium.src,Pluto-tv/chromium-crosswalk,markYoungH/chromium.src,ltilve/chromium,axinging/chromium-crosswalk,bright-sparks/chromium-spacewalk,Just-D/chromium-1,jaruba/chromium.src,anirudhSK/chromium,dednal/chromium.src,M4sse/chromium.src,ChromiumWebApps/chromium,M4sse/chromium.src,krieger-od/nwjs_chromium.src,fujunwei/chromium-crosswalk,krieger-od/nwjs_chromium.src,ondra-novak/chromium.src,fujunwei/chromium-crosswalk,chuan9/chromium-crosswalk,dushu1203/chromium.src,PeterWangIntel/chromium-crosswalk,fujunwei/chromium-crosswalk,Just-D/chromium-1,Pluto-tv/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,ChromiumWebApps/chromium,TheTypoMaster/chromium-crosswalk,littlstar/chromium.src,hgl888/chromium-crosswalk-efl,Jonekee/chromium.src,hgl888/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,axinging/chromium-crosswalk,dednal/chromium.src,Chilledheart/chromium,crosswalk-project/chromium-crosswalk-efl,hgl888/chromium-crosswalk,chuan9/chromium-crosswalk
|
Add script to tools/security to check consistency of message file owners.
Periodic use of this script will show places where the OWNERS files
have become inconsistent. This poses a problem for CL authors when
the reviewer they have selected doesn't cover all message files, so
they would have to add another.
I've stopped short of adding code to correct the files, this would
make a nice follow-up.
R=jschuh@chromium.org
BUG=
Review URL: https://codereview.chromium.org/138033013
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@245320 0039d316-1c4b-4281-b951-d872f2087c98
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Make sure all of the per-file *_messages.h OWNERS are consistent"""
import os
import re
import sys
def main():
file_path = os.path.dirname(__file__);
root_dir = os.path.abspath(os.path.join(file_path, '..', '..'))
owners = collect_owners(root_dir)
all_owners = get_all_owners(owners)
print_missing_owners(owners, all_owners)
return 0
def collect_owners(root_dir):
result = {}
for root, dirs, files in os.walk(root_dir):
if "OWNERS" in files:
owner_file_path = os.path.join(root, "OWNERS")
owner_set = extract_owners_from_file(owner_file_path)
if owner_set:
result[owner_file_path] = owner_set
return result
def extract_owners_from_file(owner_file_path):
result = set()
regexp = re.compile('^per-file.*_messages[^=]*=\s*(.*)@([^#]*)')
with open(owner_file_path) as f:
for line in f:
match = regexp.match(line)
if match:
result.add(match.group(1).strip())
return result
def get_all_owners(owner_dict):
result = set()
for key in owner_dict:
result = result.union(owner_dict[key])
return result
def print_missing_owners(owner_dict, owner_set):
for key in owner_dict:
for owner in owner_set:
if not owner in owner_dict[key]:
print key + " is missing " + owner
if '__main__' == __name__:
sys.exit(main())
|
<commit_before><commit_msg>Add script to tools/security to check consistency of message file owners.
Periodic use of this script will show places where the OWNERS files
have become inconsistent. This poses a problem for CL authors when
the reviewer they have selected doesn't cover all message files, so
they would have to add another.
I've stopped short of adding code to correct the files, this would
make a nice follow-up.
R=jschuh@chromium.org
BUG=
Review URL: https://codereview.chromium.org/138033013
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@245320 0039d316-1c4b-4281-b951-d872f2087c98<commit_after>
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Make sure all of the per-file *_messages.h OWNERS are consistent"""
import os
import re
import sys
def main():
file_path = os.path.dirname(__file__);
root_dir = os.path.abspath(os.path.join(file_path, '..', '..'))
owners = collect_owners(root_dir)
all_owners = get_all_owners(owners)
print_missing_owners(owners, all_owners)
return 0
def collect_owners(root_dir):
result = {}
for root, dirs, files in os.walk(root_dir):
if "OWNERS" in files:
owner_file_path = os.path.join(root, "OWNERS")
owner_set = extract_owners_from_file(owner_file_path)
if owner_set:
result[owner_file_path] = owner_set
return result
def extract_owners_from_file(owner_file_path):
result = set()
regexp = re.compile('^per-file.*_messages[^=]*=\s*(.*)@([^#]*)')
with open(owner_file_path) as f:
for line in f:
match = regexp.match(line)
if match:
result.add(match.group(1).strip())
return result
def get_all_owners(owner_dict):
result = set()
for key in owner_dict:
result = result.union(owner_dict[key])
return result
def print_missing_owners(owner_dict, owner_set):
for key in owner_dict:
for owner in owner_set:
if not owner in owner_dict[key]:
print key + " is missing " + owner
if '__main__' == __name__:
sys.exit(main())
|
Add script to tools/security to check consistency of message file owners.
Periodic use of this script will show places where the OWNERS files
have become inconsistent. This poses a problem for CL authors when
the reviewer they have selected doesn't cover all message files, so
they would have to add another.
I've stopped short of adding code to correct the files, this would
make a nice follow-up.
R=jschuh@chromium.org
BUG=
Review URL: https://codereview.chromium.org/138033013
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@245320 0039d316-1c4b-4281-b951-d872f2087c98#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Make sure all of the per-file *_messages.h OWNERS are consistent"""
import os
import re
import sys
def main():
file_path = os.path.dirname(__file__);
root_dir = os.path.abspath(os.path.join(file_path, '..', '..'))
owners = collect_owners(root_dir)
all_owners = get_all_owners(owners)
print_missing_owners(owners, all_owners)
return 0
def collect_owners(root_dir):
result = {}
for root, dirs, files in os.walk(root_dir):
if "OWNERS" in files:
owner_file_path = os.path.join(root, "OWNERS")
owner_set = extract_owners_from_file(owner_file_path)
if owner_set:
result[owner_file_path] = owner_set
return result
def extract_owners_from_file(owner_file_path):
result = set()
regexp = re.compile('^per-file.*_messages[^=]*=\s*(.*)@([^#]*)')
with open(owner_file_path) as f:
for line in f:
match = regexp.match(line)
if match:
result.add(match.group(1).strip())
return result
def get_all_owners(owner_dict):
result = set()
for key in owner_dict:
result = result.union(owner_dict[key])
return result
def print_missing_owners(owner_dict, owner_set):
for key in owner_dict:
for owner in owner_set:
if not owner in owner_dict[key]:
print key + " is missing " + owner
if '__main__' == __name__:
sys.exit(main())
|
<commit_before><commit_msg>Add script to tools/security to check consistency of message file owners.
Periodic use of this script will show places where the OWNERS files
have become inconsistent. This poses a problem for CL authors when
the reviewer they have selected doesn't cover all message files, so
they would have to add another.
I've stopped short of adding code to correct the files, this would
make a nice follow-up.
R=jschuh@chromium.org
BUG=
Review URL: https://codereview.chromium.org/138033013
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@245320 0039d316-1c4b-4281-b951-d872f2087c98<commit_after>#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Make sure all of the per-file *_messages.h OWNERS are consistent"""
import os
import re
import sys
def main():
file_path = os.path.dirname(__file__);
root_dir = os.path.abspath(os.path.join(file_path, '..', '..'))
owners = collect_owners(root_dir)
all_owners = get_all_owners(owners)
print_missing_owners(owners, all_owners)
return 0
def collect_owners(root_dir):
result = {}
for root, dirs, files in os.walk(root_dir):
if "OWNERS" in files:
owner_file_path = os.path.join(root, "OWNERS")
owner_set = extract_owners_from_file(owner_file_path)
if owner_set:
result[owner_file_path] = owner_set
return result
def extract_owners_from_file(owner_file_path):
result = set()
regexp = re.compile('^per-file.*_messages[^=]*=\s*(.*)@([^#]*)')
with open(owner_file_path) as f:
for line in f:
match = regexp.match(line)
if match:
result.add(match.group(1).strip())
return result
def get_all_owners(owner_dict):
result = set()
for key in owner_dict:
result = result.union(owner_dict[key])
return result
def print_missing_owners(owner_dict, owner_set):
for key in owner_dict:
for owner in owner_set:
if not owner in owner_dict[key]:
print key + " is missing " + owner
if '__main__' == __name__:
sys.exit(main())
|
|
4887243e578e175ee97740cc275a69f32fedd669
|
test_display.py
|
test_display.py
|
# -*- coding: utf-8 -*-
"""
Tests for board display engine
"""
import pytest
from fen import Position
from display import render_ascii_board
@pytest.fixture
def samples():
board_samples = [
[
'rnbqkbnr',
'pppppppp',
' ',
' ',
' ',
' ',
'PPPPPPPP',
'RNBQKBNR',
]
]
expected_output = {
'ascii':
""" ---------------------------------
8 | r | n | b | q | k | b | n | r |
|-------------------------------|
7 | p | p | p | p | p | p | p | p |
|-------------------------------|
6 | | | | | | | | |
|-------------------------------|
5 | | | | | | | | |
|-------------------------------|
4 | | | | | | | | |
|-------------------------------|
3 | | | | | | | | |
|-------------------------------|
2 | P | P | P | P | P | P | P | P |
|-------------------------------|
1 | R | N | B | Q | K | B | N | R |
---------------------------------
a b c d e f g h
"""
}
return { 'boards': board_samples, 'output': expected_output }
def test_ascii_display(samples):
print("%r" % render_ascii_board(samples['boards'][0]))
print("%r" % samples['output']['ascii'])
assert render_ascii_board(samples['boards'][0]) == samples['output']['ascii']
|
Add test for display module.
|
Add test for display module.
|
Python
|
mit
|
gisraptor/fen-parser
|
Add test for display module.
|
# -*- coding: utf-8 -*-
"""
Tests for board display engine
"""
import pytest
from fen import Position
from display import render_ascii_board
@pytest.fixture
def samples():
board_samples = [
[
'rnbqkbnr',
'pppppppp',
' ',
' ',
' ',
' ',
'PPPPPPPP',
'RNBQKBNR',
]
]
expected_output = {
'ascii':
""" ---------------------------------
8 | r | n | b | q | k | b | n | r |
|-------------------------------|
7 | p | p | p | p | p | p | p | p |
|-------------------------------|
6 | | | | | | | | |
|-------------------------------|
5 | | | | | | | | |
|-------------------------------|
4 | | | | | | | | |
|-------------------------------|
3 | | | | | | | | |
|-------------------------------|
2 | P | P | P | P | P | P | P | P |
|-------------------------------|
1 | R | N | B | Q | K | B | N | R |
---------------------------------
a b c d e f g h
"""
}
return { 'boards': board_samples, 'output': expected_output }
def test_ascii_display(samples):
print("%r" % render_ascii_board(samples['boards'][0]))
print("%r" % samples['output']['ascii'])
assert render_ascii_board(samples['boards'][0]) == samples['output']['ascii']
|
<commit_before><commit_msg>Add test for display module.<commit_after>
|
# -*- coding: utf-8 -*-
"""
Tests for board display engine
"""
import pytest
from fen import Position
from display import render_ascii_board
@pytest.fixture
def samples():
board_samples = [
[
'rnbqkbnr',
'pppppppp',
' ',
' ',
' ',
' ',
'PPPPPPPP',
'RNBQKBNR',
]
]
expected_output = {
'ascii':
""" ---------------------------------
8 | r | n | b | q | k | b | n | r |
|-------------------------------|
7 | p | p | p | p | p | p | p | p |
|-------------------------------|
6 | | | | | | | | |
|-------------------------------|
5 | | | | | | | | |
|-------------------------------|
4 | | | | | | | | |
|-------------------------------|
3 | | | | | | | | |
|-------------------------------|
2 | P | P | P | P | P | P | P | P |
|-------------------------------|
1 | R | N | B | Q | K | B | N | R |
---------------------------------
a b c d e f g h
"""
}
return { 'boards': board_samples, 'output': expected_output }
def test_ascii_display(samples):
print("%r" % render_ascii_board(samples['boards'][0]))
print("%r" % samples['output']['ascii'])
assert render_ascii_board(samples['boards'][0]) == samples['output']['ascii']
|
Add test for display module.# -*- coding: utf-8 -*-
"""
Tests for board display engine
"""
import pytest
from fen import Position
from display import render_ascii_board
@pytest.fixture
def samples():
board_samples = [
[
'rnbqkbnr',
'pppppppp',
' ',
' ',
' ',
' ',
'PPPPPPPP',
'RNBQKBNR',
]
]
expected_output = {
'ascii':
""" ---------------------------------
8 | r | n | b | q | k | b | n | r |
|-------------------------------|
7 | p | p | p | p | p | p | p | p |
|-------------------------------|
6 | | | | | | | | |
|-------------------------------|
5 | | | | | | | | |
|-------------------------------|
4 | | | | | | | | |
|-------------------------------|
3 | | | | | | | | |
|-------------------------------|
2 | P | P | P | P | P | P | P | P |
|-------------------------------|
1 | R | N | B | Q | K | B | N | R |
---------------------------------
a b c d e f g h
"""
}
return { 'boards': board_samples, 'output': expected_output }
def test_ascii_display(samples):
print("%r" % render_ascii_board(samples['boards'][0]))
print("%r" % samples['output']['ascii'])
assert render_ascii_board(samples['boards'][0]) == samples['output']['ascii']
|
<commit_before><commit_msg>Add test for display module.<commit_after># -*- coding: utf-8 -*-
"""
Tests for board display engine
"""
import pytest
from fen import Position
from display import render_ascii_board
@pytest.fixture
def samples():
board_samples = [
[
'rnbqkbnr',
'pppppppp',
' ',
' ',
' ',
' ',
'PPPPPPPP',
'RNBQKBNR',
]
]
expected_output = {
'ascii':
""" ---------------------------------
8 | r | n | b | q | k | b | n | r |
|-------------------------------|
7 | p | p | p | p | p | p | p | p |
|-------------------------------|
6 | | | | | | | | |
|-------------------------------|
5 | | | | | | | | |
|-------------------------------|
4 | | | | | | | | |
|-------------------------------|
3 | | | | | | | | |
|-------------------------------|
2 | P | P | P | P | P | P | P | P |
|-------------------------------|
1 | R | N | B | Q | K | B | N | R |
---------------------------------
a b c d e f g h
"""
}
return { 'boards': board_samples, 'output': expected_output }
def test_ascii_display(samples):
print("%r" % render_ascii_board(samples['boards'][0]))
print("%r" % samples['output']['ascii'])
assert render_ascii_board(samples['boards'][0]) == samples['output']['ascii']
|
|
cdc40da26edfcb00a1da3125a925232fc947d143
|
test_fractal.py
|
test_fractal.py
|
#!/usr/bin/env py.test
# -*- coding: utf-8 -*-
# Created on Fri Apr 25 02:33:04 2014
# License is MIT, see COPYING.txt for more details.
# @author: Danilo de Jesus da Silva Bellini
import os, re, pytest
from fractal import generate_fractal, call_kw, cli_parse_args
from io import BytesIO
from pylab import imread, imsave
def show_parameters(fname):
""" String with CLI args to show the fractal with the given ``fname`` """
re_complex = re.compile("(?:([+-]?\s*[0-9.]+))?\s*"
"(?:([+-]\s*[0-9.]+)\s*)?(.*)")
def part_generator():
for part in fname.rsplit(".", 1)[0].split("_"):
if "=" in part:
yield "--" + part
else:
yield " ".join(filter(lambda x: x, re_complex.match(part).groups()))
yield "--show"
return " ".join(part_generator())
def to_dict_params(fname):
""" Get full kwargs from file name """
return cli_parse_args(show_parameters(fname).split())
@pytest.mark.parametrize("fname", os.listdir("images"))
def test_file_image(fname):
ext = os.path.splitext(fname)[-1][len(os.path.extsep):]
kwargs = to_dict_params(fname)
# Creates the image in memory
mem = BytesIO()
fractal_data = call_kw(generate_fractal, kwargs)
imsave(mem, fractal_data, cmap=kwargs["cmap"], format=ext)
    mem.seek(0) # Return stream position back for reading
# Comparison pixel-by-pixel
img_file = imread("images/" + fname)
img_mem = imread(mem, format=ext)
assert img_file.tolist() == img_mem.tolist()
|
Test with each example image in the repository
|
Test with each example image in the repository
|
Python
|
mit
|
danilobellini/fractals
|
Test with each example image in the repository
|
#!/usr/bin/env py.test
# -*- coding: utf-8 -*-
# Created on Fri Apr 25 02:33:04 2014
# License is MIT, see COPYING.txt for more details.
# @author: Danilo de Jesus da Silva Bellini
import os, re, pytest
from fractal import generate_fractal, call_kw, cli_parse_args
from io import BytesIO
from pylab import imread, imsave
def show_parameters(fname):
""" String with CLI args to show the fractal with the given ``fname`` """
re_complex = re.compile("(?:([+-]?\s*[0-9.]+))?\s*"
"(?:([+-]\s*[0-9.]+)\s*)?(.*)")
def part_generator():
for part in fname.rsplit(".", 1)[0].split("_"):
if "=" in part:
yield "--" + part
else:
yield " ".join(filter(lambda x: x, re_complex.match(part).groups()))
yield "--show"
return " ".join(part_generator())
def to_dict_params(fname):
""" Get full kwargs from file name """
return cli_parse_args(show_parameters(fname).split())
@pytest.mark.parametrize("fname", os.listdir("images"))
def test_file_image(fname):
ext = os.path.splitext(fname)[-1][len(os.path.extsep):]
kwargs = to_dict_params(fname)
# Creates the image in memory
mem = BytesIO()
fractal_data = call_kw(generate_fractal, kwargs)
imsave(mem, fractal_data, cmap=kwargs["cmap"], format=ext)
    mem.seek(0) # Return stream position back for reading
# Comparison pixel-by-pixel
img_file = imread("images/" + fname)
img_mem = imread(mem, format=ext)
assert img_file.tolist() == img_mem.tolist()
|
<commit_before><commit_msg>Test with each example image in the repository<commit_after>
|
#!/usr/bin/env py.test
# -*- coding: utf-8 -*-
# Created on Fri Apr 25 02:33:04 2014
# License is MIT, see COPYING.txt for more details.
# @author: Danilo de Jesus da Silva Bellini
import os, re, pytest
from fractal import generate_fractal, call_kw, cli_parse_args
from io import BytesIO
from pylab import imread, imsave
def show_parameters(fname):
""" String with CLI args to show the fractal with the given ``fname`` """
re_complex = re.compile("(?:([+-]?\s*[0-9.]+))?\s*"
"(?:([+-]\s*[0-9.]+)\s*)?(.*)")
def part_generator():
for part in fname.rsplit(".", 1)[0].split("_"):
if "=" in part:
yield "--" + part
else:
yield " ".join(filter(lambda x: x, re_complex.match(part).groups()))
yield "--show"
return " ".join(part_generator())
def to_dict_params(fname):
""" Get full kwargs from file name """
return cli_parse_args(show_parameters(fname).split())
@pytest.mark.parametrize("fname", os.listdir("images"))
def test_file_image(fname):
ext = os.path.splitext(fname)[-1][len(os.path.extsep):]
kwargs = to_dict_params(fname)
# Creates the image in memory
mem = BytesIO()
fractal_data = call_kw(generate_fractal, kwargs)
imsave(mem, fractal_data, cmap=kwargs["cmap"], format=ext)
    mem.seek(0) # Return stream position back for reading
# Comparison pixel-by-pixel
img_file = imread("images/" + fname)
img_mem = imread(mem, format=ext)
assert img_file.tolist() == img_mem.tolist()
|
Test with each example image in the repository#!/usr/bin/env py.test
# -*- coding: utf-8 -*-
# Created on Fri Apr 25 02:33:04 2014
# License is MIT, see COPYING.txt for more details.
# @author: Danilo de Jesus da Silva Bellini
import os, re, pytest
from fractal import generate_fractal, call_kw, cli_parse_args
from io import BytesIO
from pylab import imread, imsave
def show_parameters(fname):
""" String with CLI args to show the fractal with the given ``fname`` """
re_complex = re.compile("(?:([+-]?\s*[0-9.]+))?\s*"
"(?:([+-]\s*[0-9.]+)\s*)?(.*)")
def part_generator():
for part in fname.rsplit(".", 1)[0].split("_"):
if "=" in part:
yield "--" + part
else:
yield " ".join(filter(lambda x: x, re_complex.match(part).groups()))
yield "--show"
return " ".join(part_generator())
def to_dict_params(fname):
""" Get full kwargs from file name """
return cli_parse_args(show_parameters(fname).split())
@pytest.mark.parametrize("fname", os.listdir("images"))
def test_file_image(fname):
ext = os.path.splitext(fname)[-1][len(os.path.extsep):]
kwargs = to_dict_params(fname)
# Creates the image in memory
mem = BytesIO()
fractal_data = call_kw(generate_fractal, kwargs)
imsave(mem, fractal_data, cmap=kwargs["cmap"], format=ext)
    mem.seek(0) # Return stream position back for reading
# Comparison pixel-by-pixel
img_file = imread("images/" + fname)
img_mem = imread(mem, format=ext)
assert img_file.tolist() == img_mem.tolist()
|
<commit_before><commit_msg>Test with each example image in the repository<commit_after>#!/usr/bin/env py.test
# -*- coding: utf-8 -*-
# Created on Fri Apr 25 02:33:04 2014
# License is MIT, see COPYING.txt for more details.
# @author: Danilo de Jesus da Silva Bellini
import os, re, pytest
from fractal import generate_fractal, call_kw, cli_parse_args
from io import BytesIO
from pylab import imread, imsave
def show_parameters(fname):
""" String with CLI args to show the fractal with the given ``fname`` """
re_complex = re.compile("(?:([+-]?\s*[0-9.]+))?\s*"
"(?:([+-]\s*[0-9.]+)\s*)?(.*)")
def part_generator():
for part in fname.rsplit(".", 1)[0].split("_"):
if "=" in part:
yield "--" + part
else:
yield " ".join(filter(lambda x: x, re_complex.match(part).groups()))
yield "--show"
return " ".join(part_generator())
def to_dict_params(fname):
""" Get full kwargs from file name """
return cli_parse_args(show_parameters(fname).split())
@pytest.mark.parametrize("fname", os.listdir("images"))
def test_file_image(fname):
ext = os.path.splitext(fname)[-1][len(os.path.extsep):]
kwargs = to_dict_params(fname)
# Creates the image in memory
mem = BytesIO()
fractal_data = call_kw(generate_fractal, kwargs)
imsave(mem, fractal_data, cmap=kwargs["cmap"], format=ext)
    mem.seek(0) # Return stream position back for reading
# Comparison pixel-by-pixel
img_file = imread("images/" + fname)
img_mem = imread(mem, format=ext)
assert img_file.tolist() == img_mem.tolist()
|
|
00a312cd7e4928d859dbe7cfc68719d814439b1f
|
thinglang/tl.py
|
thinglang/tl.py
|
import click
from thinglang import run
@click.command()
@click.argument('file', type=click.File('r'))
def thinglang(file):
source = file.read()
run(source)
if __name__ == '__main__':
thinglang()
|
Add simple wrapper script for command line usage
|
Add simple wrapper script for command line usage
|
Python
|
mit
|
ytanay/thinglang,ytanay/thinglang,ytanay/thinglang,ytanay/thinglang
|
Add simple wrapper script for command line usage
|
import click
from thinglang import run
@click.command()
@click.argument('file', type=click.File('r'))
def thinglang(file):
source = file.read()
run(source)
if __name__ == '__main__':
thinglang()
|
<commit_before><commit_msg>Add simple wrapper script for command line usage<commit_after>
|
import click
from thinglang import run
@click.command()
@click.argument('file', type=click.File('r'))
def thinglang(file):
source = file.read()
run(source)
if __name__ == '__main__':
thinglang()
|
Add simple wrapper script for command line usageimport click
from thinglang import run
@click.command()
@click.argument('file', type=click.File('r'))
def thinglang(file):
source = file.read()
run(source)
if __name__ == '__main__':
thinglang()
|
<commit_before><commit_msg>Add simple wrapper script for command line usage<commit_after>import click
from thinglang import run
@click.command()
@click.argument('file', type=click.File('r'))
def thinglang(file):
source = file.read()
run(source)
if __name__ == '__main__':
thinglang()
|
|
74798cdc6a68e30fcb0583f69fd9dd361608988b
|
tests/rules_tests/isValid_tests/ValidRulesTest.py
|
tests/rules_tests/isValid_tests/ValidRulesTest.py
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import main, TestCase
from grammpy import Rule
class ValidRulesTest(TestCase):
pass
if __name__ == '__main__':
main()
|
Create file for positive is_valid tests
|
Create file for positive is_valid tests
|
Python
|
mit
|
PatrikValkovic/grammpy
|
Create file for positive is_valid tests
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import main, TestCase
from grammpy import Rule
class ValidRulesTest(TestCase):
pass
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Create file for positive is_valid tests<commit_after>
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import main, TestCase
from grammpy import Rule
class ValidRulesTest(TestCase):
pass
if __name__ == '__main__':
main()
|
Create file for positive is_valid tests#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import main, TestCase
from grammpy import Rule
class ValidRulesTest(TestCase):
pass
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Create file for positive is_valid tests<commit_after>#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import main, TestCase
from grammpy import Rule
class ValidRulesTest(TestCase):
pass
if __name__ == '__main__':
main()
|
|
bb3e67e317eb97929f8dcb79184afee767b4caae
|
genealogio/migrations/0005_auto_20150223_2319.py
|
genealogio/migrations/0005_auto_20150223_2319.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import partialdate.fields
class Migration(migrations.Migration):
dependencies = [
('genealogio', '0004_auto_20150220_1242'),
]
operations = [
migrations.AlterField(
model_name='family',
name='end_date',
field=partialdate.fields.PartialDateField(null=True, verbose_name='Enddatum', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='family',
name='father',
field=models.ForeignKey(related_name='father_ref', verbose_name='Vater', blank=True, to='genealogio.Person', null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='family',
name='mother',
field=models.ForeignKey(related_name='mother_ref', verbose_name='Mutter', blank=True, to='genealogio.Person', null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='family',
name='start_date',
field=partialdate.fields.PartialDateField(null=True, verbose_name='Anfangsdatum', blank=True),
preserve_default=True,
),
]
|
Add migration (only changes verbose names).
|
Add migration (only changes verbose names).
|
Python
|
bsd-3-clause
|
ugoertz/django-familio,ugoertz/django-familio,ugoertz/django-familio,ugoertz/django-familio
|
Add migration (only changes verbose names).
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import partialdate.fields
class Migration(migrations.Migration):
dependencies = [
('genealogio', '0004_auto_20150220_1242'),
]
operations = [
migrations.AlterField(
model_name='family',
name='end_date',
field=partialdate.fields.PartialDateField(null=True, verbose_name='Enddatum', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='family',
name='father',
field=models.ForeignKey(related_name='father_ref', verbose_name='Vater', blank=True, to='genealogio.Person', null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='family',
name='mother',
field=models.ForeignKey(related_name='mother_ref', verbose_name='Mutter', blank=True, to='genealogio.Person', null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='family',
name='start_date',
field=partialdate.fields.PartialDateField(null=True, verbose_name='Anfangsdatum', blank=True),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add migration (only changes verbose names).<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import partialdate.fields
class Migration(migrations.Migration):
dependencies = [
('genealogio', '0004_auto_20150220_1242'),
]
operations = [
migrations.AlterField(
model_name='family',
name='end_date',
field=partialdate.fields.PartialDateField(null=True, verbose_name='Enddatum', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='family',
name='father',
field=models.ForeignKey(related_name='father_ref', verbose_name='Vater', blank=True, to='genealogio.Person', null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='family',
name='mother',
field=models.ForeignKey(related_name='mother_ref', verbose_name='Mutter', blank=True, to='genealogio.Person', null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='family',
name='start_date',
field=partialdate.fields.PartialDateField(null=True, verbose_name='Anfangsdatum', blank=True),
preserve_default=True,
),
]
|
Add migration (only changes verbose names).# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import partialdate.fields
class Migration(migrations.Migration):
dependencies = [
('genealogio', '0004_auto_20150220_1242'),
]
operations = [
migrations.AlterField(
model_name='family',
name='end_date',
field=partialdate.fields.PartialDateField(null=True, verbose_name='Enddatum', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='family',
name='father',
field=models.ForeignKey(related_name='father_ref', verbose_name='Vater', blank=True, to='genealogio.Person', null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='family',
name='mother',
field=models.ForeignKey(related_name='mother_ref', verbose_name='Mutter', blank=True, to='genealogio.Person', null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='family',
name='start_date',
field=partialdate.fields.PartialDateField(null=True, verbose_name='Anfangsdatum', blank=True),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add migration (only changes verbose names).<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import partialdate.fields
class Migration(migrations.Migration):
dependencies = [
('genealogio', '0004_auto_20150220_1242'),
]
operations = [
migrations.AlterField(
model_name='family',
name='end_date',
field=partialdate.fields.PartialDateField(null=True, verbose_name='Enddatum', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='family',
name='father',
field=models.ForeignKey(related_name='father_ref', verbose_name='Vater', blank=True, to='genealogio.Person', null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='family',
name='mother',
field=models.ForeignKey(related_name='mother_ref', verbose_name='Mutter', blank=True, to='genealogio.Person', null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='family',
name='start_date',
field=partialdate.fields.PartialDateField(null=True, verbose_name='Anfangsdatum', blank=True),
preserve_default=True,
),
]
|
|
8e328a265e624c3dad6ea702d61475308bbce1ff
|
examples/heap_snapshot.py
|
examples/heap_snapshot.py
|
#!/usr/bin/env python3
# https://chromedevtools.github.io/debugger-protocol-viewer/tot/HeapProfiler/
import json
import requests
import selenium
import websocket
from devtools.proxy import CHROME_WRAPPER_PATH
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
if __name__ == '__main__':
devtools_proxy_port = 9222
desired_capabilities = DesiredCapabilities.CHROME.copy()
desired_capabilities['chromeOptions'] = {
'binary': CHROME_WRAPPER_PATH,
'args': [
'--devtools-proxy-binary=devtools-proxy',
'--devtools-proxy-args=--port {}'.format(devtools_proxy_port),
]
}
driver = selenium.webdriver.Chrome(desired_capabilities=desired_capabilities)
try:
tabs = requests.get('http://localhost:{}/json/list'.format(devtools_proxy_port)).json()
tab = next(tab for tab in tabs if tab.get('type') == 'page')
devtools_url = tab['webSocketDebuggerUrl']
driver.get('https://google.co.uk')
ws = websocket.create_connection(devtools_url)
data = {
"method": "HeapProfiler.enable",
"params": {},
"id": 0,
}
ws.send(json.dumps(data))
ws.recv()
data = {
"method": "HeapProfiler.takeHeapSnapshot",
"params": {},
"id": 0,
}
ws.send(json.dumps(data))
heap_data = ''
while True:
raw_data = ws.recv()
result = json.loads(raw_data)
if result.get('id') == 0:
break
if result.get('method') == 'HeapProfiler.addHeapSnapshotChunk':
heap_data += result['params']['chunk']
ws.close()
with open('example.heapsnapshot', 'w') as f:
f.write(heap_data)
finally:
driver.quit()
|
Add example for taking heap snapshot
|
Add example for taking heap snapshot
|
Python
|
mit
|
bayandin/devtools-proxy,bayandin/devtools-proxy
|
Add example for taking heap snapshot
|
#!/usr/bin/env python3
# https://chromedevtools.github.io/debugger-protocol-viewer/tot/HeapProfiler/
import json
import requests
import selenium
import websocket
from devtools.proxy import CHROME_WRAPPER_PATH
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
if __name__ == '__main__':
devtools_proxy_port = 9222
desired_capabilities = DesiredCapabilities.CHROME.copy()
desired_capabilities['chromeOptions'] = {
'binary': CHROME_WRAPPER_PATH,
'args': [
'--devtools-proxy-binary=devtools-proxy',
'--devtools-proxy-args=--port {}'.format(devtools_proxy_port),
]
}
driver = selenium.webdriver.Chrome(desired_capabilities=desired_capabilities)
try:
tabs = requests.get('http://localhost:{}/json/list'.format(devtools_proxy_port)).json()
tab = next(tab for tab in tabs if tab.get('type') == 'page')
devtools_url = tab['webSocketDebuggerUrl']
driver.get('https://google.co.uk')
ws = websocket.create_connection(devtools_url)
data = {
"method": "HeapProfiler.enable",
"params": {},
"id": 0,
}
ws.send(json.dumps(data))
ws.recv()
data = {
"method": "HeapProfiler.takeHeapSnapshot",
"params": {},
"id": 0,
}
ws.send(json.dumps(data))
heap_data = ''
while True:
raw_data = ws.recv()
result = json.loads(raw_data)
if result.get('id') == 0:
break
if result.get('method') == 'HeapProfiler.addHeapSnapshotChunk':
heap_data += result['params']['chunk']
ws.close()
with open('example.heapsnapshot', 'w') as f:
f.write(heap_data)
finally:
driver.quit()
|
<commit_before><commit_msg>Add example for taking heap snapshot<commit_after>
|
#!/usr/bin/env python3
# https://chromedevtools.github.io/debugger-protocol-viewer/tot/HeapProfiler/
import json
import requests
import selenium
import websocket
from devtools.proxy import CHROME_WRAPPER_PATH
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
if __name__ == '__main__':
devtools_proxy_port = 9222
desired_capabilities = DesiredCapabilities.CHROME.copy()
desired_capabilities['chromeOptions'] = {
'binary': CHROME_WRAPPER_PATH,
'args': [
'--devtools-proxy-binary=devtools-proxy',
'--devtools-proxy-args=--port {}'.format(devtools_proxy_port),
]
}
driver = selenium.webdriver.Chrome(desired_capabilities=desired_capabilities)
try:
tabs = requests.get('http://localhost:{}/json/list'.format(devtools_proxy_port)).json()
tab = next(tab for tab in tabs if tab.get('type') == 'page')
devtools_url = tab['webSocketDebuggerUrl']
driver.get('https://google.co.uk')
ws = websocket.create_connection(devtools_url)
data = {
"method": "HeapProfiler.enable",
"params": {},
"id": 0,
}
ws.send(json.dumps(data))
ws.recv()
data = {
"method": "HeapProfiler.takeHeapSnapshot",
"params": {},
"id": 0,
}
ws.send(json.dumps(data))
heap_data = ''
while True:
raw_data = ws.recv()
result = json.loads(raw_data)
if result.get('id') == 0:
break
if result.get('method') == 'HeapProfiler.addHeapSnapshotChunk':
heap_data += result['params']['chunk']
ws.close()
with open('example.heapsnapshot', 'w') as f:
f.write(heap_data)
finally:
driver.quit()
|
Add example for taking heap snapshot#!/usr/bin/env python3
# https://chromedevtools.github.io/debugger-protocol-viewer/tot/HeapProfiler/
import json
import requests
import selenium
import websocket
from devtools.proxy import CHROME_WRAPPER_PATH
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
if __name__ == '__main__':
devtools_proxy_port = 9222
desired_capabilities = DesiredCapabilities.CHROME.copy()
desired_capabilities['chromeOptions'] = {
'binary': CHROME_WRAPPER_PATH,
'args': [
'--devtools-proxy-binary=devtools-proxy',
'--devtools-proxy-args=--port {}'.format(devtools_proxy_port),
]
}
driver = selenium.webdriver.Chrome(desired_capabilities=desired_capabilities)
try:
tabs = requests.get('http://localhost:{}/json/list'.format(devtools_proxy_port)).json()
tab = next(tab for tab in tabs if tab.get('type') == 'page')
devtools_url = tab['webSocketDebuggerUrl']
driver.get('https://google.co.uk')
ws = websocket.create_connection(devtools_url)
data = {
"method": "HeapProfiler.enable",
"params": {},
"id": 0,
}
ws.send(json.dumps(data))
ws.recv()
data = {
"method": "HeapProfiler.takeHeapSnapshot",
"params": {},
"id": 0,
}
ws.send(json.dumps(data))
heap_data = ''
while True:
raw_data = ws.recv()
result = json.loads(raw_data)
if result.get('id') == 0:
break
if result.get('method') == 'HeapProfiler.addHeapSnapshotChunk':
heap_data += result['params']['chunk']
ws.close()
with open('example.heapsnapshot', 'w') as f:
f.write(heap_data)
finally:
driver.quit()
|
<commit_before><commit_msg>Add example for taking heap snapshot<commit_after>#!/usr/bin/env python3
# https://chromedevtools.github.io/debugger-protocol-viewer/tot/HeapProfiler/
import json
import requests
import selenium
import websocket
from devtools.proxy import CHROME_WRAPPER_PATH
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
if __name__ == '__main__':
devtools_proxy_port = 9222
desired_capabilities = DesiredCapabilities.CHROME.copy()
desired_capabilities['chromeOptions'] = {
'binary': CHROME_WRAPPER_PATH,
'args': [
'--devtools-proxy-binary=devtools-proxy',
'--devtools-proxy-args=--port {}'.format(devtools_proxy_port),
]
}
driver = selenium.webdriver.Chrome(desired_capabilities=desired_capabilities)
try:
tabs = requests.get('http://localhost:{}/json/list'.format(devtools_proxy_port)).json()
tab = next(tab for tab in tabs if tab.get('type') == 'page')
devtools_url = tab['webSocketDebuggerUrl']
driver.get('https://google.co.uk')
ws = websocket.create_connection(devtools_url)
data = {
"method": "HeapProfiler.enable",
"params": {},
"id": 0,
}
ws.send(json.dumps(data))
ws.recv()
data = {
"method": "HeapProfiler.takeHeapSnapshot",
"params": {},
"id": 0,
}
ws.send(json.dumps(data))
heap_data = ''
while True:
raw_data = ws.recv()
result = json.loads(raw_data)
if result.get('id') == 0:
break
if result.get('method') == 'HeapProfiler.addHeapSnapshotChunk':
heap_data += result['params']['chunk']
ws.close()
with open('example.heapsnapshot', 'w') as f:
f.write(heap_data)
finally:
driver.quit()
|
|
30bb17a94cb6c5c31b3e6f87a9b6f17fc86012ba
|
non-edx-people.py
|
non-edx-people.py
|
"""Produce a list of the emails of all non-edX people in people.yaml"""
import yaml
with open("people.yaml") as people_yaml:
people = yaml.load(people_yaml)
non_edx = (e for e in people.itervalues() if e.get('institution') != 'edX')
emails = (e['authors_entry'].partition('<')[2].strip(">") for e in non_edx)
print ",\n".join(em for em in emails if em.strip())
|
Make a list of all non-edX people from people.yaml
|
Make a list of all non-edX people from people.yaml
|
Python
|
apache-2.0
|
edx/repo-tools,edx/repo-tools
|
Make a list of all non-edX people from people.yaml
|
"""Produce a list of the emails of all non-edX people in people.yaml"""
import yaml
with open("people.yaml") as people_yaml:
people = yaml.load(people_yaml)
non_edx = (e for e in people.itervalues() if e.get('institution') != 'edX')
emails = (e['authors_entry'].partition('<')[2].strip(">") for e in non_edx)
print ",\n".join(em for em in emails if em.strip())
|
<commit_before><commit_msg>Make a list of all non-edX people from people.yaml<commit_after>
|
"""Produce a list of the emails of all non-edX people in people.yaml"""
import yaml
with open("people.yaml") as people_yaml:
people = yaml.load(people_yaml)
non_edx = (e for e in people.itervalues() if e.get('institution') != 'edX')
emails = (e['authors_entry'].partition('<')[2].strip(">") for e in non_edx)
print ",\n".join(em for em in emails if em.strip())
|
Make a list of all non-edX people from people.yaml"""Produce a list of the emails of all non-edX people in people.yaml"""
import yaml
with open("people.yaml") as people_yaml:
people = yaml.load(people_yaml)
non_edx = (e for e in people.itervalues() if e.get('institution') != 'edX')
emails = (e['authors_entry'].partition('<')[2].strip(">") for e in non_edx)
print ",\n".join(em for em in emails if em.strip())
|
<commit_before><commit_msg>Make a list of all non-edX people from people.yaml<commit_after>"""Produce a list of the emails of all non-edX people in people.yaml"""
import yaml
with open("people.yaml") as people_yaml:
people = yaml.load(people_yaml)
non_edx = (e for e in people.itervalues() if e.get('institution') != 'edX')
emails = (e['authors_entry'].partition('<')[2].strip(">") for e in non_edx)
print ",\n".join(em for em in emails if em.strip())
|
|
3ceac19fb4321669e9b0129f84a4da418c62bb2e
|
temba/msgs/migrations/0048_auto_20160308_2131.py
|
temba/msgs/migrations/0048_auto_20160308_2131.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
INDEX_SQL = """
DO $$
BEGIN
IF NOT EXISTS (
SELECT 1 FROM pg_class c JOIN pg_namespace n ON n.oid = c.relnamespace
WHERE c.relname = 'msgs_msg_responded_to_not_null' AND n.nspname = 'public') THEN
CREATE INDEX msgs_msg_responded_to_not_null ON msgs_msg (response_to_id) WHERE response_to_id IS NOT NULL;
END IF;
END$$;"""
class Migration(migrations.Migration):
dependencies = [
('msgs', '0047_auto_20160307_1919'),
]
operations = [
migrations.RunSQL(INDEX_SQL),
]
|
Use partial index for responded_to_id
|
Use partial index for responded_to_id
|
Python
|
agpl-3.0
|
pulilab/rapidpro,tsotetsi/textily-web,ewheeler/rapidpro,pulilab/rapidpro,ewheeler/rapidpro,tsotetsi/textily-web,pulilab/rapidpro,tsotetsi/textily-web,ewheeler/rapidpro,pulilab/rapidpro,ewheeler/rapidpro,tsotetsi/textily-web,pulilab/rapidpro,tsotetsi/textily-web
|
Use partial index for responded_to_id
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
INDEX_SQL = """
DO $$
BEGIN
IF NOT EXISTS (
SELECT 1 FROM pg_class c JOIN pg_namespace n ON n.oid = c.relnamespace
WHERE c.relname = 'msgs_msg_responded_to_not_null' AND n.nspname = 'public') THEN
CREATE INDEX msgs_msg_responded_to_not_null ON msgs_msg (response_to_id) WHERE response_to_id IS NOT NULL;
END IF;
END$$;"""
class Migration(migrations.Migration):
dependencies = [
('msgs', '0047_auto_20160307_1919'),
]
operations = [
migrations.RunSQL(INDEX_SQL),
]
|
<commit_before><commit_msg>Use partial index for responded_to_id<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
INDEX_SQL = """
DO $$
BEGIN
IF NOT EXISTS (
SELECT 1 FROM pg_class c JOIN pg_namespace n ON n.oid = c.relnamespace
WHERE c.relname = 'msgs_msg_responded_to_not_null' AND n.nspname = 'public') THEN
CREATE INDEX msgs_msg_responded_to_not_null ON msgs_msg (response_to_id) WHERE response_to_id IS NOT NULL;
END IF;
END$$;"""
class Migration(migrations.Migration):
dependencies = [
('msgs', '0047_auto_20160307_1919'),
]
operations = [
migrations.RunSQL(INDEX_SQL),
]
|
Use partial index for responded_to_id# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
INDEX_SQL = """
DO $$
BEGIN
IF NOT EXISTS (
SELECT 1 FROM pg_class c JOIN pg_namespace n ON n.oid = c.relnamespace
WHERE c.relname = 'msgs_msg_responded_to_not_null' AND n.nspname = 'public') THEN
CREATE INDEX msgs_msg_responded_to_not_null ON msgs_msg (response_to_id) WHERE response_to_id IS NOT NULL;
END IF;
END$$;"""
class Migration(migrations.Migration):
dependencies = [
('msgs', '0047_auto_20160307_1919'),
]
operations = [
migrations.RunSQL(INDEX_SQL),
]
|
<commit_before><commit_msg>Use partial index for responded_to_id<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
INDEX_SQL = """
DO $$
BEGIN
IF NOT EXISTS (
SELECT 1 FROM pg_class c JOIN pg_namespace n ON n.oid = c.relnamespace
WHERE c.relname = 'msgs_msg_responded_to_not_null' AND n.nspname = 'public') THEN
CREATE INDEX msgs_msg_responded_to_not_null ON msgs_msg (response_to_id) WHERE response_to_id IS NOT NULL;
END IF;
END$$;"""
class Migration(migrations.Migration):
dependencies = [
('msgs', '0047_auto_20160307_1919'),
]
operations = [
migrations.RunSQL(INDEX_SQL),
]
|
|
dffb641b91e8f02750b4b925a7e2eb0d30a2e96d
|
plexus/.ycm_extra_conf.py
|
plexus/.ycm_extra_conf.py
|
def Settings(**kwargs):
return {
'flags': [
'-pthread',
'-DNDEBUG',
'-g',
'-fwrapv',
            '-O2',
'-Wall',
'-g',
'-fstack-protector-strong',
'-Wformat',
'-Werror=format-security',
'-Wdate-time',
'-D_FORTIFY_SOURCE=2',
'-fPIC',
'-I/usr/include/python3.6m',
'-w',
],
}
|
Add a YouCompleteMe configuration file that contains C-family compilations tags specific to Plexus
|
Add a YouCompleteMe configuration file that contains C-family compilations tags specific to Plexus
|
Python
|
mit
|
mertyildiran/Plexus,mertyildiran/Plexus
|
Add a YouCompleteMe configuration file that contains C-family compilation flags specific to Plexus
|
def Settings(**kwargs):
return {
'flags': [
'-pthread',
'-DNDEBUG',
'-g',
'-fwrapv',
            '-O2',
'-Wall',
'-g',
'-fstack-protector-strong',
'-Wformat',
'-Werror=format-security',
'-Wdate-time',
'-D_FORTIFY_SOURCE=2',
'-fPIC',
'-I/usr/include/python3.6m',
'-w',
],
}
|
<commit_before><commit_msg>Add a YouCompleteMe configuration file that contains C-family compilation flags specific to Plexus<commit_after>
|
def Settings(**kwargs):
return {
'flags': [
'-pthread',
'-DNDEBUG',
'-g',
'-fwrapv',
            '-O2',
'-Wall',
'-g',
'-fstack-protector-strong',
'-Wformat',
'-Werror=format-security',
'-Wdate-time',
'-D_FORTIFY_SOURCE=2',
'-fPIC',
'-I/usr/include/python3.6m',
'-w',
],
}
|
Add a YouCompleteMe configuration file that contains C-family compilation flags specific to Plexusdef Settings(**kwargs):
return {
'flags': [
'-pthread',
'-DNDEBUG',
'-g',
'-fwrapv',
            '-O2',
'-Wall',
'-g',
'-fstack-protector-strong',
'-Wformat',
'-Werror=format-security',
'-Wdate-time',
'-D_FORTIFY_SOURCE=2',
'-fPIC',
'-I/usr/include/python3.6m',
'-w',
],
}
|
<commit_before><commit_msg>Add a YouCompleteMe configuration file that contains C-family compilation flags specific to Plexus<commit_after>def Settings(**kwargs):
return {
'flags': [
'-pthread',
'-DNDEBUG',
'-g',
'-fwrapv',
            '-O2',
'-Wall',
'-g',
'-fstack-protector-strong',
'-Wformat',
'-Werror=format-security',
'-Wdate-time',
'-D_FORTIFY_SOURCE=2',
'-fPIC',
'-I/usr/include/python3.6m',
'-w',
],
}
|
|
ae558a68765cd884f1d702ebe613c173faaff062
|
st2common/tests/unit/test_json_schema.py
|
st2common/tests/unit/test_json_schema.py
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest2 import TestCase
from jsonschema.exceptions import ValidationError
from st2common.util import schema as util_schema
TEST_SCHEMA_1 = {
'additionalProperties': False,
'title': 'foo',
'description': 'Foo.',
'type': 'object',
'properties': {
'cmd_no_default': {
'description': 'Foo',
'required': True,
'type': 'string'
}
}
}
TEST_SCHEMA_2 = {
'additionalProperties': False,
'title': 'foo',
'description': 'Foo.',
'type': 'object',
'properties': {
'cmd_default': {
'default': 'date',
'description': 'Foo',
'required': True,
'type': 'string'
}
}
}
class JSONSchemaTestCase(TestCase):
def test_use_default_value(self):
# No default, no value provided, should fail
instance = {}
validator = util_schema.get_validator()
expected_msg = '\'cmd_no_default\' is a required property'
self.assertRaisesRegexp(ValidationError, expected_msg, util_schema.validate,
instance=instance, schema=TEST_SCHEMA_1, cls=validator,
use_default=True)
# No default, value provided
instance = {'cmd_no_default': 'foo'}
util_schema.validate(instance=instance, schema=TEST_SCHEMA_1, cls=validator,
use_default=True)
# default value provided, no value, should pass
instance = {}
validator = util_schema.get_validator()
util_schema.validate(instance=instance, schema=TEST_SCHEMA_2, cls=validator,
use_default=True)
# default value provided, value provided, should pass
instance = {'cmd_default': 'foo'}
validator = util_schema.get_validator()
util_schema.validate(instance=instance, schema=TEST_SCHEMA_2, cls=validator,
use_default=True)
|
Add test case for "use_default".
|
Add test case for "use_default".
|
Python
|
apache-2.0
|
Itxaka/st2,tonybaloney/st2,Itxaka/st2,StackStorm/st2,pinterb/st2,nzlosh/st2,StackStorm/st2,Plexxi/st2,nzlosh/st2,StackStorm/st2,grengojbo/st2,grengojbo/st2,jtopjian/st2,peak6/st2,emedvedev/st2,alfasin/st2,emedvedev/st2,nzlosh/st2,Plexxi/st2,nzlosh/st2,lakshmi-kannan/st2,emedvedev/st2,StackStorm/st2,pixelrebel/st2,dennybaa/st2,armab/st2,Plexxi/st2,armab/st2,Itxaka/st2,pixelrebel/st2,pixelrebel/st2,lakshmi-kannan/st2,lakshmi-kannan/st2,armab/st2,tonybaloney/st2,peak6/st2,tonybaloney/st2,punalpatel/st2,peak6/st2,grengojbo/st2,pinterb/st2,jtopjian/st2,alfasin/st2,jtopjian/st2,pinterb/st2,dennybaa/st2,punalpatel/st2,dennybaa/st2,Plexxi/st2,punalpatel/st2,alfasin/st2
|
Add test case for "use_default".
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest2 import TestCase
from jsonschema.exceptions import ValidationError
from st2common.util import schema as util_schema
TEST_SCHEMA_1 = {
'additionalProperties': False,
'title': 'foo',
'description': 'Foo.',
'type': 'object',
'properties': {
'cmd_no_default': {
'description': 'Foo',
'required': True,
'type': 'string'
}
}
}
TEST_SCHEMA_2 = {
'additionalProperties': False,
'title': 'foo',
'description': 'Foo.',
'type': 'object',
'properties': {
'cmd_default': {
'default': 'date',
'description': 'Foo',
'required': True,
'type': 'string'
}
}
}
class JSONSchemaTestCase(TestCase):
def test_use_default_value(self):
# No default, no value provided, should fail
instance = {}
validator = util_schema.get_validator()
expected_msg = '\'cmd_no_default\' is a required property'
self.assertRaisesRegexp(ValidationError, expected_msg, util_schema.validate,
instance=instance, schema=TEST_SCHEMA_1, cls=validator,
use_default=True)
# No default, value provided
instance = {'cmd_no_default': 'foo'}
util_schema.validate(instance=instance, schema=TEST_SCHEMA_1, cls=validator,
use_default=True)
# default value provided, no value, should pass
instance = {}
validator = util_schema.get_validator()
util_schema.validate(instance=instance, schema=TEST_SCHEMA_2, cls=validator,
use_default=True)
# default value provided, value provided, should pass
instance = {'cmd_default': 'foo'}
validator = util_schema.get_validator()
util_schema.validate(instance=instance, schema=TEST_SCHEMA_2, cls=validator,
use_default=True)
|
<commit_before><commit_msg>Add test case for "use_default".<commit_after>
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest2 import TestCase
from jsonschema.exceptions import ValidationError
from st2common.util import schema as util_schema
TEST_SCHEMA_1 = {
'additionalProperties': False,
'title': 'foo',
'description': 'Foo.',
'type': 'object',
'properties': {
'cmd_no_default': {
'description': 'Foo',
'required': True,
'type': 'string'
}
}
}
TEST_SCHEMA_2 = {
'additionalProperties': False,
'title': 'foo',
'description': 'Foo.',
'type': 'object',
'properties': {
'cmd_default': {
'default': 'date',
'description': 'Foo',
'required': True,
'type': 'string'
}
}
}
class JSONSchemaTestCase(TestCase):
def test_use_default_value(self):
# No default, no value provided, should fail
instance = {}
validator = util_schema.get_validator()
expected_msg = '\'cmd_no_default\' is a required property'
self.assertRaisesRegexp(ValidationError, expected_msg, util_schema.validate,
instance=instance, schema=TEST_SCHEMA_1, cls=validator,
use_default=True)
# No default, value provided
instance = {'cmd_no_default': 'foo'}
util_schema.validate(instance=instance, schema=TEST_SCHEMA_1, cls=validator,
use_default=True)
# default value provided, no value, should pass
instance = {}
validator = util_schema.get_validator()
util_schema.validate(instance=instance, schema=TEST_SCHEMA_2, cls=validator,
use_default=True)
# default value provided, value provided, should pass
instance = {'cmd_default': 'foo'}
validator = util_schema.get_validator()
util_schema.validate(instance=instance, schema=TEST_SCHEMA_2, cls=validator,
use_default=True)
|
Add test case for "use_default".# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest2 import TestCase
from jsonschema.exceptions import ValidationError
from st2common.util import schema as util_schema
TEST_SCHEMA_1 = {
'additionalProperties': False,
'title': 'foo',
'description': 'Foo.',
'type': 'object',
'properties': {
'cmd_no_default': {
'description': 'Foo',
'required': True,
'type': 'string'
}
}
}
TEST_SCHEMA_2 = {
'additionalProperties': False,
'title': 'foo',
'description': 'Foo.',
'type': 'object',
'properties': {
'cmd_default': {
'default': 'date',
'description': 'Foo',
'required': True,
'type': 'string'
}
}
}
class JSONSchemaTestCase(TestCase):
def test_use_default_value(self):
# No default, no value provided, should fail
instance = {}
validator = util_schema.get_validator()
expected_msg = '\'cmd_no_default\' is a required property'
self.assertRaisesRegexp(ValidationError, expected_msg, util_schema.validate,
instance=instance, schema=TEST_SCHEMA_1, cls=validator,
use_default=True)
# No default, value provided
instance = {'cmd_no_default': 'foo'}
util_schema.validate(instance=instance, schema=TEST_SCHEMA_1, cls=validator,
use_default=True)
# default value provided, no value, should pass
instance = {}
validator = util_schema.get_validator()
util_schema.validate(instance=instance, schema=TEST_SCHEMA_2, cls=validator,
use_default=True)
# default value provided, value provided, should pass
instance = {'cmd_default': 'foo'}
validator = util_schema.get_validator()
util_schema.validate(instance=instance, schema=TEST_SCHEMA_2, cls=validator,
use_default=True)
|
<commit_before><commit_msg>Add test case for "use_default".<commit_after># Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest2 import TestCase
from jsonschema.exceptions import ValidationError
from st2common.util import schema as util_schema
TEST_SCHEMA_1 = {
'additionalProperties': False,
'title': 'foo',
'description': 'Foo.',
'type': 'object',
'properties': {
'cmd_no_default': {
'description': 'Foo',
'required': True,
'type': 'string'
}
}
}
TEST_SCHEMA_2 = {
'additionalProperties': False,
'title': 'foo',
'description': 'Foo.',
'type': 'object',
'properties': {
'cmd_default': {
'default': 'date',
'description': 'Foo',
'required': True,
'type': 'string'
}
}
}
class JSONSchemaTestCase(TestCase):
def test_use_default_value(self):
# No default, no value provided, should fail
instance = {}
validator = util_schema.get_validator()
expected_msg = '\'cmd_no_default\' is a required property'
self.assertRaisesRegexp(ValidationError, expected_msg, util_schema.validate,
instance=instance, schema=TEST_SCHEMA_1, cls=validator,
use_default=True)
# No default, value provided
instance = {'cmd_no_default': 'foo'}
util_schema.validate(instance=instance, schema=TEST_SCHEMA_1, cls=validator,
use_default=True)
# default value provided, no value, should pass
instance = {}
validator = util_schema.get_validator()
util_schema.validate(instance=instance, schema=TEST_SCHEMA_2, cls=validator,
use_default=True)
# default value provided, value provided, should pass
instance = {'cmd_default': 'foo'}
validator = util_schema.get_validator()
util_schema.validate(instance=instance, schema=TEST_SCHEMA_2, cls=validator,
use_default=True)
|
|
4d547c1ed3c5d28770b75f183e9c34c0083e0f7e
|
tempest/api/compute/volumes/test_attach_volume_negative.py
|
tempest/api/compute/volumes/test_attach_volume_negative.py
|
# Copyright 2016 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import config
from tempest.lib import exceptions as lib_exc
from tempest import test
CONF = config.CONF
class AttachVolumeNegativeTest(base.BaseV2ComputeTest):
@classmethod
def skip_checks(cls):
super(AttachVolumeNegativeTest, cls).skip_checks()
if not CONF.service_available.cinder:
skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
raise cls.skipException(skip_msg)
@test.idempotent_id('a313b5cd-fbd0-49cc-94de-870e99f763c7')
def test_delete_attached_volume(self):
server = self.create_test_server(wait_until='ACTIVE')
volume = self.create_volume()
path = "/dev/%s" % CONF.compute.volume_device_name
self.attach_volume(server, volume, device=path)
self.assertRaises(lib_exc.BadRequest,
self.delete_volume, volume['id'])
|
Add negative tests for deleting attached volume
|
Add negative tests for deleting attached volume
Due to a lack of error handling, Nova's delete-volume API returns
HTTP500 error when deleting an attached volume.
This patch adds the corresponding test for reproducing the problem
on the gate and blocking the same issue in the future.
Change-Id: Idb6267be770bcf2541595babebf269cdc71c2b8d
Depends-On: Ia07556b2dc18678baa4c8fbd65820d8047362ef9
Related-Bug: #1630783
|
Python
|
apache-2.0
|
openstack/tempest,Juniper/tempest,vedujoshi/tempest,Tesora/tesora-tempest,openstack/tempest,vedujoshi/tempest,sebrandon1/tempest,Tesora/tesora-tempest,masayukig/tempest,sebrandon1/tempest,cisco-openstack/tempest,cisco-openstack/tempest,masayukig/tempest,Juniper/tempest
|
Add negative tests for deleting attached volume
Due to a lack of error handling, Nova's delete-volume API returns
HTTP500 error when deleting an attached volume.
This patch adds the corresponding test for reproducing the problem
on the gate and blocking the same issue in the future.
Change-Id: Idb6267be770bcf2541595babebf269cdc71c2b8d
Depends-On: Ia07556b2dc18678baa4c8fbd65820d8047362ef9
Related-Bug: #1630783
|
# Copyright 2016 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import config
from tempest.lib import exceptions as lib_exc
from tempest import test
CONF = config.CONF
class AttachVolumeNegativeTest(base.BaseV2ComputeTest):
@classmethod
def skip_checks(cls):
super(AttachVolumeNegativeTest, cls).skip_checks()
if not CONF.service_available.cinder:
skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
raise cls.skipException(skip_msg)
@test.idempotent_id('a313b5cd-fbd0-49cc-94de-870e99f763c7')
def test_delete_attached_volume(self):
server = self.create_test_server(wait_until='ACTIVE')
volume = self.create_volume()
path = "/dev/%s" % CONF.compute.volume_device_name
self.attach_volume(server, volume, device=path)
self.assertRaises(lib_exc.BadRequest,
self.delete_volume, volume['id'])
|
<commit_before><commit_msg>Add negative tests for deleting attached volume
Due to a lack of error handling, Nova's delete-volume API returns
HTTP500 error when deleting an attached volume.
This patch adds the corresponding test for reproducing the problem
on the gate and blocking the same issue in the future.
Change-Id: Idb6267be770bcf2541595babebf269cdc71c2b8d
Depends-On: Ia07556b2dc18678baa4c8fbd65820d8047362ef9
Related-Bug: #1630783<commit_after>
|
# Copyright 2016 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import config
from tempest.lib import exceptions as lib_exc
from tempest import test
CONF = config.CONF
class AttachVolumeNegativeTest(base.BaseV2ComputeTest):
@classmethod
def skip_checks(cls):
super(AttachVolumeNegativeTest, cls).skip_checks()
if not CONF.service_available.cinder:
skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
raise cls.skipException(skip_msg)
@test.idempotent_id('a313b5cd-fbd0-49cc-94de-870e99f763c7')
def test_delete_attached_volume(self):
server = self.create_test_server(wait_until='ACTIVE')
volume = self.create_volume()
path = "/dev/%s" % CONF.compute.volume_device_name
self.attach_volume(server, volume, device=path)
self.assertRaises(lib_exc.BadRequest,
self.delete_volume, volume['id'])
|
Add negative tests for deleting attached volume
Due to a lack of error handling, Nova's delete-volume API returns
HTTP500 error when deleting an attached volume.
This patch adds the corresponding test for reproducing the problem
on the gate and blocking the same issue in the future.
Change-Id: Idb6267be770bcf2541595babebf269cdc71c2b8d
Depends-On: Ia07556b2dc18678baa4c8fbd65820d8047362ef9
Related-Bug: #1630783# Copyright 2016 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import config
from tempest.lib import exceptions as lib_exc
from tempest import test
CONF = config.CONF
class AttachVolumeNegativeTest(base.BaseV2ComputeTest):
@classmethod
def skip_checks(cls):
super(AttachVolumeNegativeTest, cls).skip_checks()
if not CONF.service_available.cinder:
skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
raise cls.skipException(skip_msg)
@test.idempotent_id('a313b5cd-fbd0-49cc-94de-870e99f763c7')
def test_delete_attached_volume(self):
server = self.create_test_server(wait_until='ACTIVE')
volume = self.create_volume()
path = "/dev/%s" % CONF.compute.volume_device_name
self.attach_volume(server, volume, device=path)
self.assertRaises(lib_exc.BadRequest,
self.delete_volume, volume['id'])
|
<commit_before><commit_msg>Add negative tests for deleting attached volume
Due to a lack of error handling, Nova's delete-volume API returns
HTTP500 error when deleting an attached volume.
This patch adds the corresponding test for reproducing the problem
on the gate and blocking the same issue in the future.
Change-Id: Idb6267be770bcf2541595babebf269cdc71c2b8d
Depends-On: Ia07556b2dc18678baa4c8fbd65820d8047362ef9
Related-Bug: #1630783<commit_after># Copyright 2016 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import config
from tempest.lib import exceptions as lib_exc
from tempest import test
CONF = config.CONF
class AttachVolumeNegativeTest(base.BaseV2ComputeTest):
@classmethod
def skip_checks(cls):
super(AttachVolumeNegativeTest, cls).skip_checks()
if not CONF.service_available.cinder:
skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
raise cls.skipException(skip_msg)
@test.idempotent_id('a313b5cd-fbd0-49cc-94de-870e99f763c7')
def test_delete_attached_volume(self):
server = self.create_test_server(wait_until='ACTIVE')
volume = self.create_volume()
path = "/dev/%s" % CONF.compute.volume_device_name
self.attach_volume(server, volume, device=path)
self.assertRaises(lib_exc.BadRequest,
self.delete_volume, volume['id'])
|
|
6c293b42bdae81082c73754b8cf2d8616cbf1ec2
|
test/integration/ggrc/converters/test_base_block.py
|
test/integration/ggrc/converters/test_base_block.py
|
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
import mock
from collections import defaultdict
from ddt import data, ddt
from ggrc.converters import base_block
from ggrc import models
from integration.ggrc import TestCase
from integration.ggrc.models import factories
@ddt
class TestBaseBlock(TestCase):
@staticmethod
def dd_to_dict(ddict):
return {k: dict(v) for k, v in ddict.items()}
@data(0, 1, 2, 4)
def test_create_mapping_cache(self, count):
regulations = [factories.RegulationFactory() for _ in range(count)]
markets = [factories.MarketFactory() for _ in range(count)]
controls = [factories.ControlFactory() for _ in range(count)]
expected_cache = defaultdict(lambda: defaultdict(list))
for i in range(count):
for j in range(i):
factories.RelationshipFactory(
source=regulations[j] if i % 2 == 0 else markets[i],
destination=regulations[j] if i % 2 == 1 else markets[i],
)
factories.RelationshipFactory(
source=regulations[j] if i % 2 == 0 else controls[i],
destination=regulations[j] if i % 2 == 1 else controls[i],
)
expected_cache[regulations[j].id]["Control"].append(
controls[i].slug
)
expected_cache[regulations[j].id]["Market"].append(
markets[i].slug
)
block = base_block.BlockConverter(mock.MagicMock())
block.object_class = models.Regulation
block.object_ids = [r.id for r in regulations]
cache = block._create_mapping_cache()
self.assertEqual(
self.dd_to_dict(cache),
self.dd_to_dict(expected_cache),
)
|
Add export mapping cache tests
|
Add export mapping cache tests
|
Python
|
apache-2.0
|
AleksNeStu/ggrc-core,AleksNeStu/ggrc-core,AleksNeStu/ggrc-core,AleksNeStu/ggrc-core
|
Add export mapping cache tests
|
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
import mock
from collections import defaultdict
from ddt import data, ddt
from ggrc.converters import base_block
from ggrc import models
from integration.ggrc import TestCase
from integration.ggrc.models import factories
@ddt
class TestBaseBlock(TestCase):
@staticmethod
def dd_to_dict(ddict):
return {k: dict(v) for k, v in ddict.items()}
@data(0, 1, 2, 4)
def test_create_mapping_cache(self, count):
regulations = [factories.RegulationFactory() for _ in range(count)]
markets = [factories.MarketFactory() for _ in range(count)]
controls = [factories.ControlFactory() for _ in range(count)]
expected_cache = defaultdict(lambda: defaultdict(list))
for i in range(count):
for j in range(i):
factories.RelationshipFactory(
source=regulations[j] if i % 2 == 0 else markets[i],
destination=regulations[j] if i % 2 == 1 else markets[i],
)
factories.RelationshipFactory(
source=regulations[j] if i % 2 == 0 else controls[i],
destination=regulations[j] if i % 2 == 1 else controls[i],
)
expected_cache[regulations[j].id]["Control"].append(
controls[i].slug
)
expected_cache[regulations[j].id]["Market"].append(
markets[i].slug
)
block = base_block.BlockConverter(mock.MagicMock())
block.object_class = models.Regulation
block.object_ids = [r.id for r in regulations]
cache = block._create_mapping_cache()
self.assertEqual(
self.dd_to_dict(cache),
self.dd_to_dict(expected_cache),
)
|
<commit_before><commit_msg>Add export mapping cache tests<commit_after>
|
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
import mock
from collections import defaultdict
from ddt import data, ddt
from ggrc.converters import base_block
from ggrc import models
from integration.ggrc import TestCase
from integration.ggrc.models import factories
@ddt
class TestBaseBlock(TestCase):
@staticmethod
def dd_to_dict(ddict):
return {k: dict(v) for k, v in ddict.items()}
@data(0, 1, 2, 4)
def test_create_mapping_cache(self, count):
regulations = [factories.RegulationFactory() for _ in range(count)]
markets = [factories.MarketFactory() for _ in range(count)]
controls = [factories.ControlFactory() for _ in range(count)]
expected_cache = defaultdict(lambda: defaultdict(list))
for i in range(count):
for j in range(i):
factories.RelationshipFactory(
source=regulations[j] if i % 2 == 0 else markets[i],
destination=regulations[j] if i % 2 == 1 else markets[i],
)
factories.RelationshipFactory(
source=regulations[j] if i % 2 == 0 else controls[i],
destination=regulations[j] if i % 2 == 1 else controls[i],
)
expected_cache[regulations[j].id]["Control"].append(
controls[i].slug
)
expected_cache[regulations[j].id]["Market"].append(
markets[i].slug
)
block = base_block.BlockConverter(mock.MagicMock())
block.object_class = models.Regulation
block.object_ids = [r.id for r in regulations]
cache = block._create_mapping_cache()
self.assertEqual(
self.dd_to_dict(cache),
self.dd_to_dict(expected_cache),
)
|
Add export mapping cache tests# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
import mock
from collections import defaultdict
from ddt import data, ddt
from ggrc.converters import base_block
from ggrc import models
from integration.ggrc import TestCase
from integration.ggrc.models import factories
@ddt
class TestBaseBlock(TestCase):
@staticmethod
def dd_to_dict(ddict):
return {k: dict(v) for k, v in ddict.items()}
@data(0, 1, 2, 4)
def test_create_mapping_cache(self, count):
regulations = [factories.RegulationFactory() for _ in range(count)]
markets = [factories.MarketFactory() for _ in range(count)]
controls = [factories.ControlFactory() for _ in range(count)]
expected_cache = defaultdict(lambda: defaultdict(list))
for i in range(count):
for j in range(i):
factories.RelationshipFactory(
source=regulations[j] if i % 2 == 0 else markets[i],
destination=regulations[j] if i % 2 == 1 else markets[i],
)
factories.RelationshipFactory(
source=regulations[j] if i % 2 == 0 else controls[i],
destination=regulations[j] if i % 2 == 1 else controls[i],
)
expected_cache[regulations[j].id]["Control"].append(
controls[i].slug
)
expected_cache[regulations[j].id]["Market"].append(
markets[i].slug
)
block = base_block.BlockConverter(mock.MagicMock())
block.object_class = models.Regulation
block.object_ids = [r.id for r in regulations]
cache = block._create_mapping_cache()
self.assertEqual(
self.dd_to_dict(cache),
self.dd_to_dict(expected_cache),
)
|
<commit_before><commit_msg>Add export mapping cache tests<commit_after># Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
import mock
from collections import defaultdict
from ddt import data, ddt
from ggrc.converters import base_block
from ggrc import models
from integration.ggrc import TestCase
from integration.ggrc.models import factories
@ddt
class TestBaseBlock(TestCase):
@staticmethod
def dd_to_dict(ddict):
return {k: dict(v) for k, v in ddict.items()}
@data(0, 1, 2, 4)
def test_create_mapping_cache(self, count):
regulations = [factories.RegulationFactory() for _ in range(count)]
markets = [factories.MarketFactory() for _ in range(count)]
controls = [factories.ControlFactory() for _ in range(count)]
expected_cache = defaultdict(lambda: defaultdict(list))
for i in range(count):
for j in range(i):
factories.RelationshipFactory(
source=regulations[j] if i % 2 == 0 else markets[i],
destination=regulations[j] if i % 2 == 1 else markets[i],
)
factories.RelationshipFactory(
source=regulations[j] if i % 2 == 0 else controls[i],
destination=regulations[j] if i % 2 == 1 else controls[i],
)
expected_cache[regulations[j].id]["Control"].append(
controls[i].slug
)
expected_cache[regulations[j].id]["Market"].append(
markets[i].slug
)
block = base_block.BlockConverter(mock.MagicMock())
block.object_class = models.Regulation
block.object_ids = [r.id for r in regulations]
cache = block._create_mapping_cache()
self.assertEqual(
self.dd_to_dict(cache),
self.dd_to_dict(expected_cache),
)
|
|
3e509b8350c79d1f65dee45faac025e3942fa80f
|
test/test_builder.py
|
test/test_builder.py
|
"""
test.test_builder
~~~~~~~~~~~~~~~~~
This is the unit test file for site builder.
:Copyright: Copyright 2014 Yang LIU <zesikliu@gmail.com>
:License: BSD, see LICENSE for details.
"""
import unittest
import datetime
import io
from zkb.builder import FileProcessor, SiteBuilder
from zkb.config import SiteConfig
class TestSiteBuilder(unittest.TestCase):
class MockFileProcessor(FileProcessor):
def get_article_files(self, dirname, ignored_dirs=None):
return [('article1.md', 'article1', datetime.datetime(2002, 1, 5),
'yaml', None),
('article2.md', 'article2', datetime.datetime(2002, 2, 1),
'yaml', None),
('article2.md', 'article2', datetime.datetime(2002, 3, 1),
'yaml', None)]
def read(self, filename):
output = io.BytesIO()
if filename == 'article1.md':
data = (u'title: Test\n'
u'tag: tag1\n'
u'\nContent')
elif filename == 'article2.md':
data = (u'title: Test2\n'
u'tag: tag1, tag2\n'
u'\nContent')
else:
data = (u'title: Test3\n'
u'date: 2002/1/30\n'
u'tag: tag2, tag3\n'
u'\nContent')
output.write(data.encode('utf-8', 'replace'))
return output
def write(self, filename, encoding, content):
pass
def write_stream(self, filename, stream):
pass
def copy_file(self, source, destination):
pass
def exists(self, file):
return True
class MockSiteBuilder(SiteBuilder):
def _do_build(self):
return self.config
def test_site_builder(self):
config = SiteConfig()
config.site_builder = "test.test_builder/" \
"TestSiteBuilder.MockSiteBuilder"
config = SiteBuilder.from_config(
config, TestSiteBuilder.MockFileProcessor()).build()
self.assertEqual(len(config.articles_by_date), 3,
'article count mismatch')
|
Add a skeleton of builder test.
|
Add a skeleton of builder test.
|
Python
|
bsd-3-clause
|
zesik/zkb
|
Add a skeleton of builder test.
|
"""
test.test_builder
~~~~~~~~~~~~~~~~~
This is the unit test file for site builder.
:Copyright: Copyright 2014 Yang LIU <zesikliu@gmail.com>
:License: BSD, see LICENSE for details.
"""
import unittest
import datetime
import io
from zkb.builder import FileProcessor, SiteBuilder
from zkb.config import SiteConfig
class TestSiteBuilder(unittest.TestCase):
class MockFileProcessor(FileProcessor):
def get_article_files(self, dirname, ignored_dirs=None):
return [('article1.md', 'article1', datetime.datetime(2002, 1, 5),
'yaml', None),
('article2.md', 'article2', datetime.datetime(2002, 2, 1),
'yaml', None),
('article2.md', 'article2', datetime.datetime(2002, 3, 1),
'yaml', None)]
def read(self, filename):
output = io.BytesIO()
if filename == 'article1.md':
data = (u'title: Test\n'
u'tag: tag1\n'
u'\nContent')
elif filename == 'article2.md':
data = (u'title: Test2\n'
u'tag: tag1, tag2\n'
u'\nContent')
else:
data = (u'title: Test3\n'
u'date: 2002/1/30\n'
u'tag: tag2, tag3\n'
u'\nContent')
output.write(data.encode('utf-8', 'replace'))
return output
def write(self, filename, encoding, content):
pass
def write_stream(self, filename, stream):
pass
def copy_file(self, source, destination):
pass
def exists(self, file):
return True
class MockSiteBuilder(SiteBuilder):
def _do_build(self):
return self.config
def test_site_builder(self):
config = SiteConfig()
config.site_builder = "test.test_builder/" \
"TestSiteBuilder.MockSiteBuilder"
config = SiteBuilder.from_config(
config, TestSiteBuilder.MockFileProcessor()).build()
self.assertEqual(len(config.articles_by_date), 3,
'article count mismatch')
|
<commit_before><commit_msg>Add a skeleton of builder test.<commit_after>
|
"""
test.test_builder
~~~~~~~~~~~~~~~~~
This is the unit test file for site builder.
:Copyright: Copyright 2014 Yang LIU <zesikliu@gmail.com>
:License: BSD, see LICENSE for details.
"""
import unittest
import datetime
import io
from zkb.builder import FileProcessor, SiteBuilder
from zkb.config import SiteConfig
class TestSiteBuilder(unittest.TestCase):
class MockFileProcessor(FileProcessor):
def get_article_files(self, dirname, ignored_dirs=None):
return [('article1.md', 'article1', datetime.datetime(2002, 1, 5),
'yaml', None),
('article2.md', 'article2', datetime.datetime(2002, 2, 1),
'yaml', None),
('article2.md', 'article2', datetime.datetime(2002, 3, 1),
'yaml', None)]
def read(self, filename):
output = io.BytesIO()
if filename == 'article1.md':
data = (u'title: Test\n'
u'tag: tag1\n'
u'\nContent')
elif filename == 'article2.md':
data = (u'title: Test2\n'
u'tag: tag1, tag2\n'
u'\nContent')
else:
data = (u'title: Test3\n'
u'date: 2002/1/30\n'
u'tag: tag2, tag3\n'
u'\nContent')
output.write(data.encode('utf-8', 'replace'))
return output
def write(self, filename, encoding, content):
pass
def write_stream(self, filename, stream):
pass
def copy_file(self, source, destination):
pass
def exists(self, file):
return True
class MockSiteBuilder(SiteBuilder):
def _do_build(self):
return self.config
def test_site_builder(self):
config = SiteConfig()
config.site_builder = "test.test_builder/" \
"TestSiteBuilder.MockSiteBuilder"
config = SiteBuilder.from_config(
config, TestSiteBuilder.MockFileProcessor()).build()
self.assertEqual(len(config.articles_by_date), 3,
'article count mismatch')
|
Add a skeleton of builder test."""
test.test_builder
~~~~~~~~~~~~~~~~~
This is the unit test file for site builder.
:Copyright: Copyright 2014 Yang LIU <zesikliu@gmail.com>
:License: BSD, see LICENSE for details.
"""
import unittest
import datetime
import io
from zkb.builder import FileProcessor, SiteBuilder
from zkb.config import SiteConfig
class TestSiteBuilder(unittest.TestCase):
class MockFileProcessor(FileProcessor):
def get_article_files(self, dirname, ignored_dirs=None):
return [('article1.md', 'article1', datetime.datetime(2002, 1, 5),
'yaml', None),
('article2.md', 'article2', datetime.datetime(2002, 2, 1),
'yaml', None),
('article2.md', 'article2', datetime.datetime(2002, 3, 1),
'yaml', None)]
def read(self, filename):
output = io.BytesIO()
if filename == 'article1.md':
data = (u'title: Test\n'
u'tag: tag1\n'
u'\nContent')
elif filename == 'article2.md':
data = (u'title: Test2\n'
u'tag: tag1, tag2\n'
u'\nContent')
else:
data = (u'title: Test3\n'
u'date: 2002/1/30\n'
u'tag: tag2, tag3\n'
u'\nContent')
output.write(data.encode('utf-8', 'replace'))
return output
def write(self, filename, encoding, content):
pass
def write_stream(self, filename, stream):
pass
def copy_file(self, source, destination):
pass
def exists(self, file):
return True
class MockSiteBuilder(SiteBuilder):
def _do_build(self):
return self.config
def test_site_builder(self):
config = SiteConfig()
config.site_builder = "test.test_builder/" \
"TestSiteBuilder.MockSiteBuilder"
config = SiteBuilder.from_config(
config, TestSiteBuilder.MockFileProcessor()).build()
self.assertEqual(len(config.articles_by_date), 3,
'article count mismatch')
|
<commit_before><commit_msg>Add a skeleton of builder test.<commit_after>"""
test.test_builder
~~~~~~~~~~~~~~~~~
This is the unit test file for site builder.
:Copyright: Copyright 2014 Yang LIU <zesikliu@gmail.com>
:License: BSD, see LICENSE for details.
"""
import unittest
import datetime
import io
from zkb.builder import FileProcessor, SiteBuilder
from zkb.config import SiteConfig
class TestSiteBuilder(unittest.TestCase):
class MockFileProcessor(FileProcessor):
def get_article_files(self, dirname, ignored_dirs=None):
return [('article1.md', 'article1', datetime.datetime(2002, 1, 5),
'yaml', None),
('article2.md', 'article2', datetime.datetime(2002, 2, 1),
'yaml', None),
('article2.md', 'article2', datetime.datetime(2002, 3, 1),
'yaml', None)]
def read(self, filename):
output = io.BytesIO()
if filename == 'article1.md':
data = (u'title: Test\n'
u'tag: tag1\n'
u'\nContent')
elif filename == 'article2.md':
data = (u'title: Test2\n'
u'tag: tag1, tag2\n'
u'\nContent')
else:
data = (u'title: Test3\n'
u'date: 2002/1/30\n'
u'tag: tag2, tag3\n'
u'\nContent')
output.write(data.encode('utf-8', 'replace'))
return output
def write(self, filename, encoding, content):
pass
def write_stream(self, filename, stream):
pass
def copy_file(self, source, destination):
pass
def exists(self, file):
return True
class MockSiteBuilder(SiteBuilder):
def _do_build(self):
return self.config
def test_site_builder(self):
config = SiteConfig()
config.site_builder = "test.test_builder/" \
"TestSiteBuilder.MockSiteBuilder"
config = SiteBuilder.from_config(
config, TestSiteBuilder.MockFileProcessor()).build()
self.assertEqual(len(config.articles_by_date), 3,
'article count mismatch')
|
|
76b62e4be81ec06f9b0b5611abb3f49ba2a41dae
|
analysis/print_reward.py
|
analysis/print_reward.py
|
import numpy as np
import common
def print_reward(inpaths, legend, budget=1e6, last=1e5, sort=False):
runs = common.load_runs(inpaths, budget)
if not legend:
methods = sorted(set(run['method'] for run in runs))
legend = {x: x.replace('_', ' ').title() for x in methods}
seeds = sorted({x['seed'] for x in runs})
rewards = np.empty((len(legend), len(seeds)))
rewards[:] = np.nan
for i, (method, label) in enumerate(legend.items()):
relevant = [run for run in runs if run['method'] == method]
if not relevant:
print(f'No runs found for method {method}.')
for run in relevant:
j = seeds.index(run['seed'])
xs = np.array(run['xs'])
ys = np.array(run['reward'])
rewards[i][j] = ys[-(xs >= (xs.max() - last)).sum()]
means = np.nanmean(rewards, -1)
stds = np.nanstd(rewards, -1)
print('')
print(r'\textbf{Method} & \textbf{Reward} \\')
print('')
for method, mean, std in zip(legend.values(), means, stds):
mean = f'{mean:.1f}'
mean = (r'\o' if len(mean) < 4 else ' ') + mean
print(rf'{method:<25} & ${mean} \pm {std:4.1f}$ \\')
print('')
inpaths = [
'scores/crafter_reward-dreamerv2.json',
'scores/crafter_reward-ppo.json',
'scores/crafter_reward-rainbow.json',
'scores/crafter_noreward-unsup_plan2explore.json',
'scores/crafter_noreward-unsup_rnd.json',
'scores/crafter_noreward-random.json',
'scores/crafter_reward-human.json',
]
legend = {
'human': 'Human Experts',
'dreamerv2': 'DreamerV2',
'ppo': 'PPO',
'rainbow': 'Rainbow',
'unsup_plan2explore': 'Plan2Explore (Unsup)',
'unsup_rnd': 'RND (Unsup)',
'random': 'Random',
}
print_reward(inpaths, legend)
|
Add script to print reward table.
|
Add script to print reward table.
|
Python
|
mit
|
danijar/crafter
|
Add script to print reward table.
|
import numpy as np
import common
def print_reward(inpaths, legend, budget=1e6, last=1e5, sort=False):
runs = common.load_runs(inpaths, budget)
if not legend:
methods = sorted(set(run['method'] for run in runs))
legend = {x: x.replace('_', ' ').title() for x in methods}
seeds = sorted({x['seed'] for x in runs})
rewards = np.empty((len(legend), len(seeds)))
rewards[:] = np.nan
for i, (method, label) in enumerate(legend.items()):
relevant = [run for run in runs if run['method'] == method]
if not relevant:
print(f'No runs found for method {method}.')
for run in relevant:
j = seeds.index(run['seed'])
xs = np.array(run['xs'])
ys = np.array(run['reward'])
rewards[i][j] = ys[-(xs >= (xs.max() - last)).sum()]
means = np.nanmean(rewards, -1)
stds = np.nanstd(rewards, -1)
print('')
print(r'\textbf{Method} & \textbf{Reward} \\')
print('')
for method, mean, std in zip(legend.values(), means, stds):
mean = f'{mean:.1f}'
mean = (r'\o' if len(mean) < 4 else ' ') + mean
print(rf'{method:<25} & ${mean} \pm {std:4.1f}$ \\')
print('')
inpaths = [
'scores/crafter_reward-dreamerv2.json',
'scores/crafter_reward-ppo.json',
'scores/crafter_reward-rainbow.json',
'scores/crafter_noreward-unsup_plan2explore.json',
'scores/crafter_noreward-unsup_rnd.json',
'scores/crafter_noreward-random.json',
'scores/crafter_reward-human.json',
]
legend = {
'human': 'Human Experts',
'dreamerv2': 'DreamerV2',
'ppo': 'PPO',
'rainbow': 'Rainbow',
'unsup_plan2explore': 'Plan2Explore (Unsup)',
'unsup_rnd': 'RND (Unsup)',
'random': 'Random',
}
print_reward(inpaths, legend)
|
<commit_before><commit_msg>Add script to print reward table.<commit_after>
|
import numpy as np
import common
def print_reward(inpaths, legend, budget=1e6, last=1e5, sort=False):
runs = common.load_runs(inpaths, budget)
if not legend:
methods = sorted(set(run['method'] for run in runs))
legend = {x: x.replace('_', ' ').title() for x in methods}
seeds = sorted({x['seed'] for x in runs})
rewards = np.empty((len(legend), len(seeds)))
rewards[:] = np.nan
for i, (method, label) in enumerate(legend.items()):
relevant = [run for run in runs if run['method'] == method]
if not relevant:
print(f'No runs found for method {method}.')
for run in relevant:
j = seeds.index(run['seed'])
xs = np.array(run['xs'])
ys = np.array(run['reward'])
rewards[i][j] = ys[-(xs >= (xs.max() - last)).sum()]
means = np.nanmean(rewards, -1)
stds = np.nanstd(rewards, -1)
print('')
print(r'\textbf{Method} & \textbf{Reward} \\')
print('')
for method, mean, std in zip(legend.values(), means, stds):
mean = f'{mean:.1f}'
mean = (r'\o' if len(mean) < 4 else ' ') + mean
print(rf'{method:<25} & ${mean} \pm {std:4.1f}$ \\')
print('')
inpaths = [
'scores/crafter_reward-dreamerv2.json',
'scores/crafter_reward-ppo.json',
'scores/crafter_reward-rainbow.json',
'scores/crafter_noreward-unsup_plan2explore.json',
'scores/crafter_noreward-unsup_rnd.json',
'scores/crafter_noreward-random.json',
'scores/crafter_reward-human.json',
]
legend = {
'human': 'Human Experts',
'dreamerv2': 'DreamerV2',
'ppo': 'PPO',
'rainbow': 'Rainbow',
'unsup_plan2explore': 'Plan2Explore (Unsup)',
'unsup_rnd': 'RND (Unsup)',
'random': 'Random',
}
print_reward(inpaths, legend)
|
Add script to print reward table.import numpy as np
import common
def print_reward(inpaths, legend, budget=1e6, last=1e5, sort=False):
runs = common.load_runs(inpaths, budget)
if not legend:
methods = sorted(set(run['method'] for run in runs))
legend = {x: x.replace('_', ' ').title() for x in methods}
seeds = sorted({x['seed'] for x in runs})
rewards = np.empty((len(legend), len(seeds)))
rewards[:] = np.nan
for i, (method, label) in enumerate(legend.items()):
relevant = [run for run in runs if run['method'] == method]
if not relevant:
print(f'No runs found for method {method}.')
for run in relevant:
j = seeds.index(run['seed'])
xs = np.array(run['xs'])
ys = np.array(run['reward'])
rewards[i][j] = ys[-(xs >= (xs.max() - last)).sum()]
means = np.nanmean(rewards, -1)
stds = np.nanstd(rewards, -1)
print('')
print(r'\textbf{Method} & \textbf{Reward} \\')
print('')
for method, mean, std in zip(legend.values(), means, stds):
mean = f'{mean:.1f}'
mean = (r'\o' if len(mean) < 4 else ' ') + mean
print(rf'{method:<25} & ${mean} \pm {std:4.1f}$ \\')
print('')
inpaths = [
'scores/crafter_reward-dreamerv2.json',
'scores/crafter_reward-ppo.json',
'scores/crafter_reward-rainbow.json',
'scores/crafter_noreward-unsup_plan2explore.json',
'scores/crafter_noreward-unsup_rnd.json',
'scores/crafter_noreward-random.json',
'scores/crafter_reward-human.json',
]
legend = {
'human': 'Human Experts',
'dreamerv2': 'DreamerV2',
'ppo': 'PPO',
'rainbow': 'Rainbow',
'unsup_plan2explore': 'Plan2Explore (Unsup)',
'unsup_rnd': 'RND (Unsup)',
'random': 'Random',
}
print_reward(inpaths, legend)
|
<commit_before><commit_msg>Add script to print reward table.<commit_after>import numpy as np
import common
def print_reward(inpaths, legend, budget=1e6, last=1e5, sort=False):
runs = common.load_runs(inpaths, budget)
if not legend:
methods = sorted(set(run['method'] for run in runs))
legend = {x: x.replace('_', ' ').title() for x in methods}
seeds = sorted({x['seed'] for x in runs})
rewards = np.empty((len(legend), len(seeds)))
rewards[:] = np.nan
for i, (method, label) in enumerate(legend.items()):
relevant = [run for run in runs if run['method'] == method]
if not relevant:
print(f'No runs found for method {method}.')
for run in relevant:
j = seeds.index(run['seed'])
xs = np.array(run['xs'])
ys = np.array(run['reward'])
rewards[i][j] = ys[-(xs >= (xs.max() - last)).sum()]
means = np.nanmean(rewards, -1)
stds = np.nanstd(rewards, -1)
print('')
print(r'\textbf{Method} & \textbf{Reward} \\')
print('')
for method, mean, std in zip(legend.values(), means, stds):
mean = f'{mean:.1f}'
mean = (r'\o' if len(mean) < 4 else ' ') + mean
print(rf'{method:<25} & ${mean} \pm {std:4.1f}$ \\')
print('')
inpaths = [
'scores/crafter_reward-dreamerv2.json',
'scores/crafter_reward-ppo.json',
'scores/crafter_reward-rainbow.json',
'scores/crafter_noreward-unsup_plan2explore.json',
'scores/crafter_noreward-unsup_rnd.json',
'scores/crafter_noreward-random.json',
'scores/crafter_reward-human.json',
]
legend = {
'human': 'Human Experts',
'dreamerv2': 'DreamerV2',
'ppo': 'PPO',
'rainbow': 'Rainbow',
'unsup_plan2explore': 'Plan2Explore (Unsup)',
'unsup_rnd': 'RND (Unsup)',
'random': 'Random',
}
print_reward(inpaths, legend)
|
|
32ec67c2a4fa0e225e9edef03cd59b4f1a509db8
|
cheroot/test/test_server.py
|
cheroot/test/test_server.py
|
"""Tests for the HTTP server."""
# -*- coding: utf-8 -*-
# vim: set fileencoding=utf-8 :
import os
import tempfile
import threading
import time
import pytest
import cheroot.server
from cheroot.testing import (
ANY_INTERFACE_IPV4,
ANY_INTERFACE_IPV6,
EPHEMERAL_PORT,
)
def make_http_server(bind_addr):
"""Create and start an HTTP server bound to bind_addr."""
httpserver = cheroot.server.HTTPServer(
bind_addr=bind_addr,
gateway=cheroot.server.Gateway,
)
threading.Thread(target=httpserver.safe_start).start()
while not httpserver.ready:
time.sleep(0.1)
return httpserver
@pytest.fixture
def http_server():
"""Provision a server creator as a fixture."""
def start_srv():
bind_addr = yield
httpserver = make_http_server(bind_addr)
yield httpserver
yield httpserver
srv_creator = iter(start_srv())
next(srv_creator)
yield srv_creator
try:
while True:
httpserver = next(srv_creator)
if httpserver is not None:
httpserver.stop()
except StopIteration:
pass
@pytest.fixture
def unix_sock_file():
"""Check that bound UNIX socket address is stored in server."""
tmp_sock_fh, tmp_sock_fname = tempfile.mkstemp()
yield tmp_sock_fname
os.close(tmp_sock_fh)
os.unlink(tmp_sock_fname)
@pytest.mark.parametrize(
'ip_addr',
(
ANY_INTERFACE_IPV4,
ANY_INTERFACE_IPV6,
)
)
def test_bind_addr_inet(http_server, ip_addr):
"""Check that bound IP address is stored in server."""
httpserver = http_server.send((ip_addr, EPHEMERAL_PORT))
assert httpserver.bind_addr[0] == ip_addr
assert httpserver.bind_addr[1] != EPHEMERAL_PORT
def test_bind_addr_unix(http_server, unix_sock_file):
"""Check that bound UNIX socket address is stored in server."""
httpserver = http_server.send(unix_sock_file)
assert httpserver.bind_addr == unix_sock_file
@pytest.mark.skip # FIXME: investigate binding to abstract sockets issue
def test_bind_addr_unix_abstract(http_server):
"""Check that bound UNIX socket address is stored in server."""
unix_abstract_sock = b'\x00cheroot/test/socket/here.sock'
httpserver = http_server.send(unix_abstract_sock)
assert httpserver.bind_addr == unix_abstract_sock
|
Add basic tests for server.HTTPServer.bind_addr
|
Add basic tests for server.HTTPServer.bind_addr
|
Python
|
bsd-3-clause
|
cherrypy/cheroot
|
Add basic tests for server.HTTPServer.bind_addr
|
"""Tests for the HTTP server."""
# -*- coding: utf-8 -*-
# vim: set fileencoding=utf-8 :
import os
import tempfile
import threading
import time
import pytest
import cheroot.server
from cheroot.testing import (
ANY_INTERFACE_IPV4,
ANY_INTERFACE_IPV6,
EPHEMERAL_PORT,
)
def make_http_server(bind_addr):
"""Create and start an HTTP server bound to bind_addr."""
httpserver = cheroot.server.HTTPServer(
bind_addr=bind_addr,
gateway=cheroot.server.Gateway,
)
threading.Thread(target=httpserver.safe_start).start()
while not httpserver.ready:
time.sleep(0.1)
return httpserver
@pytest.fixture
def http_server():
"""Provision a server creator as a fixture."""
def start_srv():
bind_addr = yield
httpserver = make_http_server(bind_addr)
yield httpserver
yield httpserver
srv_creator = iter(start_srv())
next(srv_creator)
yield srv_creator
try:
while True:
httpserver = next(srv_creator)
if httpserver is not None:
httpserver.stop()
except StopIteration:
pass
@pytest.fixture
def unix_sock_file():
"""Check that bound UNIX socket address is stored in server."""
tmp_sock_fh, tmp_sock_fname = tempfile.mkstemp()
yield tmp_sock_fname
os.close(tmp_sock_fh)
os.unlink(tmp_sock_fname)
@pytest.mark.parametrize(
'ip_addr',
(
ANY_INTERFACE_IPV4,
ANY_INTERFACE_IPV6,
)
)
def test_bind_addr_inet(http_server, ip_addr):
"""Check that bound IP address is stored in server."""
httpserver = http_server.send((ip_addr, EPHEMERAL_PORT))
assert httpserver.bind_addr[0] == ip_addr
assert httpserver.bind_addr[1] != EPHEMERAL_PORT
def test_bind_addr_unix(http_server, unix_sock_file):
"""Check that bound UNIX socket address is stored in server."""
httpserver = http_server.send(unix_sock_file)
assert httpserver.bind_addr == unix_sock_file
@pytest.mark.skip # FIXME: investigate binding to abstract sockets issue
def test_bind_addr_unix_abstract(http_server):
"""Check that bound UNIX socket address is stored in server."""
unix_abstract_sock = b'\x00cheroot/test/socket/here.sock'
httpserver = http_server.send(unix_abstract_sock)
assert httpserver.bind_addr == unix_abstract_sock
|
<commit_before><commit_msg>Add basic tests for server.HTTPServer.bind_addr<commit_after>
|
"""Tests for the HTTP server."""
# -*- coding: utf-8 -*-
# vim: set fileencoding=utf-8 :
import os
import tempfile
import threading
import time
import pytest
import cheroot.server
from cheroot.testing import (
ANY_INTERFACE_IPV4,
ANY_INTERFACE_IPV6,
EPHEMERAL_PORT,
)
def make_http_server(bind_addr):
"""Create and start an HTTP server bound to bind_addr."""
httpserver = cheroot.server.HTTPServer(
bind_addr=bind_addr,
gateway=cheroot.server.Gateway,
)
threading.Thread(target=httpserver.safe_start).start()
while not httpserver.ready:
time.sleep(0.1)
return httpserver
@pytest.fixture
def http_server():
"""Provision a server creator as a fixture."""
def start_srv():
bind_addr = yield
httpserver = make_http_server(bind_addr)
yield httpserver
yield httpserver
srv_creator = iter(start_srv())
next(srv_creator)
yield srv_creator
try:
while True:
httpserver = next(srv_creator)
if httpserver is not None:
httpserver.stop()
except StopIteration:
pass
@pytest.fixture
def unix_sock_file():
"""Check that bound UNIX socket address is stored in server."""
tmp_sock_fh, tmp_sock_fname = tempfile.mkstemp()
yield tmp_sock_fname
os.close(tmp_sock_fh)
os.unlink(tmp_sock_fname)
@pytest.mark.parametrize(
'ip_addr',
(
ANY_INTERFACE_IPV4,
ANY_INTERFACE_IPV6,
)
)
def test_bind_addr_inet(http_server, ip_addr):
"""Check that bound IP address is stored in server."""
httpserver = http_server.send((ip_addr, EPHEMERAL_PORT))
assert httpserver.bind_addr[0] == ip_addr
assert httpserver.bind_addr[1] != EPHEMERAL_PORT
def test_bind_addr_unix(http_server, unix_sock_file):
"""Check that bound UNIX socket address is stored in server."""
httpserver = http_server.send(unix_sock_file)
assert httpserver.bind_addr == unix_sock_file
@pytest.mark.skip # FIXME: investigate binding to abstract sockets issue
def test_bind_addr_unix_abstract(http_server):
"""Check that bound UNIX socket address is stored in server."""
unix_abstract_sock = b'\x00cheroot/test/socket/here.sock'
httpserver = http_server.send(unix_abstract_sock)
assert httpserver.bind_addr == unix_abstract_sock
|
Add basic tests for server.HTTPServer.bind_addr"""Tests for the HTTP server."""
# -*- coding: utf-8 -*-
# vim: set fileencoding=utf-8 :
import os
import tempfile
import threading
import time
import pytest
import cheroot.server
from cheroot.testing import (
ANY_INTERFACE_IPV4,
ANY_INTERFACE_IPV6,
EPHEMERAL_PORT,
)
def make_http_server(bind_addr):
"""Create and start an HTTP server bound to bind_addr."""
httpserver = cheroot.server.HTTPServer(
bind_addr=bind_addr,
gateway=cheroot.server.Gateway,
)
threading.Thread(target=httpserver.safe_start).start()
while not httpserver.ready:
time.sleep(0.1)
return httpserver
@pytest.fixture
def http_server():
"""Provision a server creator as a fixture."""
def start_srv():
bind_addr = yield
httpserver = make_http_server(bind_addr)
yield httpserver
yield httpserver
srv_creator = iter(start_srv())
next(srv_creator)
yield srv_creator
try:
while True:
httpserver = next(srv_creator)
if httpserver is not None:
httpserver.stop()
except StopIteration:
pass
@pytest.fixture
def unix_sock_file():
"""Check that bound UNIX socket address is stored in server."""
tmp_sock_fh, tmp_sock_fname = tempfile.mkstemp()
yield tmp_sock_fname
os.close(tmp_sock_fh)
os.unlink(tmp_sock_fname)
@pytest.mark.parametrize(
'ip_addr',
(
ANY_INTERFACE_IPV4,
ANY_INTERFACE_IPV6,
)
)
def test_bind_addr_inet(http_server, ip_addr):
"""Check that bound IP address is stored in server."""
httpserver = http_server.send((ip_addr, EPHEMERAL_PORT))
assert httpserver.bind_addr[0] == ip_addr
assert httpserver.bind_addr[1] != EPHEMERAL_PORT
def test_bind_addr_unix(http_server, unix_sock_file):
"""Check that bound UNIX socket address is stored in server."""
httpserver = http_server.send(unix_sock_file)
assert httpserver.bind_addr == unix_sock_file
@pytest.mark.skip # FIXME: investigate binding to abstract sockets issue
def test_bind_addr_unix_abstract(http_server):
"""Check that bound UNIX socket address is stored in server."""
unix_abstract_sock = b'\x00cheroot/test/socket/here.sock'
httpserver = http_server.send(unix_abstract_sock)
assert httpserver.bind_addr == unix_abstract_sock
|
<commit_before><commit_msg>Add basic tests for server.HTTPServer.bind_addr<commit_after>"""Tests for the HTTP server."""
# -*- coding: utf-8 -*-
# vim: set fileencoding=utf-8 :
import os
import tempfile
import threading
import time
import pytest
import cheroot.server
from cheroot.testing import (
ANY_INTERFACE_IPV4,
ANY_INTERFACE_IPV6,
EPHEMERAL_PORT,
)
def make_http_server(bind_addr):
"""Create and start an HTTP server bound to bind_addr."""
httpserver = cheroot.server.HTTPServer(
bind_addr=bind_addr,
gateway=cheroot.server.Gateway,
)
threading.Thread(target=httpserver.safe_start).start()
while not httpserver.ready:
time.sleep(0.1)
return httpserver
@pytest.fixture
def http_server():
"""Provision a server creator as a fixture."""
def start_srv():
bind_addr = yield
httpserver = make_http_server(bind_addr)
yield httpserver
yield httpserver
srv_creator = iter(start_srv())
next(srv_creator)
yield srv_creator
try:
while True:
httpserver = next(srv_creator)
if httpserver is not None:
httpserver.stop()
except StopIteration:
pass
@pytest.fixture
def unix_sock_file():
    """Provide a temporary file path for binding a UNIX socket."""
tmp_sock_fh, tmp_sock_fname = tempfile.mkstemp()
yield tmp_sock_fname
os.close(tmp_sock_fh)
os.unlink(tmp_sock_fname)
@pytest.mark.parametrize(
'ip_addr',
(
ANY_INTERFACE_IPV4,
ANY_INTERFACE_IPV6,
)
)
def test_bind_addr_inet(http_server, ip_addr):
"""Check that bound IP address is stored in server."""
httpserver = http_server.send((ip_addr, EPHEMERAL_PORT))
assert httpserver.bind_addr[0] == ip_addr
assert httpserver.bind_addr[1] != EPHEMERAL_PORT
def test_bind_addr_unix(http_server, unix_sock_file):
"""Check that bound UNIX socket address is stored in server."""
httpserver = http_server.send(unix_sock_file)
assert httpserver.bind_addr == unix_sock_file
@pytest.mark.skip # FIXME: investigate binding to abstract sockets issue
def test_bind_addr_unix_abstract(http_server):
"""Check that bound UNIX socket address is stored in server."""
unix_abstract_sock = b'\x00cheroot/test/socket/here.sock'
httpserver = http_server.send(unix_abstract_sock)
assert httpserver.bind_addr == unix_abstract_sock
|
|
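A note on the generator-driven http_server fixture in the cheroot record above: pytest primes the generator with next() so it pauses at the bare yield, the test supplies the bind address via send(), and teardown keeps calling next() and stopping servers until StopIteration. The sketch below reproduces that handshake outside pytest with an invented FakeServer stand-in (it is not part of cheroot), purely to illustrate the control flow.
class FakeServer:
    """Invented stand-in for cheroot.server.HTTPServer, used only for this illustration."""
    def __init__(self, bind_addr):
        self.bind_addr = bind_addr
    def stop(self):
        print('stopped server on', self.bind_addr)
def demo_creator():
    bind_addr = yield              # paused here after the priming next()
    server = FakeServer(bind_addr)
    yield server                   # handed back to the caller of send()
    yield server                   # handed back once more during teardown
creator = demo_creator()
next(creator)                          # prime: advance to the bare yield
srv = creator.send(('127.0.0.1', 0))   # test body: supply the bind address
assert srv.bind_addr == ('127.0.0.1', 0)
next(creator).stop()                   # teardown: fetch the server again and stop it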
241f4bb835c3335819f66a25367336470d7e3583
|
bin/result_updater.py
|
bin/result_updater.py
|
#!/usr/bin/python2.4
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the 'License')
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bulk datastore changes."""
__author__ = 'slamm@google.com (Stephen Lamm)'
import getopt
import getpass
import os
import simplejson
import sys
DIR_PATH = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
sys.path.append('%s/..' % DIR_PATH)
from appengine_tools import appengine_rpc
UPDATER_URL_PATH = '/admin/update_result_parents'
class ResultUpdater(object):
def __init__(self, host, path, user, bookmark=None):
self.path = path
self.user = user
user_agent = None
# TODO: figure out a value for 'source'.
# Doc says, "The source to specify in authentication requests."
source = ''
self.rpc_server = appengine_rpc.HttpRpcServer(
host, self.GetCredentials, user_agent, source, save_cookies=True)
def GetCredentials(self):
# TODO: Grab email/password from config
return self.user, getpass.getpass('Password for %s: ' % self.user)
def Send(self, bookmark, total_scanned, total_updated):
response_data = self.rpc_server.Send(self.path, simplejson.dumps(
(bookmark, total_scanned, total_updated)))
return simplejson.loads(response_data)
def main(argv):
options, args = getopt.getopt(
argv[1:],
'h:u:',
['host=', 'gae_user='])
host = None
gae_user = None
for option_key, option_value in options:
if option_key in ('-h', '--host'):
host = option_value
elif option_key in ('-u', '--gae_user'):
gae_user = option_value
updater = ResultUpdater(host, UPDATER_URL_PATH, user=gae_user)
bookmark = None
total_scanned = 0
total_updated = 0
while 1:
print 'Update batch: %s (total_scanned=%s, total_updated=%s)' % (
bookmark or 'no_bookmark', total_scanned, total_updated)
bookmark, total_scanned, total_updated = updater.Send(
bookmark, total_scanned, total_updated)
if not bookmark:
break
if __name__ == '__main__':
main(sys.argv)
|
Add a client to migrate from ResultParent.user_agent_list to ResultParent.user_agent_pretty
|
Add a client to migrate from ResultParent.user_agent_list to ResultParent.user_agent_pretty
|
Python
|
apache-2.0
|
elsigh/browserscope,elsigh/browserscope,elsigh/browserscope,elsigh/browserscope
|
Add a client to migrate from ResultParent.user_agent_list to ResultParent.user_agent_pretty
|
#!/usr/bin/python2.4
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the 'License')
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bulk datastore changes."""
__author__ = 'slamm@google.com (Stephen Lamm)'
import getopt
import getpass
import os
import simplejson
import sys
DIR_PATH = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
sys.path.append('%s/..' % DIR_PATH)
from appengine_tools import appengine_rpc
UPDATER_URL_PATH = '/admin/update_result_parents'
class ResultUpdater(object):
def __init__(self, host, path, user, bookmark=None):
self.path = path
self.user = user
user_agent = None
# TODO: figure out a value for 'source'.
# Doc says, "The source to specify in authentication requests."
source = ''
self.rpc_server = appengine_rpc.HttpRpcServer(
host, self.GetCredentials, user_agent, source, save_cookies=True)
def GetCredentials(self):
# TODO: Grab email/password from config
return self.user, getpass.getpass('Password for %s: ' % self.user)
def Send(self, bookmark, total_scanned, total_updated):
response_data = self.rpc_server.Send(self.path, simplejson.dumps(
(bookmark, total_scanned, total_updated)))
return simplejson.loads(response_data)
def main(argv):
options, args = getopt.getopt(
argv[1:],
'h:u:',
['host=', 'gae_user='])
host = None
gae_user = None
for option_key, option_value in options:
if option_key in ('-h', '--host'):
host = option_value
elif option_key in ('-u', '--gae_user'):
gae_user = option_value
updater = ResultUpdater(host, UPDATER_URL_PATH, user=gae_user)
bookmark = None
total_scanned = 0
total_updated = 0
while 1:
print 'Update batch: %s (total_scanned=%s, total_updated=%s)' % (
bookmark or 'no_bookmark', total_scanned, total_updated)
bookmark, total_scanned, total_updated = updater.Send(
bookmark, total_scanned, total_updated)
if not bookmark:
break
if __name__ == '__main__':
main(sys.argv)
|
<commit_before><commit_msg>Add a client to migrate from ResultParent.user_agent_list to ResultParent.user_agent_pretty<commit_after>
|
#!/usr/bin/python2.4
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the 'License')
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bulk datastore changes."""
__author__ = 'slamm@google.com (Stephen Lamm)'
import getopt
import getpass
import os
import simplejson
import sys
DIR_PATH = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
sys.path.append('%s/..' % DIR_PATH)
from appengine_tools import appengine_rpc
UPDATER_URL_PATH = '/admin/update_result_parents'
class ResultUpdater(object):
def __init__(self, host, path, user, bookmark=None):
self.path = path
self.user = user
user_agent = None
# TODO: figure out a value for 'source'.
# Doc says, "The source to specify in authentication requests."
source = ''
self.rpc_server = appengine_rpc.HttpRpcServer(
host, self.GetCredentials, user_agent, source, save_cookies=True)
def GetCredentials(self):
# TODO: Grab email/password from config
return self.user, getpass.getpass('Password for %s: ' % self.user)
def Send(self, bookmark, total_scanned, total_updated):
response_data = self.rpc_server.Send(self.path, simplejson.dumps(
(bookmark, total_scanned, total_updated)))
return simplejson.loads(response_data)
def main(argv):
options, args = getopt.getopt(
argv[1:],
'h:u:',
['host=', 'gae_user='])
host = None
gae_user = None
for option_key, option_value in options:
if option_key in ('-h', '--host'):
host = option_value
elif option_key in ('-u', '--gae_user'):
gae_user = option_value
updater = ResultUpdater(host, UPDATER_URL_PATH, user=gae_user)
bookmark = None
total_scanned = 0
total_updated = 0
while 1:
print 'Update batch: %s (total_scanned=%s, total_updated=%s)' % (
bookmark or 'no_bookmark', total_scanned, total_updated)
bookmark, total_scanned, total_updated = updater.Send(
bookmark, total_scanned, total_updated)
if not bookmark:
break
if __name__ == '__main__':
main(sys.argv)
|
Add a client to migrate from ResultParent.user_agent_list to ResultParent.user_agent_pretty#!/usr/bin/python2.4
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the 'License')
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bulk datastore changes."""
__author__ = 'slamm@google.com (Stephen Lamm)'
import getopt
import getpass
import os
import simplejson
import sys
DIR_PATH = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
sys.path.append('%s/..' % DIR_PATH)
from appengine_tools import appengine_rpc
UPDATER_URL_PATH = '/admin/update_result_parents'
class ResultUpdater(object):
def __init__(self, host, path, user, bookmark=None):
self.path = path
self.user = user
user_agent = None
# TODO: figure out a value for 'source'.
# Doc says, "The source to specify in authentication requests."
source = ''
self.rpc_server = appengine_rpc.HttpRpcServer(
host, self.GetCredentials, user_agent, source, save_cookies=True)
def GetCredentials(self):
# TODO: Grab email/password from config
return self.user, getpass.getpass('Password for %s: ' % self.user)
def Send(self, bookmark, total_scanned, total_updated):
response_data = self.rpc_server.Send(self.path, simplejson.dumps(
(bookmark, total_scanned, total_updated)))
return simplejson.loads(response_data)
def main(argv):
options, args = getopt.getopt(
argv[1:],
'h:u:',
['host=', 'gae_user='])
host = None
gae_user = None
for option_key, option_value in options:
if option_key in ('-h', '--host'):
host = option_value
elif option_key in ('-u', '--gae_user'):
gae_user = option_value
updater = ResultUpdater(host, UPDATER_URL_PATH, user=gae_user)
bookmark = None
total_scanned = 0
total_updated = 0
while 1:
print 'Update batch: %s (total_scanned=%s, total_updated=%s)' % (
bookmark or 'no_bookmark', total_scanned, total_updated)
bookmark, total_scanned, total_updated = updater.Send(
bookmark, total_scanned, total_updated)
if not bookmark:
break
if __name__ == '__main__':
main(sys.argv)
|
<commit_before><commit_msg>Add a client to migrate from ResultParent.user_agent_list to ResultParent.user_agent_pretty<commit_after>#!/usr/bin/python2.4
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the 'License')
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bulk datastore changes."""
__author__ = 'slamm@google.com (Stephen Lamm)'
import getopt
import getpass
import os
import simplejson
import sys
DIR_PATH = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
sys.path.append('%s/..' % DIR_PATH)
from appengine_tools import appengine_rpc
UPDATER_URL_PATH = '/admin/update_result_parents'
class ResultUpdater(object):
def __init__(self, host, path, user, bookmark=None):
self.path = path
self.user = user
user_agent = None
# TODO: figure out a value for 'source'.
# Doc says, "The source to specify in authentication requests."
source = ''
self.rpc_server = appengine_rpc.HttpRpcServer(
host, self.GetCredentials, user_agent, source, save_cookies=True)
def GetCredentials(self):
# TODO: Grab email/password from config
return self.user, getpass.getpass('Password for %s: ' % self.user)
def Send(self, bookmark, total_scanned, total_updated):
response_data = self.rpc_server.Send(self.path, simplejson.dumps(
(bookmark, total_scanned, total_updated)))
return simplejson.loads(response_data)
def main(argv):
options, args = getopt.getopt(
argv[1:],
'h:u:',
['host=', 'gae_user='])
host = None
gae_user = None
for option_key, option_value in options:
if option_key in ('-h', '--host'):
host = option_value
elif option_key in ('-u', '--gae_user'):
gae_user = option_value
updater = ResultUpdater(host, UPDATER_URL_PATH, user=gae_user)
bookmark = None
total_scanned = 0
total_updated = 0
while 1:
print 'Update batch: %s (total_scanned=%s, total_updated=%s)' % (
bookmark or 'no_bookmark', total_scanned, total_updated)
bookmark, total_scanned, total_updated = updater.Send(
bookmark, total_scanned, total_updated)
if not bookmark:
break
if __name__ == '__main__':
main(sys.argv)
|
|
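The result_updater client above walks the datastore with a bookmark cursor: each POST to /admin/update_result_parents returns (bookmark, total_scanned, total_updated), and a falsy bookmark ends the loop. The sketch below shows that contract against an invented FakeRpc stub (not part of appengine_rpc), with the same client loop terminating after three batches.
class FakeRpc:
    """Invented stub that imitates the server side of the bookmark protocol."""
    def __init__(self, batches):
        self.batches = batches
    def send(self, bookmark, total_scanned, total_updated):
        if self.batches == 0:
            return None, total_scanned, total_updated   # no bookmark -> client stops
        self.batches -= 1
        return 'cursor-%d' % self.batches, total_scanned + 500, total_updated + 120
rpc = FakeRpc(batches=3)
bookmark, scanned, updated = None, 0, 0
while True:
    bookmark, scanned, updated = rpc.send(bookmark, scanned, updated)
    if not bookmark:
        break
print(scanned, updated)   # 1500 360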
f9b87b08db621fe0f2c39ba7f03b586b09e521bd
|
api/sonetworks/migrations/0002_auto_20170202_0207.py
|
api/sonetworks/migrations/0002_auto_20170202_0207.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-02 02:07
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('sonetworks', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='post',
old_name='bucket',
new_name='data',
),
]
|
ADD Django migration for change in Post model
|
ADD Django migration for change in Post model
|
Python
|
mit
|
semitki/semitki,semitki/semitki,semitki/semitki,semitki/semitki
|
ADD Django migration for change in Post model
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-02 02:07
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('sonetworks', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='post',
old_name='bucket',
new_name='data',
),
]
|
<commit_before><commit_msg>ADD Django migration for change in Post model<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-02 02:07
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('sonetworks', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='post',
old_name='bucket',
new_name='data',
),
]
|
ADD Django migration for change in Post model# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-02 02:07
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('sonetworks', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='post',
old_name='bucket',
new_name='data',
),
]
|
<commit_before><commit_msg>ADD Django migration for change in Post model<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-02 02:07
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('sonetworks', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='post',
old_name='bucket',
new_name='data',
),
]
|
|
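The sonetworks migration above only records the column rename; the model edit that normally accompanies a RenameField is not part of the record. A rough models.py sketch of the renamed Post field is below. The TextField type and the standalone app_label are assumptions for illustration only, since the actual sonetworks models are not shown. Because RenameField stores both names, Django can also unapply this step (python manage.py migrate sonetworks 0001).
from django.db import models
class Post(models.Model):
    # formerly "bucket"; migration 0002 renames the column to "data"
    data = models.TextField()   # the real field type in sonetworks is not shown in this record
    class Meta:
        app_label = 'sonetworks'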
43d038920a7e4e0b352c42675646a19285c1fb60
|
app/soc/modules/gci/views/moderate_delete_account.py
|
app/soc/modules/gci/views/moderate_delete_account.py
|
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for the GCI delete account page."""
from soc.logic import delete_account
from soc.views.helper import url_patterns
from soc.modules.gci.logic import profile as profile_logic
from soc.modules.gci.views import base
from soc.modules.gci.views.helper.url_patterns import url
class ModerateDeleteAccountPage(base.GCIRequestHandler):
"""View for the GCI delete account page.
"""
def templatePath(self):
return 'v2/modules/gci/moderate_delete_account/base.html'
def djangoURLPatterns(self):
return [
url(r'admin/delete_account/%s$' % url_patterns.PROFILE,
self, name='gci_moderate_delete_account')
]
def checkAccess(self):
self.check.isHost()
self.mutator.profileFromKwargs()
def context(self):
profile = self.data.url_profile
return {
'page_name': 'Moderate delete account requests',
'profile': profile,
'has_tasks': profile_logic.hasTasks(profile),
'has_created_or_modified_tasks': profile_logic.hasCreatedOrModifiedTask(
profile),
'has_task_comments': profile_logic.hasTaskComments(profile),
'has_other_gci_profiles': profile_logic.hasOtherGCIProfiles(profile),
'has_other_gsoc_profiles': profile_logic.hasOtherGSoCProfiles(profile),
}
def post(self):
delete_account.confirm_delete(self.data.url_profile)
self.redirect.program().to('gci_moderate_delete_account', validated=True)
|
Implement the view class required to moderate account deletion requests.
|
Implement the view class required to moderate account deletion requests.
|
Python
|
apache-2.0
|
rhyolight/nupic.son,rhyolight/nupic.son,rhyolight/nupic.son
|
Implement the view class required to moderate account deletion requests.
|
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for the GCI delete account page."""
from soc.logic import delete_account
from soc.views.helper import url_patterns
from soc.modules.gci.logic import profile as profile_logic
from soc.modules.gci.views import base
from soc.modules.gci.views.helper.url_patterns import url
class ModerateDeleteAccountPage(base.GCIRequestHandler):
"""View for the GCI delete account page.
"""
def templatePath(self):
return 'v2/modules/gci/moderate_delete_account/base.html'
def djangoURLPatterns(self):
return [
url(r'admin/delete_account/%s$' % url_patterns.PROFILE,
self, name='gci_moderate_delete_account')
]
def checkAccess(self):
self.check.isHost()
self.mutator.profileFromKwargs()
def context(self):
profile = self.data.url_profile
return {
'page_name': 'Moderate delete account requests',
'profile': profile,
'has_tasks': profile_logic.hasTasks(profile),
'has_created_or_modified_tasks': profile_logic.hasCreatedOrModifiedTask(
profile),
'has_task_comments': profile_logic.hasTaskComments(profile),
'has_other_gci_profiles': profile_logic.hasOtherGCIProfiles(profile),
'has_other_gsoc_profiles': profile_logic.hasOtherGSoCProfiles(profile),
}
def post(self):
delete_account.confirm_delete(self.data.url_profile)
self.redirect.program().to('gci_moderate_delete_account', validated=True)
|
<commit_before><commit_msg>Implement the view class required to moderate account deletion requests.<commit_after>
|
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for the GCI delete account page."""
from soc.logic import delete_account
from soc.views.helper import url_patterns
from soc.modules.gci.logic import profile as profile_logic
from soc.modules.gci.views import base
from soc.modules.gci.views.helper.url_patterns import url
class ModerateDeleteAccountPage(base.GCIRequestHandler):
"""View for the GCI delete account page.
"""
def templatePath(self):
return 'v2/modules/gci/moderate_delete_account/base.html'
def djangoURLPatterns(self):
return [
url(r'admin/delete_account/%s$' % url_patterns.PROFILE,
self, name='gci_moderate_delete_account')
]
def checkAccess(self):
self.check.isHost()
self.mutator.profileFromKwargs()
def context(self):
profile = self.data.url_profile
return {
'page_name': 'Moderate delete account requests',
'profile': profile,
'has_tasks': profile_logic.hasTasks(profile),
'has_created_or_modified_tasks': profile_logic.hasCreatedOrModifiedTask(
profile),
'has_task_comments': profile_logic.hasTaskComments(profile),
'has_other_gci_profiles': profile_logic.hasOtherGCIProfiles(profile),
'has_other_gsoc_profiles': profile_logic.hasOtherGSoCProfiles(profile),
}
def post(self):
delete_account.confirm_delete(self.data.url_profile)
self.redirect.program().to('gci_moderate_delete_account', validated=True)
|
Implement the view class required to moderate account deletion requests.# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for the GCI delete account page."""
from soc.logic import delete_account
from soc.views.helper import url_patterns
from soc.modules.gci.logic import profile as profile_logic
from soc.modules.gci.views import base
from soc.modules.gci.views.helper.url_patterns import url
class ModerateDeleteAccountPage(base.GCIRequestHandler):
"""View for the GCI delete account page.
"""
def templatePath(self):
return 'v2/modules/gci/moderate_delete_account/base.html'
def djangoURLPatterns(self):
return [
url(r'admin/delete_account/%s$' % url_patterns.PROFILE,
self, name='gci_moderate_delete_account')
]
def checkAccess(self):
self.check.isHost()
self.mutator.profileFromKwargs()
def context(self):
profile = self.data.url_profile
return {
'page_name': 'Moderate delete account requests',
'profile': profile,
'has_tasks': profile_logic.hasTasks(profile),
'has_created_or_modified_tasks': profile_logic.hasCreatedOrModifiedTask(
profile),
'has_task_comments': profile_logic.hasTaskComments(profile),
'has_other_gci_profiles': profile_logic.hasOtherGCIProfiles(profile),
'has_other_gsoc_profiles': profile_logic.hasOtherGSoCProfiles(profile),
}
def post(self):
delete_account.confirm_delete(self.data.url_profile)
self.redirect.program().to('gci_moderate_delete_account', validated=True)
|
<commit_before><commit_msg>Implement the view class required to moderate account deletion requests.<commit_after># Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for the GCI delete account page."""
from soc.logic import delete_account
from soc.views.helper import url_patterns
from soc.modules.gci.logic import profile as profile_logic
from soc.modules.gci.views import base
from soc.modules.gci.views.helper.url_patterns import url
class ModerateDeleteAccountPage(base.GCIRequestHandler):
"""View for the GCI delete account page.
"""
def templatePath(self):
return 'v2/modules/gci/moderate_delete_account/base.html'
def djangoURLPatterns(self):
return [
url(r'admin/delete_account/%s$' % url_patterns.PROFILE,
self, name='gci_moderate_delete_account')
]
def checkAccess(self):
self.check.isHost()
self.mutator.profileFromKwargs()
def context(self):
profile = self.data.url_profile
return {
'page_name': 'Moderate delete account requests',
'profile': profile,
'has_tasks': profile_logic.hasTasks(profile),
'has_created_or_modified_tasks': profile_logic.hasCreatedOrModifiedTask(
profile),
'has_task_comments': profile_logic.hasTaskComments(profile),
'has_other_gci_profiles': profile_logic.hasOtherGCIProfiles(profile),
'has_other_gsoc_profiles': profile_logic.hasOtherGSoCProfiles(profile),
}
def post(self):
delete_account.confirm_delete(self.data.url_profile)
self.redirect.program().to('gci_moderate_delete_account', validated=True)
|
|
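The context() method in the GCI record above surfaces a set of boolean flags (has_tasks, has_task_comments, and so on) that the moderation template uses to decide how a deletion request can be honoured. The actual policy lives elsewhere in Melange and is not part of this record; the helper below is only an illustrative guess at the kind of decision those flags support.
def can_wipe_outright(context):
    """Illustrative only: treat the account as wipeable when nothing references it."""
    blocking = ('has_tasks', 'has_created_or_modified_tasks', 'has_task_comments')
    return not any(context[key] for key in blocking)
ctx = {'has_tasks': False, 'has_created_or_modified_tasks': False, 'has_task_comments': False}
print(can_wipe_outright(ctx))   # True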
0a0ed8d26f930d80bf38164d5010b96007e17bfd
|
casepro/statistics/migrations/0015_populate_is_squashed.py
|
casepro/statistics/migrations/0015_populate_is_squashed.py
|
# Generated by Django 2.2.8 on 2019-12-09 21:03
from django.db import migrations
BATCH_SIZE = 2500
def populate_is_squashed(apps, schema_editor):
DailyCount = apps.get_model("statistics", "DailyCount")
DailySecondTotalCount = apps.get_model("statistics", "DailySecondTotalCount")
TotalCount = apps.get_model("statistics", "TotalCount")
populate_for_model(DailyCount)
populate_for_model(DailySecondTotalCount)
populate_for_model(TotalCount)
def populate_for_model(model):
max_id = 0
num_updated = 0
while True:
id_batch = list(model.objects.filter(id__gt=max_id, is_squashed=None).values_list("id", flat=True).order_by("id")[:BATCH_SIZE])
if not id_batch:
break
model.objects.filter(id__in=id_batch).update(is_squashed=True)
max_id = id_batch[-1]
num_updated += len(id_batch)
    print(f"  > Updated {num_updated} instances of {model.__name__}")
class Migration(migrations.Migration):
dependencies = [
('statistics', '0014_auto_20191209_1933'),
]
operations = [
migrations.RunPython(populate_is_squashed)
]
|
Add migration to backfill is_squashed on squashable models
|
Add migration to backfill is_squashed on squashable models
|
Python
|
bsd-3-clause
|
rapidpro/casepro,rapidpro/casepro,rapidpro/casepro
|
Add migration to backfill is_squashed on squashable models
|
# Generated by Django 2.2.8 on 2019-12-09 21:03
from django.db import migrations
BATCH_SIZE = 2500
def populate_is_squashed(apps, schema_editor):
DailyCount = apps.get_model("statistics", "DailyCount")
DailySecondTotalCount = apps.get_model("statistics", "DailySecondTotalCount")
TotalCount = apps.get_model("statistics", "TotalCount")
populate_for_model(DailyCount)
populate_for_model(DailySecondTotalCount)
populate_for_model(TotalCount)
def populate_for_model(model):
max_id = 0
num_updated = 0
while True:
id_batch = list(model.objects.filter(id__gt=max_id, is_squashed=None).values_list("id", flat=True).order_by("id")[:BATCH_SIZE])
if not id_batch:
break
model.objects.filter(id__in=id_batch).update(is_squashed=True)
max_id = id_batch[-1]
num_updated += len(id_batch)
    print(f"  > Updated {num_updated} instances of {model.__name__}")
class Migration(migrations.Migration):
dependencies = [
('statistics', '0014_auto_20191209_1933'),
]
operations = [
migrations.RunPython(populate_is_squashed)
]
|
<commit_before><commit_msg>Add migration to backfill is_squashed on squashable models<commit_after>
|
# Generated by Django 2.2.8 on 2019-12-09 21:03
from django.db import migrations
BATCH_SIZE = 2500
def populate_is_squashed(apps, schema_editor):
DailyCount = apps.get_model("statistics", "DailyCount")
DailySecondTotalCount = apps.get_model("statistics", "DailySecondTotalCount")
TotalCount = apps.get_model("statistics", "TotalCount")
populate_for_model(DailyCount)
populate_for_model(DailySecondTotalCount)
populate_for_model(TotalCount)
def populate_for_model(model):
max_id = 0
num_updated = 0
while True:
id_batch = list(model.objects.filter(id__gt=max_id, is_squashed=None).values_list("id", flat=True).order_by("id")[:BATCH_SIZE])
if not id_batch:
break
model.objects.filter(id__in=id_batch).update(is_squashed=True)
max_id = id_batch[-1]
num_updated += len(id_batch)
    print(f"  > Updated {num_updated} instances of {model.__name__}")
class Migration(migrations.Migration):
dependencies = [
('statistics', '0014_auto_20191209_1933'),
]
operations = [
migrations.RunPython(populate_is_squashed)
]
|
Add migration to backfill is_squashed on squashable models# Generated by Django 2.2.8 on 2019-12-09 21:03
from django.db import migrations
BATCH_SIZE = 2500
def populate_is_squashed(apps, schema_editor):
DailyCount = apps.get_model("statistics", "DailyCount")
DailySecondTotalCount = apps.get_model("statistics", "DailySecondTotalCount")
TotalCount = apps.get_model("statistics", "TotalCount")
populate_for_model(DailyCount)
populate_for_model(DailySecondTotalCount)
populate_for_model(TotalCount)
def populate_for_model(model):
max_id = 0
num_updated = 0
while True:
id_batch = list(model.objects.filter(id__gt=max_id, is_squashed=None).values_list("id", flat=True).order_by("id")[:BATCH_SIZE])
if not id_batch:
break
model.objects.filter(id__in=id_batch).update(is_squashed=True)
max_id = id_batch[-1]
num_updated += len(id_batch)
    print(f"  > Updated {num_updated} instances of {model.__name__}")
class Migration(migrations.Migration):
dependencies = [
('statistics', '0014_auto_20191209_1933'),
]
operations = [
migrations.RunPython(populate_is_squashed)
]
|
<commit_before><commit_msg>Add migration to backfill is_squashed on squashable models<commit_after># Generated by Django 2.2.8 on 2019-12-09 21:03
from django.db import migrations
BATCH_SIZE = 2500
def populate_is_squashed(apps, schema_editor):
DailyCount = apps.get_model("statistics", "DailyCount")
DailySecondTotalCount = apps.get_model("statistics", "DailySecondTotalCount")
TotalCount = apps.get_model("statistics", "TotalCount")
populate_for_model(DailyCount)
populate_for_model(DailySecondTotalCount)
populate_for_model(TotalCount)
def populate_for_model(model):
max_id = 0
num_updated = 0
while True:
id_batch = list(model.objects.filter(id__gt=max_id, is_squashed=None).values_list("id", flat=True).order_by("id")[:BATCH_SIZE])
if not id_batch:
break
model.objects.filter(id__in=id_batch).update(is_squashed=True)
max_id = id_batch[-1]
num_updated += len(id_batch)
    print(f"  > Updated {num_updated} instances of {model.__name__}")
class Migration(migrations.Migration):
dependencies = [
('statistics', '0014_auto_20191209_1933'),
]
operations = [
migrations.RunPython(populate_is_squashed)
]
|
|
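Two details of the casepro backfill above are worth noting: the id__gt keyset batching keeps each query cheap on a large table, and the operation is declared forward-only, so the migration cannot be unapplied. If reversibility mattered, the stock pattern is to pass migrations.RunPython.noop as the reverse callable; the sketch below (not part of the original commit) shows that variant, reusing the same populate_is_squashed function.
from django.db import migrations
def populate_is_squashed(apps, schema_editor):
    ...  # batched backfill exactly as in the record above
class Migration(migrations.Migration):
    dependencies = [("statistics", "0014_auto_20191209_1933")]
    operations = [
        # A reverse callable lets `python manage.py migrate statistics 0014` unapply this step;
        # noop is enough because clearing is_squashed again would have no value.
        migrations.RunPython(populate_is_squashed, migrations.RunPython.noop),
    ]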
0b1a4a57a9e8f9e7038fb4330cbac68596f5960c
|
tests/plugins/test_property_mod_tracker.py
|
tests/plugins/test_property_mod_tracker.py
|
import sqlalchemy as sa
from sqlalchemy_continuum.plugins import PropertyModTrackerPlugin
from tests import TestCase
class TestPropertyModificationsTracking(TestCase):
plugins = [PropertyModTrackerPlugin]
def create_models(self):
class User(self.Model):
__tablename__ = 'text_item'
__versioned__ = {
'base_classes': (self.Model, ),
'track_property_modifications': True
}
id = sa.Column(sa.Integer, autoincrement=True, primary_key=True)
name = sa.Column(sa.Unicode(255))
age = sa.Column(sa.Integer)
self.User = User
def test_each_column_generates_additional_mod_column(self):
UserHistory = self.User.__versioned__['class']
assert 'name_mod' in UserHistory.__table__.c
column = UserHistory.__table__.c['name_mod']
assert not column.nullable
assert isinstance(column.type, sa.Boolean)
def test_primary_keys_not_included(self):
UserHistory = self.User.__versioned__['class']
assert 'id_mod' not in UserHistory.__table__.c
def test_mod_properties_get_updated(self):
user = self.User(name=u'John')
self.session.add(user)
self.session.commit()
assert user.versions[-1].name_mod
|
Add tests for PropertyModTracker plugin
|
Add tests for PropertyModTracker plugin
|
Python
|
bsd-3-clause
|
avilaton/sqlalchemy-continuum,piotr-dobrogost/sqlalchemy-continuum,rmoorman/sqlalchemy-continuum,kvesteri/sqlalchemy-continuum
|
Add tests for PropertyModTracker plugin
|
import sqlalchemy as sa
from sqlalchemy_continuum.plugins import PropertyModTrackerPlugin
from tests import TestCase
class TestPropertyModificationsTracking(TestCase):
plugins = [PropertyModTrackerPlugin]
def create_models(self):
class User(self.Model):
__tablename__ = 'text_item'
__versioned__ = {
'base_classes': (self.Model, ),
'track_property_modifications': True
}
id = sa.Column(sa.Integer, autoincrement=True, primary_key=True)
name = sa.Column(sa.Unicode(255))
age = sa.Column(sa.Integer)
self.User = User
def test_each_column_generates_additional_mod_column(self):
UserHistory = self.User.__versioned__['class']
assert 'name_mod' in UserHistory.__table__.c
column = UserHistory.__table__.c['name_mod']
assert not column.nullable
assert isinstance(column.type, sa.Boolean)
def test_primary_keys_not_included(self):
UserHistory = self.User.__versioned__['class']
assert 'id_mod' not in UserHistory.__table__.c
def test_mod_properties_get_updated(self):
user = self.User(name=u'John')
self.session.add(user)
self.session.commit()
assert user.versions[-1].name_mod
|
<commit_before><commit_msg>Add tests for PropertyModTracker plugin<commit_after>
|
import sqlalchemy as sa
from sqlalchemy_continuum.plugins import PropertyModTrackerPlugin
from tests import TestCase
class TestPropertyModificationsTracking(TestCase):
plugins = [PropertyModTrackerPlugin]
def create_models(self):
class User(self.Model):
__tablename__ = 'text_item'
__versioned__ = {
'base_classes': (self.Model, ),
'track_property_modifications': True
}
id = sa.Column(sa.Integer, autoincrement=True, primary_key=True)
name = sa.Column(sa.Unicode(255))
age = sa.Column(sa.Integer)
self.User = User
def test_each_column_generates_additional_mod_column(self):
UserHistory = self.User.__versioned__['class']
assert 'name_mod' in UserHistory.__table__.c
column = UserHistory.__table__.c['name_mod']
assert not column.nullable
assert isinstance(column.type, sa.Boolean)
def test_primary_keys_not_included(self):
UserHistory = self.User.__versioned__['class']
assert 'id_mod' not in UserHistory.__table__.c
def test_mod_properties_get_updated(self):
user = self.User(name=u'John')
self.session.add(user)
self.session.commit()
assert user.versions[-1].name_mod
|
Add tests for PropertyModTracker pluginimport sqlalchemy as sa
from sqlalchemy_continuum.plugins import PropertyModTrackerPlugin
from tests import TestCase
class TestPropertyModificationsTracking(TestCase):
plugins = [PropertyModTrackerPlugin]
def create_models(self):
class User(self.Model):
__tablename__ = 'text_item'
__versioned__ = {
'base_classes': (self.Model, ),
'track_property_modifications': True
}
id = sa.Column(sa.Integer, autoincrement=True, primary_key=True)
name = sa.Column(sa.Unicode(255))
age = sa.Column(sa.Integer)
self.User = User
def test_each_column_generates_additional_mod_column(self):
UserHistory = self.User.__versioned__['class']
assert 'name_mod' in UserHistory.__table__.c
column = UserHistory.__table__.c['name_mod']
assert not column.nullable
assert isinstance(column.type, sa.Boolean)
def test_primary_keys_not_included(self):
UserHistory = self.User.__versioned__['class']
assert 'id_mod' not in UserHistory.__table__.c
def test_mod_properties_get_updated(self):
user = self.User(name=u'John')
self.session.add(user)
self.session.commit()
assert user.versions[-1].name_mod
|
<commit_before><commit_msg>Add tests for PropertyModTracker plugin<commit_after>import sqlalchemy as sa
from sqlalchemy_continuum.plugins import PropertyModTrackerPlugin
from tests import TestCase
class TestPropertyModificationsTracking(TestCase):
plugins = [PropertyModTrackerPlugin]
def create_models(self):
class User(self.Model):
__tablename__ = 'text_item'
__versioned__ = {
'base_classes': (self.Model, ),
'track_property_modifications': True
}
id = sa.Column(sa.Integer, autoincrement=True, primary_key=True)
name = sa.Column(sa.Unicode(255))
age = sa.Column(sa.Integer)
self.User = User
def test_each_column_generates_additional_mod_column(self):
UserHistory = self.User.__versioned__['class']
assert 'name_mod' in UserHistory.__table__.c
column = UserHistory.__table__.c['name_mod']
assert not column.nullable
assert isinstance(column.type, sa.Boolean)
def test_primary_keys_not_included(self):
UserHistory = self.User.__versioned__['class']
assert 'id_mod' not in UserHistory.__table__.c
def test_mod_properties_get_updated(self):
user = self.User(name=u'John')
self.session.add(user)
self.session.commit()
assert user.versions[-1].name_mod
|
|
63c2d7536d37113b78fce22c7db94b17a8450b27
|
ureport/stats/migrations/0010_add_index.py
|
ureport/stats/migrations/0010_add_index.py
|
# Generated by Django 2.2.20 on 2021-05-26 16:49
from django.db import migrations
INDEX_SQL = """
CREATE INDEX IF NOT EXISTS stats_pollstats_org_question on stats_pollstats (org_id, question_id) WHERE question_id IS NOT NULL;
"""
class Migration(migrations.Migration):
dependencies = [
("stats", "0009_add_better_indexes"),
]
operations = [migrations.RunSQL(INDEX_SQL)]
|
Add index on poll stats org and question
|
Add index on poll stats org and question
|
Python
|
agpl-3.0
|
Ilhasoft/ureport,Ilhasoft/ureport,rapidpro/ureport,Ilhasoft/ureport,Ilhasoft/ureport,rapidpro/ureport,rapidpro/ureport,rapidpro/ureport
|
Add index on poll stats org and question
|
# Generated by Django 2.2.20 on 2021-05-26 16:49
from django.db import migrations
INDEX_SQL = """
CREATE INDEX IF NOT EXISTS stats_pollstats_org_question on stats_pollstats (org_id, question_id) WHERE question_id IS NOT NULL;
"""
class Migration(migrations.Migration):
dependencies = [
("stats", "0009_add_better_indexes"),
]
operations = [migrations.RunSQL(INDEX_SQL)]
|
<commit_before><commit_msg>Add index on poll stats org and question<commit_after>
|
# Generated by Django 2.2.20 on 2021-05-26 16:49
from django.db import migrations
INDEX_SQL = """
CREATE INDEX IF NOT EXISTS stats_pollstats_org_question on stats_pollstats (org_id, question_id) WHERE question_id IS NOT NULL;
"""
class Migration(migrations.Migration):
dependencies = [
("stats", "0009_add_better_indexes"),
]
operations = [migrations.RunSQL(INDEX_SQL)]
|
Add index on poll stats org and question# Generated by Django 2.2.20 on 2021-05-26 16:49
from django.db import migrations
INDEX_SQL = """
CREATE INDEX IF NOT EXISTS stats_pollstats_org_question on stats_pollstats (org_id, question_id) WHERE question_id IS NOT NULL;
"""
class Migration(migrations.Migration):
dependencies = [
("stats", "0009_add_better_indexes"),
]
operations = [migrations.RunSQL(INDEX_SQL)]
|
<commit_before><commit_msg>Add index on poll stats org and question<commit_after># Generated by Django 2.2.20 on 2021-05-26 16:49
from django.db import migrations
INDEX_SQL = """
CREATE INDEX IF NOT EXISTS stats_pollstats_org_question on stats_pollstats (org_id, question_id) WHERE question_id IS NOT NULL;
"""
class Migration(migrations.Migration):
dependencies = [
("stats", "0009_add_better_indexes"),
]
operations = [migrations.RunSQL(INDEX_SQL)]
|
|
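The ureport migration above creates its partial index with raw SQL, which buys IF NOT EXISTS. On Django 2.2 (the version in the generated header) an equivalent index can instead be declared with models.Index(condition=...), which keeps it visible to makemigrations; a sketch follows. The model_name and field names are inferred from the SQL and are assumptions, not taken from the ureport models themselves.
from django.db import migrations, models
from django.db.models import Q
class Migration(migrations.Migration):
    dependencies = [("stats", "0009_add_better_indexes")]
    operations = [
        migrations.AddIndex(
            model_name="pollstats",
            index=models.Index(
                fields=["org", "question"],
                name="stats_pollstats_org_question",
                condition=Q(question__isnull=False),
            ),
        ),
    ]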
5aebcc61f12087b4f2e594b28264b8601722af93
|
tests/test_fields.py
|
tests/test_fields.py
|
import unittest
from decimal import Decimal as D
from oscar_vat_moss.fields import * # noqa
from django.core.exceptions import ValidationError
class VATINValidatorTest(unittest.TestCase):
VALID_VATINS = (
# VATIN # Company name
('ATU66688202', 'hastexo Professional Services GmbH'),
('ATU66688202', 'HASTEXO PROFESSIONAL SERVICES GMBH'),
('ATU66688202', 'hastexo Professional Services GmbH (Procurement Department)'),
)
INVALID_VATINS = (
# VATIN # Incorrect company name
('ATU66688999', 'Example, Inc.'),
('ATU99999999', 'Acme, Inc'),
)
def test_valid_vatin(self):
validator = VATINValidator(None)
for vatin, name in self.VALID_VATINS:
# Just ensure this doesn't fail
validator.validate_vatin(vatin)
# validator is also callable
validator(vatin)
def test_invalid_vatin(self):
validator = VATINValidator(None)
for vatin, name in self.INVALID_VATINS:
with self.assertRaises(ValidationError):
validator.validate_vatin(vatin)
with self.assertRaises(ValidationError):
# validator is also callable
validator(vatin)
class VATINFieldTest(unittest.TestCase):
def test_default_properties(self):
field = VATINField()
validator_classes = [ v.__class__ for v in field.validators ]
self.assertTrue(VATINValidator in validator_classes)
self.assertEqual(field.max_length, DEFAULT_MAX_LENGTH)
def test_convenience_method(self):
field = vatin()
validator_classes = [ v.__class__ for v in field.validators ]
self.assertTrue(VATINValidator in validator_classes)
self.assertEqual(field.max_length, DEFAULT_MAX_LENGTH)
self.assertEqual(field.name,'vatin')
self.assertTrue(field.blank)
|
Add unit tests for VATINField, VATINValidator
|
Add unit tests for VATINField, VATINValidator
|
Python
|
bsd-3-clause
|
hastexo/django-oscar-vat_moss,arbrandes/django-oscar-vat_moss,fghaas/django-oscar-vat_moss,arbrandes/django-oscar-vat_moss,hastexo/django-oscar-vat_moss,fghaas/django-oscar-vat_moss
|
Add unit tests for VATINField, VATINValidator
|
import unittest
from decimal import Decimal as D
from oscar_vat_moss.fields import * # noqa
from django.core.exceptions import ValidationError
class VATINValidatorTest(unittest.TestCase):
VALID_VATINS = (
# VATIN # Company name
('ATU66688202', 'hastexo Professional Services GmbH'),
('ATU66688202', 'HASTEXO PROFESSIONAL SERVICES GMBH'),
('ATU66688202', 'hastexo Professional Services GmbH (Procurement Department)'),
)
INVALID_VATINS = (
# VATIN # Incorrect company name
('ATU66688999', 'Example, Inc.'),
('ATU99999999', 'Acme, Inc'),
)
def test_valid_vatin(self):
validator = VATINValidator(None)
for vatin, name in self.VALID_VATINS:
# Just ensure this doesn't fail
validator.validate_vatin(vatin)
# validator is also callable
validator(vatin)
def test_invalid_vatin(self):
validator = VATINValidator(None)
for vatin, name in self.INVALID_VATINS:
with self.assertRaises(ValidationError):
validator.validate_vatin(vatin)
with self.assertRaises(ValidationError):
# validator is also callable
validator(vatin)
class VATINFieldTest(unittest.TestCase):
def test_default_properties(self):
field = VATINField()
validator_classes = [ v.__class__ for v in field.validators ]
self.assertTrue(VATINValidator in validator_classes)
self.assertEqual(field.max_length, DEFAULT_MAX_LENGTH)
def test_convenience_method(self):
field = vatin()
validator_classes = [ v.__class__ for v in field.validators ]
self.assertTrue(VATINValidator in validator_classes)
self.assertEqual(field.max_length, DEFAULT_MAX_LENGTH)
self.assertEqual(field.name,'vatin')
self.assertTrue(field.blank)
|
<commit_before><commit_msg>Add unit tests for VATINField, VATINValidator<commit_after>
|
import unittest
from decimal import Decimal as D
from oscar_vat_moss.fields import * # noqa
from django.core.exceptions import ValidationError
class VATINValidatorTest(unittest.TestCase):
VALID_VATINS = (
# VATIN # Company name
('ATU66688202', 'hastexo Professional Services GmbH'),
('ATU66688202', 'HASTEXO PROFESSIONAL SERVICES GMBH'),
('ATU66688202', 'hastexo Professional Services GmbH (Procurement Department)'),
)
INVALID_VATINS = (
# VATIN # Incorrect company name
('ATU66688999', 'Example, Inc.'),
('ATU99999999', 'Acme, Inc'),
)
def test_valid_vatin(self):
validator = VATINValidator(None)
for vatin, name in self.VALID_VATINS:
# Just ensure this doesn't fail
validator.validate_vatin(vatin)
# validator is also callable
validator(vatin)
def test_invalid_vatin(self):
validator = VATINValidator(None)
for vatin, name in self.INVALID_VATINS:
with self.assertRaises(ValidationError):
validator.validate_vatin(vatin)
with self.assertRaises(ValidationError):
# validator is also callable
validator(vatin)
class VATINFieldTest(unittest.TestCase):
def test_default_properties(self):
field = VATINField()
validator_classes = [ v.__class__ for v in field.validators ]
self.assertTrue(VATINValidator in validator_classes)
self.assertEqual(field.max_length, DEFAULT_MAX_LENGTH)
def test_convenience_method(self):
field = vatin()
validator_classes = [ v.__class__ for v in field.validators ]
self.assertTrue(VATINValidator in validator_classes)
self.assertEqual(field.max_length, DEFAULT_MAX_LENGTH)
self.assertEqual(field.name,'vatin')
self.assertTrue(field.blank)
|
Add unit tests for VATINField, VATINValidatorimport unittest
from decimal import Decimal as D
from oscar_vat_moss.fields import * # noqa
from django.core.exceptions import ValidationError
class VATINValidatorTest(unittest.TestCase):
VALID_VATINS = (
# VATIN # Company name
('ATU66688202', 'hastexo Professional Services GmbH'),
('ATU66688202', 'HASTEXO PROFESSIONAL SERVICES GMBH'),
('ATU66688202', 'hastexo Professional Services GmbH (Procurement Department)'),
)
INVALID_VATINS = (
# VATIN # Incorrect company name
('ATU66688999', 'Example, Inc.'),
('ATU99999999', 'Acme, Inc'),
)
def test_valid_vatin(self):
validator = VATINValidator(None)
for vatin, name in self.VALID_VATINS:
# Just ensure this doesn't fail
validator.validate_vatin(vatin)
# validator is also callable
validator(vatin)
def test_invalid_vatin(self):
validator = VATINValidator(None)
for vatin, name in self.INVALID_VATINS:
with self.assertRaises(ValidationError):
validator.validate_vatin(vatin)
with self.assertRaises(ValidationError):
# validator is also callable
validator(vatin)
class VATINFieldTest(unittest.TestCase):
def test_default_properties(self):
field = VATINField()
validator_classes = [ v.__class__ for v in field.validators ]
self.assertTrue(VATINValidator in validator_classes)
self.assertEqual(field.max_length, DEFAULT_MAX_LENGTH)
def test_convenience_method(self):
field = vatin()
validator_classes = [ v.__class__ for v in field.validators ]
self.assertTrue(VATINValidator in validator_classes)
self.assertEqual(field.max_length, DEFAULT_MAX_LENGTH)
self.assertEqual(field.name,'vatin')
self.assertTrue(field.blank)
|
<commit_before><commit_msg>Add unit tests for VATINField, VATINValidator<commit_after>import unittest
from decimal import Decimal as D
from oscar_vat_moss.fields import * # noqa
from django.core.exceptions import ValidationError
class VATINValidatorTest(unittest.TestCase):
VALID_VATINS = (
# VATIN # Company name
('ATU66688202', 'hastexo Professional Services GmbH'),
('ATU66688202', 'HASTEXO PROFESSIONAL SERVICES GMBH'),
('ATU66688202', 'hastexo Professional Services GmbH (Procurement Department)'),
)
INVALID_VATINS = (
# VATIN # Incorrect company name
('ATU66688999', 'Example, Inc.'),
('ATU99999999', 'Acme, Inc'),
)
def test_valid_vatin(self):
validator = VATINValidator(None)
for vatin, name in self.VALID_VATINS:
# Just ensure this doesn't fail
validator.validate_vatin(vatin)
# validator is also callable
validator(vatin)
def test_invalid_vatin(self):
validator = VATINValidator(None)
for vatin, name in self.INVALID_VATINS:
with self.assertRaises(ValidationError):
validator.validate_vatin(vatin)
with self.assertRaises(ValidationError):
# validator is also callable
validator(vatin)
class VATINFieldTest(unittest.TestCase):
def test_default_properties(self):
field = VATINField()
validator_classes = [ v.__class__ for v in field.validators ]
self.assertTrue(VATINValidator in validator_classes)
self.assertEqual(field.max_length, DEFAULT_MAX_LENGTH)
def test_convenience_method(self):
field = vatin()
validator_classes = [ v.__class__ for v in field.validators ]
self.assertTrue(VATINValidator in validator_classes)
self.assertEqual(field.max_length, DEFAULT_MAX_LENGTH)
self.assertEqual(field.name,'vatin')
self.assertTrue(field.blank)
|
|
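In the VATIN test record above, the VALID_VATINS rows pair one registered number with several spellings of the company name (different case, an extra parenthesised suffix), which implies the validator tolerates loose name matches. The real comparison is done inside oscar_vat_moss and the underlying vat_moss lookup and is not reproduced here; the helper below is only an illustrative normalisation showing why those three rows should all be treated as the same company.
def names_probably_match(expected, supplied):
    """Illustrative only: case-insensitive match that ignores a trailing parenthesised suffix."""
    def normalize(name):
        return name.split('(')[0].strip().lower()
    a, b = normalize(expected), normalize(supplied)
    return a.startswith(b) or b.startswith(a)
print(names_probably_match('hastexo Professional Services GmbH',
                           'HASTEXO PROFESSIONAL SERVICES GMBH (Procurement Department)'))  # True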
6c1815e30a66478b3534c42847b088a08ad71c90
|
tests/test_routes.py
|
tests/test_routes.py
|
import pytest
import msgpack
from falcon import API, testing
from pianodb.routes import ValidatorComponent, SongFinish
TOKEN = 'CB80CB12CC0F41FC87CA6F2AC989E27E'
API_PREFIX = '/api/v1'
SONGFINISH_ROUTE = "{API_PREFIX}/songfinish".format(API_PREFIX=API_PREFIX)
@pytest.fixture(scope='module')
def client():
api = API(middleware=ValidatorComponent())
api.add_route(SONGFINISH_ROUTE, SongFinish(token=TOKEN))
return testing.TestClient(api)
def test_songfinish_requires_auth_token(client):
"""
TODO: Document this test.
"""
expected = dict(
title='Authentication required',
description='Missing or invalid authentication token',
)
result = client.simulate_post(path=SONGFINISH_ROUTE)
assert result.status_code == 401 # HTTP 401 Unauthorized
assert result.json == expected
def test_songfinish_requires_msgpack_payloads(client):
"""
TODO: Document this test.
"""
expected = dict(
title='Unsupported media type',
description='Payload must be msgpack',
)
result = client.simulate_post(path=SONGFINISH_ROUTE,
headers={
'X-Auth-Token': TOKEN,
})
assert result.status_code == 415 # HTTP 415 Unsupported Media Type
assert result.json == expected
def test_songfinish_requires_valid_msgpack_payloads(client):
"""
TODO: Document this test.
"""
# A properly formatted payload would be b'\x81\xa6artist\xabJohn Cleese'
malformed_msgpack = b'\x82\xa6artist\xabJohn Cleese'
expected = dict(
title='Bad request',
description='Could not unpack msgpack data',
)
result = client.simulate_post(path=SONGFINISH_ROUTE,
body=malformed_msgpack,
headers={
'X-Auth-Token': TOKEN,
'Content-Type': 'application/msgpack',
})
assert result.status_code == 400 # HTTP 400 Bad Request
assert result.json == expected
# TODO: Test remaining branches and investigate msgpack.exceptions.ExtraData or
# UnicodeDecodeError errors when given a non-msgpack request body.
|
Add initial Falcon API tests
|
Add initial Falcon API tests
|
Python
|
isc
|
reillysiemens/pianodb
|
Add initial Falcon API tests
|
import pytest
import msgpack
from falcon import API, testing
from pianodb.routes import ValidatorComponent, SongFinish
TOKEN = 'CB80CB12CC0F41FC87CA6F2AC989E27E'
API_PREFIX = '/api/v1'
SONGFINISH_ROUTE = "{API_PREFIX}/songfinish".format(API_PREFIX=API_PREFIX)
@pytest.fixture(scope='module')
def client():
api = API(middleware=ValidatorComponent())
api.add_route(SONGFINISH_ROUTE, SongFinish(token=TOKEN))
return testing.TestClient(api)
def test_songfinish_requires_auth_token(client):
"""
TODO: Document this test.
"""
expected = dict(
title='Authentication required',
description='Missing or invalid authentication token',
)
result = client.simulate_post(path=SONGFINISH_ROUTE)
assert result.status_code == 401 # HTTP 401 Unauthorized
assert result.json == expected
def test_songfinish_requires_msgpack_payloads(client):
"""
TODO: Document this test.
"""
expected = dict(
title='Unsupported media type',
description='Payload must be msgpack',
)
result = client.simulate_post(path=SONGFINISH_ROUTE,
headers={
'X-Auth-Token': TOKEN,
})
assert result.status_code == 415 # HTTP 415 Unsupported Media Type
assert result.json == expected
def test_songfinish_requires_valid_msgpack_payloads(client):
"""
TODO: Document this test.
"""
# A properly formatted payload would be b'\x81\xa6artist\xabJohn Cleese'
malformed_msgpack = b'\x82\xa6artist\xabJohn Cleese'
expected = dict(
title='Bad request',
description='Could not unpack msgpack data',
)
result = client.simulate_post(path=SONGFINISH_ROUTE,
body=malformed_msgpack,
headers={
'X-Auth-Token': TOKEN,
'Content-Type': 'application/msgpack',
})
assert result.status_code == 400 # HTTP 400 Bad Request
assert result.json == expected
# TODO: Test remaining branches and investigate msgpack.exceptions.ExtraData or
# UnicodeDecodeError errors when given a non-msgpack request body.
|
<commit_before><commit_msg>Add initial Falcon API tests<commit_after>
|
import pytest
import msgpack
from falcon import API, testing
from pianodb.routes import ValidatorComponent, SongFinish
TOKEN = 'CB80CB12CC0F41FC87CA6F2AC989E27E'
API_PREFIX = '/api/v1'
SONGFINISH_ROUTE = "{API_PREFIX}/songfinish".format(API_PREFIX=API_PREFIX)
@pytest.fixture(scope='module')
def client():
api = API(middleware=ValidatorComponent())
api.add_route(SONGFINISH_ROUTE, SongFinish(token=TOKEN))
return testing.TestClient(api)
def test_songfinish_requires_auth_token(client):
"""
TODO: Document this test.
"""
expected = dict(
title='Authentication required',
description='Missing or invalid authentication token',
)
result = client.simulate_post(path=SONGFINISH_ROUTE)
assert result.status_code == 401 # HTTP 401 Unauthorized
assert result.json == expected
def test_songfinish_requires_msgpack_payloads(client):
"""
TODO: Document this test.
"""
expected = dict(
title='Unsupported media type',
description='Payload must be msgpack',
)
result = client.simulate_post(path=SONGFINISH_ROUTE,
headers={
'X-Auth-Token': TOKEN,
})
assert result.status_code == 415 # HTTP 415 Unsupported Media Type
assert result.json == expected
def test_songfinish_requires_valid_msgpack_payloads(client):
"""
TODO: Document this test.
"""
# A properly formatted payload would be b'\x81\xa6artist\xabJohn Cleese'
malformed_msgpack = b'\x82\xa6artist\xabJohn Cleese'
expected = dict(
title='Bad request',
description='Could not unpack msgpack data',
)
result = client.simulate_post(path=SONGFINISH_ROUTE,
body=malformed_msgpack,
headers={
'X-Auth-Token': TOKEN,
'Content-Type': 'application/msgpack',
})
assert result.status_code == 400 # HTTP 400 Bad Request
assert result.json == expected
# TODO: Test remaining branches and investigate msgpack.exceptions.ExtraData or
# UnicodeDecodeError errors when given a non-msgpack request body.
|
Add initial Falcon API testsimport pytest
import msgpack
from falcon import API, testing
from pianodb.routes import ValidatorComponent, SongFinish
TOKEN = 'CB80CB12CC0F41FC87CA6F2AC989E27E'
API_PREFIX = '/api/v1'
SONGFINISH_ROUTE = "{API_PREFIX}/songfinish".format(API_PREFIX=API_PREFIX)
@pytest.fixture(scope='module')
def client():
api = API(middleware=ValidatorComponent())
api.add_route(SONGFINISH_ROUTE, SongFinish(token=TOKEN))
return testing.TestClient(api)
def test_songfinish_requires_auth_token(client):
"""
TODO: Document this test.
"""
expected = dict(
title='Authentication required',
description='Missing or invalid authentication token',
)
result = client.simulate_post(path=SONGFINISH_ROUTE)
assert result.status_code == 401 # HTTP 401 Unauthorized
assert result.json == expected
def test_songfinish_requires_msgpack_payloads(client):
"""
TODO: Document this test.
"""
expected = dict(
title='Unsupported media type',
description='Payload must be msgpack',
)
result = client.simulate_post(path=SONGFINISH_ROUTE,
headers={
'X-Auth-Token': TOKEN,
})
assert result.status_code == 415 # HTTP 415 Unsupported Media Type
assert result.json == expected
def test_songfinish_requires_valid_msgpack_payloads(client):
"""
TODO: Document this test.
"""
# A properly formatted payload would be b'\x81\xa6artist\xabJohn Cleese'
malformed_msgpack = b'\x82\xa6artist\xabJohn Cleese'
expected = dict(
title='Bad request',
description='Could not unpack msgpack data',
)
result = client.simulate_post(path=SONGFINISH_ROUTE,
body=malformed_msgpack,
headers={
'X-Auth-Token': TOKEN,
'Content-Type': 'application/msgpack',
})
assert result.status_code == 400 # HTTP 400 Bad Request
assert result.json == expected
# TODO: Test remaining branches and investigate msgpack.exceptions.ExtraData or
# UnicodeDecodeError errors when given a non-msgpack request body.
|
<commit_before><commit_msg>Add initial Falcon API tests<commit_after>import pytest
import msgpack
from falcon import API, testing
from pianodb.routes import ValidatorComponent, SongFinish
TOKEN = 'CB80CB12CC0F41FC87CA6F2AC989E27E'
API_PREFIX = '/api/v1'
SONGFINISH_ROUTE = "{API_PREFIX}/songfinish".format(API_PREFIX=API_PREFIX)
@pytest.fixture(scope='module')
def client():
api = API(middleware=ValidatorComponent())
api.add_route(SONGFINISH_ROUTE, SongFinish(token=TOKEN))
return testing.TestClient(api)
def test_songfinish_requires_auth_token(client):
"""
TODO: Document this test.
"""
expected = dict(
title='Authentication required',
description='Missing or invalid authentication token',
)
result = client.simulate_post(path=SONGFINISH_ROUTE)
assert result.status_code == 401 # HTTP 401 Unauthorized
assert result.json == expected
def test_songfinish_requires_msgpack_payloads(client):
"""
TODO: Document this test.
"""
expected = dict(
title='Unsupported media type',
description='Payload must be msgpack',
)
result = client.simulate_post(path=SONGFINISH_ROUTE,
headers={
'X-Auth-Token': TOKEN,
})
assert result.status_code == 415 # HTTP 415 Unsupported Media Type
assert result.json == expected
def test_songfinish_requires_valid_msgpack_payloads(client):
"""
TODO: Document this test.
"""
# A properly formatted payload would be b'\x81\xa6artist\xabJohn Cleese'
malformed_msgpack = b'\x82\xa6artist\xabJohn Cleese'
expected = dict(
title='Bad request',
description='Could not unpack msgpack data',
)
result = client.simulate_post(path=SONGFINISH_ROUTE,
body=malformed_msgpack,
headers={
'X-Auth-Token': TOKEN,
'Content-Type': 'application/msgpack',
})
assert result.status_code == 400 # HTTP 400 Bad Request
assert result.json == expected
# TODO: Test remaining branches and investigate msgpack.exceptions.ExtraData or
# UnicodeDecodeError errors when given a non-msgpack request body.
|
|
fbb6fee97a668e716028f4cecefb698824cb2933
|
tests/test_search.py
|
tests/test_search.py
|
from .helpers import BaseApplicationTest
class TestApplication(BaseApplicationTest):
def test_search_with_query(self):
response = self.client.get('/search?q=email')
assert 200 == response.status_code
def test_search_with_query_and_lot(self):
response = self.client.get('/search?q=email&lot=saas')
assert 200 == response.status_code
def test_search_with_query_and_filters(self):
response = self.client.get('/search?q=email&elasticCloud=True')
assert 200 == response.status_code
|
Add tests for response codes for /search route
|
Add tests for response codes for /search route
|
Python
|
mit
|
alphagov/digitalmarketplace-buyer-frontend,AusDTO/dto-digitalmarketplace-buyer-frontend,AusDTO/dto-digitalmarketplace-buyer-frontend,mtekel/digitalmarketplace-buyer-frontend,alphagov/digitalmarketplace-buyer-frontend,mtekel/digitalmarketplace-buyer-frontend,AusDTO/dto-digitalmarketplace-buyer-frontend,mtekel/digitalmarketplace-buyer-frontend,mtekel/digitalmarketplace-buyer-frontend,alphagov/digitalmarketplace-buyer-frontend,alphagov/digitalmarketplace-buyer-frontend,AusDTO/dto-digitalmarketplace-buyer-frontend
|
Add tests for response codes for /search route
|
from .helpers import BaseApplicationTest
class TestApplication(BaseApplicationTest):
def test_search_with_query(self):
response = self.client.get('/search?q=email')
assert 200 == response.status_code
def test_search_with_query_and_lot(self):
response = self.client.get('/search?q=email&lot=saas')
assert 200 == response.status_code
def test_search_with_query_and_filters(self):
response = self.client.get('/search?q=email&elasticCloud=True')
assert 200 == response.status_code
|
<commit_before><commit_msg>Add tests for response codes for /search route<commit_after>
|
from .helpers import BaseApplicationTest
class TestApplication(BaseApplicationTest):
def test_search_with_query(self):
response = self.client.get('/search?q=email')
assert 200 == response.status_code
def test_search_with_query_and_lot(self):
response = self.client.get('/search?q=email&lot=saas')
assert 200 == response.status_code
def test_search_with_query_and_filters(self):
response = self.client.get('/search?q=email&elasticCloud=True')
assert 200 == response.status_code
|
Add tests for response codes for /search routefrom .helpers import BaseApplicationTest
class TestApplication(BaseApplicationTest):
def test_search_with_query(self):
response = self.client.get('/search?q=email')
assert 200 == response.status_code
def test_search_with_query_and_lot(self):
response = self.client.get('/search?q=email&lot=saas')
assert 200 == response.status_code
def test_search_with_query_and_filters(self):
response = self.client.get('/search?q=email&elasticCloud=True')
assert 200 == response.status_code
|
<commit_before><commit_msg>Add tests for response codes for /search route<commit_after>from .helpers import BaseApplicationTest
class TestApplication(BaseApplicationTest):
def test_search_with_query(self):
response = self.client.get('/search?q=email')
assert 200 == response.status_code
def test_search_with_query_and_lot(self):
response = self.client.get('/search?q=email&lot=saas')
assert 200 == response.status_code
def test_search_with_query_and_filters(self):
response = self.client.get('/search?q=email&elasticCloud=True')
assert 200 == response.status_code
|
|
41a4a37dae64813b6e6c2e1b78dc75cd0847a587
|
plyer/platforms/ios/spatialorientation.py
|
plyer/platforms/ios/spatialorientation.py
|
'''
iOS Spatial Orientation
-----------------------
'''
from plyer.facades import SpatialOrientation
from pyobjus import autoclass
class iOSSpatialOrientation(SpatialOrientation):
def __init__(self):
self.bridge = autoclass('bridge').alloc().init()
self.bridge.motionManager.setdeviceMotionUpdateInterval_(0.1)
def _enable_listener(self):
self.bridge.startDeviceMotion()
def _disable_listener(self):
self.bridge.stopDeviceMotion()
def _get_orientation(self):
return (
self.bridge.sp_yaw,
self.bridge.sp_pitch,
self.bridge.sp_roll)
def instance():
return iOSSpatialOrientation()
|
Add iOS api for spatial orientation
|
Add iOS api for spatial orientation
|
Python
|
mit
|
kivy/plyer,KeyWeeUsr/plyer,KeyWeeUsr/plyer,kivy/plyer,KeyWeeUsr/plyer,kivy/plyer
|
Add iOS api for spatial orientation
|
'''
iOS Spatial Orientation
-----------------------
'''
from plyer.facades import SpatialOrientation
from pyobjus import autoclass
class iOSSpatialOrientation(SpatialOrientation):
def __init__(self):
self.bridge = autoclass('bridge').alloc().init()
self.bridge.motionManager.setdeviceMotionUpdateInterval_(0.1)
def _enable_listener(self):
self.bridge.startDeviceMotion()
def _disable_listener(self):
self.bridge.stopDeviceMotion()
def _get_orientation(self):
return (
self.bridge.sp_yaw,
self.bridge.sp_pitch,
self.bridge.sp_roll)
def instance():
return iOSSpatialOrientation()
|
<commit_before><commit_msg>Add iOS api for spatial orientation<commit_after>
|
'''
iOS Spatial Orientation
-----------------------
'''
from plyer.facades import SpatialOrientation
from pyobjus import autoclass
class iOSSpatialOrientation(SpatialOrientation):
def __init__(self):
self.bridge = autoclass('bridge').alloc().init()
self.bridge.motionManager.setdeviceMotionUpdateInterval_(0.1)
def _enable_listener(self):
self.bridge.startDeviceMotion()
def _disable_listener(self):
self.bridge.stopDeviceMotion()
def _get_orientation(self):
return (
self.bridge.sp_yaw,
self.bridge.sp_pitch,
self.bridge.sp_roll)
def instance():
return iOSSpatialOrientation()
|
Add iOS api for spatial orientation'''
iOS Spatial Orientation
-----------------------
'''
from plyer.facades import SpatialOrientation
from pyobjus import autoclass
class iOSSpatialOrientation(SpatialOrientation):
def __init__(self):
self.bridge = autoclass('bridge').alloc().init()
self.bridge.motionManager.setdeviceMotionUpdateInterval_(0.1)
def _enable_listener(self):
self.bridge.startDeviceMotion()
def _disable_listener(self):
self.bridge.stopDeviceMotion()
def _get_orientation(self):
return (
self.bridge.sp_yaw,
self.bridge.sp_pitch,
self.bridge.sp_roll)
def instance():
return iOSSpatialOrientation()
|
<commit_before><commit_msg>Add iOS api for spatial orientation<commit_after>'''
iOS Spatial Orientation
-----------------------
'''
from plyer.facades import SpatialOrientation
from pyobjus import autoclass
class iOSSpatialOrientation(SpatialOrientation):
def __init__(self):
self.bridge = autoclass('bridge').alloc().init()
self.bridge.motionManager.setdeviceMotionUpdateInterval_(0.1)
def _enable_listener(self):
self.bridge.startDeviceMotion()
def _disable_listener(self):
self.bridge.stopDeviceMotion()
def _get_orientation(self):
return (
self.bridge.sp_yaw,
self.bridge.sp_pitch,
self.bridge.sp_roll)
def instance():
return iOSSpatialOrientation()
|
|
d69138b8570a13063a5a44eee5cbf932513d3e3c
|
alembic/versions/bbba2255e00_add_email_validation_column.py
|
alembic/versions/bbba2255e00_add_email_validation_column.py
|
"""add email validation column
Revision ID: bbba2255e00
Revises: 38a8a6299086
Create Date: 2014-12-16 14:18:45.290836
"""
# revision identifiers, used by Alembic.
revision = 'bbba2255e00'
down_revision = '38a8a6299086'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('user', sa.Column('valid_email', sa.Boolean, default=False))
query = 'UPDATE "user" SET valid_email=false;'
op.execute(query)
def downgrade():
op.drop_column('user', 'valid_email')
|
Add validation column for email address.
|
Add validation column for email address.
|
Python
|
agpl-3.0
|
jean/pybossa,geotagx/pybossa,jean/pybossa,geotagx/pybossa,PyBossa/pybossa,OpenNewsLabs/pybossa,OpenNewsLabs/pybossa,stefanhahmann/pybossa,PyBossa/pybossa,inteligencia-coletiva-lsd/pybossa,Scifabric/pybossa,stefanhahmann/pybossa,inteligencia-coletiva-lsd/pybossa,Scifabric/pybossa
|
Add validation column for email address.
|
"""add email validation column
Revision ID: bbba2255e00
Revises: 38a8a6299086
Create Date: 2014-12-16 14:18:45.290836
"""
# revision identifiers, used by Alembic.
revision = 'bbba2255e00'
down_revision = '38a8a6299086'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('user', sa.Column('valid_email', sa.Boolean, default=False))
query = 'UPDATE "user" SET valid_email=false;'
op.execute(query)
def downgrade():
op.drop_column('user', 'valid_email')
|
<commit_before><commit_msg>Add validation column for email address.<commit_after>
|
"""add email validation column
Revision ID: bbba2255e00
Revises: 38a8a6299086
Create Date: 2014-12-16 14:18:45.290836
"""
# revision identifiers, used by Alembic.
revision = 'bbba2255e00'
down_revision = '38a8a6299086'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('user', sa.Column('valid_email', sa.Boolean, default=False))
query = 'UPDATE "user" SET valid_email=false;'
op.execute(query)
def downgrade():
op.drop_column('user', 'valid_email')
|
Add validation column for email address."""add email validation column
Revision ID: bbba2255e00
Revises: 38a8a6299086
Create Date: 2014-12-16 14:18:45.290836
"""
# revision identifiers, used by Alembic.
revision = 'bbba2255e00'
down_revision = '38a8a6299086'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('user', sa.Column('valid_email', sa.Boolean, default=False))
query = 'UPDATE "user" SET valid_email=false;'
op.execute(query)
def downgrade():
op.drop_column('user', 'valid_email')
|
<commit_before><commit_msg>Add validation column for email address.<commit_after>"""add email validation column
Revision ID: bbba2255e00
Revises: 38a8a6299086
Create Date: 2014-12-16 14:18:45.290836
"""
# revision identifiers, used by Alembic.
revision = 'bbba2255e00'
down_revision = '38a8a6299086'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('user', sa.Column('valid_email', sa.Boolean, default=False))
query = 'UPDATE "user" SET valid_email=false;'
op.execute(query)
def downgrade():
op.drop_column('user', 'valid_email')
|
|
64216937d00f7a5437da022eeff0a543059e89ed
|
code/supervise_gene_mentions.py
|
code/supervise_gene_mentions.py
|
#! /usr/bin/env python3
#
# Perform distant supervision on gene mentions
#
# Each input row contains a mention and the sentence it is from
#
import fileinput
import json
from dstruct.Sentence import Sentence
from dstruct.Mention import Mention
from helper.dictionaries import load_dict
# Perform the supervision
def supervise(mention, sentence):
# If it's a gene symbol, and not an English word, and not a medical
# acronym, and not a NIH or NSF grant code, then label it as correct
# XXX (Matteo) Taken from pharm
mention_word = mention.words[0].word
if mention_word in genes_dict and \
mention_word.lower() not in english_dict and \
mention_word not in med_acrons_dict and \
mention_word not in nih_grants_dict and\
mention_word not in nsf_grants_dict:
mention.is_correct = True
# Not correct if the previous word is one of the following keywords.
# XXX (Matteo) Taken from pharm
prev_word = sentence.get_prev_wordobject(mention)
if prev_word != None and prev_word.word.lower() in ['figure', 'table', 'individual', "figures", "tables", "individuals"]:
mention.is_correct = False
if frozenset([sentence.doc_id, str(sentence.sent_id), mention_word]) in pos_mentions_dict:
mention.is_correct = True
if frozenset([sentence.doc_id, str(sentence.sent_id), mention_word]) in neg_mentions_dict:
mention.is_correct = False
# Load the dictionaries that we need
genes_dict = load_dict("genes")
english_dict = load_dict("english")
nih_grants_dict = load_dict("nih_grants")
nsf_grants_dict = load_dict("nsf_grants")
med_acrons_dict = load_dict("med_acrons")
pos_mentions_dict = load_dict("pos_gene_mentions")
neg_mentions_dict = load_dict("neg_gene_mentions")
# Process input
with fileinput.input() as input_files:
for line in input_files:
row = json.loads(line)
sentence = Sentence(row["doc_id"], row["sent_id"], row["wordidxs"],
row["sentence_words"], row["poses"], row["ners"], row["lemmas"],
row["dep_paths"], row["dep_parents"], row["bounding_boxes"])
mention = Mention(row["type"], row["entity"],
sentence.words[row["start_word_idx"]:row["end_word_idx"]+1])
# Perform supervision
supervise(mention, sentence)
# Print mention
print(mention.json_dump())
|
Add script to supervise gene mentions
|
Add script to supervise gene mentions
|
Python
|
apache-2.0
|
rionda/dd-genomics,amwenger/dd-genomics,amwenger/dd-genomics,amwenger/dd-genomics,rionda/dd-genomics,HazyResearch/dd-genomics,HazyResearch/dd-genomics,HazyResearch/dd-genomics,HazyResearch/dd-genomics,HazyResearch/dd-genomics
|
Add script to supervise gene mentions
|
#! /usr/bin/env python3
#
# Perform distant supervision on gene mentions
#
# Each input row contains a mention and the sentence it is from
#
import fileinput
import json
from dstruct.Sentence import Sentence
from dstruct.Mention import Mention
from helper.dictionaries import load_dict
# Perform the supervision
def supervise(mention, sentence):
# If it's a gene symbol, and not an English word, and not a medical
# acronym, and not a NIH or NSF grant code, then label it as correct
# XXX (Matteo) Taken from pharm
mention_word = mention.words[0].word
if mention_word in genes_dict and \
mention_word.lower() not in english_dict and \
mention_word not in med_acrons_dict and \
mention_word not in nih_grants_dict and\
mention_word not in nsf_grants_dict:
mention.is_correct = True
# Not correct if the previous word is one of the following keywords.
# XXX (Matteo) Taken from pharm
prev_word = sentence.get_prev_wordobject(mention)
if prev_word != None and prev_word.word.lower() in ['figure', 'table', 'individual', "figures", "tables", "individuals"]:
mention.is_correct = False
if frozenset([sentence.doc_id, str(sentence.sent_id), mention_word]) in pos_mentions_dict:
mention.is_correct = True
if frozenset([sentence.doc_id, str(sentence.sent_id), mention_word]) in neg_mentions_dict:
mention.is_correct = False
# Load the dictionaries that we need
genes_dict = load_dict("genes")
english_dict = load_dict("english")
nih_grants_dict = load_dict("nih_grants")
nsf_grants_dict = load_dict("nsf_grants")
med_acrons_dict = load_dict("med_acrons")
pos_mentions_dict = load_dict("pos_gene_mentions")
neg_mentions_dict = load_dict("neg_gene_mentions")
# Process input
with fileinput.input() as input_files:
for line in input_files:
row = json.loads(line)
sentence = Sentence(row["doc_id"], row["sent_id"], row["wordidxs"],
row["sentence_words"], row["poses"], row["ners"], row["lemmas"],
row["dep_paths"], row["dep_parents"], row["bounding_boxes"])
mention = Mention(row["type"], row["entity"],
sentence.words[row["start_word_idx"]:row["end_word_idx"]+1])
# Perform supervision
supervise(mention, sentence)
# Print mention
print(mention.json_dump())
|
<commit_before><commit_msg>Add script to supervise gene mentions<commit_after>
|
#! /usr/bin/env python3
#
# Perform distant supervision on gene mentions
#
# Each input row contains a mention and the sentence it is from
#
import fileinput
import json
from dstruct.Sentence import Sentence
from dstruct.Mention import Mention
from helper.dictionaries import load_dict
# Perform the supervision
def supervise(mention, sentence):
# If it's a gene symbol, and not an English word, and not a medical
# acronym, and not a NIH or NSF grant code, then label it as correct
# XXX (Matteo) Taken from pharm
mention_word = mention.words[0].word
if mention_word in genes_dict and \
mention_word.lower() not in english_dict and \
mention_word not in med_acrons_dict and \
mention_word not in nih_grants_dict and\
mention_word not in nsf_grants_dict:
mention.is_correct = True
# Not correct if the previous word is one of the following keywords.
# XXX (Matteo) Taken from pharm
prev_word = sentence.get_prev_wordobject(mention)
if prev_word != None and prev_word.word.lower() in ['figure', 'table', 'individual', "figures", "tables", "individuals"]:
mention.is_correct = False
if frozenset([sentence.doc_id, str(sentence.sent_id), mention_word]) in pos_mentions_dict:
mention.is_correct = True
if frozenset([sentence.doc_id, str(sentence.sent_id), mention_word]) in neg_mentions_dict:
mention.is_correct = False
# Load the dictionaries that we need
genes_dict = load_dict("genes")
english_dict = load_dict("english")
nih_grants_dict = load_dict("nih_grants")
nsf_grants_dict = load_dict("nsf_grants")
med_acrons_dict = load_dict("med_acrons")
pos_mentions_dict = load_dict("pos_gene_mentions")
neg_mentions_dict = load_dict("neg_gene_mentions")
# Process input
with fileinput.input() as input_files:
for line in input_files:
row = json.loads(line)
sentence = Sentence(row["doc_id"], row["sent_id"], row["wordidxs"],
row["sentence_words"], row["poses"], row["ners"], row["lemmas"],
row["dep_paths"], row["dep_parents"], row["bounding_boxes"])
mention = Mention(row["type"], row["entity"],
sentence.words[row["start_word_idx"]:row["end_word_idx"]+1])
# Perform supervision
supervise(mention, sentence)
# Print mention
print(mention.json_dump())
|
Add script to supervise gene mentions#! /usr/bin/env python3
#
# Perform distant supervision on gene mentions
#
# Each input row contains a mention and the sentence it is from
#
import fileinput
import json
from dstruct.Sentence import Sentence
from dstruct.Mention import Mention
from helper.dictionaries import load_dict
# Perform the supervision
def supervise(mention, sentence):
# If it's a gene symbol, and not an English word, and not a medical
# acronym, and not a NIH or NSF grant code, then label it as correct
# XXX (Matteo) Taken from pharm
mention_word = mention.words[0].word
if mention_word in genes_dict and \
mention_word.lower() not in english_dict and \
mention_word not in med_acrons_dict and \
mention_word not in nih_grants_dict and\
mention_word not in nsf_grants_dict:
mention.is_correct = True
# Not correct if the previous word is one of the following keywords.
# XXX (Matteo) Taken from pharm
prev_word = sentence.get_prev_wordobject(mention)
if prev_word != None and prev_word.word.lower() in ['figure', 'table', 'individual', "figures", "tables", "individuals"]:
mention.is_correct = False
if frozenset([sentence.doc_id, str(sentence.sent_id), mention_word]) in pos_mentions_dict:
mention.is_correct = True
if frozenset([sentence.doc_id, str(sentence.sent_id), mention_word]) in neg_mentions_dict:
mention.is_correct = False
# Load the dictionaries that we need
genes_dict = load_dict("genes")
english_dict = load_dict("english")
nih_grants_dict = load_dict("nih_grants")
nsf_grants_dict = load_dict("nsf_grants")
med_acrons_dict = load_dict("med_acrons")
pos_mentions_dict = load_dict("pos_gene_mentions")
neg_mentions_dict = load_dict("neg_gene_mentions")
# Process input
with fileinput.input() as input_files:
for line in input_files:
row = json.loads(line)
sentence = Sentence(row["doc_id"], row["sent_id"], row["wordidxs"],
row["sentence_words"], row["poses"], row["ners"], row["lemmas"],
row["dep_paths"], row["dep_parents"], row["bounding_boxes"])
mention = Mention(row["type"], row["entity"],
sentence.words[row["start_word_idx"]:row["end_word_idx"]+1])
# Perform supervision
supervise(mention, sentence)
# Print mention
print(mention.json_dump())
|
<commit_before><commit_msg>Add script to supervise gene mentions<commit_after>#! /usr/bin/env python3
#
# Perform distant supervision on gene mentions
#
# Each input row contains a mention and the sentence it is from
#
import fileinput
import json
from dstruct.Sentence import Sentence
from dstruct.Mention import Mention
from helper.dictionaries import load_dict
# Perform the supervision
def supervise(mention, sentence):
# If it's a gene symbol, and not an English word, and not a medical
# acronym, and not a NIH or NSF grant code, then label it as correct
# XXX (Matteo) Taken from pharm
mention_word = mention.words[0].word
if mention_word in genes_dict and \
mention_word.lower() not in english_dict and \
mention_word not in med_acrons_dict and \
mention_word not in nih_grants_dict and\
mention_word not in nsf_grants_dict:
mention.is_correct = True
# Not correct if the previous word is one of the following keywords.
# XXX (Matteo) Taken from pharm
prev_word = sentence.get_prev_wordobject(mention)
if prev_word != None and prev_word.word.lower() in ['figure', 'table', 'individual', "figures", "tables", "individuals"]:
mention.is_correct = False
if frozenset([sentence.doc_id, str(sentence.sent_id), mention_word]) in pos_mentions_dict:
mention.is_correct = True
if frozenset([sentence.doc_id, str(sentence.sent_id), mention_word]) in neg_mentions_dict:
mention.is_correct = False
# Load the dictionaries that we need
genes_dict = load_dict("genes")
english_dict = load_dict("english")
nih_grants_dict = load_dict("nih_grants")
nsf_grants_dict = load_dict("nsf_grants")
med_acrons_dict = load_dict("med_acrons")
pos_mentions_dict = load_dict("pos_gene_mentions")
neg_mentions_dict = load_dict("neg_gene_mentions")
# Process input
with fileinput.input() as input_files:
for line in input_files:
row = json.loads(line)
sentence = Sentence(row["doc_id"], row["sent_id"], row["wordidxs"],
row["sentence_words"], row["poses"], row["ners"], row["lemmas"],
row["dep_paths"], row["dep_parents"], row["bounding_boxes"])
mention = Mention(row["type"], row["entity"],
sentence.words[row["start_word_idx"]:row["end_word_idx"]+1])
# Perform supervision
supervise(mention, sentence)
# Print mention
print(mention.json_dump())
|
|
630e9796a2cdeb4d2b6d132bef362b5bf6cd1ff0
|
databaker/tutorial_loader.py
|
databaker/tutorial_loader.py
|
# Based on altair tutorial loader:
# https://github.com/altair-viz/altair/blob/273a1fcf9cec1956474af755d5fe32f0e3f0aee8/altair/tutorial.py
# Copyright (c) 2015, Brian E. Granger and Jake Vanderplas
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of altair nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import shutil
SRC_PATH = os.path.join(
os.path.split(os.path.abspath(__file__)
)[0], 'tutorial')
DEST_PATH = os.path.relpath('DatabakerTutorial')
def copy_tutorial(overwrite=False):
"""Copy the Databaker tutorial notebooks into ./DatabakerTutorial."""
if os.path.isdir(DEST_PATH) and overwrite:
print('Removing old tutorial directory: {}'.format(DEST_PATH))
shutil.rmtree(DEST_PATH, ignore_errors=True)
if os.path.isdir(DEST_PATH):
raise RuntimeError('{} already exists, run with overwrite=True to discard *all* existing files in tutorial directory'.format(DEST_PATH))
print('Copying notebooks into fresh tutorial directory: {}'.format(DEST_PATH))
shutil.copytree(SRC_PATH, DEST_PATH)
def tutorial(overwrite=False):
"""Copy the Databaker tutorial notebooks into ./DatabakerTutorial and show a link in the notebook."""
copy_tutorial(overwrite=overwrite)
print('Click on the following notebooks to explore the tutorial:')
from IPython.display import FileLinks, display
file_links = FileLinks(path=DEST_PATH,
included_suffixes=['.ipynb'],
recursive=False)
display(file_links)
|
Add tutorial loader script based on one in altair
|
Add tutorial loader script based on one in altair
List tutorials from within Jupyter.
|
Python
|
agpl-3.0
|
scraperwiki/databaker,scraperwiki/databaker
|
Add tutorial loader script based on one in altair
List tutorials from within Jupyter.
|
# Based on altair tutorial loader:
# https://github.com/altair-viz/altair/blob/273a1fcf9cec1956474af755d5fe32f0e3f0aee8/altair/tutorial.py
# Copyright (c) 2015, Brian E. Granger and Jake Vanderplas
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of altair nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import shutil
SRC_PATH = os.path.join(
os.path.split(os.path.abspath(__file__)
)[0], 'tutorial')
DEST_PATH = os.path.relpath('DatabakerTutorial')
def copy_tutorial(overwrite=False):
"""Copy the Databaker tutorial notebooks into ./DatabakerTutorial."""
if os.path.isdir(DEST_PATH) and overwrite:
print('Removing old tutorial directory: {}'.format(DEST_PATH))
shutil.rmtree(DEST_PATH, ignore_errors=True)
if os.path.isdir(DEST_PATH):
raise RuntimeError('{} already exists, run with overwrite=True to discard *all* existing files in tutorial directory'.format(DEST_PATH))
print('Copying notebooks into fresh tutorial directory: {}'.format(DEST_PATH))
shutil.copytree(SRC_PATH, DEST_PATH)
def tutorial(overwrite=False):
"""Copy the Databaker tutorial notebooks into ./DatabakerTutorial and show a link in the notebook."""
copy_tutorial(overwrite=overwrite)
print('Click on the following notebooks to explore the tutorial:')
from IPython.display import FileLinks, display
file_links = FileLinks(path=DEST_PATH,
included_suffixes=['.ipynb'],
recursive=False)
display(file_links)
|
<commit_before><commit_msg>Add tutorial loader script based on one in altair
List tutorials from within Jupyter.<commit_after>
|
# Based on altair tutorial loader:
# https://github.com/altair-viz/altair/blob/273a1fcf9cec1956474af755d5fe32f0e3f0aee8/altair/tutorial.py
# Copyright (c) 2015, Brian E. Granger and Jake Vanderplas
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of altair nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import shutil
SRC_PATH = os.path.join(
os.path.split(os.path.abspath(__file__)
)[0], 'tutorial')
DEST_PATH = os.path.relpath('DatabakerTutorial')
def copy_tutorial(overwrite=False):
"""Copy the Databaker tutorial notebooks into ./DatabakerTutorial."""
if os.path.isdir(DEST_PATH) and overwrite:
print('Removing old tutorial directory: {}'.format(DEST_PATH))
shutil.rmtree(DEST_PATH, ignore_errors=True)
if os.path.isdir(DEST_PATH):
raise RuntimeError('{} already exists, run with overwrite=True to discard *all* existing files in tutorial directory'.format(DEST_PATH))
print('Copying notebooks into fresh tutorial directory: {}'.format(DEST_PATH))
shutil.copytree(SRC_PATH, DEST_PATH)
def tutorial(overwrite=False):
"""Copy the Databaker tutorial notebooks into ./DatabakerTutorial and show a link in the notebook."""
copy_tutorial(overwrite=overwrite)
print('Click on the following notebooks to explore the tutorial:')
from IPython.display import FileLinks, display
file_links = FileLinks(path=DEST_PATH,
included_suffixes=['.ipynb'],
recursive=False)
display(file_links)
|
Add tutorial loader script based on one in altair
List tutorials from within Jupyter.# Based on altair tutorial loader:
# https://github.com/altair-viz/altair/blob/273a1fcf9cec1956474af755d5fe32f0e3f0aee8/altair/tutorial.py
# Copyright (c) 2015, Brian E. Granger and Jake Vanderplas
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of altair nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import shutil
SRC_PATH = os.path.join(
os.path.split(os.path.abspath(__file__)
)[0], 'tutorial')
DEST_PATH = os.path.relpath('DatabakerTutorial')
def copy_tutorial(overwrite=False):
"""Copy the Databaker tutorial notebooks into ./DatabakerTutorial."""
if os.path.isdir(DEST_PATH) and overwrite:
print('Removing old tutorial directory: {}'.format(DEST_PATH))
shutil.rmtree(DEST_PATH, ignore_errors=True)
if os.path.isdir(DEST_PATH):
raise RuntimeError('{} already exists, run with overwrite=True to discard *all* existing files in tutorial directory'.format(DEST_PATH))
print('Copying notebooks into fresh tutorial directory: {}'.format(DEST_PATH))
shutil.copytree(SRC_PATH, DEST_PATH)
def tutorial(overwrite=False):
"""Copy the Databaker tutorial notebooks into ./DatabakerTutorial and show a link in the notebook."""
copy_tutorial(overwrite=overwrite)
print('Click on the following notebooks to explore the tutorial:')
from IPython.display import FileLinks, display
file_links = FileLinks(path=DEST_PATH,
included_suffixes=['.ipynb'],
recursive=False)
display(file_links)
|
<commit_before><commit_msg>Add tutorial loader script based on one in altair
List tutorials from within Jupyter.<commit_after># Based on altair tutorial loader:
# https://github.com/altair-viz/altair/blob/273a1fcf9cec1956474af755d5fe32f0e3f0aee8/altair/tutorial.py
# Copyright (c) 2015, Brian E. Granger and Jake Vanderplas
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of altair nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import shutil
SRC_PATH = os.path.join(
os.path.split(os.path.abspath(__file__)
)[0], 'tutorial')
DEST_PATH = os.path.relpath('DatabakerTutorial')
def copy_tutorial(overwrite=False):
"""Copy the Databaker tutorial notebooks into ./DatabakerTutorial."""
if os.path.isdir(DEST_PATH) and overwrite:
print('Removing old tutorial directory: {}'.format(DEST_PATH))
shutil.rmtree(DEST_PATH, ignore_errors=True)
if os.path.isdir(DEST_PATH):
raise RuntimeError('{} already exists, run with overwrite=True to discard *all* existing files in tutorial directory'.format(DEST_PATH))
print('Copying notebooks into fresh tutorial directory: {}'.format(DEST_PATH))
shutil.copytree(SRC_PATH, DEST_PATH)
def tutorial(overwrite=False):
"""Copy the Databaker tutorial notebooks into ./DatabakerTutorial and show a link in the notebook."""
copy_tutorial(overwrite=overwrite)
print('Click on the following notebooks to explore the tutorial:')
from IPython.display import FileLinks, display
file_links = FileLinks(path=DEST_PATH,
included_suffixes=['.ipynb'],
recursive=False)
display(file_links)
|
|
11abd1c48968e0fd14eb8879fd64af35af9e1e20
|
pyfwk/struc/dbrow.py
|
pyfwk/struc/dbrow.py
|
#!/usr/bin/env python
"""
dbrow.py: DBRow is a struct describing an sqlite database table row
"""
from dbcol import *
# ----------------------------DATABASE-COLUMN-----------------------------#
class DBRow:
@staticmethod
def dict(columns, values):
if (values is not None):
if (len(columns) == len(values)):
dict = {}
i = 0
for col in columns:
dict[col.name] = values[i]
i += 1
return dict
else:
raise ValueError('columns do not match values')
else:
return None
@staticmethod
def sqlForRowInsert(table, columns, values):
c = len(columns)
v = len(values)
sql = 'INSERT INTO "{}" ('.format(table)
i = 1
for col in columns:
sql += '"{}"'.format(col.name)
if (i < c):
sql += ','
i += 1
sql += ') VALUES ('
i = 1
# allow for first column autoincrement
if (v == (c - 1)):
sql += 'NULL,'
i += 1
for val in values:
if (isinstance(val, int)):
# don't need quotes in integer values
sql += str(val)
elif (val == 'NULL'):
# keep quotes off of NULL
sql += val
else:
# wrap value with quotes
sql += '"{}"'.format(val)
if (i < v):
sql += ','
i += 1
sql += ');'
return sql
# ---------------------------------EXPORT---------------------------------#
__all__ = ['DBRow']
# ----------------------------------MAIN----------------------------------#
def main():
f1 = DBCol('f1', 'INTEGER')
f2 = DBCol('f2', 'TEXT')
f3 = DBCol('f3', 'TEXT')
cols = [f1, f2, f3]
vals = [1, 'Test', 'Third']
print DBRow.dict(cols, vals)
print DBRow.sqlForRowInsert('sample', cols, vals)
if __name__ == '__main__':
main()
|
Create database row class (DBRow)
|
Create database row class (DBRow)
|
Python
|
mit
|
rlinguri/pyfwk
|
Create database row class (DBRow)
|
#!/usr/bin/env python
"""
dbrow.py: DBRow is a struct describing an sqlite database table row
"""
from dbcol import *
# ----------------------------DATABASE-COLUMN-----------------------------#
class DBRow:
@staticmethod
def dict(columns, values):
if (values is not None):
if (len(columns) == len(values)):
dict = {}
i = 0
for col in columns:
dict[col.name] = values[i]
i += 1
return dict
else:
raise ValueError('columns do not match values')
else:
return None
@staticmethod
def sqlForRowInsert(table, columns, values):
c = len(columns)
v = len(values)
sql = 'INSERT INTO "{}" ('.format(table)
i = 1
for col in columns:
sql += '"{}"'.format(col.name)
if (i < c):
sql += ','
i += 1
sql += ') VALUES ('
i = 1
# allow for first column autoincrement
if (v == (c - 1)):
sql += 'NULL,'
i += 1
for val in values:
if (isinstance(val, int)):
# don't need quotes in integer values
sql += str(val)
elif (val == 'NULL'):
# keep quotes off of NULL
sql += val
else:
# wrap value with quotes
sql += '"{}"'.format(val)
if (i < v):
sql += ','
i += 1
sql += ');'
return sql
# ---------------------------------EXPORT---------------------------------#
__all__ = ['DBRow']
# ----------------------------------MAIN----------------------------------#
def main():
f1 = DBCol('f1', 'INTEGER')
f2 = DBCol('f2', 'TEXT')
f3 = DBCol('f3', 'TEXT')
cols = [f1, f2, f3]
vals = [1, 'Test', 'Third']
print DBRow.dict(cols, vals)
print DBRow.sqlForRowInsert('sample', cols, vals)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Create database row class (DBRow)<commit_after>
|
#!/usr/bin/env python
"""
dbrow.py: DBRow is a struct describing an sqlite database table row
"""
from dbcol import *
# ----------------------------DATABASE-COLUMN-----------------------------#
class DBRow:
@staticmethod
def dict(columns, values):
if (values is not None):
if (len(columns) == len(values)):
dict = {}
i = 0
for col in columns:
dict[col.name] = values[i]
i += 1
return dict
else:
raise ValueError('columns do not match values')
else:
return None
@staticmethod
def sqlForRowInsert(table, columns, values):
c = len(columns)
v = len(values)
sql = 'INSERT INTO "{}" ('.format(table)
i = 1
for col in columns:
sql += '"{}"'.format(col.name)
if (i < c):
sql += ','
i += 1
sql += ') VALUES ('
i = 1
# allow for first column autoincrement
if (v == (c - 1)):
sql += 'NULL,'
i += 1
for val in values:
if (isinstance(val, int)):
# don't need quotes in integer values
sql += str(val)
elif (val == 'NULL'):
# keep quotes off of NULL
sql += val
else:
# wrap value with quotes
sql += '"{}"'.format(val)
if (i < v):
sql += ','
i += 1
sql += ');'
return sql
# ---------------------------------EXPORT---------------------------------#
__all__ = ['DBRow']
# ----------------------------------MAIN----------------------------------#
def main():
f1 = DBCol('f1', 'INTEGER')
f2 = DBCol('f2', 'TEXT')
f3 = DBCol('f3', 'TEXT')
cols = [f1, f2, f3]
vals = [1, 'Test', 'Third']
print DBRow.dict(cols, vals)
print DBRow.sqlForRowInsert('sample', cols, vals)
if __name__ == '__main__':
main()
|
Create database row class (DBRow)#!/usr/bin/env python
"""
dbrow.py: DBRow is a struct describing an sqlite database table row
"""
from dbcol import *
# ----------------------------DATABASE-COLUMN-----------------------------#
class DBRow:
@staticmethod
def dict(columns, values):
if (values is not None):
if (len(columns) == len(values)):
dict = {}
i = 0
for col in columns:
dict[col.name] = values[i]
i += 1
return dict
else:
raise ValueError('columns do not match values')
else:
return None
@staticmethod
def sqlForRowInsert(table, columns, values):
c = len(columns)
v = len(values)
sql = 'INSERT INTO "{}" ('.format(table)
i = 1
for col in columns:
sql += '"{}"'.format(col.name)
if (i < c):
sql += ','
i += 1
sql += ') VALUES ('
i = 1
# allow for first column autoincrement
if (v == (c - 1)):
sql += 'NULL,'
i += 1
for val in values:
if (isinstance(val, int)):
# don't need quotes in integer values
sql += str(val)
elif (val == 'NULL'):
# keep quotes off of NULL
sql += val
else:
# wrap value with quotes
sql += '"{}"'.format(val)
if (i < v):
sql += ','
i += 1
sql += ');'
return sql
# ---------------------------------EXPORT---------------------------------#
__all__ = ['DBRow']
# ----------------------------------MAIN----------------------------------#
def main():
f1 = DBCol('f1', 'INTEGER')
f2 = DBCol('f2', 'TEXT')
f3 = DBCol('f3', 'TEXT')
cols = [f1, f2, f3]
vals = [1, 'Test', 'Third']
print DBRow.dict(cols, vals)
print DBRow.sqlForRowInsert('sample', cols, vals)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Create database row class (DBRow)<commit_after>#!/usr/bin/env python
"""
dbrow.py: DBRow is a struct describing an sqlite database table row
"""
from dbcol import *
# ----------------------------DATABASE-COLUMN-----------------------------#
class DBRow:
@staticmethod
def dict(columns, values):
if (values is not None):
if (len(columns) == len(values)):
dict = {}
i = 0
for col in columns:
dict[col.name] = values[i]
i += 1
return dict
else:
raise ValueError('columns do not match values')
else:
return None
@staticmethod
def sqlForRowInsert(table, columns, values):
c = len(columns)
v = len(values)
sql = 'INSERT INTO "{}" ('.format(table)
i = 1
for col in columns:
sql += '"{}"'.format(col.name)
if (i < c):
sql += ','
i += 1
sql += ') VALUES ('
i = 1
# allow for first column autoincrement
if (v == (c - 1)):
sql += 'NULL,'
i += 1
for val in values:
if (isinstance(val, int)):
# don't need quotes in integer values
sql += str(val)
elif (val == 'NULL'):
# keep quotes off of NULL
sql += val
else:
# wrap value with quotes
sql += '"{}"'.format(val)
if (i < v):
sql += ','
i += 1
sql += ');'
return sql
# ---------------------------------EXPORT---------------------------------#
__all__ = ['DBRow']
# ----------------------------------MAIN----------------------------------#
def main():
f1 = DBCol('f1', 'INTEGER')
f2 = DBCol('f2', 'TEXT')
f3 = DBCol('f3', 'TEXT')
cols = [f1, f2, f3]
vals = [1, 'Test', 'Third']
print DBRow.dict(cols, vals)
print DBRow.sqlForRowInsert('sample', cols, vals)
if __name__ == '__main__':
main()
|
|
f64e2f17f2e4b69790d6dccf3279c36659d499a0
|
strToCamelCase.py
|
strToCamelCase.py
|
# python3
#
# Sample Input: min value array
# Sample Output: minValueArray
#
inputString = input().strip().split(' ')
result = inputString[0]
for i in range(1,len(inputString)):
result += inputString[i][0].upper() + inputString[i][1:]
print (result)
|
Create a program to Convert String to Camel Case
|
Create a program to Convert String to Camel Case
|
Python
|
mit
|
laxmena/CodeKata,laxmena/CodeKata
|
Create a program to Convert String to Camel Case
|
# python3
#
# Sample Input: min value array
# Sample Output: minValueArray
#
inputString = input().strip().split(' ')
result = inputString[0]
for i in range(1,len(inputString)):
result += inputString[i][0].upper() + inputString[i][1:]
print (result)
|
<commit_before><commit_msg>Create a program to Convert String to Camel Case<commit_after>
|
# python3
#
# Sample Input: min value array
# Sample Output: minValueArray
#
inputString = input().strip().split(' ')
result = inputString[0]
for i in range(1,len(inputString)):
result += inputString[i][0].upper() + inputString[i][1:]
print (result)
|
Create a program to Convert String to Camel Case# python3
#
# Sample Input: min value array
# Sample Output: minValueArray
#
inputString = input().strip().split(' ')
result = inputString[0]
for i in range(1,len(inputString)):
result += inputString[i][0].upper() + inputString[i][1:]
print (result)
|
<commit_before><commit_msg>Create a program to Convert String to Camel Case<commit_after># python3
#
# Sample Input: min value array
# Sample Output: minValueArray
#
inputString = input().strip().split(' ')
result = inputString[0]
for i in range(1,len(inputString)):
result += inputString[i][0].upper() + inputString[i][1:]
print (result)
|
|
9f4978ff189c6fc8a2f4c263d43b10644c1e978e
|
easy_bake.py
|
easy_bake.py
|
import RPi.GPIO as gpio
#use board numbering on the pi
gpio.setmode(gpio.BOARD)
gpio.setup(40, gpio.OUT)
gpio.setup(38, gpio.IN)
#true and 1 are the same
gpio.output(40, True)
gpio.output(38, 1)
|
Add initial gpio commands to python file
|
Add initial gpio commands to python file
|
Python
|
mit
|
emgreen33/easy_bake,emgreen33/easy_bake
|
Add initial gpio commands to python file
|
import RPi.GPIO as gpio
#use board numbering on the pi
gpio.setmode(gpio.BOARD)
gpio.setup(40, gpio.OUT)
gpio.setup(38, gpio.IN)
#true and 1 are the same
gpio.output(40, True)
gpio.output(38, 1)
|
<commit_before><commit_msg>Add initial gpio commands to python file<commit_after>
|
import RPi.GPIO as gpio
#use board numbering on the pi
gpio.setmode(gpio.BOARD)
gpio.setup(40, gpio.OUT)
gpio.setup(38, gpio.IN)
#true and 1 are the same
gpio.output(40, True)
gpio.output(38, 1)
|
Add initial gpio commands to python fileimport RPi.GPIO as gpio
#use board numbering on the pi
gpio.setmode(gpio.BOARD)
gpio.setup(40, gpio.OUT)
gpio.setup(38, gpio.IN)
#true and 1 are the same
gpio.output(40, True)
gpio.output(38, 1)
|
<commit_before><commit_msg>Add initial gpio commands to python file<commit_after>import RPi.GPIO as gpio
#use board numbering on the pi
gpio.setmode(gpio.BOARD)
gpio.setup(40, gpio.OUT)
gpio.setup(38, gpio.IN)
#true and 1 are the same
gpio.output(40, True)
gpio.output(38, 1)
|
|
91e7a4f36637e7706dd17f1e093fe029f031bc3d
|
API/chat/migrations/0001_squashed_0002_auto_20150707_1647.py
|
API/chat/migrations/0001_squashed_0002_auto_20150707_1647.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
replaces = [(b'chat', '0001_squashed_0008_auto_20150702_1437'), (b'chat', '0002_auto_20150707_1647')]
dependencies = [
]
operations = [
migrations.CreateModel(
name='Channel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=20)),
],
),
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('text', models.TextField(max_length=2000)),
('datetime', models.DateTimeField()),
('channel', models.ForeignKey(to='chat.Channel')),
('username', models.CharField(max_length=20)),
],
),
]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Channel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=20)),
],
),
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('text', models.TextField(max_length=2000)),
('datetime', models.DateTimeField()),
('channel', models.ForeignKey(to='chat.Channel')),
('username', models.CharField(max_length=20)),
],
),
]
|
Remove replaces line on 0001_squashed
|
[HOTFIX] Remove replaces line on 0001_squashed
|
Python
|
mit
|
gtklocker/ting,sirodoht/ting,gtklocker/ting,odyvarv/ting-1,VitSalis/ting,mbalamat/ting,VitSalis/ting,gtklocker/ting,sirodoht/ting,odyvarv/ting-1,gtklocker/ting,dionyziz/ting,mbalamat/ting,VitSalis/ting,odyvarv/ting-1,dionyziz/ting,mbalamat/ting,VitSalis/ting,odyvarv/ting-1,dionyziz/ting,mbalamat/ting,sirodoht/ting,dionyziz/ting,sirodoht/ting
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
replaces = [(b'chat', '0001_squashed_0008_auto_20150702_1437'), (b'chat', '0002_auto_20150707_1647')]
dependencies = [
]
operations = [
migrations.CreateModel(
name='Channel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=20)),
],
),
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('text', models.TextField(max_length=2000)),
('datetime', models.DateTimeField()),
('channel', models.ForeignKey(to='chat.Channel')),
('username', models.CharField(max_length=20)),
],
),
]
[HOTFIX] Remove replaces line on 0001_squashed
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Channel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=20)),
],
),
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('text', models.TextField(max_length=2000)),
('datetime', models.DateTimeField()),
('channel', models.ForeignKey(to='chat.Channel')),
('username', models.CharField(max_length=20)),
],
),
]
|
<commit_before># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
replaces = [(b'chat', '0001_squashed_0008_auto_20150702_1437'), (b'chat', '0002_auto_20150707_1647')]
dependencies = [
]
operations = [
migrations.CreateModel(
name='Channel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=20)),
],
),
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('text', models.TextField(max_length=2000)),
('datetime', models.DateTimeField()),
('channel', models.ForeignKey(to='chat.Channel')),
('username', models.CharField(max_length=20)),
],
),
]
<commit_msg>[HOTFIX] Remove replaces line on 0001_squashed<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Channel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=20)),
],
),
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('text', models.TextField(max_length=2000)),
('datetime', models.DateTimeField()),
('channel', models.ForeignKey(to='chat.Channel')),
('username', models.CharField(max_length=20)),
],
),
]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
replaces = [(b'chat', '0001_squashed_0008_auto_20150702_1437'), (b'chat', '0002_auto_20150707_1647')]
dependencies = [
]
operations = [
migrations.CreateModel(
name='Channel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=20)),
],
),
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('text', models.TextField(max_length=2000)),
('datetime', models.DateTimeField()),
('channel', models.ForeignKey(to='chat.Channel')),
('username', models.CharField(max_length=20)),
],
),
]
[HOTFIX] Remove replaces line on 0001_squashed# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Channel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=20)),
],
),
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('text', models.TextField(max_length=2000)),
('datetime', models.DateTimeField()),
('channel', models.ForeignKey(to='chat.Channel')),
('username', models.CharField(max_length=20)),
],
),
]
|
<commit_before># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
replaces = [(b'chat', '0001_squashed_0008_auto_20150702_1437'), (b'chat', '0002_auto_20150707_1647')]
dependencies = [
]
operations = [
migrations.CreateModel(
name='Channel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=20)),
],
),
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('text', models.TextField(max_length=2000)),
('datetime', models.DateTimeField()),
('channel', models.ForeignKey(to='chat.Channel')),
('username', models.CharField(max_length=20)),
],
),
]
<commit_msg>[HOTFIX] Remove replaces line on 0001_squashed<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Channel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=20)),
],
),
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('text', models.TextField(max_length=2000)),
('datetime', models.DateTimeField()),
('channel', models.ForeignKey(to='chat.Channel')),
('username', models.CharField(max_length=20)),
],
),
]
|
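A note on the pattern in the record above: a squashed Django migration declares a `replaces` list naming the historical migrations it stands in for, so databases that already applied the originals treat the squash as applied; dropping that list, as the hotfix does, turns the file back into an ordinary initial migration. The sketch below only illustrates that mechanism, with a hypothetical app label and migration names, and is not part of the dataset row.
# Illustrative sketch only; 'polls' and the names in `replaces` are made up.
from django.db import migrations, models
class Migration(migrations.Migration):
    # Django marks this squash as applied wherever the listed migrations
    # already ran; removing the attribute makes it a plain initial migration.
    replaces = [('polls', '0001_initial'), ('polls', '0002_add_votes')]
    dependencies = []
    operations = [
        migrations.CreateModel(
            name='Question',
            fields=[
                ('id', models.AutoField(primary_key=True)),
                ('question_text', models.CharField(max_length=200)),
            ],
        ),
    ]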
21f1f9ea6abbf5b2fc7f851224821d8e94864372
|
data_analysis/parse_pagerank.py
|
data_analysis/parse_pagerank.py
|
import pandas as pd
import argparse
import glob
import os
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--dir', type=str, default="it", help="directory where the files are stored")
parser.add_argument('--top', type=int, default=100)
args = parser.parse_args()
min_year = 2007
max_year = 2019
top_k = args.top
total_pages = []
for f in glob.glob(args.dir+"*results.txt"):
name = os.path.basename(f)
year = int(name.split(".")[4].split("-")[0])
if not (year >= min_year and year <= max_year):
continue
with open(f, "r") as file:
counter = 0
for line in file:
if counter >= top_k:
break
data = line.split("\t")
page_name = data[0][6:-2].replace(" ", "_")
score = float(data[1])
total_pages.append(page_name)
counter += 1
total_pages = set(total_pages)
print(len(total_pages))
with open("result.txt", "w+") as output:
for name in total_pages:
output.write(name+"\n")
|
Add script to parse pagerank results.
|
Add script to parse pagerank results.
|
Python
|
mit
|
geektoni/Influenza-Like-Illness-Predictor,geektoni/Influenza-Like-Illness-Predictor
|
Add script to parse pagerank results.
|
import pandas as pd
import argparse
import glob
import os
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--dir', type=str, default="it", help="directory where the files are stored")
parser.add_argument('--top', type=int, default=100)
args = parser.parse_args()
min_year = 2007
max_year = 2019
top_k = args.top
total_pages = []
for f in glob.glob(args.dir+"*results.txt"):
name = os.path.basename(f)
year = int(name.split(".")[4].split("-")[0])
if not (year >= min_year and year <= max_year):
continue
with open(f, "r") as file:
counter = 0
for line in file:
if counter >= top_k:
break
data = line.split("\t")
page_name = data[0][6:-2].replace(" ", "_")
score = float(data[1])
total_pages.append(page_name)
counter += 1
total_pages = set(total_pages)
print(len(total_pages))
with open("result.txt", "w+") as output:
for name in total_pages:
output.write(name+"\n")
|
<commit_before><commit_msg>Add script to parse pagerank results.<commit_after>
|
import pandas as pd
import argparse
import glob
import os
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--dir', type=str, default="it", help="directory where the files are stored")
parser.add_argument('--top', type=int, default=100)
args = parser.parse_args()
min_year = 2007
max_year = 2019
top_k = args.top
total_pages = []
for f in glob.glob(args.dir+"*results.txt"):
name = os.path.basename(f)
year = int(name.split(".")[4].split("-")[0])
if not (year >= min_year and year <= max_year):
continue
with open(f, "r") as file:
counter = 0
for line in file:
if counter >= top_k:
break
data = line.split("\t")
page_name = data[0][6:-2].replace(" ", "_")
score = float(data[1])
total_pages.append(page_name)
counter += 1
total_pages = set(total_pages)
print(len(total_pages))
with open("result.txt", "w+") as output:
for name in total_pages:
output.write(name+"\n")
|
Add script to parse pagerank results.
import pandas as pd
import argparse
import glob
import os
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--dir', type=str, default="it", help="directory where the files are stored")
parser.add_argument('--top', type=int, default=100)
args = parser.parse_args()
min_year = 2007
max_year = 2019
top_k = args.top
total_pages = []
for f in glob.glob(args.dir+"*results.txt"):
name = os.path.basename(f)
year = int(name.split(".")[4].split("-")[0])
if not (year >= min_year and year <= max_year):
continue
with open(f, "r") as file:
counter = 0
for line in file:
if counter >= top_k:
break
data = line.split("\t")
page_name = data[0][6:-2].replace(" ", "_")
score = float(data[1])
total_pages.append(page_name)
counter += 1
total_pages = set(total_pages)
print(len(total_pages))
with open("result.txt", "w+") as output:
for name in total_pages:
output.write(name+"\n")
|
<commit_before><commit_msg>Add script to parse pagerank results.<commit_after>import pandas as pd
import argparse
import glob
import os
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--dir', type=str, default="it", help="directory where the files are stored")
parser.add_argument('--top', type=int, default=100)
args = parser.parse_args()
min_year = 2007
max_year = 2019
top_k = args.top
total_pages = []
for f in glob.glob(args.dir+"*results.txt"):
name = os.path.basename(f)
year = int(name.split(".")[4].split("-")[0])
if not (year >= min_year and year <= max_year):
continue
with open(f, "r") as file:
counter = 0
for line in file:
if counter >= top_k:
break
data = line.split("\t")
page_name = data[0][6:-2].replace(" ", "_")
score = float(data[1])
total_pages.append(page_name)
counter += 1
total_pages = set(total_pages)
print(len(total_pages))
with open("result.txt", "w+") as output:
for name in total_pages:
output.write(name+"\n")
|
|
d0c1ce9ece3f2c901090a3a232bfd73dd34d28e3
|
zou/migrations/versions/a66508788c53_add_nb_assets_ready.py
|
zou/migrations/versions/a66508788c53_add_nb_assets_ready.py
|
"""Add nb assets ready column
Revision ID: a66508788c53
Revises: 1e150c2cea4d
Create Date: 2021-11-23 00:07:43.717653
"""
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
# revision identifiers, used by Alembic.
revision = 'a66508788c53'
down_revision = '1e150c2cea4d'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('task', sa.Column('nb_assets_ready', sa.Integer(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('task', 'nb_assets_ready')
# ### end Alembic commands ###
|
Add missing column for tasks
|
[breakdown] Add missing column for tasks
|
Python
|
agpl-3.0
|
cgwire/zou
|
[breakdown] Add missing column for tasks
|
"""Add nb assets ready column
Revision ID: a66508788c53
Revises: 1e150c2cea4d
Create Date: 2021-11-23 00:07:43.717653
"""
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
# revision identifiers, used by Alembic.
revision = 'a66508788c53'
down_revision = '1e150c2cea4d'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('task', sa.Column('nb_assets_ready', sa.Integer(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('task', 'nb_assets_ready')
# ### end Alembic commands ###
|
<commit_before><commit_msg>[breakdown] Add missing column for tasks<commit_after>
|
"""Add nb assets ready column
Revision ID: a66508788c53
Revises: 1e150c2cea4d
Create Date: 2021-11-23 00:07:43.717653
"""
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
# revision identifiers, used by Alembic.
revision = 'a66508788c53'
down_revision = '1e150c2cea4d'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('task', sa.Column('nb_assets_ready', sa.Integer(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('task', 'nb_assets_ready')
# ### end Alembic commands ###
|
[breakdown] Add missing column for tasks
"""Add nb assets ready column
Revision ID: a66508788c53
Revises: 1e150c2cea4d
Create Date: 2021-11-23 00:07:43.717653
"""
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
# revision identifiers, used by Alembic.
revision = 'a66508788c53'
down_revision = '1e150c2cea4d'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('task', sa.Column('nb_assets_ready', sa.Integer(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('task', 'nb_assets_ready')
# ### end Alembic commands ###
|
<commit_before><commit_msg>[breakdown] Add missing column for tasks<commit_after>"""Add nb assets ready column
Revision ID: a66508788c53
Revises: 1e150c2cea4d
Create Date: 2021-11-23 00:07:43.717653
"""
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
# revision identifiers, used by Alembic.
revision = 'a66508788c53'
down_revision = '1e150c2cea4d'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('task', sa.Column('nb_assets_ready', sa.Integer(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('task', 'nb_assets_ready')
# ### end Alembic commands ###
|
|
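The zou record above adds `nb_assets_ready` as a nullable column. A common follow-on pattern with Alembic, sketched below, backfills existing rows and then tightens the constraint; this is a generic illustration reusing the same names, not what the a66508788c53 migration actually does.
# Generic Alembic sketch; the backfill and NOT NULL steps are an assumption,
# not part of the zou migration shown above.
from alembic import op
import sqlalchemy as sa
def upgrade():
    op.add_column('task', sa.Column('nb_assets_ready', sa.Integer(), nullable=True))
    # Backfill rows created before the column existed, then forbid NULLs.
    op.execute("UPDATE task SET nb_assets_ready = 0 WHERE nb_assets_ready IS NULL")
    op.alter_column('task', 'nb_assets_ready', existing_type=sa.Integer(), nullable=False)
def downgrade():
    op.drop_column('task', 'nb_assets_ready')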
8eb2c02f4a30f7b1a3d54550d027139687481bd2
|
temba/channels/migrations/0033_auto_20160718_2045.py
|
temba/channels/migrations/0033_auto_20160718_2045.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('channels', '0032_channelevent'),
]
operations = [
migrations.AlterField(
model_name='channel',
name='channel_type',
field=models.CharField(default='A', help_text='Type of this channel, whether Android, Twilio or SMSC', max_length=3, verbose_name='Channel Type', choices=[('AT', "Africa's Talking"), ('A', 'Android'), ('BM', 'Blackmyna'), ('CT', 'Clickatell'), ('EX', 'External'), ('FB', 'Facebook'), ('GL', 'Globe Labs'), ('HX', 'High Connection'), ('H9', 'Hub9'), ('IB', 'Infobip'), ('JS', 'Jasmin'), ('KN', 'Kannel'), ('M3', 'M3 Tech'), ('MB', 'Mblox'), ('NX', 'Nexmo'), ('PL', 'Plivo'), ('SQ', 'Shaqodoon'), ('SC', 'SMSCentral'), ('ST', 'Start Mobile'), ('TG', 'Telegram'), ('T', 'Twilio'), ('TMS', 'Twilio Messaging Service'), ('TT', 'Twitter'), ('VB', 'Verboice'), ('VM', 'Vumi'), ('YO', 'Yo!'), ('ZV', 'Zenvia')]),
),
]
|
Add migration for globe channel type
|
Add migration for globe channel type
|
Python
|
agpl-3.0
|
tsotetsi/textily-web,tsotetsi/textily-web,pulilab/rapidpro,tsotetsi/textily-web,pulilab/rapidpro,tsotetsi/textily-web,ewheeler/rapidpro,ewheeler/rapidpro,ewheeler/rapidpro,pulilab/rapidpro,pulilab/rapidpro,tsotetsi/textily-web,pulilab/rapidpro,ewheeler/rapidpro
|
Add migration for globe channel type
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('channels', '0032_channelevent'),
]
operations = [
migrations.AlterField(
model_name='channel',
name='channel_type',
field=models.CharField(default='A', help_text='Type of this channel, whether Android, Twilio or SMSC', max_length=3, verbose_name='Channel Type', choices=[('AT', "Africa's Talking"), ('A', 'Android'), ('BM', 'Blackmyna'), ('CT', 'Clickatell'), ('EX', 'External'), ('FB', 'Facebook'), ('GL', 'Globe Labs'), ('HX', 'High Connection'), ('H9', 'Hub9'), ('IB', 'Infobip'), ('JS', 'Jasmin'), ('KN', 'Kannel'), ('M3', 'M3 Tech'), ('MB', 'Mblox'), ('NX', 'Nexmo'), ('PL', 'Plivo'), ('SQ', 'Shaqodoon'), ('SC', 'SMSCentral'), ('ST', 'Start Mobile'), ('TG', 'Telegram'), ('T', 'Twilio'), ('TMS', 'Twilio Messaging Service'), ('TT', 'Twitter'), ('VB', 'Verboice'), ('VM', 'Vumi'), ('YO', 'Yo!'), ('ZV', 'Zenvia')]),
),
]
|
<commit_before><commit_msg>Add migration for globe channel type<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('channels', '0032_channelevent'),
]
operations = [
migrations.AlterField(
model_name='channel',
name='channel_type',
field=models.CharField(default='A', help_text='Type of this channel, whether Android, Twilio or SMSC', max_length=3, verbose_name='Channel Type', choices=[('AT', "Africa's Talking"), ('A', 'Android'), ('BM', 'Blackmyna'), ('CT', 'Clickatell'), ('EX', 'External'), ('FB', 'Facebook'), ('GL', 'Globe Labs'), ('HX', 'High Connection'), ('H9', 'Hub9'), ('IB', 'Infobip'), ('JS', 'Jasmin'), ('KN', 'Kannel'), ('M3', 'M3 Tech'), ('MB', 'Mblox'), ('NX', 'Nexmo'), ('PL', 'Plivo'), ('SQ', 'Shaqodoon'), ('SC', 'SMSCentral'), ('ST', 'Start Mobile'), ('TG', 'Telegram'), ('T', 'Twilio'), ('TMS', 'Twilio Messaging Service'), ('TT', 'Twitter'), ('VB', 'Verboice'), ('VM', 'Vumi'), ('YO', 'Yo!'), ('ZV', 'Zenvia')]),
),
]
|
Add migration for globe channel type
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('channels', '0032_channelevent'),
]
operations = [
migrations.AlterField(
model_name='channel',
name='channel_type',
field=models.CharField(default='A', help_text='Type of this channel, whether Android, Twilio or SMSC', max_length=3, verbose_name='Channel Type', choices=[('AT', "Africa's Talking"), ('A', 'Android'), ('BM', 'Blackmyna'), ('CT', 'Clickatell'), ('EX', 'External'), ('FB', 'Facebook'), ('GL', 'Globe Labs'), ('HX', 'High Connection'), ('H9', 'Hub9'), ('IB', 'Infobip'), ('JS', 'Jasmin'), ('KN', 'Kannel'), ('M3', 'M3 Tech'), ('MB', 'Mblox'), ('NX', 'Nexmo'), ('PL', 'Plivo'), ('SQ', 'Shaqodoon'), ('SC', 'SMSCentral'), ('ST', 'Start Mobile'), ('TG', 'Telegram'), ('T', 'Twilio'), ('TMS', 'Twilio Messaging Service'), ('TT', 'Twitter'), ('VB', 'Verboice'), ('VM', 'Vumi'), ('YO', 'Yo!'), ('ZV', 'Zenvia')]),
),
]
|
<commit_before><commit_msg>Add migration for globe channel type<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('channels', '0032_channelevent'),
]
operations = [
migrations.AlterField(
model_name='channel',
name='channel_type',
field=models.CharField(default='A', help_text='Type of this channel, whether Android, Twilio or SMSC', max_length=3, verbose_name='Channel Type', choices=[('AT', "Africa's Talking"), ('A', 'Android'), ('BM', 'Blackmyna'), ('CT', 'Clickatell'), ('EX', 'External'), ('FB', 'Facebook'), ('GL', 'Globe Labs'), ('HX', 'High Connection'), ('H9', 'Hub9'), ('IB', 'Infobip'), ('JS', 'Jasmin'), ('KN', 'Kannel'), ('M3', 'M3 Tech'), ('MB', 'Mblox'), ('NX', 'Nexmo'), ('PL', 'Plivo'), ('SQ', 'Shaqodoon'), ('SC', 'SMSCentral'), ('ST', 'Start Mobile'), ('TG', 'Telegram'), ('T', 'Twilio'), ('TMS', 'Twilio Messaging Service'), ('TT', 'Twitter'), ('VB', 'Verboice'), ('VM', 'Vumi'), ('YO', 'Yo!'), ('ZV', 'Zenvia')]),
),
]
|
|
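The RapidPro migration above only re-records the `choices` tuple on `channel_type`; the entry it introduces is ('GL', 'Globe Labs'). On the model side such choices are normally driven by constants, roughly as in the sketch below; the constant names here are hypothetical and not RapidPro's actual definitions.
# Hypothetical model-side constants; RapidPro's real code may differ.
from django.db import models
TYPE_ANDROID = 'A'
TYPE_GLOBE = 'GL'   # the type whose addition triggers a choices migration
TYPE_TWILIO = 'T'
TYPE_CHOICES = (
    (TYPE_ANDROID, 'Android'),
    (TYPE_GLOBE, 'Globe Labs'),
    (TYPE_TWILIO, 'Twilio'),
)
class Channel(models.Model):
    channel_type = models.CharField(
        max_length=3, default=TYPE_ANDROID, choices=TYPE_CHOICES,
        verbose_name='Channel Type')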
09854094677961d6833a793584d2098264900ab5
|
tests/generate_urls.py
|
tests/generate_urls.py
|
from urllib.parse import urljoin, urlparse
from itertools import product
import csv
import posixpath
def resolveComponents(url):
"""
>>> resolveComponents('http://www.example.com/foo/bar/../../baz/bux/')
'http://www.example.com/baz/bux/'
>>> resolveComponents('http://www.example.com/some/path/../file.ext')
'http://www.example.com/some/file.ext'
"""
parsed = urlparse(url)
new_path = posixpath.normpath(parsed.path)
if parsed.path.endswith('/'):
# Compensate for issue1707768
new_path += '/'
if new_path.startswith('//'):
new_path = new_path[1:]
cleaned = parsed._replace(path=new_path)
return cleaned.geturl()
first_authorities = ['http://example.com@user:pass:7152', 'https://example.com']
second_authorities = ['', 'https://www.example.org', 'http://example.com@user:pass:1111',
'file://example.com', 'file://']
first_paths = ['', '/', '/foobar/bazz', 'foobar/bazz/']
second_paths = ['', '/', '/foo/bar', 'foo/bar/', './foo/../bar', 'foo/./.././bar']
first_queries = ['', '?a=1', '?a=647&b=s564']
second_queries = ['', '?a=sdf', '?a=cvb&b=987']
fragments = ['', '#foo', '#bar']
with open('urls.csv', 'wt') as f:
csvwriter = csv.writer(f, quotechar='"', quoting=csv.QUOTE_ALL)
csvwriter.writerow(['first_url', 'second_url', 'expected'])
counter = 1
for first_domain, second_domain in product(first_authorities, second_authorities):
for first_path, second_path in product(first_paths, second_paths):
for first_query, second_query in product(first_queries, second_queries):
for first_fragment, second_fragment in product(fragments, fragments):
if not first_path.startswith('/'):
first_path = '/' + first_path
first_url = first_domain + first_path + first_query + first_fragment
if second_domain and not second_path.startswith('/'):
second_path = '/' + second_path
second_url = second_domain + second_path + second_query + second_fragment
if first_url != second_url:
csvwriter.writerow([first_url, second_url, resolveComponents(urljoin(first_url, second_url))])
|
Add script used to generate urls test file.
|
Add script used to generate urls test file.
Thank you @woxcab for the script.
|
Python
|
unlicense
|
zachborboa/php-curl-class,php-curl-class/php-curl-class,zachborboa/php-curl-class,php-curl-class/php-curl-class,zachborboa/php-curl-class,php-curl-class/php-curl-class
|
Add script used to generate urls test file.
Thank you @woxcab for the script.
|
from urllib.parse import urljoin, urlparse
from itertools import product
import csv
import posixpath
def resolveComponents(url):
"""
>>> resolveComponents('http://www.example.com/foo/bar/../../baz/bux/')
'http://www.example.com/baz/bux/'
>>> resolveComponents('http://www.example.com/some/path/../file.ext')
'http://www.example.com/some/file.ext'
"""
parsed = urlparse(url)
new_path = posixpath.normpath(parsed.path)
if parsed.path.endswith('/'):
# Compensate for issue1707768
new_path += '/'
if new_path.startswith('//'):
new_path = new_path[1:]
cleaned = parsed._replace(path=new_path)
return cleaned.geturl()
first_authorities = ['http://example.com@user:pass:7152', 'https://example.com']
second_authorities = ['', 'https://www.example.org', 'http://example.com@user:pass:1111',
'file://example.com', 'file://']
first_paths = ['', '/', '/foobar/bazz', 'foobar/bazz/']
second_paths = ['', '/', '/foo/bar', 'foo/bar/', './foo/../bar', 'foo/./.././bar']
first_queries = ['', '?a=1', '?a=647&b=s564']
second_queries = ['', '?a=sdf', '?a=cvb&b=987']
fragments = ['', '#foo', '#bar']
with open('urls.csv', 'wt') as f:
csvwriter = csv.writer(f, quotechar='"', quoting=csv.QUOTE_ALL)
csvwriter.writerow(['first_url', 'second_url', 'expected'])
counter = 1
for first_domain, second_domain in product(first_authorities, second_authorities):
for first_path, second_path in product(first_paths, second_paths):
for first_query, second_query in product(first_queries, second_queries):
for first_fragment, second_fragment in product(fragments, fragments):
if not first_path.startswith('/'):
first_path = '/' + first_path
first_url = first_domain + first_path + first_query + first_fragment
if second_domain and not second_path.startswith('/'):
second_path = '/' + second_path
second_url = second_domain + second_path + second_query + second_fragment
if first_url != second_url:
csvwriter.writerow([first_url, second_url, resolveComponents(urljoin(first_url, second_url))])
|
<commit_before><commit_msg>Add script used to generate urls test file.
Thank you @woxcab for the script.<commit_after>
|
from urllib.parse import urljoin, urlparse
from itertools import product
import csv
import posixpath
def resolveComponents(url):
"""
>>> resolveComponents('http://www.example.com/foo/bar/../../baz/bux/')
'http://www.example.com/baz/bux/'
>>> resolveComponents('http://www.example.com/some/path/../file.ext')
'http://www.example.com/some/file.ext'
"""
parsed = urlparse(url)
new_path = posixpath.normpath(parsed.path)
if parsed.path.endswith('/'):
# Compensate for issue1707768
new_path += '/'
if new_path.startswith('//'):
new_path = new_path[1:]
cleaned = parsed._replace(path=new_path)
return cleaned.geturl()
first_authorities = ['http://example.com@user:pass:7152', 'https://example.com']
second_authorities = ['', 'https://www.example.org', 'http://example.com@user:pass:1111',
'file://example.com', 'file://']
first_paths = ['', '/', '/foobar/bazz', 'foobar/bazz/']
second_paths = ['', '/', '/foo/bar', 'foo/bar/', './foo/../bar', 'foo/./.././bar']
first_queries = ['', '?a=1', '?a=647&b=s564']
second_queries = ['', '?a=sdf', '?a=cvb&b=987']
fragments = ['', '#foo', '#bar']
with open('urls.csv', 'wt') as f:
csvwriter = csv.writer(f, quotechar='"', quoting=csv.QUOTE_ALL)
csvwriter.writerow(['first_url', 'second_url', 'expected'])
counter = 1
for first_domain, second_domain in product(first_authorities, second_authorities):
for first_path, second_path in product(first_paths, second_paths):
for first_query, second_query in product(first_queries, second_queries):
for first_fragment, second_fragment in product(fragments, fragments):
if not first_path.startswith('/'):
first_path = '/' + first_path
first_url = first_domain + first_path + first_query + first_fragment
if second_domain and not second_path.startswith('/'):
second_path = '/' + second_path
second_url = second_domain + second_path + second_query + second_fragment
if first_url != second_url:
csvwriter.writerow([first_url, second_url, resolveComponents(urljoin(first_url, second_url))])
|
Add script used to generate urls test file.
Thank you @woxcab for the script.
from urllib.parse import urljoin, urlparse
from itertools import product
import csv
import posixpath
def resolveComponents(url):
"""
>>> resolveComponents('http://www.example.com/foo/bar/../../baz/bux/')
'http://www.example.com/baz/bux/'
>>> resolveComponents('http://www.example.com/some/path/../file.ext')
'http://www.example.com/some/file.ext'
"""
parsed = urlparse(url)
new_path = posixpath.normpath(parsed.path)
if parsed.path.endswith('/'):
# Compensate for issue1707768
new_path += '/'
if new_path.startswith('//'):
new_path = new_path[1:]
cleaned = parsed._replace(path=new_path)
return cleaned.geturl()
first_authorities = ['http://example.com@user:pass:7152', 'https://example.com']
second_authorities = ['', 'https://www.example.org', 'http://example.com@user:pass:1111',
'file://example.com', 'file://']
first_paths = ['', '/', '/foobar/bazz', 'foobar/bazz/']
second_paths = ['', '/', '/foo/bar', 'foo/bar/', './foo/../bar', 'foo/./.././bar']
first_queries = ['', '?a=1', '?a=647&b=s564']
second_queries = ['', '?a=sdf', '?a=cvb&b=987']
fragments = ['', '#foo', '#bar']
with open('urls.csv', 'wt') as f:
csvwriter = csv.writer(f, quotechar='"', quoting=csv.QUOTE_ALL)
csvwriter.writerow(['first_url', 'second_url', 'expected'])
counter = 1
for first_domain, second_domain in product(first_authorities, second_authorities):
for first_path, second_path in product(first_paths, second_paths):
for first_query, second_query in product(first_queries, second_queries):
for first_fragment, second_fragment in product(fragments, fragments):
if not first_path.startswith('/'):
first_path = '/' + first_path
first_url = first_domain + first_path + first_query + first_fragment
if second_domain and not second_path.startswith('/'):
second_path = '/' + second_path
second_url = second_domain + second_path + second_query + second_fragment
if first_url != second_url:
csvwriter.writerow([first_url, second_url, resolveComponents(urljoin(first_url, second_url))])
|
<commit_before><commit_msg>Add script used to generate urls test file.
Thank you @woxcab for the script.<commit_after>from urllib.parse import urljoin, urlparse
from itertools import product
import csv
import posixpath
def resolveComponents(url):
"""
>>> resolveComponents('http://www.example.com/foo/bar/../../baz/bux/')
'http://www.example.com/baz/bux/'
>>> resolveComponents('http://www.example.com/some/path/../file.ext')
'http://www.example.com/some/file.ext'
"""
parsed = urlparse(url)
new_path = posixpath.normpath(parsed.path)
if parsed.path.endswith('/'):
# Compensate for issue1707768
new_path += '/'
if new_path.startswith('//'):
new_path = new_path[1:]
cleaned = parsed._replace(path=new_path)
return cleaned.geturl()
first_authorities = ['http://example.com@user:pass:7152', 'https://example.com']
second_authorities = ['', 'https://www.example.org', 'http://example.com@user:pass:1111',
'file://example.com', 'file://']
first_paths = ['', '/', '/foobar/bazz', 'foobar/bazz/']
second_paths = ['', '/', '/foo/bar', 'foo/bar/', './foo/../bar', 'foo/./.././bar']
first_queries = ['', '?a=1', '?a=647&b=s564']
second_queries = ['', '?a=sdf', '?a=cvb&b=987']
fragments = ['', '#foo', '#bar']
with open('urls.csv', 'wt') as f:
csvwriter = csv.writer(f, quotechar='"', quoting=csv.QUOTE_ALL)
csvwriter.writerow(['first_url', 'second_url', 'expected'])
counter = 1
for first_domain, second_domain in product(first_authorities, second_authorities):
for first_path, second_path in product(first_paths, second_paths):
for first_query, second_query in product(first_queries, second_queries):
for first_fragment, second_fragment in product(fragments, fragments):
if not first_path.startswith('/'):
first_path = '/' + first_path
first_url = first_domain + first_path + first_query + first_fragment
if second_domain and not second_path.startswith('/'):
second_path = '/' + second_path
second_url = second_domain + second_path + second_query + second_fragment
if first_url != second_url:
csvwriter.writerow([first_url, second_url, resolveComponents(urljoin(first_url, second_url))])
|
|
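For readers unfamiliar with the two standard-library calls the generator leans on, the standalone demo below shows their behaviour in isolation; it is an aside, not part of the generator or of its CSV output.
# Standalone demo of the stdlib behaviour used by the URL generator above.
from urllib.parse import urljoin
import posixpath
# urljoin resolves a reference against a base URL per RFC 3986.
assert urljoin('http://example.com/a/b/', '../c') == 'http://example.com/a/c'
assert urljoin('http://example.com/a/b', 'c') == 'http://example.com/a/c'
# An absolute reference simply replaces the base.
assert urljoin('http://example.com/a', 'https://www.example.org/x') == 'https://www.example.org/x'
# normpath collapses '.' and '..' segments but also drops a trailing slash,
# which is why resolveComponents() re-appends it.
assert posixpath.normpath('/foo/bar/../../baz/bux/') == '/baz/bux'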
1124da4ea6c30f0c36854ec938aa9ea60cca73d4
|
djangoappengine/db/expressions.py
|
djangoappengine/db/expressions.py
|
from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.expressions import ExpressionNode
OPERATION_MAP = {
ExpressionNode.ADD: lambda x, y: x + y,
ExpressionNode.SUB: lambda x, y: x - y,
ExpressionNode.MUL: lambda x, y: x * y,
ExpressionNode.DIV: lambda x, y: x / y,
ExpressionNode.MOD: lambda x, y: x % y,
ExpressionNode.BITAND: lambda x, y: x & y,
ExpressionNode.BITOR: lambda x, y: x | y,
}
class ExpressionEvaluator(SQLEvaluator):
def __init__(self, expression, query, entity, allow_joins=True):
super(ExpressionEvaluator, self).__init__(expression, query,
allow_joins)
self.entity = entity
##################################################
# Vistor methods for final expression evaluation #
##################################################
def evaluate_node(self, node, qn, connection):
values = []
for child in node.children:
if hasattr(child, 'evaluate'):
value = child.evaluate(self, qn, connection)
else:
value = child
if value is not None:
values.append(value)
return OPERATION_MAP[node.connector](*values)
def evaluate_leaf(self, node, qn, connection):
return self.entity[qn(self.cols[node][1])]
|
from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.expressions import ExpressionNode
OPERATION_MAP = {
ExpressionNode.ADD: lambda x, y: x + y,
ExpressionNode.SUB: lambda x, y: x - y,
ExpressionNode.MUL: lambda x, y: x * y,
ExpressionNode.DIV: lambda x, y: x / y,
ExpressionNode.MOD: lambda x, y: x % y,
ExpressionNode.BITAND: lambda x, y: x & y,
ExpressionNode.BITOR: lambda x, y: x | y,
}
class ExpressionEvaluator(SQLEvaluator):
def __init__(self, expression, query, entity, allow_joins=True):
super(ExpressionEvaluator, self).__init__(expression, query,
allow_joins)
self.entity = entity
##################################################
# Vistor methods for final expression evaluation #
##################################################
def evaluate_node(self, node, qn, connection):
values = []
for child in node.children:
if hasattr(child, 'evaluate'):
value = child.evaluate(self, qn, connection)
else:
value = child
if value is not None:
values.append(value)
return OPERATION_MAP[node.connector](*values)
def evaluate_leaf(self, node, qn, connection):
col = None
for n, c in self.cols:
if n is node:
col = c
break
if col is None:
raise ValueError("Given node not found")
return self.entity[qn(col[1])]
|
Fix ExpressionEvaluator for Django 1.5 changes to cols property
|
Fix ExpressionEvaluator for Django 1.5 changes to cols property
|
Python
|
bsd-3-clause
|
django-nonrel/djangoappengine,Implisit/djangoappengine,dwdraju/djangoappengine
|
from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.expressions import ExpressionNode
OPERATION_MAP = {
ExpressionNode.ADD: lambda x, y: x + y,
ExpressionNode.SUB: lambda x, y: x - y,
ExpressionNode.MUL: lambda x, y: x * y,
ExpressionNode.DIV: lambda x, y: x / y,
ExpressionNode.MOD: lambda x, y: x % y,
ExpressionNode.BITAND: lambda x, y: x & y,
ExpressionNode.BITOR: lambda x, y: x | y,
}
class ExpressionEvaluator(SQLEvaluator):
def __init__(self, expression, query, entity, allow_joins=True):
super(ExpressionEvaluator, self).__init__(expression, query,
allow_joins)
self.entity = entity
##################################################
# Vistor methods for final expression evaluation #
##################################################
def evaluate_node(self, node, qn, connection):
values = []
for child in node.children:
if hasattr(child, 'evaluate'):
value = child.evaluate(self, qn, connection)
else:
value = child
if value is not None:
values.append(value)
return OPERATION_MAP[node.connector](*values)
def evaluate_leaf(self, node, qn, connection):
return self.entity[qn(self.cols[node][1])]
Fix ExpressionEvaluator for Django 1.5 changes to cols property
|
from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.expressions import ExpressionNode
OPERATION_MAP = {
ExpressionNode.ADD: lambda x, y: x + y,
ExpressionNode.SUB: lambda x, y: x - y,
ExpressionNode.MUL: lambda x, y: x * y,
ExpressionNode.DIV: lambda x, y: x / y,
ExpressionNode.MOD: lambda x, y: x % y,
ExpressionNode.BITAND: lambda x, y: x & y,
ExpressionNode.BITOR: lambda x, y: x | y,
}
class ExpressionEvaluator(SQLEvaluator):
def __init__(self, expression, query, entity, allow_joins=True):
super(ExpressionEvaluator, self).__init__(expression, query,
allow_joins)
self.entity = entity
##################################################
# Vistor methods for final expression evaluation #
##################################################
def evaluate_node(self, node, qn, connection):
values = []
for child in node.children:
if hasattr(child, 'evaluate'):
value = child.evaluate(self, qn, connection)
else:
value = child
if value is not None:
values.append(value)
return OPERATION_MAP[node.connector](*values)
def evaluate_leaf(self, node, qn, connection):
col = None
for n, c in self.cols:
if n is node:
col = c
break
if col is None:
raise ValueError("Given node not found")
return self.entity[qn(col[1])]
|
<commit_before>from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.expressions import ExpressionNode
OPERATION_MAP = {
ExpressionNode.ADD: lambda x, y: x + y,
ExpressionNode.SUB: lambda x, y: x - y,
ExpressionNode.MUL: lambda x, y: x * y,
ExpressionNode.DIV: lambda x, y: x / y,
ExpressionNode.MOD: lambda x, y: x % y,
ExpressionNode.BITAND: lambda x, y: x & y,
ExpressionNode.BITOR: lambda x, y: x | y,
}
class ExpressionEvaluator(SQLEvaluator):
def __init__(self, expression, query, entity, allow_joins=True):
super(ExpressionEvaluator, self).__init__(expression, query,
allow_joins)
self.entity = entity
##################################################
# Vistor methods for final expression evaluation #
##################################################
def evaluate_node(self, node, qn, connection):
values = []
for child in node.children:
if hasattr(child, 'evaluate'):
value = child.evaluate(self, qn, connection)
else:
value = child
if value is not None:
values.append(value)
return OPERATION_MAP[node.connector](*values)
def evaluate_leaf(self, node, qn, connection):
return self.entity[qn(self.cols[node][1])]
<commit_msg>Fix ExpressionEvaluator for Django 1.5 changes to cols property<commit_after>
|
from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.expressions import ExpressionNode
OPERATION_MAP = {
ExpressionNode.ADD: lambda x, y: x + y,
ExpressionNode.SUB: lambda x, y: x - y,
ExpressionNode.MUL: lambda x, y: x * y,
ExpressionNode.DIV: lambda x, y: x / y,
ExpressionNode.MOD: lambda x, y: x % y,
ExpressionNode.BITAND: lambda x, y: x & y,
ExpressionNode.BITOR: lambda x, y: x | y,
}
class ExpressionEvaluator(SQLEvaluator):
def __init__(self, expression, query, entity, allow_joins=True):
super(ExpressionEvaluator, self).__init__(expression, query,
allow_joins)
self.entity = entity
##################################################
# Vistor methods for final expression evaluation #
##################################################
def evaluate_node(self, node, qn, connection):
values = []
for child in node.children:
if hasattr(child, 'evaluate'):
value = child.evaluate(self, qn, connection)
else:
value = child
if value is not None:
values.append(value)
return OPERATION_MAP[node.connector](*values)
def evaluate_leaf(self, node, qn, connection):
col = None
for n, c in self.cols:
if n is node:
col = c
break
if col is None:
raise ValueError("Given node not found")
return self.entity[qn(col[1])]
|
from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.expressions import ExpressionNode
OPERATION_MAP = {
ExpressionNode.ADD: lambda x, y: x + y,
ExpressionNode.SUB: lambda x, y: x - y,
ExpressionNode.MUL: lambda x, y: x * y,
ExpressionNode.DIV: lambda x, y: x / y,
ExpressionNode.MOD: lambda x, y: x % y,
ExpressionNode.BITAND: lambda x, y: x & y,
ExpressionNode.BITOR: lambda x, y: x | y,
}
class ExpressionEvaluator(SQLEvaluator):
def __init__(self, expression, query, entity, allow_joins=True):
super(ExpressionEvaluator, self).__init__(expression, query,
allow_joins)
self.entity = entity
##################################################
# Vistor methods for final expression evaluation #
##################################################
def evaluate_node(self, node, qn, connection):
values = []
for child in node.children:
if hasattr(child, 'evaluate'):
value = child.evaluate(self, qn, connection)
else:
value = child
if value is not None:
values.append(value)
return OPERATION_MAP[node.connector](*values)
def evaluate_leaf(self, node, qn, connection):
return self.entity[qn(self.cols[node][1])]
Fix ExpressionEvaluator for Django 1.5 changes to cols property
from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.expressions import ExpressionNode
OPERATION_MAP = {
ExpressionNode.ADD: lambda x, y: x + y,
ExpressionNode.SUB: lambda x, y: x - y,
ExpressionNode.MUL: lambda x, y: x * y,
ExpressionNode.DIV: lambda x, y: x / y,
ExpressionNode.MOD: lambda x, y: x % y,
ExpressionNode.BITAND: lambda x, y: x & y,
ExpressionNode.BITOR: lambda x, y: x | y,
}
class ExpressionEvaluator(SQLEvaluator):
def __init__(self, expression, query, entity, allow_joins=True):
super(ExpressionEvaluator, self).__init__(expression, query,
allow_joins)
self.entity = entity
##################################################
# Vistor methods for final expression evaluation #
##################################################
def evaluate_node(self, node, qn, connection):
values = []
for child in node.children:
if hasattr(child, 'evaluate'):
value = child.evaluate(self, qn, connection)
else:
value = child
if value is not None:
values.append(value)
return OPERATION_MAP[node.connector](*values)
def evaluate_leaf(self, node, qn, connection):
col = None
for n, c in self.cols:
if n is node:
col = c
break
if col is None:
raise ValueError("Given node not found")
return self.entity[qn(col[1])]
|
<commit_before>from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.expressions import ExpressionNode
OPERATION_MAP = {
ExpressionNode.ADD: lambda x, y: x + y,
ExpressionNode.SUB: lambda x, y: x - y,
ExpressionNode.MUL: lambda x, y: x * y,
ExpressionNode.DIV: lambda x, y: x / y,
ExpressionNode.MOD: lambda x, y: x % y,
ExpressionNode.BITAND: lambda x, y: x & y,
ExpressionNode.BITOR: lambda x, y: x | y,
}
class ExpressionEvaluator(SQLEvaluator):
def __init__(self, expression, query, entity, allow_joins=True):
super(ExpressionEvaluator, self).__init__(expression, query,
allow_joins)
self.entity = entity
##################################################
# Vistor methods for final expression evaluation #
##################################################
def evaluate_node(self, node, qn, connection):
values = []
for child in node.children:
if hasattr(child, 'evaluate'):
value = child.evaluate(self, qn, connection)
else:
value = child
if value is not None:
values.append(value)
return OPERATION_MAP[node.connector](*values)
def evaluate_leaf(self, node, qn, connection):
return self.entity[qn(self.cols[node][1])]
<commit_msg>Fix ExpressionEvaluator for Django 1.5 changes to cols property<commit_after>from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.expressions import ExpressionNode
OPERATION_MAP = {
ExpressionNode.ADD: lambda x, y: x + y,
ExpressionNode.SUB: lambda x, y: x - y,
ExpressionNode.MUL: lambda x, y: x * y,
ExpressionNode.DIV: lambda x, y: x / y,
ExpressionNode.MOD: lambda x, y: x % y,
ExpressionNode.BITAND: lambda x, y: x & y,
ExpressionNode.BITOR: lambda x, y: x | y,
}
class ExpressionEvaluator(SQLEvaluator):
def __init__(self, expression, query, entity, allow_joins=True):
super(ExpressionEvaluator, self).__init__(expression, query,
allow_joins)
self.entity = entity
##################################################
# Vistor methods for final expression evaluation #
##################################################
def evaluate_node(self, node, qn, connection):
values = []
for child in node.children:
if hasattr(child, 'evaluate'):
value = child.evaluate(self, qn, connection)
else:
value = child
if value is not None:
values.append(value)
return OPERATION_MAP[node.connector](*values)
def evaluate_leaf(self, node, qn, connection):
col = None
for n, c in self.cols:
if n is node:
col = c
break
if col is None:
raise ValueError("Given node not found")
return self.entity[qn(col[1])]
|
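The diff in the record above reflects a change in Django 1.5 where SQLEvaluator.cols stops being usable as a node-keyed mapping and is instead iterated as (node, col) pairs, which is why the lookup switches from indexing to a loop. A small standalone helper tolerating either shape could look like the sketch below; it illustrates the lookup logic only and is not code from djangoappengine.
# Illustrative helper: find the column for a node whether `cols` is a
# mapping {node: col} or a sequence of (node, col) pairs.
def lookup_col(cols, node):
    if hasattr(cols, 'get'):      # dict-like, as in older Django versions
        return cols[node]
    for n, c in cols:             # list of pairs, as in Django 1.5+
        if n is node:
            return c
    raise ValueError("Given node not found")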
08834282a5b3aec366d594bbd6d1f866876a7cb1
|
examples/basic_masterqa_test.py
|
examples/basic_masterqa_test.py
|
from seleniumbase import MasterQA
class MasterQATests(MasterQA):
def test_masterqa(self):
self.open("http://xkcd.com/1700/")
self.verify("Do you see a webcomic?")
self.click_link_text('Store')
self.verify("Do you see items for sale?")
self.update_text("input#top-search-input", "poster\n")
self.verify("Do you see posters in the search results?")
|
Add a more simple MasterQA example
|
Add a more simple MasterQA example
|
Python
|
mit
|
possoumous/Watchers,ktp420/SeleniumBase,mdmintz/seleniumspot,ktp420/SeleniumBase,mdmintz/SeleniumBase,ktp420/SeleniumBase,possoumous/Watchers,mdmintz/seleniumspot,seleniumbase/SeleniumBase,mdmintz/SeleniumBase,possoumous/Watchers,possoumous/Watchers,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,ktp420/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase
|
Add a more simple MasterQA example
|
from seleniumbase import MasterQA
class MasterQATests(MasterQA):
def test_masterqa(self):
self.open("http://xkcd.com/1700/")
self.verify("Do you see a webcomic?")
self.click_link_text('Store')
self.verify("Do you see items for sale?")
self.update_text("input#top-search-input", "poster\n")
self.verify("Do you see posters in the search results?")
|
<commit_before><commit_msg>Add a more simple MasterQA example<commit_after>
|
from seleniumbase import MasterQA
class MasterQATests(MasterQA):
def test_masterqa(self):
self.open("http://xkcd.com/1700/")
self.verify("Do you see a webcomic?")
self.click_link_text('Store')
self.verify("Do you see items for sale?")
self.update_text("input#top-search-input", "poster\n")
self.verify("Do you see posters in the search results?")
|
Add a more simple MasterQA example
from seleniumbase import MasterQA
class MasterQATests(MasterQA):
def test_masterqa(self):
self.open("http://xkcd.com/1700/")
self.verify("Do you see a webcomic?")
self.click_link_text('Store')
self.verify("Do you see items for sale?")
self.update_text("input#top-search-input", "poster\n")
self.verify("Do you see posters in the search results?")
|
<commit_before><commit_msg>Add a more simple MasterQA example<commit_after>from seleniumbase import MasterQA
class MasterQATests(MasterQA):
def test_masterqa(self):
self.open("http://xkcd.com/1700/")
self.verify("Do you see a webcomic?")
self.click_link_text('Store')
self.verify("Do you see items for sale?")
self.update_text("input#top-search-input", "poster\n")
self.verify("Do you see posters in the search results?")
|
|
43322576a7a74f668a86cc0c4601c719c4fb2646
|
candidates/tests/test_autocomplete.py
|
candidates/tests/test_autocomplete.py
|
from mock import patch, Mock
from django_webtest import WebTest
@patch('candidates.views.PopIt')
@patch('candidates.views.requests')
class TestAutocompletePartyView(WebTest):
def test_autocomplete(self, mock_requests, mock_popit):
fake_search_result = {
"total": 9,
"page": 1,
"per_page": 30,
"has_more": False,
"result": [
{"name": "Socialist Labour Party"},
{"name": "Labour Party"},
{"name": "Democratic Labour Party"},
{"name": "Labour and Co-operative"},
{"name": "The Labour Party"},
{"name": "Labour Party of Northern Ireland"},
{"name": "SDLP (Social Democratic & Labour Party)"},
{"name": "The Individuals Labour and Tory (TILT)"},
{"name": "Liverpool Labour Community Party"},
],
}
mock_requests.get.return_value = Mock(**{
'json.return_value': fake_search_result
})
response = self.app.get('/autocomplete/party?term=lab')
self.assertEqual(
response.json,
[
"lab",
"Labour Party",
"Socialist Labour Party",
"SDLP (Social Democratic & Labour Party)",
"Democratic Labour Party",
"The Labour Party",
"Labour and Co-operative",
"Labour Party of Northern Ireland",
"The Individuals Labour and Tory (TILT)",
"Liverpool Labour Community Party"
]
)
|
Add a test for the party autocomplete view
|
Add a test for the party autocomplete view
|
Python
|
agpl-3.0
|
mhl/yournextmp-popit,YoQuieroSaber/yournextrepresentative,mysociety/yournextrepresentative,datamade/yournextmp-popit,datamade/yournextmp-popit,DemocracyClub/yournextrepresentative,neavouli/yournextrepresentative,mhl/yournextmp-popit,openstate/yournextrepresentative,YoQuieroSaber/yournextrepresentative,mysociety/yournextrepresentative,neavouli/yournextrepresentative,openstate/yournextrepresentative,mysociety/yournextmp-popit,datamade/yournextmp-popit,mysociety/yournextrepresentative,neavouli/yournextrepresentative,mysociety/yournextrepresentative,mysociety/yournextmp-popit,mysociety/yournextmp-popit,mysociety/yournextmp-popit,neavouli/yournextrepresentative,datamade/yournextmp-popit,YoQuieroSaber/yournextrepresentative,mhl/yournextmp-popit,openstate/yournextrepresentative,datamade/yournextmp-popit,DemocracyClub/yournextrepresentative,mysociety/yournextmp-popit,mysociety/yournextrepresentative,YoQuieroSaber/yournextrepresentative,openstate/yournextrepresentative,DemocracyClub/yournextrepresentative,openstate/yournextrepresentative,YoQuieroSaber/yournextrepresentative,neavouli/yournextrepresentative
|
Add a test for the party autocomplete view
|
from mock import patch, Mock
from django_webtest import WebTest
@patch('candidates.views.PopIt')
@patch('candidates.views.requests')
class TestAutocompletePartyView(WebTest):
def test_autocomplete(self, mock_requests, mock_popit):
fake_search_result = {
"total": 9,
"page": 1,
"per_page": 30,
"has_more": False,
"result": [
{"name": "Socialist Labour Party"},
{"name": "Labour Party"},
{"name": "Democratic Labour Party"},
{"name": "Labour and Co-operative"},
{"name": "The Labour Party"},
{"name": "Labour Party of Northern Ireland"},
{"name": "SDLP (Social Democratic & Labour Party)"},
{"name": "The Individuals Labour and Tory (TILT)"},
{"name": "Liverpool Labour Community Party"},
],
}
mock_requests.get.return_value = Mock(**{
'json.return_value': fake_search_result
})
response = self.app.get('/autocomplete/party?term=lab')
self.assertEqual(
response.json,
[
"lab",
"Labour Party",
"Socialist Labour Party",
"SDLP (Social Democratic & Labour Party)",
"Democratic Labour Party",
"The Labour Party",
"Labour and Co-operative",
"Labour Party of Northern Ireland",
"The Individuals Labour and Tory (TILT)",
"Liverpool Labour Community Party"
]
)
|
<commit_before><commit_msg>Add a test for the party autocomplete view<commit_after>
|
from mock import patch, Mock
from django_webtest import WebTest
@patch('candidates.views.PopIt')
@patch('candidates.views.requests')
class TestAutocompletePartyView(WebTest):
def test_autocomplete(self, mock_requests, mock_popit):
fake_search_result = {
"total": 9,
"page": 1,
"per_page": 30,
"has_more": False,
"result": [
{"name": "Socialist Labour Party"},
{"name": "Labour Party"},
{"name": "Democratic Labour Party"},
{"name": "Labour and Co-operative"},
{"name": "The Labour Party"},
{"name": "Labour Party of Northern Ireland"},
{"name": "SDLP (Social Democratic & Labour Party)"},
{"name": "The Individuals Labour and Tory (TILT)"},
{"name": "Liverpool Labour Community Party"},
],
}
mock_requests.get.return_value = Mock(**{
'json.return_value': fake_search_result
})
response = self.app.get('/autocomplete/party?term=lab')
self.assertEqual(
response.json,
[
"lab",
"Labour Party",
"Socialist Labour Party",
"SDLP (Social Democratic & Labour Party)",
"Democratic Labour Party",
"The Labour Party",
"Labour and Co-operative",
"Labour Party of Northern Ireland",
"The Individuals Labour and Tory (TILT)",
"Liverpool Labour Community Party"
]
)
|
Add a test for the party autocomplete view
from mock import patch, Mock
from django_webtest import WebTest
@patch('candidates.views.PopIt')
@patch('candidates.views.requests')
class TestAutocompletePartyView(WebTest):
def test_autocomplete(self, mock_requests, mock_popit):
fake_search_result = {
"total": 9,
"page": 1,
"per_page": 30,
"has_more": False,
"result": [
{"name": "Socialist Labour Party"},
{"name": "Labour Party"},
{"name": "Democratic Labour Party"},
{"name": "Labour and Co-operative"},
{"name": "The Labour Party"},
{"name": "Labour Party of Northern Ireland"},
{"name": "SDLP (Social Democratic & Labour Party)"},
{"name": "The Individuals Labour and Tory (TILT)"},
{"name": "Liverpool Labour Community Party"},
],
}
mock_requests.get.return_value = Mock(**{
'json.return_value': fake_search_result
})
response = self.app.get('/autocomplete/party?term=lab')
self.assertEqual(
response.json,
[
"lab",
"Labour Party",
"Socialist Labour Party",
"SDLP (Social Democratic & Labour Party)",
"Democratic Labour Party",
"The Labour Party",
"Labour and Co-operative",
"Labour Party of Northern Ireland",
"The Individuals Labour and Tory (TILT)",
"Liverpool Labour Community Party"
]
)
|
<commit_before><commit_msg>Add a test for the party autocomplete view<commit_after>from mock import patch, Mock
from django_webtest import WebTest
@patch('candidates.views.PopIt')
@patch('candidates.views.requests')
class TestAutocompletePartyView(WebTest):
def test_autocomplete(self, mock_requests, mock_popit):
fake_search_result = {
"total": 9,
"page": 1,
"per_page": 30,
"has_more": False,
"result": [
{"name": "Socialist Labour Party"},
{"name": "Labour Party"},
{"name": "Democratic Labour Party"},
{"name": "Labour and Co-operative"},
{"name": "The Labour Party"},
{"name": "Labour Party of Northern Ireland"},
{"name": "SDLP (Social Democratic & Labour Party)"},
{"name": "The Individuals Labour and Tory (TILT)"},
{"name": "Liverpool Labour Community Party"},
],
}
mock_requests.get.return_value = Mock(**{
'json.return_value': fake_search_result
})
response = self.app.get('/autocomplete/party?term=lab')
self.assertEqual(
response.json,
[
"lab",
"Labour Party",
"Socialist Labour Party",
"SDLP (Social Democratic & Labour Party)",
"Democratic Labour Party",
"The Labour Party",
"Labour and Co-operative",
"Labour Party of Northern Ireland",
"The Individuals Labour and Tory (TILT)",
"Liverpool Labour Community Party"
]
)
|
|
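One detail worth noting in the test above is the Mock(**{'json.return_value': ...}) idiom, which configures a nested attribute path in a single constructor call. The standalone demo below shows the same idiom outside the test; it uses unittest.mock, while the record imports the external mock package, but the behaviour shown is the same.
# Minimal demo of configuring a nested return value on a Mock.
from unittest.mock import Mock
fake_response = Mock(**{'json.return_value': {'result': [{'name': 'Labour Party'}]}})
# Calling .json() on the configured mock returns the canned payload.
assert fake_response.json() == {'result': [{'name': 'Labour Party'}]}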
46f3909e6f50d7832db1331a74916e47d656d459
|
examples/polymorphic_inheritance.py
|
examples/polymorphic_inheritance.py
|
"""Example of using polymorphic inheritance features to keep multiple related
models in a single collection"""
import uuid
from modularodm import InheritableStoredObject, fields
from modularodm.storage import EphemeralStorage
class Car(InheritableStoredObject):
_id = fields.StringField(primary=True, default=uuid.uuid4)
brand = None
doors = None
class Ford(Car):
brand = "Ford"
class FordFocus(Ford):
doors = 4
storage = EphemeralStorage()
for cls in (Car, Ford, FordFocus):
cls.set_storage(storage)
generic_car = Car()
generic_car.save()
generic_ford = Ford()
generic_ford.save()
ford_focus = FordFocus()
ford_focus.save()
# All three cars have been saved
assert Car.find().count() == 3
# Only two of the cars were Fords
assert Ford.find().count() == 2
# Only one was a Focus
assert FordFocus.find().count() == 1
# Each item returned is an instance of its most specific type
for car in Car.find():
print(car.__class__)
|
Add example for polymorphic inheritance
|
Add example for polymorphic inheritance
|
Python
|
apache-2.0
|
CenterForOpenScience/modular-odm,sloria/modular-odm,icereval/modular-odm,chrisseto/modular-odm
|
Add example for polymorphic inheritance
|
"""Example of using polymorphic inheritance features to keep multiple related
models in a single collection"""
import uuid
from modularodm import InheritableStoredObject, fields
from modularodm.storage import EphemeralStorage
class Car(InheritableStoredObject):
_id = fields.StringField(primary=True, default=uuid.uuid4)
brand = None
doors = None
class Ford(Car):
brand = "Ford"
class FordFocus(Ford):
doors = 4
storage = EphemeralStorage()
for cls in (Car, Ford, FordFocus):
cls.set_storage(storage)
generic_car = Car()
generic_car.save()
generic_ford = Ford()
generic_ford.save()
ford_focus = FordFocus()
ford_focus.save()
# All three cars have been saved
assert Car.find().count() == 3
# Only two of the cars were Fords
assert Ford.find().count() == 2
# Only one was a Focus
assert FordFocus.find().count() == 1
# Each item returned is an instance of its most specific type
for car in Car.find():
print(car.__class__)
|
<commit_before><commit_msg>Add example for polymorphic inheritance<commit_after>
|
"""Example of using polymorphic inheritance features to keep multiple related
models in a single collection"""
import uuid
from modularodm import InheritableStoredObject, fields
from modularodm.storage import EphemeralStorage
class Car(InheritableStoredObject):
_id = fields.StringField(primary=True, default=uuid.uuid4)
brand = None
doors = None
class Ford(Car):
brand = "Ford"
class FordFocus(Ford):
doors = 4
storage = EphemeralStorage()
for cls in (Car, Ford, FordFocus):
cls.set_storage(storage)
generic_car = Car()
generic_car.save()
generic_ford = Ford()
generic_ford.save()
ford_focus = FordFocus()
ford_focus.save()
# All three cars have been saved
assert Car.find().count() == 3
# Only two of the cars were Fords
assert Ford.find().count() == 2
# Only one was a Focus
assert FordFocus.find().count() == 1
# Each item returned is an instance of its most specific type
for car in Car.find():
print(car.__class__)
|
Add example for polymorphic inheritance
"""Example of using polymorphic inheritance features to keep multiple related
models in a single collection"""
import uuid
from modularodm import InheritableStoredObject, fields
from modularodm.storage import EphemeralStorage
class Car(InheritableStoredObject):
_id = fields.StringField(primary=True, default=uuid.uuid4)
brand = None
doors = None
class Ford(Car):
brand = "Ford"
class FordFocus(Ford):
doors = 4
storage = EphemeralStorage()
for cls in (Car, Ford, FordFocus):
cls.set_storage(storage)
generic_car = Car()
generic_car.save()
generic_ford = Ford()
generic_ford.save()
ford_focus = FordFocus()
ford_focus.save()
# All three cars have been saved
assert Car.find().count() == 3
# Only two of the cars were Fords
assert Ford.find().count() == 2
# Only one was a Focus
assert FordFocus.find().count() == 1
# Each item returned is an instance of its most specific type
for car in Car.find():
print(car.__class__)
|
<commit_before><commit_msg>Add example for polymorphic inheritance<commit_after>"""Example of using polymorphic inheritance features to keep multiple related
models in a single collection"""
import uuid
from modularodm import InheritableStoredObject, fields
from modularodm.storage import EphemeralStorage
class Car(InheritableStoredObject):
_id = fields.StringField(primary=True, default=uuid.uuid4)
brand = None
doors = None
class Ford(Car):
brand = "Ford"
class FordFocus(Ford):
doors = 4
storage = EphemeralStorage()
for cls in (Car, Ford, FordFocus):
cls.set_storage(storage)
generic_car = Car()
generic_car.save()
generic_ford = Ford()
generic_ford.save()
ford_focus = FordFocus()
ford_focus.save()
# All three cars have been saved
assert Car.find().count() == 3
# Only two of the cars were Fords
assert Ford.find().count() == 2
# Only one was a Focus
assert FordFocus.find().count() == 1
# Each item returned is an instance of its most specific type
for car in Car.find():
print(car.__class__)
|
|
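The modular-odm example above depends on the backend remembering each record's most specific class, so that find() can hand back Ford and FordFocus instances from a single collection. A common way to get that behaviour in general, independent of how modular-odm actually implements it, is to store a type discriminator with each record and dispatch through a registry on load, as in the self-contained sketch below.
# Generic illustration of type-discriminator dispatch; not modular-odm code.
class Car:
    registry = {}
    def __init_subclass__(cls, **kwargs):
        super().__init_subclass__(**kwargs)
        Car.registry[cls.__name__] = cls
    @classmethod
    def from_record(cls, record):
        # `record` carries the most specific class name it was saved with.
        concrete = cls.registry.get(record.get('_type'), cls)
        obj = concrete.__new__(concrete)
        obj.__dict__.update({k: v for k, v in record.items() if k != '_type'})
        return obj
class Ford(Car):
    pass
class FordFocus(Ford):
    pass
rows = [{'_id': 1, '_type': 'Car'}, {'_id': 2, '_type': 'Ford'}, {'_id': 3, '_type': 'FordFocus'}]
print([type(Car.from_record(r)).__name__ for r in rows])  # ['Car', 'Ford', 'FordFocus']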
3b8326b8f0fc2ab9fec2bc25f1e05ad3ca4928bb
|
indra/tests/test_bioregistry.py
|
indra/tests/test_bioregistry.py
|
from indra.databases import bioregistry
def test_get_ns_from_bioregistry():
assert bioregistry.get_ns_from_bioregistry('xxxx') is None
assert bioregistry.get_ns_from_bioregistry('noncodev4.rna') == 'NONCODE'
assert bioregistry.get_ns_from_bioregistry('chebi') == 'CHEBI'
def test_get_ns_id_from_bioregistry():
assert bioregistry.get_ns_id_from_bioregistry('xxxx', 'xxxx') == \
(None, None)
assert bioregistry.get_ns_id_from_bioregistry('chebi', '3696') == \
('CHEBI', 'CHEBI:3696')
assert bioregistry.get_ns_id_from_bioregistry('hgnc', '1097') == \
('HGNC', '1097')
def test_get_ns_id_from_bioregistry_curie():
assert bioregistry.get_ns_id_from_bioregistry_curie('xxxx:xxxx') == \
(None, None)
assert bioregistry.get_ns_id_from_bioregistry_curie('chebi:3696') == \
('CHEBI', 'CHEBI:3696')
assert bioregistry.get_ns_id_from_bioregistry_curie('hgnc:1097') == \
('HGNC', '1097')
def test_get_bioregistry_prefix():
assert bioregistry.get_bioregistry_prefix('PUBCHEM') == 'pubchem.compound'
assert bioregistry.get_bioregistry_prefix('NXPFA') == 'nextprot.family'
assert bioregistry.get_bioregistry_prefix('HGNC') == 'hgnc'
def test_get_bioregistry_curie():
assert bioregistry.get_bioregistry_curie('PUBCHEM', '100101') == \
'pubchem.compound:100101'
assert bioregistry.get_bioregistry_curie('NXPFA', '01405') == \
'nextprot.family:01405'
assert bioregistry.get_bioregistry_curie('HGNC', '1097') == 'hgnc:1097'
def test_get_bioregistry_url():
assert bioregistry.get_bioregistry_url('PUBCHEM', '100101') == \
'https://bioregistry.io/pubchem.compound:100101'
|
Add tests for Bioregistry module
|
Add tests for Bioregistry module
|
Python
|
bsd-2-clause
|
johnbachman/indra,bgyori/indra,sorgerlab/indra,bgyori/indra,bgyori/indra,johnbachman/indra,sorgerlab/indra,johnbachman/indra,sorgerlab/indra
|
Add tests for Bioregistry module
|
from indra.databases import bioregistry
def test_get_ns_from_bioregistry():
assert bioregistry.get_ns_from_bioregistry('xxxx') is None
assert bioregistry.get_ns_from_bioregistry('noncodev4.rna') == 'NONCODE'
assert bioregistry.get_ns_from_bioregistry('chebi') == 'CHEBI'
def test_get_ns_id_from_bioregistry():
assert bioregistry.get_ns_id_from_bioregistry('xxxx', 'xxxx') == \
(None, None)
assert bioregistry.get_ns_id_from_bioregistry('chebi', '3696') == \
('CHEBI', 'CHEBI:3696')
assert bioregistry.get_ns_id_from_bioregistry('hgnc', '1097') == \
('HGNC', '1097')
def test_get_ns_id_from_bioregistry_curie():
assert bioregistry.get_ns_id_from_bioregistry_curie('xxxx:xxxx') == \
(None, None)
assert bioregistry.get_ns_id_from_bioregistry_curie('chebi:3696') == \
('CHEBI', 'CHEBI:3696')
assert bioregistry.get_ns_id_from_bioregistry_curie('hgnc:1097') == \
('HGNC', '1097')
def test_get_bioregistry_prefix():
assert bioregistry.get_bioregistry_prefix('PUBCHEM') == 'pubchem.compound'
assert bioregistry.get_bioregistry_prefix('NXPFA') == 'nextprot.family'
assert bioregistry.get_bioregistry_prefix('HGNC') == 'hgnc'
def test_get_bioregistry_curie():
assert bioregistry.get_bioregistry_curie('PUBCHEM', '100101') == \
'pubchem.compound:100101'
assert bioregistry.get_bioregistry_curie('NXPFA', '01405') == \
'nextprot.family:01405'
assert bioregistry.get_bioregistry_curie('HGNC', '1097') == 'hgnc:1097'
def test_get_bioregistry_url():
assert bioregistry.get_bioregistry_url('PUBCHEM', '100101') == \
'https://bioregistry.io/pubchem.compound:100101'
|
<commit_before><commit_msg>Add tests for Bioregistry module<commit_after>
|
from indra.databases import bioregistry
def test_get_ns_from_bioregistry():
assert bioregistry.get_ns_from_bioregistry('xxxx') is None
assert bioregistry.get_ns_from_bioregistry('noncodev4.rna') == 'NONCODE'
assert bioregistry.get_ns_from_bioregistry('chebi') == 'CHEBI'
def test_get_ns_id_from_bioregistry():
assert bioregistry.get_ns_id_from_bioregistry('xxxx', 'xxxx') == \
(None, None)
assert bioregistry.get_ns_id_from_bioregistry('chebi', '3696') == \
('CHEBI', 'CHEBI:3696')
assert bioregistry.get_ns_id_from_bioregistry('hgnc', '1097') == \
('HGNC', '1097')
def test_get_ns_id_from_bioregistry_curie():
assert bioregistry.get_ns_id_from_bioregistry_curie('xxxx:xxxx') == \
(None, None)
assert bioregistry.get_ns_id_from_bioregistry_curie('chebi:3696') == \
('CHEBI', 'CHEBI:3696')
assert bioregistry.get_ns_id_from_bioregistry_curie('hgnc:1097') == \
('HGNC', '1097')
def test_get_bioregistry_prefix():
assert bioregistry.get_bioregistry_prefix('PUBCHEM') == 'pubchem.compound'
assert bioregistry.get_bioregistry_prefix('NXPFA') == 'nextprot.family'
assert bioregistry.get_bioregistry_prefix('HGNC') == 'hgnc'
def test_get_bioregistry_curie():
assert bioregistry.get_bioregistry_curie('PUBCHEM', '100101') == \
'pubchem.compound:100101'
assert bioregistry.get_bioregistry_curie('NXPFA', '01405') == \
'nextprot.family:01405'
assert bioregistry.get_bioregistry_curie('HGNC', '1097') == 'hgnc:1097'
def test_get_bioregistry_url():
assert bioregistry.get_bioregistry_url('PUBCHEM', '100101') == \
'https://bioregistry.io/pubchem.compound:100101'
|
Add tests for Bioregistry modulefrom indra.databases import bioregistry
def test_get_ns_from_bioregistry():
assert bioregistry.get_ns_from_bioregistry('xxxx') is None
assert bioregistry.get_ns_from_bioregistry('noncodev4.rna') == 'NONCODE'
assert bioregistry.get_ns_from_bioregistry('chebi') == 'CHEBI'
def test_get_ns_id_from_bioregistry():
assert bioregistry.get_ns_id_from_bioregistry('xxxx', 'xxxx') == \
(None, None)
assert bioregistry.get_ns_id_from_bioregistry('chebi', '3696') == \
('CHEBI', 'CHEBI:3696')
assert bioregistry.get_ns_id_from_bioregistry('hgnc', '1097') == \
('HGNC', '1097')
def test_get_ns_id_from_bioregistry_curie():
assert bioregistry.get_ns_id_from_bioregistry_curie('xxxx:xxxx') == \
(None, None)
assert bioregistry.get_ns_id_from_bioregistry_curie('chebi:3696') == \
('CHEBI', 'CHEBI:3696')
assert bioregistry.get_ns_id_from_bioregistry_curie('hgnc:1097') == \
('HGNC', '1097')
def test_get_bioregistry_prefix():
assert bioregistry.get_bioregistry_prefix('PUBCHEM') == 'pubchem.compound'
assert bioregistry.get_bioregistry_prefix('NXPFA') == 'nextprot.family'
assert bioregistry.get_bioregistry_prefix('HGNC') == 'hgnc'
def test_get_bioregistry_curie():
assert bioregistry.get_bioregistry_curie('PUBCHEM', '100101') == \
'pubchem.compound:100101'
assert bioregistry.get_bioregistry_curie('NXPFA', '01405') == \
'nextprot.family:01405'
assert bioregistry.get_bioregistry_curie('HGNC', '1097') == 'hgnc:1097'
def test_get_bioregistry_url():
assert bioregistry.get_bioregistry_url('PUBCHEM', '100101') == \
'https://bioregistry.io/pubchem.compound:100101'
|
<commit_before><commit_msg>Add tests for Bioregistry module<commit_after>from indra.databases import bioregistry
def test_get_ns_from_bioregistry():
assert bioregistry.get_ns_from_bioregistry('xxxx') is None
assert bioregistry.get_ns_from_bioregistry('noncodev4.rna') == 'NONCODE'
assert bioregistry.get_ns_from_bioregistry('chebi') == 'CHEBI'
def test_get_ns_id_from_bioregistry():
assert bioregistry.get_ns_id_from_bioregistry('xxxx', 'xxxx') == \
(None, None)
assert bioregistry.get_ns_id_from_bioregistry('chebi', '3696') == \
('CHEBI', 'CHEBI:3696')
assert bioregistry.get_ns_id_from_bioregistry('hgnc', '1097') == \
('HGNC', '1097')
def test_get_ns_id_from_bioregistry_curie():
assert bioregistry.get_ns_id_from_bioregistry_curie('xxxx:xxxx') == \
(None, None)
assert bioregistry.get_ns_id_from_bioregistry_curie('chebi:3696') == \
('CHEBI', 'CHEBI:3696')
assert bioregistry.get_ns_id_from_bioregistry_curie('hgnc:1097') == \
('HGNC', '1097')
def test_get_bioregistry_prefix():
assert bioregistry.get_bioregistry_prefix('PUBCHEM') == 'pubchem.compound'
assert bioregistry.get_bioregistry_prefix('NXPFA') == 'nextprot.family'
assert bioregistry.get_bioregistry_prefix('HGNC') == 'hgnc'
def test_get_bioregistry_curie():
assert bioregistry.get_bioregistry_curie('PUBCHEM', '100101') == \
'pubchem.compound:100101'
assert bioregistry.get_bioregistry_curie('NXPFA', '01405') == \
'nextprot.family:01405'
assert bioregistry.get_bioregistry_curie('HGNC', '1097') == 'hgnc:1097'
def test_get_bioregistry_url():
assert bioregistry.get_bioregistry_url('PUBCHEM', '100101') == \
'https://bioregistry.io/pubchem.compound:100101'
|
|
1e6eab50744e77c31918929c8ff825a59b41d292
|
frigg/builds/migrations/0008_auto_20150216_0959.py
|
frigg/builds/migrations/0008_auto_20150216_0959.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('builds', '0007_build_message'),
]
operations = [
migrations.AlterModelOptions(
name='project',
options={'ordering': ['owner', 'name']},
),
migrations.AlterField(
model_name='project',
name='members',
field=models.ManyToManyField(blank=True, related_name='projects', null=True, to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
]
|
Add migration for ordering of projects
|
Add migration for ordering of projects
|
Python
|
mit
|
frigg/frigg-hq,frigg/frigg-hq,frigg/frigg-hq
|
Add migration for ordering of projects
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('builds', '0007_build_message'),
]
operations = [
migrations.AlterModelOptions(
name='project',
options={'ordering': ['owner', 'name']},
),
migrations.AlterField(
model_name='project',
name='members',
field=models.ManyToManyField(blank=True, related_name='projects', null=True, to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add migration for ordering of projects<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('builds', '0007_build_message'),
]
operations = [
migrations.AlterModelOptions(
name='project',
options={'ordering': ['owner', 'name']},
),
migrations.AlterField(
model_name='project',
name='members',
field=models.ManyToManyField(blank=True, related_name='projects', null=True, to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
]
|
Add migration for ordering of projects# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('builds', '0007_build_message'),
]
operations = [
migrations.AlterModelOptions(
name='project',
options={'ordering': ['owner', 'name']},
),
migrations.AlterField(
model_name='project',
name='members',
field=models.ManyToManyField(blank=True, related_name='projects', null=True, to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add migration for ordering of projects<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('builds', '0007_build_message'),
]
operations = [
migrations.AlterModelOptions(
name='project',
options={'ordering': ['owner', 'name']},
),
migrations.AlterField(
model_name='project',
name='members',
field=models.ManyToManyField(blank=True, related_name='projects', null=True, to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
]
|
|
e7ced56f3776d78920caa28b6ced3e74b0738de9
|
hardware/explorer_phat/has_explorer_phat_or_hat.py
|
hardware/explorer_phat/has_explorer_phat_or_hat.py
|
#!/usr/bin/env python
# coding: Latin-1
from explorerhat import motor
# If ImportError raised, the library is not installed
# If python exits with the following, then the device is not connected:
# Warning, could not find Analog or Touch...
# Please check your i2c settings!
# On exit:
# $? is 0 if library and device are available.
# $? is 1 otherwise.
|
Add a script to tell if an Explorer PHAT or HAT is available
|
Add a script to tell if an Explorer PHAT or HAT is available
|
Python
|
mit
|
claremacrae/raspi_code,claremacrae/raspi_code,claremacrae/raspi_code
|
Add a script to tell if an Explorer PHAT or HAT is available
|
#!/usr/bin/env python
# coding: Latin-1
from explorerhat import motor
# If ImportError raised, the library is not installed
# If python exits with the following, then the device is not connected:
# Warning, could not find Analog or Touch...
# Please check your i2c settings!
# On exit:
# $? is 0 if library and device are available.
# $? is 1 otherwise.
|
<commit_before><commit_msg>Add a script to tell if an Explorer PHAT or HAT is available<commit_after>
|
#!/usr/bin/env python
# coding: Latin-1
from explorerhat import motor
# If ImportError raised, the library is not installed
# If python exits with the following, then the device is not connected:
# Warning, could not find Analog or Touch...
# Please check your i2c settings!
# On exit:
# $? is 0 if library and device are available.
# $? is 1 otherwise.
|
Add a script to tell if an Explorer PHAT or HAT is available#!/usr/bin/env python
# coding: Latin-1
from explorerhat import motor
# If ImportError raised, the library is not installed
# If python exits with the following, then the device is not connected:
# Warning, could not find Analog or Touch...
# Please check your i2c settings!
# On exit:
# $? is 0 if library and device are available.
# $? is 1 otherwise.
|
<commit_before><commit_msg>Add a script to tell if an Explorer PHAT or HAT is available<commit_after>#!/usr/bin/env python
# coding: Latin-1
from explorerhat import motor
# If ImportError raised, the library is not installed
# If python exits with the following, then the device is not connected:
# Warning, could not find Analog or Touch...
# Please check your i2c settings!
# On exit:
# $? is 0 if library and device are available.
# $? is 1 otherwise.
|
|
6ab078562b946ef87a8a4262ff29be25fbf23e40
|
src/test/test_ipfgraph.py
|
src/test/test_ipfgraph.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
import os, sys
cmd_folder, f = os.path.split(os.path.dirname(os.path.abspath(__file__)))
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
import ipf.ipf
import ipf.ipfblock.rgb2gray
import cv
class TestIPFGraph(unittest.TestCase):
def setUp(self):
self.ipf_graph = ipf.ipf.IPFGraph()
        self.block = ipf.ipfblock.rgb2gray.RGB2Gray()
self.test_image = cv.LoadImage("test.png")
self.block.input_ports["input_image"].pass_value(self.test_image)
def tearDown(self):
pass
def testName(self):
pass
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
Add test module for IPFGraph
|
Add test module for IPFGraph
|
Python
|
lgpl-2.1
|
anton-golubkov/Garland,anton-golubkov/Garland
|
Add test module for IPFGraph
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
import os, sys
cmd_folder, f = os.path.split(os.path.dirname(os.path.abspath(__file__)))
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
import ipf.ipf
import ipf.ipfblock.rgb2gray
import cv
class TestIPFGraph(unittest.TestCase):
def setUp(self):
self.ipf_graph = ipf.ipf.IPFGraph()
        self.block = ipf.ipfblock.rgb2gray.RGB2Gray()
self.test_image = cv.LoadImage("test.png")
self.block.input_ports["input_image"].pass_value(self.test_image)
def tearDown(self):
pass
def testName(self):
pass
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
<commit_before><commit_msg>Add test module for IPFGraph<commit_after>
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
import os, sys
cmd_folder, f = os.path.split(os.path.dirname(os.path.abspath(__file__)))
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
import ipf.ipf
import ipf.ipfblock.rgb2gray
import cv
class TestIPFGraph(unittest.TestCase):
def setUp(self):
self.ipf_graph = ipf.ipf.IPFGraph()
        self.block = ipf.ipfblock.rgb2gray.RGB2Gray()
self.test_image = cv.LoadImage("test.png")
self.block.input_ports["input_image"].pass_value(self.test_image)
def tearDown(self):
pass
def testName(self):
pass
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
Add test module for IPFGraph#!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
import os, sys
cmd_folder, f = os.path.split(os.path.dirname(os.path.abspath(__file__)))
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
import ipf.ipf
import ipf.ipfblock.rgb2gray
import cv
class TestIPFGraph(unittest.TestCase):
def setUp(self):
self.ipf_graph = ipf.ipf.IPFGraph()
        self.block = ipf.ipfblock.rgb2gray.RGB2Gray()
self.test_image = cv.LoadImage("test.png")
self.block.input_ports["input_image"].pass_value(self.test_image)
def tearDown(self):
pass
def testName(self):
pass
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
<commit_before><commit_msg>Add test module for IPFGraph<commit_after>#!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
import os, sys
cmd_folder, f = os.path.split(os.path.dirname(os.path.abspath(__file__)))
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
import ipf.ipf
import ipf.ipfblock.rgb2gray
import cv
class TestIPFGraph(unittest.TestCase):
def setUp(self):
self.ipf_graph = ipf.ipf.IPFGraph()
        self.block = ipf.ipfblock.rgb2gray.RGB2Gray()
self.test_image = cv.LoadImage("test.png")
self.block.input_ports["input_image"].pass_value(self.test_image)
def tearDown(self):
pass
def testName(self):
pass
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
|
062a7beae15fbea11286b4c3266eae16ceb46a3d
|
tests/logic/email_test.py
|
tests/logic/email_test.py
|
# -*- coding: utf-8 -*-
import mock
import unittest
import logic.email
class EmailTest(unittest.TestCase):
"""We really just want to test that configuration is honored here."""
sender = 'test@example.com'
recipient = 'test@example.com'
subject = 'test subject'
html = '<p>hello test</p>'
text = 'hello test'
@mock.patch('logic.email.EMAIL_BACKENDS')
@mock.patch('logic.email.config')
def test_send_email_appengine(self, mock_config, mock_backends):
mock_config.EMAIL_BACKEND = 'appengine'
mock_backends['appengine'] = mock.Mock()
logic.email.send_email(self.sender, self.recipient, self.subject,
self.html, self.text)
mock_backends['appengine'].assert_called_once_with(
self.sender, self.recipient, self.subject, self.html, self.text
)
@mock.patch('logic.email.EMAIL_BACKENDS')
@mock.patch('logic.email.config')
def test_send_email_sendgrid(self, mock_config, mock_backends):
mock_config.EMAIL_BACKEND = 'sendgrid'
mock_backends['sendgrid'] = mock.Mock()
logic.email.send_email(self.sender, self.recipient, self.subject,
self.html, self.text)
mock_backends['sendgrid'].assert_called_once_with(
self.sender, self.recipient, self.subject, self.html, self.text
)
|
Add test for email logic (just config)
|
Add test for email logic (just config)
|
Python
|
mit
|
Yelp/love,Yelp/love,Yelp/love
|
Add test for email logic (just config)
|
# -*- coding: utf-8 -*-
import mock
import unittest
import logic.email
class EmailTest(unittest.TestCase):
"""We really just want to test that configuration is honored here."""
sender = 'test@example.com'
recipient = 'test@example.com'
subject = 'test subject'
html = '<p>hello test</p>'
text = 'hello test'
@mock.patch('logic.email.EMAIL_BACKENDS')
@mock.patch('logic.email.config')
def test_send_email_appengine(self, mock_config, mock_backends):
mock_config.EMAIL_BACKEND = 'appengine'
mock_backends['appengine'] = mock.Mock()
logic.email.send_email(self.sender, self.recipient, self.subject,
self.html, self.text)
mock_backends['appengine'].assert_called_once_with(
self.sender, self.recipient, self.subject, self.html, self.text
)
@mock.patch('logic.email.EMAIL_BACKENDS')
@mock.patch('logic.email.config')
def test_send_email_sendgrid(self, mock_config, mock_backends):
mock_config.EMAIL_BACKEND = 'sendgrid'
mock_backends['sendgrid'] = mock.Mock()
logic.email.send_email(self.sender, self.recipient, self.subject,
self.html, self.text)
mock_backends['sendgrid'].assert_called_once_with(
self.sender, self.recipient, self.subject, self.html, self.text
)
|
<commit_before><commit_msg>Add test for email logic (just config)<commit_after>
|
# -*- coding: utf-8 -*-
import mock
import unittest
import logic.email
class EmailTest(unittest.TestCase):
"""We really just want to test that configuration is honored here."""
sender = 'test@example.com'
recipient = 'test@example.com'
subject = 'test subject'
html = '<p>hello test</p>'
text = 'hello test'
@mock.patch('logic.email.EMAIL_BACKENDS')
@mock.patch('logic.email.config')
def test_send_email_appengine(self, mock_config, mock_backends):
mock_config.EMAIL_BACKEND = 'appengine'
mock_backends['appengine'] = mock.Mock()
logic.email.send_email(self.sender, self.recipient, self.subject,
self.html, self.text)
mock_backends['appengine'].assert_called_once_with(
self.sender, self.recipient, self.subject, self.html, self.text
)
@mock.patch('logic.email.EMAIL_BACKENDS')
@mock.patch('logic.email.config')
def test_send_email_sendgrid(self, mock_config, mock_backends):
mock_config.EMAIL_BACKEND = 'sendgrid'
mock_backends['sendgrid'] = mock.Mock()
logic.email.send_email(self.sender, self.recipient, self.subject,
self.html, self.text)
mock_backends['sendgrid'].assert_called_once_with(
self.sender, self.recipient, self.subject, self.html, self.text
)
|
Add test for email logic (just config)# -*- coding: utf-8 -*-
import mock
import unittest
import logic.email
class EmailTest(unittest.TestCase):
"""We really just want to test that configuration is honored here."""
sender = 'test@example.com'
recipient = 'test@example.com'
subject = 'test subject'
html = '<p>hello test</p>'
text = 'hello test'
@mock.patch('logic.email.EMAIL_BACKENDS')
@mock.patch('logic.email.config')
def test_send_email_appengine(self, mock_config, mock_backends):
mock_config.EMAIL_BACKEND = 'appengine'
mock_backends['appengine'] = mock.Mock()
logic.email.send_email(self.sender, self.recipient, self.subject,
self.html, self.text)
mock_backends['appengine'].assert_called_once_with(
self.sender, self.recipient, self.subject, self.html, self.text
)
@mock.patch('logic.email.EMAIL_BACKENDS')
@mock.patch('logic.email.config')
def test_send_email_sendgrid(self, mock_config, mock_backends):
mock_config.EMAIL_BACKEND = 'sendgrid'
mock_backends['sendgrid'] = mock.Mock()
logic.email.send_email(self.sender, self.recipient, self.subject,
self.html, self.text)
mock_backends['sendgrid'].assert_called_once_with(
self.sender, self.recipient, self.subject, self.html, self.text
)
|
<commit_before><commit_msg>Add test for email logic (just config)<commit_after># -*- coding: utf-8 -*-
import mock
import unittest
import logic.email
class EmailTest(unittest.TestCase):
"""We really just want to test that configuration is honored here."""
sender = 'test@example.com'
recipient = 'test@example.com'
subject = 'test subject'
html = '<p>hello test</p>'
text = 'hello test'
@mock.patch('logic.email.EMAIL_BACKENDS')
@mock.patch('logic.email.config')
def test_send_email_appengine(self, mock_config, mock_backends):
mock_config.EMAIL_BACKEND = 'appengine'
mock_backends['appengine'] = mock.Mock()
logic.email.send_email(self.sender, self.recipient, self.subject,
self.html, self.text)
mock_backends['appengine'].assert_called_once_with(
self.sender, self.recipient, self.subject, self.html, self.text
)
@mock.patch('logic.email.EMAIL_BACKENDS')
@mock.patch('logic.email.config')
def test_send_email_sendgrid(self, mock_config, mock_backends):
mock_config.EMAIL_BACKEND = 'sendgrid'
mock_backends['sendgrid'] = mock.Mock()
logic.email.send_email(self.sender, self.recipient, self.subject,
self.html, self.text)
mock_backends['sendgrid'].assert_called_once_with(
self.sender, self.recipient, self.subject, self.html, self.text
)
|
|
7c4cf25868a907eb3ac718b2d4dbef2966fbe806
|
ext/dcos-installer/dcos_installer/util/__init__.py
|
ext/dcos-installer/dcos_installer/util/__init__.py
|
import logging
import os
CONFIG_PATH = '/genconf/config.yaml'
SSH_KEY_PATH = '/genconf/ssh_key'
IP_DETECT_PATH = '/genconf/ip-detect'
SERVE_DIR = '/genconf/serve'
STATE_DIR = '/genconf/state'
GENCONF_DIR = '/genconf'
log = logging.getLogger(__name__)
def write_file(data, path):
try:
with open(path, 'w') as f:
log.debug("Writing file %s", path)
f.write(data)
except:
log.error("Filed to write path %s", path)
def get_action_state(action_name):
"""
Check the action.json file and if the
success + failed + term == total then we are finished.
If not, return running.
"""
return {
"action_name": "deploy",
"action_state": "running",
"hosts_running": [],
"hosts_success": [],
"hosts_failed": [],
"hosts_terminated": [],
}
def clear_action_jsons():
"""
On startup, remove all the old action.json files (preflight,
postflight, deploy .json). This is because action state is
nullified when the installer shuts down. This way we do not
return inconsistent state in the get_action_state().
"""
pass
def create_directory(path):
if not os.path.exists(path):
        os.makedirs(path)
|
CONFIG_PATH = '/genconf/config.yaml'
SSH_KEY_PATH = '/genconf/ssh_key'
IP_DETECT_PATH = '/genconf/ip-detect'
SERVE_DIR = '/genconf/serve'
STATE_DIR = '/genconf/state'
GENCONF_DIR = '/genconf'
|
Remove dead code in dcos_installer.util
|
Remove dead code in dcos_installer.util
|
Python
|
apache-2.0
|
lingmann/dcos,xinxian0458/dcos,amitaekbote/dcos,branden/dcos,dcos/dcos,xinxian0458/dcos,surdy/dcos,lingmann/dcos,mesosphere-mergebot/dcos,jeid64/dcos,surdy/dcos,mesosphere-mergebot/mergebot-test-dcos,darkonie/dcos,mnaboka/dcos,mnaboka/dcos,dcos/dcos,jeid64/dcos,vishnu2kmohan/dcos,GoelDeepak/dcos,darkonie/dcos,asridharan/dcos,branden/dcos,dcos/dcos,BenWhitehead/dcos,jeid64/dcos,GoelDeepak/dcos,vishnu2kmohan/dcos,BenWhitehead/dcos,mesosphere-mergebot/dcos,mesosphere-mergebot/mergebot-test-dcos,dcos/dcos,jeid64/dcos,vishnu2kmohan/dcos,amitaekbote/dcos,kensipe/dcos,kensipe/dcos,asridharan/dcos,darkonie/dcos,mellenburg/dcos,GoelDeepak/dcos,dcos/dcos,GoelDeepak/dcos,mellenburg/dcos,BenWhitehead/dcos,lingmann/dcos,branden/dcos,kensipe/dcos,mesosphere-mergebot/mergebot-test-dcos,xinxian0458/dcos,mnaboka/dcos,surdy/dcos,amitaekbote/dcos,mnaboka/dcos,mellenburg/dcos,vishnu2kmohan/dcos,mellenburg/dcos,darkonie/dcos,mesosphere-mergebot/dcos,asridharan/dcos,surdy/dcos,mesosphere-mergebot/mergebot-test-dcos,asridharan/dcos,BenWhitehead/dcos,xinxian0458/dcos,darkonie/dcos,kensipe/dcos,lingmann/dcos,amitaekbote/dcos,branden/dcos,mesosphere-mergebot/dcos,mnaboka/dcos
|
import logging
import os
CONFIG_PATH = '/genconf/config.yaml'
SSH_KEY_PATH = '/genconf/ssh_key'
IP_DETECT_PATH = '/genconf/ip-detect'
SERVE_DIR = '/genconf/serve'
STATE_DIR = '/genconf/state'
GENCONF_DIR = '/genconf'
log = logging.getLogger(__name__)
def write_file(data, path):
try:
with open(path, 'w') as f:
log.debug("Writing file %s", path)
f.write(data)
except:
log.error("Filed to write path %s", path)
def get_action_state(action_name):
"""
Check the action.json file and if the
success + failed + term == total then we are finished.
If not, return running.
"""
return {
"action_name": "deploy",
"action_state": "running",
"hosts_running": [],
"hosts_success": [],
"hosts_failed": [],
"hosts_terminated": [],
}
def clear_action_jsons():
"""
On startup, remove all the old action.json files (preflight,
postflight, deploy .json). This is because action state is
nullified when the installer shuts down. This way we do not
return inconsistent state in the get_action_state().
"""
pass
def create_directory(path):
if not os.path.exists(path):
        os.makedirs(path)
Remove dead code in dcos_installer.util
|
CONFIG_PATH = '/genconf/config.yaml'
SSH_KEY_PATH = '/genconf/ssh_key'
IP_DETECT_PATH = '/genconf/ip-detect'
SERVE_DIR = '/genconf/serve'
STATE_DIR = '/genconf/state'
GENCONF_DIR = '/genconf'
|
<commit_before>import logging
import os
CONFIG_PATH = '/genconf/config.yaml'
SSH_KEY_PATH = '/genconf/ssh_key'
IP_DETECT_PATH = '/genconf/ip-detect'
SERVE_DIR = '/genconf/serve'
STATE_DIR = '/genconf/state'
GENCONF_DIR = '/genconf'
log = logging.getLogger(__name__)
def write_file(data, path):
try:
with open(path, 'w') as f:
log.debug("Writing file %s", path)
f.write(data)
except:
log.error("Filed to write path %s", path)
def get_action_state(action_name):
"""
Check the action.json file and if the
success + failed + term == total then we are finished.
If not, return running.
"""
return {
"action_name": "deploy",
"action_state": "running",
"hosts_running": [],
"hosts_success": [],
"hosts_failed": [],
"hosts_terminated": [],
}
def clear_action_jsons():
"""
On startup, remove all the old action.json files (preflight,
postflight, deploy .json). This is because action state is
nullified when the installer shuts down. This way we do not
return inconsistent state in the get_action_state().
"""
pass
def create_directory(path):
if not os.path.exists(path):
        os.makedirs(path)
<commit_msg>Remove dead code in dcos_installer.util<commit_after>
|
CONFIG_PATH = '/genconf/config.yaml'
SSH_KEY_PATH = '/genconf/ssh_key'
IP_DETECT_PATH = '/genconf/ip-detect'
SERVE_DIR = '/genconf/serve'
STATE_DIR = '/genconf/state'
GENCONF_DIR = '/genconf'
|
import logging
import os
CONFIG_PATH = '/genconf/config.yaml'
SSH_KEY_PATH = '/genconf/ssh_key'
IP_DETECT_PATH = '/genconf/ip-detect'
SERVE_DIR = '/genconf/serve'
STATE_DIR = '/genconf/state'
GENCONF_DIR = '/genconf'
log = logging.getLogger(__name__)
def write_file(data, path):
try:
with open(path, 'w') as f:
log.debug("Writing file %s", path)
f.write(data)
except:
log.error("Filed to write path %s", path)
def get_action_state(action_name):
"""
Check the action.json file and if the
success + failed + term == total then we are finished.
If not, return running.
"""
return {
"action_name": "deploy",
"action_state": "running",
"hosts_running": [],
"hosts_success": [],
"hosts_failed": [],
"hosts_terminated": [],
}
def clear_action_jsons():
"""
On startup, remove all the old action.json files (preflight,
postflight, deploy .json). This is because action state is
nullified when the installer shuts down. This way we do not
return inconsistent state in the get_action_state().
"""
pass
def create_directory(path):
if not os.path.exists(path):
        os.makedirs(path)
Remove dead code in dcos_installer.utilCONFIG_PATH = '/genconf/config.yaml'
SSH_KEY_PATH = '/genconf/ssh_key'
IP_DETECT_PATH = '/genconf/ip-detect'
SERVE_DIR = '/genconf/serve'
STATE_DIR = '/genconf/state'
GENCONF_DIR = '/genconf'
|
<commit_before>import logging
import os
CONFIG_PATH = '/genconf/config.yaml'
SSH_KEY_PATH = '/genconf/ssh_key'
IP_DETECT_PATH = '/genconf/ip-detect'
SERVE_DIR = '/genconf/serve'
STATE_DIR = '/genconf/state'
GENCONF_DIR = '/genconf'
log = logging.getLogger(__name__)
def write_file(data, path):
try:
with open(path, 'w') as f:
log.debug("Writing file %s", path)
f.write(data)
except:
log.error("Filed to write path %s", path)
def get_action_state(action_name):
"""
Check the action.json file and if the
success + failed + term == total then we are finished.
If not, return running.
"""
return {
"action_name": "deploy",
"action_state": "running",
"hosts_running": [],
"hosts_success": [],
"hosts_failed": [],
"hosts_terminated": [],
}
def clear_action_jsons():
"""
On startup, remove all the old action.json files (preflight,
postflight, deploy .json). This is because action state is
nullified when the installer shuts down. This way we do not
return inconsistent state in the get_action_state().
"""
pass
def create_directory(path):
if not os.path.exists(path):
        os.makedirs(path)
<commit_msg>Remove dead code in dcos_installer.util<commit_after>CONFIG_PATH = '/genconf/config.yaml'
SSH_KEY_PATH = '/genconf/ssh_key'
IP_DETECT_PATH = '/genconf/ip-detect'
SERVE_DIR = '/genconf/serve'
STATE_DIR = '/genconf/state'
GENCONF_DIR = '/genconf'
|
e6bbf8e84cf27080e62716a72d0a9fb622d39d8b
|
python_apps/pypo/cloud_storage_downloader.py
|
python_apps/pypo/cloud_storage_downloader.py
|
import os
import logging
import ConfigParser
import urllib2
from libcloud.storage.types import Provider, ContainerDoesNotExistError, ObjectDoesNotExistError
from libcloud.storage.providers import get_driver
CONFIG_PATH = '/etc/airtime/airtime.conf'
class CloudStorageDownloader:
def __init__(self):
config = self.read_config_file(CONFIG_PATH)
S3_CONFIG_SECTION = "s3"
self._s3_bucket = config.get(S3_CONFIG_SECTION, 'bucket')
self._s3_api_key = config.get(S3_CONFIG_SECTION, 'api_key')
self._s3_api_key_secret = config.get(S3_CONFIG_SECTION, 'api_key_secret')
def download_obj(self, dst, obj_name):
cls = get_driver(Provider.S3)
driver = cls(self._s3_api_key, self._s3_api_key_secret)
#object_name = os.path.basename(urllib2.unquote(obj_url).decode('utf8'))
try:
cloud_obj = driver.get_object(container_name=self._s3_bucket,
object_name=obj_name)
except ObjectDoesNotExistError:
logging.info("Could not find object: %s" % obj_name)
exit(-1)
logging.info('Downloading: %s to %s' % (cloud_obj.name, dst))
cloud_obj.download(destination_path=dst)
def read_config_file(self, config_path):
"""Parse the application's config file located at config_path."""
config = ConfigParser.SafeConfigParser()
try:
config.readfp(open(config_path))
except IOError as e:
print "Failed to open config file at " + config_path + ": " + e.strerror
exit(-1)
        except Exception as e:
            print e
exit(-1)
return config
|
Modify Pypo -> Download files from cloud storage
|
CC-5884: Modify Pypo -> Download files from cloud storage
|
Python
|
agpl-3.0
|
Lapotor/libretime,comiconomenclaturist/libretime,LibreTime/libretime,LibreTime/libretime,comiconomenclaturist/libretime,comiconomenclaturist/libretime,LibreTime/libretime,LibreTime/libretime,Lapotor/libretime,comiconomenclaturist/libretime,LibreTime/libretime,comiconomenclaturist/libretime,LibreTime/libretime,Lapotor/libretime,comiconomenclaturist/libretime,Lapotor/libretime,Lapotor/libretime,Lapotor/libretime,comiconomenclaturist/libretime
|
CC-5884: Modify Pypo -> Download files from cloud storage
|
import os
import logging
import ConfigParser
import urllib2
from libcloud.storage.types import Provider, ContainerDoesNotExistError, ObjectDoesNotExistError
from libcloud.storage.providers import get_driver
CONFIG_PATH = '/etc/airtime/airtime.conf'
class CloudStorageDownloader:
def __init__(self):
config = self.read_config_file(CONFIG_PATH)
S3_CONFIG_SECTION = "s3"
self._s3_bucket = config.get(S3_CONFIG_SECTION, 'bucket')
self._s3_api_key = config.get(S3_CONFIG_SECTION, 'api_key')
self._s3_api_key_secret = config.get(S3_CONFIG_SECTION, 'api_key_secret')
def download_obj(self, dst, obj_name):
cls = get_driver(Provider.S3)
driver = cls(self._s3_api_key, self._s3_api_key_secret)
#object_name = os.path.basename(urllib2.unquote(obj_url).decode('utf8'))
try:
cloud_obj = driver.get_object(container_name=self._s3_bucket,
object_name=obj_name)
except ObjectDoesNotExistError:
logging.info("Could not find object: %s" % obj_name)
exit(-1)
logging.info('Downloading: %s to %s' % (cloud_obj.name, dst))
cloud_obj.download(destination_path=dst)
def read_config_file(self, config_path):
"""Parse the application's config file located at config_path."""
config = ConfigParser.SafeConfigParser()
try:
config.readfp(open(config_path))
except IOError as e:
print "Failed to open config file at " + config_path + ": " + e.strerror
exit(-1)
        except Exception as e:
            print e
exit(-1)
return config
|
<commit_before><commit_msg>CC-5884: Modify Pypo -> Download files from cloud storage<commit_after>
|
import os
import logging
import ConfigParser
import urllib2
from libcloud.storage.types import Provider, ContainerDoesNotExistError, ObjectDoesNotExistError
from libcloud.storage.providers import get_driver
CONFIG_PATH = '/etc/airtime/airtime.conf'
class CloudStorageDownloader:
def __init__(self):
config = self.read_config_file(CONFIG_PATH)
S3_CONFIG_SECTION = "s3"
self._s3_bucket = config.get(S3_CONFIG_SECTION, 'bucket')
self._s3_api_key = config.get(S3_CONFIG_SECTION, 'api_key')
self._s3_api_key_secret = config.get(S3_CONFIG_SECTION, 'api_key_secret')
def download_obj(self, dst, obj_name):
cls = get_driver(Provider.S3)
driver = cls(self._s3_api_key, self._s3_api_key_secret)
#object_name = os.path.basename(urllib2.unquote(obj_url).decode('utf8'))
try:
cloud_obj = driver.get_object(container_name=self._s3_bucket,
object_name=obj_name)
except ObjectDoesNotExistError:
logging.info("Could not find object: %s" % obj_name)
exit(-1)
logging.info('Downloading: %s to %s' % (cloud_obj.name, dst))
cloud_obj.download(destination_path=dst)
def read_config_file(self, config_path):
"""Parse the application's config file located at config_path."""
config = ConfigParser.SafeConfigParser()
try:
config.readfp(open(config_path))
except IOError as e:
print "Failed to open config file at " + config_path + ": " + e.strerror
exit(-1)
        except Exception as e:
            print e
exit(-1)
return config
|
CC-5884: Modify Pypo -> Download files from cloud storageimport os
import logging
import ConfigParser
import urllib2
from libcloud.storage.types import Provider, ContainerDoesNotExistError, ObjectDoesNotExistError
from libcloud.storage.providers import get_driver
CONFIG_PATH = '/etc/airtime/airtime.conf'
class CloudStorageDownloader:
def __init__(self):
config = self.read_config_file(CONFIG_PATH)
S3_CONFIG_SECTION = "s3"
self._s3_bucket = config.get(S3_CONFIG_SECTION, 'bucket')
self._s3_api_key = config.get(S3_CONFIG_SECTION, 'api_key')
self._s3_api_key_secret = config.get(S3_CONFIG_SECTION, 'api_key_secret')
def download_obj(self, dst, obj_name):
cls = get_driver(Provider.S3)
driver = cls(self._s3_api_key, self._s3_api_key_secret)
#object_name = os.path.basename(urllib2.unquote(obj_url).decode('utf8'))
try:
cloud_obj = driver.get_object(container_name=self._s3_bucket,
object_name=obj_name)
except ObjectDoesNotExistError:
logging.info("Could not find object: %s" % obj_name)
exit(-1)
logging.info('Downloading: %s to %s' % (cloud_obj.name, dst))
cloud_obj.download(destination_path=dst)
def read_config_file(self, config_path):
"""Parse the application's config file located at config_path."""
config = ConfigParser.SafeConfigParser()
try:
config.readfp(open(config_path))
except IOError as e:
print "Failed to open config file at " + config_path + ": " + e.strerror
exit(-1)
        except Exception as e:
            print e
exit(-1)
return config
|
<commit_before><commit_msg>CC-5884: Modify Pypo -> Download files from cloud storage<commit_after>import os
import logging
import ConfigParser
import urllib2
from libcloud.storage.types import Provider, ContainerDoesNotExistError, ObjectDoesNotExistError
from libcloud.storage.providers import get_driver
CONFIG_PATH = '/etc/airtime/airtime.conf'
class CloudStorageDownloader:
def __init__(self):
config = self.read_config_file(CONFIG_PATH)
S3_CONFIG_SECTION = "s3"
self._s3_bucket = config.get(S3_CONFIG_SECTION, 'bucket')
self._s3_api_key = config.get(S3_CONFIG_SECTION, 'api_key')
self._s3_api_key_secret = config.get(S3_CONFIG_SECTION, 'api_key_secret')
def download_obj(self, dst, obj_name):
cls = get_driver(Provider.S3)
driver = cls(self._s3_api_key, self._s3_api_key_secret)
#object_name = os.path.basename(urllib2.unquote(obj_url).decode('utf8'))
try:
cloud_obj = driver.get_object(container_name=self._s3_bucket,
object_name=obj_name)
except ObjectDoesNotExistError:
logging.info("Could not find object: %s" % obj_name)
exit(-1)
logging.info('Downloading: %s to %s' % (cloud_obj.name, dst))
cloud_obj.download(destination_path=dst)
def read_config_file(self, config_path):
"""Parse the application's config file located at config_path."""
config = ConfigParser.SafeConfigParser()
try:
config.readfp(open(config_path))
except IOError as e:
print "Failed to open config file at " + config_path + ": " + e.strerror
exit(-1)
        except Exception as e:
            print e
exit(-1)
return config
|
|
e032e8156843109931c1f66e9deb64c580348ebf
|
lintcode/Easy/096_Partition_List.py
|
lintcode/Easy/096_Partition_List.py
|
"""
Definition of ListNode
class ListNode(object):
def __init__(self, val, next=None):
self.val = val
self.next = next
"""
class Solution:
"""
@param head: The first node of linked list.
@param x: an integer
@return: a ListNode
"""
def partition(self, head, x):
# write your code here
# Solution 1
arr = []
tmp = head
while (tmp):
arr.append(tmp.val)
tmp = tmp.next
less = filter(lambda n: n < x, arr)
rest = filter(lambda n: n >= x, arr)
arr = less + rest
arr = map(lambda n: ListNode(n), arr)
for i in range(len(arr) - 1):
arr[i].next = arr[i + 1]
return arr[0] if len(arr) > 0 else None
|
Add solution to lintcode question 96
|
Add solution to lintcode question 96
|
Python
|
mit
|
Rhadow/leetcode,Rhadow/leetcode,Rhadow/leetcode,Rhadow/leetcode
|
Add solution to lintcode question 96
|
"""
Definition of ListNode
class ListNode(object):
def __init__(self, val, next=None):
self.val = val
self.next = next
"""
class Solution:
"""
@param head: The first node of linked list.
@param x: an integer
@return: a ListNode
"""
def partition(self, head, x):
# write your code here
# Solution 1
arr = []
tmp = head
while (tmp):
arr.append(tmp.val)
tmp = tmp.next
less = filter(lambda n: n < x, arr)
rest = filter(lambda n: n >= x, arr)
arr = less + rest
arr = map(lambda n: ListNode(n), arr)
for i in range(len(arr) - 1):
arr[i].next = arr[i + 1]
return arr[0] if len(arr) > 0 else None
|
<commit_before><commit_msg>Add solution to lintcode question 96<commit_after>
|
"""
Definition of ListNode
class ListNode(object):
def __init__(self, val, next=None):
self.val = val
self.next = next
"""
class Solution:
"""
@param head: The first node of linked list.
@param x: an integer
@return: a ListNode
"""
def partition(self, head, x):
# write your code here
# Solution 1
arr = []
tmp = head
while (tmp):
arr.append(tmp.val)
tmp = tmp.next
less = filter(lambda n: n < x, arr)
rest = filter(lambda n: n >= x, arr)
arr = less + rest
arr = map(lambda n: ListNode(n), arr)
for i in range(len(arr) - 1):
arr[i].next = arr[i + 1]
return arr[0] if len(arr) > 0 else None
|
Add solution to lintcode question 96"""
Definition of ListNode
class ListNode(object):
def __init__(self, val, next=None):
self.val = val
self.next = next
"""
class Solution:
"""
@param head: The first node of linked list.
@param x: an integer
@return: a ListNode
"""
def partition(self, head, x):
# write your code here
# Solution 1
arr = []
tmp = head
while (tmp):
arr.append(tmp.val)
tmp = tmp.next
less = filter(lambda n: n < x, arr)
rest = filter(lambda n: n >= x, arr)
arr = less + rest
arr = map(lambda n: ListNode(n), arr)
for i in range(len(arr) - 1):
arr[i].next = arr[i + 1]
return arr[0] if len(arr) > 0 else None
|
<commit_before><commit_msg>Add solution to lintcode question 96<commit_after>"""
Definition of ListNode
class ListNode(object):
def __init__(self, val, next=None):
self.val = val
self.next = next
"""
class Solution:
"""
@param head: The first node of linked list.
@param x: an integer
@return: a ListNode
"""
def partition(self, head, x):
# write your code here
# Solution 1
arr = []
tmp = head
while (tmp):
arr.append(tmp.val)
tmp = tmp.next
less = filter(lambda n: n < x, arr)
rest = filter(lambda n: n >= x, arr)
arr = less + rest
arr = map(lambda n: ListNode(n), arr)
for i in range(len(arr) - 1):
arr[i].next = arr[i + 1]
return arr[0] if len(arr) > 0 else None
|
|
b682339c19702e01de228d0ff982ca086ef9906f
|
aleph/migrate/versions/a8849e4e6784_match_table.py
|
aleph/migrate/versions/a8849e4e6784_match_table.py
|
"""Remove the match table.
Revision ID: a8849e4e6784
Revises: 2979a1322381
Create Date: 2020-03-14 20:16:35.882396
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = 'a8849e4e6784'
down_revision = '2979a1322381'
def upgrade():
op.drop_index('ix_match_collection_id', table_name='match')
op.drop_index('ix_match_match_collection_id', table_name='match')
op.drop_table('match')
def downgrade():
pass
|
Drop the old xref table
|
Drop the old xref table
|
Python
|
mit
|
pudo/aleph,alephdata/aleph,alephdata/aleph,alephdata/aleph,pudo/aleph,alephdata/aleph,pudo/aleph,alephdata/aleph
|
Drop the old xref table
|
"""Remove the match table.
Revision ID: a8849e4e6784
Revises: 2979a1322381
Create Date: 2020-03-14 20:16:35.882396
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = 'a8849e4e6784'
down_revision = '2979a1322381'
def upgrade():
op.drop_index('ix_match_collection_id', table_name='match')
op.drop_index('ix_match_match_collection_id', table_name='match')
op.drop_table('match')
def downgrade():
pass
|
<commit_before><commit_msg>Drop the old xref table<commit_after>
|
"""Remove the match table.
Revision ID: a8849e4e6784
Revises: 2979a1322381
Create Date: 2020-03-14 20:16:35.882396
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = 'a8849e4e6784'
down_revision = '2979a1322381'
def upgrade():
op.drop_index('ix_match_collection_id', table_name='match')
op.drop_index('ix_match_match_collection_id', table_name='match')
op.drop_table('match')
def downgrade():
pass
|
Drop the old xref table"""Remove the match table.
Revision ID: a8849e4e6784
Revises: 2979a1322381
Create Date: 2020-03-14 20:16:35.882396
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = 'a8849e4e6784'
down_revision = '2979a1322381'
def upgrade():
op.drop_index('ix_match_collection_id', table_name='match')
op.drop_index('ix_match_match_collection_id', table_name='match')
op.drop_table('match')
def downgrade():
pass
|
<commit_before><commit_msg>Drop the old xref table<commit_after>"""Remove the match table.
Revision ID: a8849e4e6784
Revises: 2979a1322381
Create Date: 2020-03-14 20:16:35.882396
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = 'a8849e4e6784'
down_revision = '2979a1322381'
def upgrade():
op.drop_index('ix_match_collection_id', table_name='match')
op.drop_index('ix_match_match_collection_id', table_name='match')
op.drop_table('match')
def downgrade():
pass
|
|
cafa02b22a5b3600f9c46331bf73fa3a4bf75ed1
|
api/migrations/0098_userpreferences_default_tab.py
|
api/migrations/0098_userpreferences_default_tab.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0097_auto_20160210_0814'),
]
operations = [
migrations.AddField(
model_name='userpreferences',
name='default_tab',
field=models.CharField(default=b'following', help_text='The activities you see by default on the homepage.', max_length=30, verbose_name='Default tab', choices=[(b'following', 'Following'), (b'all', 'All')]),
preserve_default=True,
),
]
|
Choose default tab on homepage in settings (following or all)
|
Choose default tab on homepage in settings (following or all)
|
Python
|
apache-2.0
|
rdsathene/SchoolIdolAPI,SchoolIdolTomodachi/SchoolIdolAPI,dburr/SchoolIdolAPI,rdsathene/SchoolIdolAPI,dburr/SchoolIdolAPI,SchoolIdolTomodachi/SchoolIdolAPI,dburr/SchoolIdolAPI,rdsathene/SchoolIdolAPI,SchoolIdolTomodachi/SchoolIdolAPI
|
Choose default tab on homepage in settings (following or all)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0097_auto_20160210_0814'),
]
operations = [
migrations.AddField(
model_name='userpreferences',
name='default_tab',
field=models.CharField(default=b'following', help_text='The activities you see by default on the homepage.', max_length=30, verbose_name='Default tab', choices=[(b'following', 'Following'), (b'all', 'All')]),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Choose default tab on homepage in settings (following or all)<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0097_auto_20160210_0814'),
]
operations = [
migrations.AddField(
model_name='userpreferences',
name='default_tab',
field=models.CharField(default=b'following', help_text='The activities you see by default on the homepage.', max_length=30, verbose_name='Default tab', choices=[(b'following', 'Following'), (b'all', 'All')]),
preserve_default=True,
),
]
|
Choose default tab on homepage in settings (following or all)# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0097_auto_20160210_0814'),
]
operations = [
migrations.AddField(
model_name='userpreferences',
name='default_tab',
field=models.CharField(default=b'following', help_text='The activities you see by default on the homepage.', max_length=30, verbose_name='Default tab', choices=[(b'following', 'Following'), (b'all', 'All')]),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Choose default tab on homepage in settings (following or all)<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0097_auto_20160210_0814'),
]
operations = [
migrations.AddField(
model_name='userpreferences',
name='default_tab',
field=models.CharField(default=b'following', help_text='The activities you see by default on the homepage.', max_length=30, verbose_name='Default tab', choices=[(b'following', 'Following'), (b'all', 'All')]),
preserve_default=True,
),
]
|
|
71267c56b3dbe1fafc953d676ee68da7a31aaa89
|
UI/flask_ownstorj/ownstorj/models/backend_engine.py
|
UI/flask_ownstorj/ownstorj/models/backend_engine.py
|
from UI.utilities import account_manager
from UI.engine import StorjEngine
from ownstorj_config_manager import OwnStorjConfigManager
# OwnStorj backend engine
storj_engine = StorjEngine() # init StorjEngine
ownstorj_config_manager = OwnStorjConfigManager()
class OwnStorjBuckets:
def __init__(self):
print 1
def initialize_public_bucket(self):
try:
bucket_create_response = storj_engine.storj_client.bucket_create(name="ownstorj_public", transfer=1, storage=1)
ownstorj_config_manager.add_public_bucket(bucket_id=bucket_create_response.id,
bucket_name=bucket_create_response.name)
return True
except BaseException as e:
print e
return False
|
Create OwnStorj backend engine module
|
Create OwnStorj backend engine module
|
Python
|
mit
|
lakewik/storj-gui-client
|
Create OwnStorj backend engine module
|
from UI.utilities import account_manager
from UI.engine import StorjEngine
from ownstorj_config_manager import OwnStorjConfigManager
# OwnStorj backend engine
storj_engine = StorjEngine() # init StorjEngine
ownstorj_config_manager = OwnStorjConfigManager()
class OwnStorjBuckets:
def __init__(self):
print 1
def initialize_public_bucket(self):
try:
bucket_create_response = storj_engine.storj_client.bucket_create(name="ownstorj_public", transfer=1, storage=1)
ownstorj_config_manager.add_public_bucket(bucket_id=bucket_create_response.id,
bucket_name=bucket_create_response.name)
return True
except BaseException as e:
print e
return False
|
<commit_before><commit_msg>Create OwnStorj backend engine module<commit_after>
|
from UI.utilities import account_manager
from UI.engine import StorjEngine
from ownstorj_config_manager import OwnStorjConfigManager
# OwnStorj backend engine
storj_engine = StorjEngine() # init StorjEngine
ownstorj_config_manager = OwnStorjConfigManager()
class OwnStorjBuckets:
def __init__(self):
print 1
def initialize_public_bucket(self):
try:
bucket_create_response = storj_engine.storj_client.bucket_create(name="ownstorj_public", transfer=1, storage=1)
ownstorj_config_manager.add_public_bucket(bucket_id=bucket_create_response.id,
bucket_name=bucket_create_response.name)
return True
except BaseException as e:
print e
return False
|
Create OwnStorj backend engine modulefrom UI.utilities import account_manager
from UI.engine import StorjEngine
from ownstorj_config_manager import OwnStorjConfigManager
# OwnStorj backend engine
storj_engine = StorjEngine() # init StorjEngine
ownstorj_config_manager = OwnStorjConfigManager()
class OwnStorjBuckets:
def __init__(self):
print 1
def initialize_public_bucket(self):
try:
bucket_create_response = storj_engine.storj_client.bucket_create(name="ownstorj_public", transfer=1, storage=1)
ownstorj_config_manager.add_public_bucket(bucket_id=bucket_create_response.id,
bucket_name=bucket_create_response.name)
return True
except BaseException as e:
print e
return False
|
<commit_before><commit_msg>Create OwnStorj backend engine module<commit_after>from UI.utilities import account_manager
from UI.engine import StorjEngine
from ownstorj_config_manager import OwnStorjConfigManager
# OwnStorj backend engine
storj_engine = StorjEngine() # init StorjEngine
ownstorj_config_manager = OwnStorjConfigManager()
class OwnStorjBuckets:
def __init__(self):
print 1
def initialize_public_bucket(self):
try:
bucket_create_response = storj_engine.storj_client.bucket_create(name="ownstorj_public", transfer=1, storage=1)
ownstorj_config_manager.add_public_bucket(bucket_id=bucket_create_response.id,
bucket_name=bucket_create_response.name)
return True
except BaseException as e:
print e
return False
|
|
7d828d3715e2cca4e3a073c1f71bf16a5e4d8c38
|
symposion/speakers/migrations/0007_add_biography_help_text.py
|
symposion/speakers/migrations/0007_add_biography_help_text.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2018-01-26 18:57
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('symposion_speakers', '0006_add_photo_help_text'),
]
operations = [
migrations.AlterField(
model_name='speaker',
name='biography',
field=models.TextField(blank=True, help_text="A little bit about you. Edit using <a href='http://warpedvisions.org/projects/markdown-cheat-sheet/' target='_blank'>Markdown</a>.", verbose_name='Biography'),
),
]
|
Add missing migration for Speaker.biography.
|
Add missing migration for Speaker.biography.
See 370690e.
|
Python
|
bsd-3-clause
|
pydata/symposion,pydata/symposion
|
Add missing migration for Speaker.biography.
See 370690e.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2018-01-26 18:57
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('symposion_speakers', '0006_add_photo_help_text'),
]
operations = [
migrations.AlterField(
model_name='speaker',
name='biography',
field=models.TextField(blank=True, help_text="A little bit about you. Edit using <a href='http://warpedvisions.org/projects/markdown-cheat-sheet/' target='_blank'>Markdown</a>.", verbose_name='Biography'),
),
]
|
<commit_before><commit_msg>Add missing migration for Speaker.biography.
See 370690e.<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2018-01-26 18:57
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('symposion_speakers', '0006_add_photo_help_text'),
]
operations = [
migrations.AlterField(
model_name='speaker',
name='biography',
field=models.TextField(blank=True, help_text="A little bit about you. Edit using <a href='http://warpedvisions.org/projects/markdown-cheat-sheet/' target='_blank'>Markdown</a>.", verbose_name='Biography'),
),
]
|
Add missing migration for Speaker.biography.
See 370690e.# -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2018-01-26 18:57
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('symposion_speakers', '0006_add_photo_help_text'),
]
operations = [
migrations.AlterField(
model_name='speaker',
name='biography',
field=models.TextField(blank=True, help_text="A little bit about you. Edit using <a href='http://warpedvisions.org/projects/markdown-cheat-sheet/' target='_blank'>Markdown</a>.", verbose_name='Biography'),
),
]
|
<commit_before><commit_msg>Add missing migration for Speaker.biography.
See 370690e.<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2018-01-26 18:57
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('symposion_speakers', '0006_add_photo_help_text'),
]
operations = [
migrations.AlterField(
model_name='speaker',
name='biography',
field=models.TextField(blank=True, help_text="A little bit about you. Edit using <a href='http://warpedvisions.org/projects/markdown-cheat-sheet/' target='_blank'>Markdown</a>.", verbose_name='Biography'),
),
]
|
|
5dce9c5438b4fcb63c2b7a4a4f481cba331836ed
|
wger/exercises/management/commands/exercises-health-check.py
|
wger/exercises/management/commands/exercises-health-check.py
|
# -*- coding: utf-8 *-*
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# Standard Library
import collections
# Django
from django.core.management.base import BaseCommand
# wger
from wger.core.models import Language
from wger.exercises.models import ExerciseBase
from wger.utils.constants import ENGLISH_SHORT_NAME
class Command(BaseCommand):
"""
Performs some sanity checks on the exercise database
"""
help = """Performs some sanity checks on the database
At the moment this script checks the following:
- each base has at least one exercise
- each exercise base has a translation in English
- exercise bases have no duplicate translations
"""
def add_arguments(self, parser):
# Add dry run argument
parser.add_argument(
'--delete',
action='store_true',
dest='delete',
default=False,
help='Delete problematic exercise bases from the database (use with care!!)',
)
def handle(self, **options):
delete = options['delete']
english = Language.objects.get(short_name=ENGLISH_SHORT_NAME)
for base in ExerciseBase.objects.all():
if not base.exercises.count():
warning = f'Exercise base {base.uuid} has no translations!'
self.stdout.write(self.style.WARNING(warning))
if delete:
base.delete()
self.stdout.write(' Deleting base...')
continue
if not base.exercises.filter(language=english).exists():
warning = f'Exercise base {base.uuid} has no English translation!'
self.stdout.write(self.style.WARNING(warning))
if delete:
base.delete()
self.stdout.write(' Deleting base...')
exercise_languages = base.exercises.values_list('language_id', flat=True)
duplicates = [
item for item, count in collections.Counter(exercise_languages).items() if count > 1
]
if not duplicates:
continue
warning = f'Exercise base {base.uuid} has duplicate translations for language IDs: {duplicates}!'
self.stdout.write(self.style.WARNING(warning))
if delete:
exercises = base.exercises.filter(language_id__in=duplicates)
for exercise in exercises[1:]:
exercise.delete()
self.stdout.write(
f' Deleting translation {exercise.uuid} for language ID {exercise.language_id}...'
)
|
Add script to check the current exercise translations
|
Add script to check the current exercise translations
|
Python
|
agpl-3.0
|
wger-project/wger,wger-project/wger,wger-project/wger,wger-project/wger
|
Add script to check the current exercise translations
|
# -*- coding: utf-8 *-*
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# Standard Library
import collections
# Django
from django.core.management.base import BaseCommand
# wger
from wger.core.models import Language
from wger.exercises.models import ExerciseBase
from wger.utils.constants import ENGLISH_SHORT_NAME
class Command(BaseCommand):
"""
Performs some sanity checks on the exercise database
"""
help = """Performs some sanity checks on the database
At the moment this script checks the following:
- each base has at least one exercise
- each exercise base has a translation in English
- exercise bases have no duplicate translations
"""
def add_arguments(self, parser):
# Add dry run argument
parser.add_argument(
'--delete',
action='store_true',
dest='delete',
default=False,
help='Delete problematic exercise bases from the database (use with care!!)',
)
def handle(self, **options):
delete = options['delete']
english = Language.objects.get(short_name=ENGLISH_SHORT_NAME)
for base in ExerciseBase.objects.all():
if not base.exercises.count():
warning = f'Exercise base {base.uuid} has no translations!'
self.stdout.write(self.style.WARNING(warning))
if delete:
base.delete()
self.stdout.write(' Deleting base...')
continue
if not base.exercises.filter(language=english).exists():
warning = f'Exercise base {base.uuid} has no English translation!'
self.stdout.write(self.style.WARNING(warning))
if delete:
base.delete()
self.stdout.write(' Deleting base...')
exercise_languages = base.exercises.values_list('language_id', flat=True)
duplicates = [
item for item, count in collections.Counter(exercise_languages).items() if count > 1
]
if not duplicates:
continue
warning = f'Exercise base {base.uuid} has duplicate translations for language IDs: {duplicates}!'
self.stdout.write(self.style.WARNING(warning))
if delete:
exercises = base.exercises.filter(language_id__in=duplicates)
for exercise in exercises[1:]:
exercise.delete()
self.stdout.write(
f' Deleting translation {exercise.uuid} for language ID {exercise.language_id}...'
)
|
<commit_before><commit_msg>Add script to check the current exercise translations<commit_after>
|
# -*- coding: utf-8 *-*
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# Standard Library
import collections
# Django
from django.core.management.base import BaseCommand
# wger
from wger.core.models import Language
from wger.exercises.models import ExerciseBase
from wger.utils.constants import ENGLISH_SHORT_NAME
class Command(BaseCommand):
"""
Performs some sanity checks on the exercise database
"""
help = """Performs some sanity checks on the database
At the moment this script checks the following:
- each base has at least one exercise
- each exercise base has a translation in English
- exercise bases have no duplicate translations
"""
def add_arguments(self, parser):
# Add dry run argument
parser.add_argument(
'--delete',
action='store_true',
dest='delete',
default=False,
help='Delete problematic exercise bases from the database (use with care!!)',
)
def handle(self, **options):
delete = options['delete']
english = Language.objects.get(short_name=ENGLISH_SHORT_NAME)
for base in ExerciseBase.objects.all():
if not base.exercises.count():
warning = f'Exercise base {base.uuid} has no translations!'
self.stdout.write(self.style.WARNING(warning))
if delete:
base.delete()
self.stdout.write(' Deleting base...')
continue
if not base.exercises.filter(language=english).exists():
warning = f'Exercise base {base.uuid} has no English translation!'
self.stdout.write(self.style.WARNING(warning))
if delete:
base.delete()
self.stdout.write(' Deleting base...')
exercise_languages = base.exercises.values_list('language_id', flat=True)
duplicates = [
item for item, count in collections.Counter(exercise_languages).items() if count > 1
]
if not duplicates:
continue
warning = f'Exercise base {base.uuid} has duplicate translations for language IDs: {duplicates}!'
self.stdout.write(self.style.WARNING(warning))
if delete:
exercises = base.exercises.filter(language_id__in=duplicates)
for exercise in exercises[1:]:
exercise.delete()
self.stdout.write(
f' Deleting translation {exercise.uuid} for language ID {exercise.language_id}...'
)
|
Add script to check the current exercise translations# -*- coding: utf-8 *-*
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# Standard Library
import collections
# Django
from django.core.management.base import BaseCommand
# wger
from wger.core.models import Language
from wger.exercises.models import ExerciseBase
from wger.utils.constants import ENGLISH_SHORT_NAME
class Command(BaseCommand):
"""
Performs some sanity checks on the exercise database
"""
help = """Performs some sanity checks on the database
At the moment this script checks the following:
- each base has at least one exercise
- each exercise base has a translation in English
- exercise bases have no duplicate translations
"""
def add_arguments(self, parser):
# Add dry run argument
parser.add_argument(
'--delete',
action='store_true',
dest='delete',
default=False,
help='Delete problematic exercise bases from the database (use with care!!)',
)
def handle(self, **options):
delete = options['delete']
english = Language.objects.get(short_name=ENGLISH_SHORT_NAME)
for base in ExerciseBase.objects.all():
if not base.exercises.count():
warning = f'Exercise base {base.uuid} has no translations!'
self.stdout.write(self.style.WARNING(warning))
if delete:
base.delete()
self.stdout.write(' Deleting base...')
continue
if not base.exercises.filter(language=english).exists():
warning = f'Exercise base {base.uuid} has no English translation!'
self.stdout.write(self.style.WARNING(warning))
if delete:
base.delete()
self.stdout.write(' Deleting base...')
exercise_languages = base.exercises.values_list('language_id', flat=True)
duplicates = [
item for item, count in collections.Counter(exercise_languages).items() if count > 1
]
if not duplicates:
continue
warning = f'Exercise base {base.uuid} has duplicate translations for language IDs: {duplicates}!'
self.stdout.write(self.style.WARNING(warning))
if delete:
exercises = base.exercises.filter(language_id__in=duplicates)
for exercise in exercises[1:]:
exercise.delete()
self.stdout.write(
f' Deleting translation {exercise.uuid} for language ID {exercise.language_id}...'
)
|
<commit_before><commit_msg>Add script to check the current exercise translations<commit_after># -*- coding: utf-8 *-*
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# Standard Library
import collections
# Django
from django.core.management.base import BaseCommand
# wger
from wger.core.models import Language
from wger.exercises.models import ExerciseBase
from wger.utils.constants import ENGLISH_SHORT_NAME
class Command(BaseCommand):
"""
Performs some sanity checks on the exercise database
"""
help = """Performs some sanity checks on the database
At the moment this script checks the following:
- each base has at least one exercise
- each exercise base has a translation in English
- exercise bases have no duplicate translations
"""
def add_arguments(self, parser):
# Add dry run argument
parser.add_argument(
'--delete',
action='store_true',
dest='delete',
default=False,
help='Delete problematic exercise bases from the database (use with care!!)',
)
def handle(self, **options):
delete = options['delete']
english = Language.objects.get(short_name=ENGLISH_SHORT_NAME)
for base in ExerciseBase.objects.all():
if not base.exercises.count():
warning = f'Exercise base {base.uuid} has no translations!'
self.stdout.write(self.style.WARNING(warning))
if delete:
base.delete()
self.stdout.write(' Deleting base...')
continue
if not base.exercises.filter(language=english).exists():
warning = f'Exercise base {base.uuid} has no English translation!'
self.stdout.write(self.style.WARNING(warning))
if delete:
base.delete()
self.stdout.write(' Deleting base...')
exercise_languages = base.exercises.values_list('language_id', flat=True)
duplicates = [
item for item, count in collections.Counter(exercise_languages).items() if count > 1
]
if not duplicates:
continue
warning = f'Exercise base {base.uuid} has duplicate translations for language IDs: {duplicates}!'
self.stdout.write(self.style.WARNING(warning))
if delete:
exercises = base.exercises.filter(language_id__in=duplicates)
for exercise in exercises[1:]:
exercise.delete()
self.stdout.write(
f' Deleting translation {exercise.uuid} for language ID {exercise.language_id}...'
)
|
|
4faadfd42bc44542423e3ca73ad659fa4f372a22
|
shuup_tests/notify/test_action_send_mail.py
|
shuup_tests/notify/test_action_send_mail.py
|
import pytest
from shuup.notify.base import Action
from shuup.notify.script import Step, Context
TEST_STEP_ACTIONS = [
{
"identifier": "send_email",
"language": {
"constant": "fi"
},
"recipient": {
"variable": "customer_email"
},
"template_data": {
"fi": {
"body": "Irrelevant body",
"subject": "Irrelevant subject"
}
}
},
{
"identifier": "send_email",
"language": {
"constant": "fi"
},
"recipient": {
"constant": "some.email@domain.net"
},
"template_data": {
"fi": {
"body": "Irrelevant body",
"subject": "Irrelevant subject"
}
}
}
]
@pytest.mark.django_db
def test_render_template():
step = Step(
conditions=(),
actions=[Action.unserialize(action) for action in TEST_STEP_ACTIONS],
)
assert step
execution_context = Context(variables={
"customer_phone": "0594036495",
"language": "fi",
"customer_email": "some.email@gmail.com"
})
step.execute(context=execution_context)
|
Add a test proving a bug in the notify app
|
Add a test proving a bug in the notify app
Add a test that reproduces a failure seen in a production environment
Leaving the actual fix for the people who know how they want it done
No refs
|
Python
|
agpl-3.0
|
shoopio/shoop,suutari-ai/shoop,suutari-ai/shoop,shoopio/shoop,suutari-ai/shoop,shoopio/shoop
|
Add a test proving a bug in the notify app
Add a test that reproduces a failure seen in a production environment
Leaving the actual fix for the people who know how they want it done
No refs
|
import pytest
from shuup.notify.base import Action
from shuup.notify.script import Step, Context
TEST_STEP_ACTIONS = [
{
"identifier": "send_email",
"language": {
"constant": "fi"
},
"recipient": {
"variable": "customer_email"
},
"template_data": {
"fi": {
"body": "Irrelevant body",
"subject": "Irrelevant subject"
}
}
},
{
"identifier": "send_email",
"language": {
"constant": "fi"
},
"recipient": {
"constant": "some.email@domain.net"
},
"template_data": {
"fi": {
"body": "Irrelevant body",
"subject": "Irrelevant subject"
}
}
}
]
@pytest.mark.django_db
def test_render_template():
step = Step(
conditions=(),
actions=[Action.unserialize(action) for action in TEST_STEP_ACTIONS],
)
assert step
execution_context = Context(variables={
"customer_phone": "0594036495",
"language": "fi",
"customer_email": "some.email@gmail.com"
})
step.execute(context=execution_context)
|
<commit_before><commit_msg>Add a test proving a bug in the notify app
Add a test that reproduces a failure seen in a production environment
Leaving the actual fix for the people who know how they want it done
No refs<commit_after>
|
import pytest
from shuup.notify.base import Action
from shuup.notify.script import Step, Context
TEST_STEP_ACTIONS = [
{
"identifier": "send_email",
"language": {
"constant": "fi"
},
"recipient": {
"variable": "customer_email"
},
"template_data": {
"fi": {
"body": "Irrelevant body",
"subject": "Irrelevant subject"
}
}
},
{
"identifier": "send_email",
"language": {
"constant": "fi"
},
"recipient": {
"constant": "some.email@domain.net"
},
"template_data": {
"fi": {
"body": "Irrelevant body",
"subject": "Irrelevant subject"
}
}
}
]
@pytest.mark.django_db
def test_render_template():
step = Step(
conditions=(),
actions=[Action.unserialize(action) for action in TEST_STEP_ACTIONS],
)
assert step
execution_context = Context(variables={
"customer_phone": "0594036495",
"language": "fi",
"customer_email": "some.email@gmail.com"
})
step.execute(context=execution_context)
|
Add a test proving a bug in the notify app
Add a test that reproduces a failure seen in a production environment
Leaving the actual fix for the people who know how they want it done
No refsimport pytest
from shuup.notify.base import Action
from shuup.notify.script import Step, Context
TEST_STEP_ACTIONS = [
{
"identifier": "send_email",
"language": {
"constant": "fi"
},
"recipient": {
"variable": "customer_email"
},
"template_data": {
"fi": {
"body": "Irrelevant body",
"subject": "Irrelevant subject"
}
}
},
{
"identifier": "send_email",
"language": {
"constant": "fi"
},
"recipient": {
"constant": "some.email@domain.net"
},
"template_data": {
"fi": {
"body": "Irrelevant body",
"subject": "Irrelevant subject"
}
}
}
]
@pytest.mark.django_db
def test_render_template():
step = Step(
conditions=(),
actions=[Action.unserialize(action) for action in TEST_STEP_ACTIONS],
)
assert step
execution_context = Context(variables={
"customer_phone": "0594036495",
"language": "fi",
"customer_email": "some.email@gmail.com"
})
step.execute(context=execution_context)
|
<commit_before><commit_msg>Add a test proving a bug in the notify app
Add a test that reproduces a failure seen in a production environment
Leaving the actual fix for the people who know how they want it done
No refs<commit_after>import pytest
from shuup.notify.base import Action
from shuup.notify.script import Step, Context
TEST_STEP_ACTIONS = [
{
"identifier": "send_email",
"language": {
"constant": "fi"
},
"recipient": {
"variable": "customer_email"
},
"template_data": {
"fi": {
"body": "Irrelevant body",
"subject": "Irrelevant subject"
}
}
},
{
"identifier": "send_email",
"language": {
"constant": "fi"
},
"recipient": {
"constant": "some.email@domain.net"
},
"template_data": {
"fi": {
"body": "Irrelevant body",
"subject": "Irrelevant subject"
}
}
}
]
@pytest.mark.django_db
def test_render_template():
step = Step(
conditions=(),
actions=[Action.unserialize(action) for action in TEST_STEP_ACTIONS],
)
assert step
execution_context = Context(variables={
"customer_phone": "0594036495",
"language": "fi",
"customer_email": "some.email@gmail.com"
})
step.execute(context=execution_context)
|
|
ebfdc2e87a6798cfd0e3aae1e974069c828fbc4d
|
kufpybio/goxmlparser.py
|
kufpybio/goxmlparser.py
|
import xml.etree.ElementTree as ElementTree
class GOXMLParser(object):
def go_term_information(self, xml):
tree = ElementTree.fromstring(xml)
return{
"name" : tree.findtext("./term/name"),
"namespace" : tree.findtext("./term/namespace"),
"def" : tree.findtext("./term/def/defstr")}
|
Add an XML parser for GO OBO XML
|
Add an XML parser for GO OBO XML
|
Python
|
isc
|
konrad/kufpybio
|
Add an XML parser for GO OBO XML
|
import xml.etree.ElementTree as ElementTree
class GOXMLParser(object):
def go_term_information(self, xml):
tree = ElementTree.fromstring(xml)
return{
"name" : tree.findtext("./term/name"),
"namespace" : tree.findtext("./term/namespace"),
"def" : tree.findtext("./term/def/defstr")}
|
<commit_before><commit_msg>Add an XML parser for GO OBO XML<commit_after>
|
import xml.etree.ElementTree as ElementTree
class GOXMLParser(object):
def go_term_information(self, xml):
tree = ElementTree.fromstring(xml)
return{
"name" : tree.findtext("./term/name"),
"namespace" : tree.findtext("./term/namespace"),
"def" : tree.findtext("./term/def/defstr")}
|
Add an XML parser for GO OBO XMLimport xml.etree.ElementTree as ElementTree
class GOXMLParser(object):
def go_term_information(self, xml):
tree = ElementTree.fromstring(xml)
return{
"name" : tree.findtext("./term/name"),
"namespace" : tree.findtext("./term/namespace"),
"def" : tree.findtext("./term/def/defstr")}
|
<commit_before><commit_msg>Add an XML parser for GO OBO XML<commit_after>import xml.etree.ElementTree as ElementTree
class GOXMLParser(object):
def go_term_information(self, xml):
tree = ElementTree.fromstring(xml)
return{
"name" : tree.findtext("./term/name"),
"namespace" : tree.findtext("./term/namespace"),
"def" : tree.findtext("./term/def/defstr")}
|
|
f3b32a91e2f7ae5a04b19f2c5918f24e443033ad
|
appengine/swarming/tools/health_check.py
|
appengine/swarming/tools/health_check.py
|
#!/usr/bin/env python
# Copyright 2018 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Check the health of a Swarming version."""
import argparse
import os
import subprocess
import sys
HERE = os.path.dirname(__file__)
SWARMING_TOOL = os.path.join(HERE, '..', '..', '..', 'client', 'swarming.py')
# TODO(flowblok): remove this hard-coded pool
POOL = 'ChromeOS'
def main():
parser = argparse.ArgumentParser()
parser.add_argument('application')
parser.add_argument('server_version')
args = parser.parse_args()
url = 'https://{server_version}-dot-{application}.appspot.com'.format(
application=args.application,
server_version=args.server_version)
print 'Scheduling no-op task on', url
rv = subprocess.call([
SWARMING_TOOL, 'run',
'-S', url,
'--expiration', '120',
'--hard-timeout', '120',
'-d', 'pool', POOL,
'-d', 'server_version', args.server_version,
'--raw-cmd', '--', 'python', '-c', 'pass'])
if rv != 0:
print>>sys.stderr, 'Failed to run no-op task'
return 2
return 0
if __name__ == '__main__':
sys.exit(main())
|
Add a health check tool for use by the deployment tools.
|
Add a health check tool for use by the deployment tools.
Bug: 812965
Change-Id: I3edcc8830f186c6b02e3d2071a8d7c73b54a4193
Reviewed-on: https://chromium-review.googlesource.com/940382
Reviewed-by: Marc-Antoine Ruel <d2bf0fc09b08f7b0888b3ddab32b3e89c7122c8b@chromium.org>
Commit-Queue: Marc-Antoine Ruel <d2bf0fc09b08f7b0888b3ddab32b3e89c7122c8b@chromium.org>
|
Python
|
apache-2.0
|
luci/luci-py,luci/luci-py,luci/luci-py,luci/luci-py
|
Add a health check tool for use by the deployment tools.
Bug: 812965
Change-Id: I3edcc8830f186c6b02e3d2071a8d7c73b54a4193
Reviewed-on: https://chromium-review.googlesource.com/940382
Reviewed-by: Marc-Antoine Ruel <d2bf0fc09b08f7b0888b3ddab32b3e89c7122c8b@chromium.org>
Commit-Queue: Marc-Antoine Ruel <d2bf0fc09b08f7b0888b3ddab32b3e89c7122c8b@chromium.org>
|
#!/usr/bin/env python
# Copyright 2018 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Check the health of a Swarming version."""
import argparse
import os
import subprocess
import sys
HERE = os.path.dirname(__file__)
SWARMING_TOOL = os.path.join(HERE, '..', '..', '..', 'client', 'swarming.py')
# TODO(flowblok): remove this hard-coded pool
POOL = 'ChromeOS'
def main():
parser = argparse.ArgumentParser()
parser.add_argument('application')
parser.add_argument('server_version')
args = parser.parse_args()
url = 'https://{server_version}-dot-{application}.appspot.com'.format(
application=args.application,
server_version=args.server_version)
print 'Scheduling no-op task on', url
rv = subprocess.call([
SWARMING_TOOL, 'run',
'-S', url,
'--expiration', '120',
'--hard-timeout', '120',
'-d', 'pool', POOL,
'-d', 'server_version', args.server_version,
'--raw-cmd', '--', 'python', '-c', 'pass'])
if rv != 0:
print>>sys.stderr, 'Failed to run no-op task'
return 2
return 0
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>Add a health check tool for use by the deployment tools.
Bug: 812965
Change-Id: I3edcc8830f186c6b02e3d2071a8d7c73b54a4193
Reviewed-on: https://chromium-review.googlesource.com/940382
Reviewed-by: Marc-Antoine Ruel <d2bf0fc09b08f7b0888b3ddab32b3e89c7122c8b@chromium.org>
Commit-Queue: Marc-Antoine Ruel <d2bf0fc09b08f7b0888b3ddab32b3e89c7122c8b@chromium.org><commit_after>
|
#!/usr/bin/env python
# Copyright 2018 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Check the health of a Swarming version."""
import argparse
import os
import subprocess
import sys
HERE = os.path.dirname(__file__)
SWARMING_TOOL = os.path.join(HERE, '..', '..', '..', 'client', 'swarming.py')
# TODO(flowblok): remove this hard-coded pool
POOL = 'ChromeOS'
def main():
parser = argparse.ArgumentParser()
parser.add_argument('application')
parser.add_argument('server_version')
args = parser.parse_args()
url = 'https://{server_version}-dot-{application}.appspot.com'.format(
application=args.application,
server_version=args.server_version)
print 'Scheduling no-op task on', url
rv = subprocess.call([
SWARMING_TOOL, 'run',
'-S', url,
'--expiration', '120',
'--hard-timeout', '120',
'-d', 'pool', POOL,
'-d', 'server_version', args.server_version,
'--raw-cmd', '--', 'python', '-c', 'pass'])
if rv != 0:
print>>sys.stderr, 'Failed to run no-op task'
return 2
return 0
if __name__ == '__main__':
sys.exit(main())
|
Add a health check tool for use by the deployment tools.
Bug: 812965
Change-Id: I3edcc8830f186c6b02e3d2071a8d7c73b54a4193
Reviewed-on: https://chromium-review.googlesource.com/940382
Reviewed-by: Marc-Antoine Ruel <d2bf0fc09b08f7b0888b3ddab32b3e89c7122c8b@chromium.org>
Commit-Queue: Marc-Antoine Ruel <d2bf0fc09b08f7b0888b3ddab32b3e89c7122c8b@chromium.org>#!/usr/bin/env python
# Copyright 2018 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Check the health of a Swarming version."""
import argparse
import os
import subprocess
import sys
HERE = os.path.dirname(__file__)
SWARMING_TOOL = os.path.join(HERE, '..', '..', '..', 'client', 'swarming.py')
# TODO(flowblok): remove this hard-coded pool
POOL = 'ChromeOS'
def main():
parser = argparse.ArgumentParser()
parser.add_argument('application')
parser.add_argument('server_version')
args = parser.parse_args()
url = 'https://{server_version}-dot-{application}.appspot.com'.format(
application=args.application,
server_version=args.server_version)
print 'Scheduling no-op task on', url
rv = subprocess.call([
SWARMING_TOOL, 'run',
'-S', url,
'--expiration', '120',
'--hard-timeout', '120',
'-d', 'pool', POOL,
'-d', 'server_version', args.server_version,
'--raw-cmd', '--', 'python', '-c', 'pass'])
if rv != 0:
print>>sys.stderr, 'Failed to run no-op task'
return 2
return 0
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>Add a health check tool for use by the deployment tools.
Bug: 812965
Change-Id: I3edcc8830f186c6b02e3d2071a8d7c73b54a4193
Reviewed-on: https://chromium-review.googlesource.com/940382
Reviewed-by: Marc-Antoine Ruel <d2bf0fc09b08f7b0888b3ddab32b3e89c7122c8b@chromium.org>
Commit-Queue: Marc-Antoine Ruel <d2bf0fc09b08f7b0888b3ddab32b3e89c7122c8b@chromium.org><commit_after>#!/usr/bin/env python
# Copyright 2018 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Check the health of a Swarming version."""
import argparse
import os
import subprocess
import sys
HERE = os.path.dirname(__file__)
SWARMING_TOOL = os.path.join(HERE, '..', '..', '..', 'client', 'swarming.py')
# TODO(flowblok): remove this hard-coded pool
POOL = 'ChromeOS'
def main():
parser = argparse.ArgumentParser()
parser.add_argument('application')
parser.add_argument('server_version')
args = parser.parse_args()
url = 'https://{server_version}-dot-{application}.appspot.com'.format(
application=args.application,
server_version=args.server_version)
print 'Scheduling no-op task on', url
rv = subprocess.call([
SWARMING_TOOL, 'run',
'-S', url,
'--expiration', '120',
'--hard-timeout', '120',
'-d', 'pool', POOL,
'-d', 'server_version', args.server_version,
'--raw-cmd', '--', 'python', '-c', 'pass'])
if rv != 0:
print>>sys.stderr, 'Failed to run no-op task'
return 2
return 0
if __name__ == '__main__':
sys.exit(main())
|
|
2f4be8ad7234ab59fc6e98b8353a40195dd08ffb
|
scripts/video/get_stream.py
|
scripts/video/get_stream.py
|
#!/usr/bin/env python3
import requests
frames = []
data = b""
found_first = False
response = requests.get("http://navigation.local:8080/stream/video.h264", stream=True)
for chunk in response.iter_content(chunk_size=1024):
if chunk:
starting_offset = len(data)
if starting_offset >= 2:
if data[-1] == b"\x00":
print("last byte is zero, backing up one")
starting_offset -= 1
if data[-2] == b"\x00":
print("second to last byte is zero, backing up one more")
starting_offset -= 1
data = data + chunk
offset = data.find(b"\x00\x00\x01", starting_offset)
if offset != -1:
print("found frame")
remaining = data[offset:]
if not found_first:
print("dropping partial first frame")
found_first = True
else:
print("adding frame", len(frames) + 1)
frames.append(data[:offset])
if len(frames) == 120:
break
data = remaining
with open("navigation.h264", "wb") as out:
out.write(b"\x00")
for frame in frames:
out.write(frame)
|
Add script to stream h264 frames from uv4l
|
Add script to stream h264 frames from uv4l
|
Python
|
bsd-3-clause
|
gizmo-cda/g2x-submarine-v2,gizmo-cda/g2x-submarine-v2,gizmo-cda/g2x-submarine-v2,gizmo-cda/g2x-submarine-v2
|
Add script to stream h264 frames from uv4l
|
#!/usr/bin/env python3
import requests
frames = []
data = b""
found_first = False
response = requests.get("http://navigation.local:8080/stream/video.h264", stream=True)
for chunk in response.iter_content(chunk_size=1024):
if chunk:
starting_offset = len(data)
if starting_offset >= 2:
if data[-1] == b"\x00":
print("last byte is zero, backing up one")
starting_offset -= 1
if data[-2] == b"\x00":
print("second to last byte is zero, backing up one more")
starting_offset -= 1
data = data + chunk
offset = data.find(b"\x00\x00\x01", starting_offset)
if offset != -1:
print("found frame")
remaining = data[offset:]
if not found_first:
print("dropping partial first frame")
found_first = True
else:
print("adding frame", len(frames) + 1)
frames.append(data[:offset])
if len(frames) == 120:
break
data = remaining
with open("navigation.h264", "wb") as out:
out.write(b"\x00")
for frame in frames:
out.write(frame)
|
<commit_before><commit_msg>Add script to stream h264 frames from uv4l<commit_after>
|
#!/usr/bin/env python3
import requests
frames = []
data = b""
found_first = False
response = requests.get("http://navigation.local:8080/stream/video.h264", stream=True)
for chunk in response.iter_content(chunk_size=1024):
if chunk:
starting_offset = len(data)
if starting_offset >= 2:
if data[-1] == b"\x00":
print("last byte is zero, backing up one")
starting_offset -= 1
if data[-2] == b"\x00":
print("second to last byte is zero, backing up one more")
starting_offset -= 1
data = data + chunk
offset = data.find(b"\x00\x00\x01", starting_offset)
if offset != -1:
print("found frame")
remaining = data[offset:]
if not found_first:
print("dropping partial first frame")
found_first = True
else:
print("adding frame", len(frames) + 1)
frames.append(data[:offset])
if len(frames) == 120:
break
data = remaining
with open("navigation.h264", "wb") as out:
out.write(b"\x00")
for frame in frames:
out.write(frame)
|
Add script to stream h264 frames from uv4l#!/usr/bin/env python3
import requests
frames = []
data = b""
found_first = False
response = requests.get("http://navigation.local:8080/stream/video.h264", stream=True)
for chunk in response.iter_content(chunk_size=1024):
if chunk:
starting_offset = len(data)
if starting_offset >= 2:
if data[-1] == b"\x00":
print("last byte is zero, backing up one")
starting_offset -= 1
if data[-2] == b"\x00":
print("second to last byte is zero, backing up one more")
starting_offset -= 1
data = data + chunk
offset = data.find(b"\x00\x00\x01", starting_offset)
if offset != -1:
print("found frame")
remaining = data[offset:]
if not found_first:
print("dropping partial first frame")
found_first = True
else:
print("adding frame", len(frames) + 1)
frames.append(data[:offset])
if len(frames) == 120:
break
data = remaining
with open("navigation.h264", "wb") as out:
out.write(b"\x00")
for frame in frames:
out.write(frame)
|
<commit_before><commit_msg>Add script to stream h264 frames from uv4l<commit_after>#!/usr/bin/env python3
import requests
frames = []
data = b""
found_first = False
response = requests.get("http://navigation.local:8080/stream/video.h264", stream=True)
for chunk in response.iter_content(chunk_size=1024):
if chunk:
starting_offset = len(data)
if starting_offset >= 2:
if data[-1] == b"\x00":
print("last byte is zero, backing up one")
starting_offset -= 1
if data[-2] == b"\x00":
print("second to last byte is zero, backing up one more")
starting_offset -= 1
data = data + chunk
offset = data.find(b"\x00\x00\x01", starting_offset)
if offset != -1:
print("found frame")
remaining = data[offset:]
if not found_first:
print("dropping partial first frame")
found_first = True
else:
print("adding frame", len(frames) + 1)
frames.append(data[:offset])
if len(frames) == 120:
break
data = remaining
with open("navigation.h264", "wb") as out:
out.write(b"\x00")
for frame in frames:
out.write(frame)
|
|
e50edbfa98d3138836f7f4845e682817538a3e8a
|
akvo/rsr/management/commands/announcement_open_rate.py
|
akvo/rsr/management/commands/announcement_open_rate.py
|
#!/usr/bin/env python3
# Akvo Reporting is covered by the GNU Affero General Public License.
# See more details in the license.txt file located at the root folder of the Akvo RSR module.
# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
""" Calculate the open rate for announcements
Usage:
python manage.py announcement_open_rate announcement_tag release_date
"""
import datetime
from django.core.management.base import BaseCommand
from django.contrib.auth import get_user_model
User = get_user_model()
class Command(BaseCommand):
help = "Calculate the open rate for a specific announcement"
def add_arguments(self, parser):
parser.add_argument(
"announcement_tag", type=str, help="Tag for the announcment"
)
parser.add_argument(
"release_date", type=str, help="Release date",
)
def handle(self, *args, **options):
tag = options["announcement_tag"]
release_date = datetime.datetime.strptime(
options["release_date"], "%d-%m-%Y"
).date()
# NOTE: We use the last login date here, under the assumption that we
# are calculating the open rate only after a duration of 2 weeks of the
# release date, which is the cookie expiry time for our sessions
logged_in_users = User.objects.filter(last_login__gte=release_date)
seen_users = User.objects.filter(seen_announcements__overlap=[tag])
seen_count = seen_users.count()
logged_in_count = logged_in_users.count()
open_rate = (
seen_users.count() / logged_in_count * 100 if logged_in_count > 0 else None
)
print(f"Seen count: {seen_count}")
print(f"Logged in count: {logged_in_count}")
print(f"Open rate: {open_rate}")
|
Add command to compute the announcement open rate
|
Add command to compute the announcement open rate
|
Python
|
agpl-3.0
|
akvo/akvo-rsr,akvo/akvo-rsr,akvo/akvo-rsr,akvo/akvo-rsr
|
Add command to compute the announcement open rate
|
#!/usr/bin/env python3
# Akvo Reporting is covered by the GNU Affero General Public License.
# See more details in the license.txt file located at the root folder of the Akvo RSR module.
# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
""" Calculate the open rate for announcements
Usage:
python manage.py announcement_open_rate announcement_tag release_date
"""
import datetime
from django.core.management.base import BaseCommand
from django.contrib.auth import get_user_model
User = get_user_model()
class Command(BaseCommand):
help = "Calculate the open rate for a specific announcement"
def add_arguments(self, parser):
parser.add_argument(
"announcement_tag", type=str, help="Tag for the announcment"
)
parser.add_argument(
"release_date", type=str, help="Release date",
)
def handle(self, *args, **options):
tag = options["announcement_tag"]
release_date = datetime.datetime.strptime(
options["release_date"], "%d-%m-%Y"
).date()
# NOTE: We use the last login date here, under the assumption that we
# are calculating the open rate only after a duration of 2 weeks of the
# release date, which is the cookie expiry time for our sessions
logged_in_users = User.objects.filter(last_login__gte=release_date)
seen_users = User.objects.filter(seen_announcements__overlap=[tag])
seen_count = seen_users.count()
logged_in_count = logged_in_users.count()
open_rate = (
seen_users.count() / logged_in_count * 100 if logged_in_count > 0 else None
)
print(f"Seen count: {seen_count}")
print(f"Logged in count: {logged_in_count}")
print(f"Open rate: {open_rate}")
|
<commit_before><commit_msg>Add command to compute the announcement open rate<commit_after>
|
#!/usr/bin/env python3
# Akvo Reporting is covered by the GNU Affero General Public License.
# See more details in the license.txt file located at the root folder of the Akvo RSR module.
# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
""" Calculate the open rate for announcements
Usage:
python manage.py announcement_open_rate announcement_tag release_date
"""
import datetime
from django.core.management.base import BaseCommand
from django.contrib.auth import get_user_model
User = get_user_model()
class Command(BaseCommand):
help = "Calculate the open rate for a specific announcement"
def add_arguments(self, parser):
parser.add_argument(
"announcement_tag", type=str, help="Tag for the announcment"
)
parser.add_argument(
"release_date", type=str, help="Release date",
)
def handle(self, *args, **options):
tag = options["announcement_tag"]
release_date = datetime.datetime.strptime(
options["release_date"], "%d-%m-%Y"
).date()
# NOTE: We use the last login date here, under the assumption that we
# are calculating the open rate only after a duration of 2 weeks of the
# release date, which is the cookie expiry time for our sessions
logged_in_users = User.objects.filter(last_login__gte=release_date)
seen_users = User.objects.filter(seen_announcements__overlap=[tag])
seen_count = seen_users.count()
logged_in_count = logged_in_users.count()
open_rate = (
seen_users.count() / logged_in_count * 100 if logged_in_count > 0 else None
)
print(f"Seen count: {seen_count}")
print(f"Logged in count: {logged_in_count}")
print(f"Open rate: {open_rate}")
|
Add command to compute the announcement open rate#!/usr/bin/env python3
# Akvo Reporting is covered by the GNU Affero General Public License.
# See more details in the license.txt file located at the root folder of the Akvo RSR module.
# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
""" Calculate the open rate for announcements
Usage:
python manage.py announcement_open_rate announcement_tag release_date
"""
import datetime
from django.core.management.base import BaseCommand
from django.contrib.auth import get_user_model
User = get_user_model()
class Command(BaseCommand):
help = "Calculate the open rate for a specific announcement"
def add_arguments(self, parser):
parser.add_argument(
"announcement_tag", type=str, help="Tag for the announcment"
)
parser.add_argument(
"release_date", type=str, help="Release date",
)
def handle(self, *args, **options):
tag = options["announcement_tag"]
release_date = datetime.datetime.strptime(
options["release_date"], "%d-%m-%Y"
).date()
# NOTE: We use the last login date here, under the assumption that we
# are calculating the open rate only after a duration of 2 weeks of the
# release date, which is the cookie expiry time for our sessions
logged_in_users = User.objects.filter(last_login__gte=release_date)
seen_users = User.objects.filter(seen_announcements__overlap=[tag])
seen_count = seen_users.count()
logged_in_count = logged_in_users.count()
open_rate = (
seen_users.count() / logged_in_count * 100 if logged_in_count > 0 else None
)
print(f"Seen count: {seen_count}")
print(f"Logged in count: {logged_in_count}")
print(f"Open rate: {open_rate}")
|
<commit_before><commit_msg>Add command to compute the announcement open rate<commit_after>#!/usr/bin/env python3
# Akvo Reporting is covered by the GNU Affero General Public License.
# See more details in the license.txt file located at the root folder of the Akvo RSR module.
# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
""" Calculate the open rate for announcements
Usage:
python manage.py announcement_open_rate announcement_tag release_date
"""
import datetime
from django.core.management.base import BaseCommand
from django.contrib.auth import get_user_model
User = get_user_model()
class Command(BaseCommand):
help = "Calculate the open rate for a specific announcement"
def add_arguments(self, parser):
parser.add_argument(
"announcement_tag", type=str, help="Tag for the announcment"
)
parser.add_argument(
"release_date", type=str, help="Release date",
)
def handle(self, *args, **options):
tag = options["announcement_tag"]
release_date = datetime.datetime.strptime(
options["release_date"], "%d-%m-%Y"
).date()
# NOTE: We use the last login date here, under the assumption that we
# are calculating the open rate only after a duration of 2 weeks of the
# release date, which is the cookie expiry time for our sessions
logged_in_users = User.objects.filter(last_login__gte=release_date)
seen_users = User.objects.filter(seen_announcements__overlap=[tag])
seen_count = seen_users.count()
logged_in_count = logged_in_users.count()
open_rate = (
seen_users.count() / logged_in_count * 100 if logged_in_count > 0 else None
)
print(f"Seen count: {seen_count}")
print(f"Logged in count: {logged_in_count}")
print(f"Open rate: {open_rate}")
|
|
bf51bd060eaaa139851300bf9deed9096afc23e1
|
numscons/checkers/new/perflib_checkers.py
|
numscons/checkers/new/perflib_checkers.py
|
from numscons.checkers.new.common import \
save_and_set, restore, get_initialized_perflib_config
# Performance library checks
def _check_perflib(context, autoadd, info):
context.Message("Checking for %s ... " % info._msg_name)
if info.disabled():
context.Result('no - disabled from user environment')
return 0
saved = save_and_set(context.env, info._core, info._core.keys())
ret = context.TryLink(info.test_code, extension='.c')
if not ret or not autoadd:
restore(context.env, saved)
context.Result(ret)
return ret
def CheckAtlas(context, autoadd=0):
return _check_perflib(context, autoadd,
get_initialized_perflib_config(context.env, 'Atlas'))
def CheckMkl(context, autoadd=0):
return _check_perflib(context, autoadd,
get_initialized_perflib_config(context.env, 'Mkl'))
|
Add Atlas and Mkl checkers.
|
Add Atlas and Mkl checkers.
|
Python
|
bsd-3-clause
|
cournape/numscons,cournape/numscons,cournape/numscons
|
Add Atlas and Mkl checkers.
|
from numscons.checkers.new.common import \
save_and_set, restore, get_initialized_perflib_config
# Performance library checks
def _check_perflib(context, autoadd, info):
context.Message("Checking for %s ... " % info._msg_name)
if info.disabled():
context.Result('no - disabled from user environment')
return 0
saved = save_and_set(context.env, info._core, info._core.keys())
ret = context.TryLink(info.test_code, extension='.c')
if not ret or not autoadd:
restore(context.env, saved)
context.Result(ret)
return ret
def CheckAtlas(context, autoadd=0):
return _check_perflib(context, autoadd,
get_initialized_perflib_config(context.env, 'Atlas'))
def CheckMkl(context, autoadd=0):
return _check_perflib(context, autoadd,
get_initialized_perflib_config(context.env, 'Mkl'))
|
<commit_before><commit_msg>Add Atlas and Mkl checkers.<commit_after>
|
from numscons.checkers.new.common import \
save_and_set, restore, get_initialized_perflib_config
# Performance library checks
def _check_perflib(context, autoadd, info):
context.Message("Checking for %s ... " % info._msg_name)
if info.disabled():
context.Result('no - disabled from user environment')
return 0
saved = save_and_set(context.env, info._core, info._core.keys())
ret = context.TryLink(info.test_code, extension='.c')
if not ret or not autoadd:
restore(context.env, saved)
context.Result(ret)
return ret
def CheckAtlas(context, autoadd=0):
return _check_perflib(context, autoadd,
get_initialized_perflib_config(context.env, 'Atlas'))
def CheckMkl(context, autoadd=0):
return _check_perflib(context, autoadd,
get_initialized_perflib_config(context.env, 'Mkl'))
|
Add Atlas and Mkl checkers.from numscons.checkers.new.common import \
save_and_set, restore, get_initialized_perflib_config
# Performance library checks
def _check_perflib(context, autoadd, info):
context.Message("Checking for %s ... " % info._msg_name)
if info.disabled():
context.Result('no - disabled from user environment')
return 0
saved = save_and_set(context.env, info._core, info._core.keys())
ret = context.TryLink(info.test_code, extension='.c')
if not ret or not autoadd:
restore(context.env, saved)
context.Result(ret)
return ret
def CheckAtlas(context, autoadd=0):
return _check_perflib(context, autoadd,
get_initialized_perflib_config(context.env, 'Atlas'))
def CheckMkl(context, autoadd=0):
return _check_perflib(context, autoadd,
get_initialized_perflib_config(context.env, 'Mkl'))
|
<commit_before><commit_msg>Add Atlas and Mkl checkers.<commit_after>from numscons.checkers.new.common import \
save_and_set, restore, get_initialized_perflib_config
# Performance library checks
def _check_perflib(context, autoadd, info):
context.Message("Checking for %s ... " % info._msg_name)
if info.disabled():
context.Result('no - disabled from user environment')
return 0
saved = save_and_set(context.env, info._core, info._core.keys())
ret = context.TryLink(info.test_code, extension='.c')
if not ret or not autoadd:
restore(context.env, saved)
context.Result(ret)
return ret
def CheckAtlas(context, autoadd=0):
return _check_perflib(context, autoadd,
get_initialized_perflib_config(context.env, 'Atlas'))
def CheckMkl(context, autoadd=0):
return _check_perflib(context, autoadd,
get_initialized_perflib_config(context.env, 'Mkl'))
|
|
15a69fb84ba928dba4d56ce447d16cdd2346e3ef
|
src/nyc_trees/apps/event/migrations/0015_auto_20150730_1735.py
|
src/nyc_trees/apps/event/migrations/0015_auto_20150730_1735.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def clear_event_pdfs(apps, schema_editor):
Event = apps.get_model("event", "Event")
for event in Event.objects.all():
event.map_pdf_filename = ''
event.save()
def no_op(*args, **kwargs):
pass
class Migration(migrations.Migration):
dependencies = [
('event', '0014_auto_20150506_1634'),
]
operations = [
migrations.RunPython(clear_event_pdfs, reverse_code=no_op),
]
|
Add data migration causing all event map PDFs to be regenerated
|
Add data migration causing all event map PDFs to be regenerated
Note: we did this before; this is copied from `src\nyc_trees\apps\event\migrations\0012_auto_20150424_1705.py`
Connects #1787
|
Python
|
agpl-3.0
|
maurizi/nyc-trees,maurizi/nyc-trees,azavea/nyc-trees,azavea/nyc-trees,kdeloach/nyc-trees,maurizi/nyc-trees,kdeloach/nyc-trees,kdeloach/nyc-trees,azavea/nyc-trees,azavea/nyc-trees,kdeloach/nyc-trees,kdeloach/nyc-trees,maurizi/nyc-trees,azavea/nyc-trees
|
Add data migration causing all event map PDFs to be regenerated
Note: we did this before; this is copied from `src\nyc_trees\apps\event\migrations\0012_auto_20150424_1705.py`
Connects #1787
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def clear_event_pdfs(apps, schema_editor):
Event = apps.get_model("event", "Event")
for event in Event.objects.all():
event.map_pdf_filename = ''
event.save()
def no_op(*args, **kwargs):
pass
class Migration(migrations.Migration):
dependencies = [
('event', '0014_auto_20150506_1634'),
]
operations = [
migrations.RunPython(clear_event_pdfs, reverse_code=no_op),
]
|
<commit_before><commit_msg>Add data migration causing all event map PDFs to be regenerated
Note: we did this before; this is copied from `src\nyc_trees\apps\event\migrations\0012_auto_20150424_1705.py`
Connects #1787<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def clear_event_pdfs(apps, schema_editor):
Event = apps.get_model("event", "Event")
for event in Event.objects.all():
event.map_pdf_filename = ''
event.save()
def no_op(*args, **kwargs):
pass
class Migration(migrations.Migration):
dependencies = [
('event', '0014_auto_20150506_1634'),
]
operations = [
migrations.RunPython(clear_event_pdfs, reverse_code=no_op),
]
|
Add data migration causing all event map PDFs to be regenerated
Note: we did this before; this is copied from `src\nyc_trees\apps\event\migrations\0012_auto_20150424_1705.py`
Connects #1787# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def clear_event_pdfs(apps, schema_editor):
Event = apps.get_model("event", "Event")
for event in Event.objects.all():
event.map_pdf_filename = ''
event.save()
def no_op(*args, **kwargs):
pass
class Migration(migrations.Migration):
dependencies = [
('event', '0014_auto_20150506_1634'),
]
operations = [
migrations.RunPython(clear_event_pdfs, reverse_code=no_op),
]
|
<commit_before><commit_msg>Add data migration causing all event map PDFs to be regenerated
Note: we did this before; this is copied from `src\nyc_trees\apps\event\migrations\0012_auto_20150424_1705.py`
Connects #1787<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def clear_event_pdfs(apps, schema_editor):
Event = apps.get_model("event", "Event")
for event in Event.objects.all():
event.map_pdf_filename = ''
event.save()
def no_op(*args, **kwargs):
pass
class Migration(migrations.Migration):
dependencies = [
('event', '0014_auto_20150506_1634'),
]
operations = [
migrations.RunPython(clear_event_pdfs, reverse_code=no_op),
]
|
|
388e77bcbc45d88394932e02229c569397ffa12b
|
ynr/apps/candidates/management/commands/candidates_create_party_descriptions_csv.py
|
ynr/apps/candidates/management/commands/candidates_create_party_descriptions_csv.py
|
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
from compat import BufferDictWriter
from django.core.management.base import BaseCommand
from official_documents.models import OfficialDocument
from popolo.models import Organization, Identifier
class Command(BaseCommand):
help = "Create a CSV with candidate info form the SOPNs"
def handle(self, *args, **options):
fieldnames = (
'party_id',
'party_name',
'party_description',
)
out_csv = BufferDictWriter(fieldnames)
out_csv.writeheader()
for org in Organization.objects.filter(classification='Party'):
try:
party_id = org.identifiers.get(
scheme='electoral-commission').identifier
except Identifier.DoesNotExist:
party_id = org.identifiers.get(
scheme='popit-organization').identifier
out_dict = {
'party_id': party_id,
'party_name': org.name,
'party_description': '',
}
out_csv.writerow(out_dict)
for desc in org.other_names.all():
out_dict['party_description'] = desc.name
out_csv.writerow(out_dict)
self.stdout.write(out_csv.output)
|
Add management command to export party descriptions
|
Add management command to export party descriptions
|
Python
|
agpl-3.0
|
DemocracyClub/yournextrepresentative,DemocracyClub/yournextrepresentative,DemocracyClub/yournextrepresentative
|
Add management command to export party descriptions
|
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
from compat import BufferDictWriter
from django.core.management.base import BaseCommand
from official_documents.models import OfficialDocument
from popolo.models import Organization, Identifier
class Command(BaseCommand):
help = "Create a CSV with candidate info form the SOPNs"
def handle(self, *args, **options):
fieldnames = (
'party_id',
'party_name',
'party_description',
)
out_csv = BufferDictWriter(fieldnames)
out_csv.writeheader()
for org in Organization.objects.filter(classification='Party'):
try:
party_id = org.identifiers.get(
scheme='electoral-commission').identifier
except Identifier.DoesNotExist:
party_id = org.identifiers.get(
scheme='popit-organization').identifier
out_dict = {
'party_id': party_id,
'party_name': org.name,
'party_description': '',
}
out_csv.writerow(out_dict)
for desc in org.other_names.all():
out_dict['party_description'] = desc.name
out_csv.writerow(out_dict)
self.stdout.write(out_csv.output)
|
<commit_before><commit_msg>Add management command to export party descriptions<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
from compat import BufferDictWriter
from django.core.management.base import BaseCommand
from official_documents.models import OfficialDocument
from popolo.models import Organization, Identifier
class Command(BaseCommand):
help = "Create a CSV with candidate info form the SOPNs"
def handle(self, *args, **options):
fieldnames = (
'party_id',
'party_name',
'party_description',
)
out_csv = BufferDictWriter(fieldnames)
out_csv.writeheader()
for org in Organization.objects.filter(classification='Party'):
try:
party_id = org.identifiers.get(
scheme='electoral-commission').identifier
except Identifier.DoesNotExist:
party_id = org.identifiers.get(
scheme='popit-organization').identifier
out_dict = {
'party_id': party_id,
'party_name': org.name,
'party_description': '',
}
out_csv.writerow(out_dict)
for desc in org.other_names.all():
out_dict['party_description'] = desc.name
out_csv.writerow(out_dict)
self.stdout.write(out_csv.output)
|
Add management command to export party descriptions# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
from compat import BufferDictWriter
from django.core.management.base import BaseCommand
from official_documents.models import OfficialDocument
from popolo.models import Organization, Identifier
class Command(BaseCommand):
help = "Create a CSV with candidate info form the SOPNs"
def handle(self, *args, **options):
fieldnames = (
'party_id',
'party_name',
'party_description',
)
out_csv = BufferDictWriter(fieldnames)
out_csv.writeheader()
for org in Organization.objects.filter(classification='Party'):
try:
party_id = org.identifiers.get(
scheme='electoral-commission').identifier
except Identifier.DoesNotExist:
party_id = org.identifiers.get(
scheme='popit-organization').identifier
out_dict = {
'party_id': party_id,
'party_name': org.name,
'party_description': '',
}
out_csv.writerow(out_dict)
for desc in org.other_names.all():
out_dict['party_description'] = desc.name
out_csv.writerow(out_dict)
self.stdout.write(out_csv.output)
|
<commit_before><commit_msg>Add management command to export party descriptions<commit_after># -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
from compat import BufferDictWriter
from django.core.management.base import BaseCommand
from official_documents.models import OfficialDocument
from popolo.models import Organization, Identifier
class Command(BaseCommand):
help = "Create a CSV with candidate info form the SOPNs"
def handle(self, *args, **options):
fieldnames = (
'party_id',
'party_name',
'party_description',
)
out_csv = BufferDictWriter(fieldnames)
out_csv.writeheader()
for org in Organization.objects.filter(classification='Party'):
try:
party_id = org.identifiers.get(
scheme='electoral-commission').identifier
except Identifier.DoesNotExist:
party_id = org.identifiers.get(
scheme='popit-organization').identifier
out_dict = {
'party_id': party_id,
'party_name': org.name,
'party_description': '',
}
out_csv.writerow(out_dict)
for desc in org.other_names.all():
out_dict['party_description'] = desc.name
out_csv.writerow(out_dict)
self.stdout.write(out_csv.output)
|
|
7476d0695b0c39ddf2d55988daaf838c8ba3003e
|
py/valid-parenthesis-string.py
|
py/valid-parenthesis-string.py
|
from collections import Counter
class Solution(object):
def dfs(self, s, pos, stack):
if stack + self.min_possible_opening[-1] - self.min_possible_opening[pos] > self.max_possible_closing[-1] - self.max_possible_closing[pos]:
return False
if stack + self.max_possible_opening[-1] - self.max_possible_opening[pos] < self.min_possible_closing[-1] - self.min_possible_closing[pos]:
return False
if pos == len(s):
return not stack
if s[pos] == '(':
stack += 1
if self.dfs(s, pos + 1, stack):
return True
stack -= 1
elif s[pos] == ')':
if not stack:
return False
else:
stack -= 1
if self.dfs(s, pos + 1, stack):
return True
stack += 1
else:
if stack: # treat as ')'
stack -= 1
if self.dfs(s, pos + 1, stack):
return True
stack += 1
# treat as '('
stack += 1
if self.dfs(s, pos + 1, stack):
return True
stack -= 1
# treat as ''
if self.dfs(s, pos + 1, stack):
return True
return False
def checkValidString(self, s):
"""
:type s: str
:rtype: bool
"""
c = Counter(s)
mpo, mpc = c['('] + c['*'], c[')'] + c['*']
self.max_possible_opening = [0]
self.min_possible_opening = [0]
self.max_possible_closing = [0]
self.min_possible_closing = [0]
for c in s:
self.min_possible_opening.append(self.min_possible_opening[-1] + (c == '('))
self.max_possible_opening.append(self.max_possible_opening[-1] + (c != ')'))
self.min_possible_closing.append(self.min_possible_closing[-1] + (c == ')'))
self.max_possible_closing.append(self.max_possible_closing[-1] + (c != '('))
return self.dfs(s, 0, 0)
|
Add py solution for 678. Valid Parenthesis String
|
Add py solution for 678. Valid Parenthesis String
678. Valid Parenthesis String: https://leetcode.com/problems/valid-parenthesis-string/
Approach1:
DFS with cutting
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 678. Valid Parenthesis String
678. Valid Parenthesis String: https://leetcode.com/problems/valid-parenthesis-string/
Approach1:
DFS with cutting
|
from collections import Counter
class Solution(object):
def dfs(self, s, pos, stack):
if stack + self.min_possible_opening[-1] - self.min_possible_opening[pos] > self.max_possible_closing[-1] - self.max_possible_closing[pos]:
return False
if stack + self.max_possible_opening[-1] - self.max_possible_opening[pos] < self.min_possible_closing[-1] - self.min_possible_closing[pos]:
return False
if pos == len(s):
return not stack
if s[pos] == '(':
stack += 1
if self.dfs(s, pos + 1, stack):
return True
stack -= 1
elif s[pos] == ')':
if not stack:
return False
else:
stack -= 1
if self.dfs(s, pos + 1, stack):
return True
stack += 1
else:
if stack: # treat as ')'
stack -= 1
if self.dfs(s, pos + 1, stack):
return True
stack += 1
# treat as '('
stack += 1
if self.dfs(s, pos + 1, stack):
return True
stack -= 1
# treat as ''
if self.dfs(s, pos + 1, stack):
return True
return False
def checkValidString(self, s):
"""
:type s: str
:rtype: bool
"""
c = Counter(s)
mpo, mpc = c['('] + c['*'], c[')'] + c['*']
self.max_possible_opening = [0]
self.min_possible_opening = [0]
self.max_possible_closing = [0]
self.min_possible_closing = [0]
for c in s:
self.min_possible_opening.append(self.min_possible_opening[-1] + (c == '('))
self.max_possible_opening.append(self.max_possible_opening[-1] + (c != ')'))
self.min_possible_closing.append(self.min_possible_closing[-1] + (c == ')'))
self.max_possible_closing.append(self.max_possible_closing[-1] + (c != '('))
return self.dfs(s, 0, 0)
|
<commit_before><commit_msg>Add py solution for 678. Valid Parenthesis String
678. Valid Parenthesis String: https://leetcode.com/problems/valid-parenthesis-string/
Approach1:
DFS with cutting<commit_after>
|
from collections import Counter
class Solution(object):
def dfs(self, s, pos, stack):
if stack + self.min_possible_opening[-1] - self.min_possible_opening[pos] > self.max_possible_closing[-1] - self.max_possible_closing[pos]:
return False
if stack + self.max_possible_opening[-1] - self.max_possible_opening[pos] < self.min_possible_closing[-1] - self.min_possible_closing[pos]:
return False
if pos == len(s):
return not stack
if s[pos] == '(':
stack += 1
if self.dfs(s, pos + 1, stack):
return True
stack -= 1
elif s[pos] == ')':
if not stack:
return False
else:
stack -= 1
if self.dfs(s, pos + 1, stack):
return True
stack += 1
else:
if stack: # treat as ')'
stack -= 1
if self.dfs(s, pos + 1, stack):
return True
stack += 1
# treat as '('
stack += 1
if self.dfs(s, pos + 1, stack):
return True
stack -= 1
# treat as ''
if self.dfs(s, pos + 1, stack):
return True
return False
def checkValidString(self, s):
"""
:type s: str
:rtype: bool
"""
c = Counter(s)
mpo, mpc = c['('] + c['*'], c[')'] + c['*']
self.max_possible_opening = [0]
self.min_possible_opening = [0]
self.max_possible_closing = [0]
self.min_possible_closing = [0]
for c in s:
self.min_possible_opening.append(self.min_possible_opening[-1] + (c == '('))
self.max_possible_opening.append(self.max_possible_opening[-1] + (c != ')'))
self.min_possible_closing.append(self.min_possible_closing[-1] + (c == ')'))
self.max_possible_closing.append(self.max_possible_closing[-1] + (c != '('))
return self.dfs(s, 0, 0)
|
Add py solution for 678. Valid Parenthesis String
678. Valid Parenthesis String: https://leetcode.com/problems/valid-parenthesis-string/
Approach1:
DFS with cuttingfrom collections import Counter
class Solution(object):
def dfs(self, s, pos, stack):
if stack + self.min_possible_opening[-1] - self.min_possible_opening[pos] > self.max_possible_closing[-1] - self.max_possible_closing[pos]:
return False
if stack + self.max_possible_opening[-1] - self.max_possible_opening[pos] < self.min_possible_closing[-1] - self.min_possible_closing[pos]:
return False
if pos == len(s):
return not stack
if s[pos] == '(':
stack += 1
if self.dfs(s, pos + 1, stack):
return True
stack -= 1
elif s[pos] == ')':
if not stack:
return False
else:
stack -= 1
if self.dfs(s, pos + 1, stack):
return True
stack += 1
else:
if stack: # treat as ')'
stack -= 1
if self.dfs(s, pos + 1, stack):
return True
stack += 1
# treat as '('
stack += 1
if self.dfs(s, pos + 1, stack):
return True
stack -= 1
# treat as ''
if self.dfs(s, pos + 1, stack):
return True
return False
def checkValidString(self, s):
"""
:type s: str
:rtype: bool
"""
c = Counter(s)
mpo, mpc = c['('] + c['*'], c[')'] + c['*']
self.max_possible_opening = [0]
self.min_possible_opening = [0]
self.max_possible_closing = [0]
self.min_possible_closing = [0]
for c in s:
self.min_possible_opening.append(self.min_possible_opening[-1] + (c == '('))
self.max_possible_opening.append(self.max_possible_opening[-1] + (c != ')'))
self.min_possible_closing.append(self.min_possible_closing[-1] + (c == ')'))
self.max_possible_closing.append(self.max_possible_closing[-1] + (c != '('))
return self.dfs(s, 0, 0)
|
<commit_before><commit_msg>Add py solution for 678. Valid Parenthesis String
678. Valid Parenthesis String: https://leetcode.com/problems/valid-parenthesis-string/
Approach1:
DFS with cutting<commit_after>from collections import Counter
class Solution(object):
def dfs(self, s, pos, stack):
if stack + self.min_possible_opening[-1] - self.min_possible_opening[pos] > self.max_possible_closing[-1] - self.max_possible_closing[pos]:
return False
if stack + self.max_possible_opening[-1] - self.max_possible_opening[pos] < self.min_possible_closing[-1] - self.min_possible_closing[pos]:
return False
if pos == len(s):
return not stack
if s[pos] == '(':
stack += 1
if self.dfs(s, pos + 1, stack):
return True
stack -= 1
elif s[pos] == ')':
if not stack:
return False
else:
stack -= 1
if self.dfs(s, pos + 1, stack):
return True
stack += 1
else:
if stack: # treat as ')'
stack -= 1
if self.dfs(s, pos + 1, stack):
return True
stack += 1
# treat as '('
stack += 1
if self.dfs(s, pos + 1, stack):
return True
stack -= 1
# treat as ''
if self.dfs(s, pos + 1, stack):
return True
return False
def checkValidString(self, s):
"""
:type s: str
:rtype: bool
"""
c = Counter(s)
mpo, mpc = c['('] + c['*'], c[')'] + c['*']
self.max_possible_opening = [0]
self.min_possible_opening = [0]
self.max_possible_closing = [0]
self.min_possible_closing = [0]
for c in s:
self.min_possible_opening.append(self.min_possible_opening[-1] + (c == '('))
self.max_possible_opening.append(self.max_possible_opening[-1] + (c != ')'))
self.min_possible_closing.append(self.min_possible_closing[-1] + (c == ')'))
self.max_possible_closing.append(self.max_possible_closing[-1] + (c != '('))
return self.dfs(s, 0, 0)
|
|
68ed2a5c2dfa24551ea936aa52e98525acbe9d42
|
django_project/realtime/migrations/0035_ash_impact_file_path.py
|
django_project/realtime/migrations/0035_ash_impact_file_path.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('realtime', '0034_auto_20180208_1327'),
]
operations = [
migrations.AddField(
model_name='ash',
name='impact_file_path',
field=models.CharField(default=None, max_length=255, blank=True, help_text='Location of impact file.', null=True, verbose_name='Impact File path'),
),
]
|
Add migration file for ash impact file path.
|
Add migration file for ash impact file path.
|
Python
|
bsd-2-clause
|
AIFDR/inasafe-django,AIFDR/inasafe-django,AIFDR/inasafe-django,AIFDR/inasafe-django
|
Add migration file for ash impact file path.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('realtime', '0034_auto_20180208_1327'),
]
operations = [
migrations.AddField(
model_name='ash',
name='impact_file_path',
field=models.CharField(default=None, max_length=255, blank=True, help_text='Location of impact file.', null=True, verbose_name='Impact File path'),
),
]
|
<commit_before><commit_msg>Add migration file for ash impact file path.<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('realtime', '0034_auto_20180208_1327'),
]
operations = [
migrations.AddField(
model_name='ash',
name='impact_file_path',
field=models.CharField(default=None, max_length=255, blank=True, help_text='Location of impact file.', null=True, verbose_name='Impact File path'),
),
]
|
Add migration file for ash impact file path.# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('realtime', '0034_auto_20180208_1327'),
]
operations = [
migrations.AddField(
model_name='ash',
name='impact_file_path',
field=models.CharField(default=None, max_length=255, blank=True, help_text='Location of impact file.', null=True, verbose_name='Impact File path'),
),
]
|
<commit_before><commit_msg>Add migration file for ash impact file path.<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('realtime', '0034_auto_20180208_1327'),
]
operations = [
migrations.AddField(
model_name='ash',
name='impact_file_path',
field=models.CharField(default=None, max_length=255, blank=True, help_text='Location of impact file.', null=True, verbose_name='Impact File path'),
),
]
|
|
c339c54d44963ece2ef3a29787f4475e8736824e
|
tests/compiler/test_parametrized_usage.py
|
tests/compiler/test_parametrized_usage.py
|
import pytest
from tests.compiler import compile_snippet
from thinglang.compiler.errors import UnfilledGenericParameters
def test_both_parameters_unfilled():
with pytest.raises(UnfilledGenericParameters):
compile_snippet('list my_lst = list()')
def test_right_parameter_unfilled():
with pytest.raises(UnfilledGenericParameters):
compile_snippet('list<Container> my_lst = list()')
def test_left_parameter_unfilled():
with pytest.raises(UnfilledGenericParameters):
compile_snippet('list my_lst = list<Container>()')
def test_valid_static_usage():
compile_snippet('Generic.static_method()')
def test_invalid_instance_usage():
with pytest.raises(UnfilledGenericParameters):
compile_snippet('Generic().instance_method()')
|
Add test for invalid parametrized usage cases
|
Add test for invalid parametrized usage cases
|
Python
|
mit
|
ytanay/thinglang,ytanay/thinglang,ytanay/thinglang,ytanay/thinglang
|
Add test for invalid parametrized usage cases
|
import pytest
from tests.compiler import compile_snippet
from thinglang.compiler.errors import UnfilledGenericParameters
def test_both_parameters_unfilled():
with pytest.raises(UnfilledGenericParameters):
compile_snippet('list my_lst = list()')
def test_right_parameter_unfilled():
with pytest.raises(UnfilledGenericParameters):
compile_snippet('list<Container> my_lst = list()')
def test_left_parameter_unfilled():
with pytest.raises(UnfilledGenericParameters):
compile_snippet('list my_lst = list<Container>()')
def test_valid_static_usage():
compile_snippet('Generic.static_method()')
def test_invalid_instance_usage():
with pytest.raises(UnfilledGenericParameters):
compile_snippet('Generic().instance_method()')
|
<commit_before><commit_msg>Add test for invalid parametrized usage cases<commit_after>
|
import pytest
from tests.compiler import compile_snippet
from thinglang.compiler.errors import UnfilledGenericParameters
def test_both_parameters_unfilled():
with pytest.raises(UnfilledGenericParameters):
compile_snippet('list my_lst = list()')
def test_right_parameter_unfilled():
with pytest.raises(UnfilledGenericParameters):
compile_snippet('list<Container> my_lst = list()')
def test_left_parameter_unfilled():
with pytest.raises(UnfilledGenericParameters):
compile_snippet('list my_lst = list<Container>()')
def test_valid_static_usage():
compile_snippet('Generic.static_method()')
def test_invalid_instance_usage():
with pytest.raises(UnfilledGenericParameters):
compile_snippet('Generic().instance_method()')
|
Add test for invalid parametrized usage casesimport pytest
from tests.compiler import compile_snippet
from thinglang.compiler.errors import UnfilledGenericParameters
def test_both_parameters_unfilled():
with pytest.raises(UnfilledGenericParameters):
compile_snippet('list my_lst = list()')
def test_right_parameter_unfilled():
with pytest.raises(UnfilledGenericParameters):
compile_snippet('list<Container> my_lst = list()')
def test_left_parameter_unfilled():
with pytest.raises(UnfilledGenericParameters):
compile_snippet('list my_lst = list<Container>()')
def test_valid_static_usage():
compile_snippet('Generic.static_method()')
def test_invalid_instance_usage():
with pytest.raises(UnfilledGenericParameters):
compile_snippet('Generic().instance_method()')
|
<commit_before><commit_msg>Add test for invalid parametrized usage cases<commit_after>import pytest
from tests.compiler import compile_snippet
from thinglang.compiler.errors import UnfilledGenericParameters
def test_both_parameters_unfilled():
with pytest.raises(UnfilledGenericParameters):
compile_snippet('list my_lst = list()')
def test_right_parameter_unfilled():
with pytest.raises(UnfilledGenericParameters):
compile_snippet('list<Container> my_lst = list()')
def test_left_parameter_unfilled():
with pytest.raises(UnfilledGenericParameters):
compile_snippet('list my_lst = list<Container>()')
def test_valid_static_usage():
compile_snippet('Generic.static_method()')
def test_invalid_instance_usage():
with pytest.raises(UnfilledGenericParameters):
compile_snippet('Generic().instance_method()')
|
|
531d11ea10064fdbbad85b482bcdf075529c977d
|
tests/test_utils.py
|
tests/test_utils.py
|
import unittest
import os, sys
sys.path.append(os.path.join(os.path.dirname(__file__), "..\\"))
from app import create_app, db
from app.utils import get_or_create
from app.models import User
class TestUtils(unittest.TestCase):
def setUp(self):
self.app = create_app("testing")
self.app_ctx = self.app.app_context()
self.app_ctx.push()
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
self.app_ctx.pop()
def test_get_or_create(self):
user1, created1 = get_or_create(User, name="foo", social_id="bar")
db.session.add(user1)
db.session.commit()
user2, created2 = get_or_create(User, name="foo", social_id="bar")
assert created1
assert not created2
assert user1 == user2
|
Add test case for get_or_create util
|
Add test case for get_or_create util
|
Python
|
mit
|
Encrylize/MyDictionary,Encrylize/MyDictionary,Encrylize/MyDictionary
|
Add test case for get_or_create util
|
import unittest
import os, sys
sys.path.append(os.path.join(os.path.dirname(__file__), "..\\"))
from app import create_app, db
from app.utils import get_or_create
from app.models import User
class TestUtils(unittest.TestCase):
def setUp(self):
self.app = create_app("testing")
self.app_ctx = self.app.app_context()
self.app_ctx.push()
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
self.app_ctx.pop()
def test_get_or_create(self):
user1, created1 = get_or_create(User, name="foo", social_id="bar")
db.session.add(user1)
db.session.commit()
user2, created2 = get_or_create(User, name="foo", social_id="bar")
assert created1
assert not created2
assert user1 == user2
|
<commit_before><commit_msg>Add test case for get_or_create util<commit_after>
|
import unittest
import os, sys
sys.path.append(os.path.join(os.path.dirname(__file__), "..\\"))
from app import create_app, db
from app.utils import get_or_create
from app.models import User
class TestUtils(unittest.TestCase):
def setUp(self):
self.app = create_app("testing")
self.app_ctx = self.app.app_context()
self.app_ctx.push()
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
self.app_ctx.pop()
def test_get_or_create(self):
user1, created1 = get_or_create(User, name="foo", social_id="bar")
db.session.add(user1)
db.session.commit()
user2, created2 = get_or_create(User, name="foo", social_id="bar")
assert created1
assert not created2
assert user1 == user2
|
Add test case for get_or_create utilimport unittest
import os, sys
sys.path.append(os.path.join(os.path.dirname(__file__), "..\\"))
from app import create_app, db
from app.utils import get_or_create
from app.models import User
class TestUtils(unittest.TestCase):
def setUp(self):
self.app = create_app("testing")
self.app_ctx = self.app.app_context()
self.app_ctx.push()
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
self.app_ctx.pop()
def test_get_or_create(self):
user1, created1 = get_or_create(User, name="foo", social_id="bar")
db.session.add(user1)
db.session.commit()
user2, created2 = get_or_create(User, name="foo", social_id="bar")
assert created1
assert not created2
assert user1 == user2
|
<commit_before><commit_msg>Add test case for get_or_create util<commit_after>import unittest
import os, sys
sys.path.append(os.path.join(os.path.dirname(__file__), "..\\"))
from app import create_app, db
from app.utils import get_or_create
from app.models import User
class TestUtils(unittest.TestCase):
def setUp(self):
self.app = create_app("testing")
self.app_ctx = self.app.app_context()
self.app_ctx.push()
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
self.app_ctx.pop()
def test_get_or_create(self):
user1, created1 = get_or_create(User, name="foo", social_id="bar")
db.session.add(user1)
db.session.commit()
user2, created2 = get_or_create(User, name="foo", social_id="bar")
assert created1
assert not created2
assert user1 == user2
|
|
a7f4a0395848fa96b584d24576c13a0533c1296d
|
merge_sort.py
|
merge_sort.py
|
#!/usr/bin/env python3
def merge_sorted_list(alist, blist):
""" Use merge sort to sort sorted alist and blist, and return merged sorted list of alist. """
if not isinstance(alist, list):
raise TypeError("{} should be of type list".format(alist))
if not isinstance(blist, list):
raise TypeError("{} should be of type list".format(blist))
sorted_list = []
alist_length = len(alist)
blist_length = len(blist)
i = 0
j = 0
while (i < alist_length) or (j < blist_length):
if i == alist_length:
sorted_list.append(blist[j])
j = j + 1
elif j == blist_length:
sorted_list.append(alist[i])
i = i + 1
elif alist[i] <= blist[j]:
sorted_list.append(alist[i])
i = i + 1
else:
sorted_list.append(blist[j])
j = j + 1
return sorted_list
def merge_sort_list(alist):
""" Use merge sort to sort alist and return sorted list of alist. """
if not isinstance(alist, list):
raise TypeError("{} should be of type list".format(alist))
length = len(alist)
if length == 2:
if alist[0] > alist[1]:
tmp = alist[0]
alist[0] = alist[1]
alist[1] = tmp
return alist
elif length == 1:
return alist
half_length = int(length / 2)
first_half_list = alist[:half_length]
second_half_list = alist[half_length:]
sorted_list = merge_sorted_list(merge_sort_list(first_half_list), merge_sort_list(second_half_list))
return sorted_list
if __name__ == "__main__":
source_list = [3, 2, 1, 5, 6, 10, 9, 8, 4, 7, 0]
print("source list before sort = {}".format(source_list))
sorted_list = merge_sort_list(source_list)
print("sorted_list = {}".format(sorted_list))
|
Add merge sort practice of python int list.
|
Add merge sort practice of python int list.
|
Python
|
apache-2.0
|
sjh/python
|
Add merge sort practice of python int list.
|
#!/usr/bin/env python3
def merge_sorted_list(alist, blist):
""" Use merge sort to sort sorted alist and blist, and return merged sorted list of alist. """
if not isinstance(alist, list):
raise TypeError("{} should be of type list".format(alist))
if not isinstance(blist, list):
raise TypeError("{} should be of type list".format(blist))
sorted_list = []
alist_length = len(alist)
blist_length = len(blist)
i = 0
j = 0
while (i < alist_length) or (j < blist_length):
if i == alist_length:
sorted_list.append(blist[j])
j = j + 1
elif j == blist_length:
sorted_list.append(alist[i])
i = i + 1
elif alist[i] <= blist[j]:
sorted_list.append(alist[i])
i = i + 1
else:
sorted_list.append(blist[j])
j = j + 1
return sorted_list
def merge_sort_list(alist):
""" Use merge sort to sort alist and return sorted list of alist. """
if not isinstance(alist, list):
raise TypeError("{} should be of type list".format(alist))
length = len(alist)
if length == 2:
if alist[0] > alist[1]:
tmp = alist[0]
alist[0] = alist[1]
alist[1] = tmp
return alist
elif length == 1:
return alist
half_length = int(length / 2)
first_half_list = alist[:half_length]
second_half_list = alist[half_length:]
sorted_list = merge_sorted_list(merge_sort_list(first_half_list), merge_sort_list(second_half_list))
return sorted_list
if __name__ == "__main__":
source_list = [3, 2, 1, 5, 6, 10, 9, 8, 4, 7, 0]
print("source list before sort = {}".format(source_list))
sorted_list = merge_sort_list(source_list)
print("sorted_list = {}".format(sorted_list))
|
<commit_before><commit_msg>Add merge sort practice of python int list.<commit_after>
|
#!/usr/bin/env python3
def merge_sorted_list(alist, blist):
""" Use merge sort to sort sorted alist and blist, and return merged sorted list of alist. """
if not isinstance(alist, list):
raise TypeError("{} should be of type list".format(alist))
if not isinstance(blist, list):
raise TypeError("{} should be of type list".format(blist))
sorted_list = []
alist_length = len(alist)
blist_length = len(blist)
i = 0
j = 0
while (i < alist_length) or (j < blist_length):
if i == alist_length:
sorted_list.append(blist[j])
j = j + 1
elif j == blist_length:
sorted_list.append(alist[i])
i = i + 1
elif alist[i] <= blist[j]:
sorted_list.append(alist[i])
i = i + 1
else:
sorted_list.append(blist[j])
j = j + 1
return sorted_list
def merge_sort_list(alist):
""" Use merge sort to sort alist and return sorted list of alist. """
if not isinstance(alist, list):
raise TypeError("{} should be of type list".format(alist))
length = len(alist)
if length == 2:
if alist[0] > alist[1]:
tmp = alist[0]
alist[0] = alist[1]
alist[1] = tmp
return alist
elif length == 1:
return alist
half_length = int(length / 2)
first_half_list = alist[:half_length]
second_half_list = alist[half_length:]
sorted_list = merge_sorted_list(merge_sort_list(first_half_list), merge_sort_list(second_half_list))
return sorted_list
if __name__ == "__main__":
source_list = [3, 2, 1, 5, 6, 10, 9, 8, 4, 7, 0]
print("source list before sort = {}".format(source_list))
sorted_list = merge_sort_list(source_list)
print("sorted_list = {}".format(sorted_list))
|
Add merge sort practice of python int list.#!/usr/bin/env python3
def merge_sorted_list(alist, blist):
""" Use merge sort to sort sorted alist and blist, and return merged sorted list of alist. """
if not isinstance(alist, list):
raise TypeError("{} should be of type list".format(alist))
if not isinstance(blist, list):
raise TypeError("{} should be of type list".format(blist))
sorted_list = []
alist_length = len(alist)
blist_length = len(blist)
i = 0
j = 0
while (i < alist_length) or (j < blist_length):
if i == alist_length:
sorted_list.append(blist[j])
j = j + 1
elif j == blist_length:
sorted_list.append(alist[i])
i = i + 1
elif alist[i] <= blist[j]:
sorted_list.append(alist[i])
i = i + 1
else:
sorted_list.append(blist[j])
j = j + 1
return sorted_list
def merge_sort_list(alist):
""" Use merge sort to sort alist and return sorted list of alist. """
if not isinstance(alist, list):
raise TypeError("{} should be of type list".format(alist))
length = len(alist)
if length == 2:
if alist[0] > alist[1]:
tmp = alist[0]
alist[0] = alist[1]
alist[1] = tmp
return alist
elif length == 1:
return alist
half_length = int(length / 2)
first_half_list = alist[:half_length]
second_half_list = alist[half_length:]
sorted_list = merge_sorted_list(merge_sort_list(first_half_list), merge_sort_list(second_half_list))
return sorted_list
if __name__ == "__main__":
source_list = [3, 2, 1, 5, 6, 10, 9, 8, 4, 7, 0]
print("source list before sort = {}".format(source_list))
sorted_list = merge_sort_list(source_list)
print("sorted_list = {}".format(sorted_list))
|
<commit_before><commit_msg>Add merge sort practice of python int list.<commit_after>#!/usr/bin/env python3
def merge_sorted_list(alist, blist):
""" Use merge sort to sort sorted alist and blist, and return merged sorted list of alist. """
if not isinstance(alist, list):
raise TypeError("{} should be of type list".format(alist))
if not isinstance(blist, list):
raise TypeError("{} should be of type list".format(blist))
sorted_list = []
alist_length = len(alist)
blist_length = len(blist)
i = 0
j = 0
while (i < alist_length) or (j < blist_length):
if i == alist_length:
sorted_list.append(blist[j])
j = j + 1
elif j == blist_length:
sorted_list.append(alist[i])
i = i + 1
elif alist[i] <= blist[j]:
sorted_list.append(alist[i])
i = i + 1
else:
sorted_list.append(blist[j])
j = j + 1
return sorted_list
def merge_sort_list(alist):
""" Use merge sort to sort alist and return sorted list of alist. """
if not isinstance(alist, list):
raise TypeError("{} should be of type list".format(alist))
length = len(alist)
if length == 2:
if alist[0] > alist[1]:
tmp = alist[0]
alist[0] = alist[1]
alist[1] = tmp
return alist
elif length == 1:
return alist
half_length = int(length / 2)
first_half_list = alist[:half_length]
second_half_list = alist[half_length:]
sorted_list = merge_sorted_list(merge_sort_list(first_half_list), merge_sort_list(second_half_list))
return sorted_list
if __name__ == "__main__":
source_list = [3, 2, 1, 5, 6, 10, 9, 8, 4, 7, 0]
print("source list before sort = {}".format(source_list))
sorted_list = merge_sort_list(source_list)
print("sorted_list = {}".format(sorted_list))
|
|
e3c6bc1a8ac54fa4613fdd96f9e5aeebfed32c68
|
src/main/translator-xml/PMLToXML.py
|
src/main/translator-xml/PMLToXML.py
|
#!/usr/bin/env/python
import sys
import os.path
import subprocess
# Read in a pml file and save to an xml file
def translate_pml_file(xml_file, pml_file):
pml_path = os.path.abspath(pml_file.name)
xml_path = os.path.abspath(xml_file.name)
# Call XML generator
return_code = subprocess.call("Pmlxml %s %s" % (xml_path, pml_path), shell=True)
if return_code != 0:
print "Error occured reading PML file, exiting."
sys.exit(1)
def main():
import argparse
parser = argparse.ArgumentParser(description="Program to output the ast of a PML program in XML format")
parser.add_argument('-x', '--xml', required=True, type=file, help="Output abstract syntax tree in XML format")
parser.add_argument('-p', '--pml', required=True, type=file, help="Input PML file")
try:
args = parser.parse_args()
translate_pml_file(args.xml, args.pml)
except IOError, msg:
parser.error(str(msg))
if __name__ == "__main__":
main()
|
Add program to convert from PML to XML
|
Add program to convert from PML to XML
|
Python
|
mit
|
CS4098/GroupProject,CS4098/GroupProject,CS4098/GroupProject
|
Add program to convert from PML to XML
|
#!/usr/bin/env/python
import sys
import os.path
import subprocess
# Read in a pml file and save to an xml file
def translate_pml_file(xml_file, pml_file):
pml_path = os.path.abspath(pml_file.name)
xml_path = os.path.abspath(xml_file.name)
# Call XML generator
return_code = subprocess.call("Pmlxml %s %s" % (xml_path, pml_path), shell=True)
if return_code != 0:
print "Error occured reading PML file, exiting."
sys.exit(1)
def main():
import argparse
parser = argparse.ArgumentParser(description="Program to output the ast of a PML program in XML format")
parser.add_argument('-x', '--xml', required=True, type=file, help="Output abstract syntax tree in XML format")
parser.add_argument('-p', '--pml', required=True, type=file, help="Input PML file")
try:
args = parser.parse_args()
translate_pml_file(args.xml, args.pml)
except IOError, msg:
parser.error(str(msg))
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add program to convert from PML to XML<commit_after>
|
#!/usr/bin/env/python
import sys
import os.path
import subprocess
# Read in a pml file and save to an xml file
def translate_pml_file(xml_file, pml_file):
pml_path = os.path.abspath(pml_file.name)
xml_path = os.path.abspath(xml_file.name)
# Call XML generator
return_code = subprocess.call("Pmlxml %s %s" % (xml_path, pml_path), shell=True)
if return_code != 0:
print "Error occured reading PML file, exiting."
sys.exit(1)
def main():
import argparse
parser = argparse.ArgumentParser(description="Program to output the ast of a PML program in XML format")
parser.add_argument('-x', '--xml', required=True, type=file, help="Output abstract syntax tree in XML format")
parser.add_argument('-p', '--pml', required=True, type=file, help="Input PML file")
try:
args = parser.parse_args()
translate_pml_file(args.xml, args.pml)
except IOError, msg:
parser.error(str(msg))
if __name__ == "__main__":
main()
|
Add program to convert from PML to XML#!/usr/bin/env/python
import sys
import os.path
import subprocess
# Read in a pml file and save to an xml file
def translate_pml_file(xml_file, pml_file):
pml_path = os.path.abspath(pml_file.name)
xml_path = os.path.abspath(xml_file.name)
# Call XML generator
return_code = subprocess.call("Pmlxml %s %s" % (xml_path, pml_path), shell=True)
if return_code != 0:
print "Error occured reading PML file, exiting."
sys.exit(1)
def main():
import argparse
parser = argparse.ArgumentParser(description="Program to output the ast of a PML program in XML format")
parser.add_argument('-x', '--xml', required=True, type=file, help="Output abstract syntax tree in XML format")
parser.add_argument('-p', '--pml', required=True, type=file, help="Input PML file")
try:
args = parser.parse_args()
translate_pml_file(args.xml, args.pml)
except IOError, msg:
parser.error(str(msg))
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add program to convert from PML to XML<commit_after>#!/usr/bin/env/python
import sys
import os.path
import subprocess
# Read in a pml file and save to an xml file
def translate_pml_file(xml_file, pml_file):
pml_path = os.path.abspath(pml_file.name)
xml_path = os.path.abspath(xml_file.name)
# Call XML generator
return_code = subprocess.call("Pmlxml %s %s" % (xml_path, pml_path), shell=True)
if return_code != 0:
print "Error occured reading PML file, exiting."
sys.exit(1)
def main():
import argparse
parser = argparse.ArgumentParser(description="Program to output the ast of a PML program in XML format")
parser.add_argument('-x', '--xml', required=True, type=file, help="Output abstract syntax tree in XML format")
parser.add_argument('-p', '--pml', required=True, type=file, help="Input PML file")
try:
args = parser.parse_args()
translate_pml_file(args.xml, args.pml)
except IOError, msg:
parser.error(str(msg))
if __name__ == "__main__":
main()
|
|
b3466bc6ce36383eba573457cba34087fd54c84a
|
touchdown/tests/test_aws_waf_rule.py
|
touchdown/tests/test_aws_waf_rule.py
|
# Copyright 2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from botocore.stub import Stubber
from touchdown import frontends
from touchdown.core import goals, workspace
from touchdown.core.map import SerialMap
class TestBucketDescribe(unittest.TestCase):
def setUp(self):
self.workspace = workspace.Workspace()
self.aws = self.workspace.add_aws(access_key_id='dummy', secret_access_key='dummy', region='eu-west-1')
self.goal = goals.create(
"apply",
self.workspace,
frontends.ConsoleFrontend(interactive=False),
map=SerialMap
)
def test_annotate_object(self):
rule = self.aws.add_rule(name="myrule")
desc = self.goal.get_service(rule, "describe")
stub = Stubber(desc.client)
stub.add_response(
'get_rule',
{'Rule': {
'RuleId': 'ZzZzZz',
'Predicates': [],
}},
{'RuleId': 'ZzZzZz'},
)
with stub:
obj = desc.annotate_object({
"RuleId": "ZzZzZz"
})
self.assertEqual(obj["RuleId"], "ZzZzZz")
|
Test 'annotate_object' for a WAF rule
|
Test 'annotate_object' for a WAF rule
|
Python
|
apache-2.0
|
yaybu/touchdown
|
Test 'annotate_object' for a WAF rule
|
# Copyright 2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from botocore.stub import Stubber
from touchdown import frontends
from touchdown.core import goals, workspace
from touchdown.core.map import SerialMap
class TestBucketDescribe(unittest.TestCase):
def setUp(self):
self.workspace = workspace.Workspace()
self.aws = self.workspace.add_aws(access_key_id='dummy', secret_access_key='dummy', region='eu-west-1')
self.goal = goals.create(
"apply",
self.workspace,
frontends.ConsoleFrontend(interactive=False),
map=SerialMap
)
def test_annotate_object(self):
rule = self.aws.add_rule(name="myrule")
desc = self.goal.get_service(rule, "describe")
stub = Stubber(desc.client)
stub.add_response(
'get_rule',
{'Rule': {
'RuleId': 'ZzZzZz',
'Predicates': [],
}},
{'RuleId': 'ZzZzZz'},
)
with stub:
obj = desc.annotate_object({
"RuleId": "ZzZzZz"
})
self.assertEqual(obj["RuleId"], "ZzZzZz")
|
<commit_before><commit_msg>Test 'annotate_object' for a WAF rule<commit_after>
|
# Copyright 2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from botocore.stub import Stubber
from touchdown import frontends
from touchdown.core import goals, workspace
from touchdown.core.map import SerialMap
class TestBucketDescribe(unittest.TestCase):
def setUp(self):
self.workspace = workspace.Workspace()
self.aws = self.workspace.add_aws(access_key_id='dummy', secret_access_key='dummy', region='eu-west-1')
self.goal = goals.create(
"apply",
self.workspace,
frontends.ConsoleFrontend(interactive=False),
map=SerialMap
)
def test_annotate_object(self):
rule = self.aws.add_rule(name="myrule")
desc = self.goal.get_service(rule, "describe")
stub = Stubber(desc.client)
stub.add_response(
'get_rule',
{'Rule': {
'RuleId': 'ZzZzZz',
'Predicates': [],
}},
{'RuleId': 'ZzZzZz'},
)
with stub:
obj = desc.annotate_object({
"RuleId": "ZzZzZz"
})
self.assertEqual(obj["RuleId"], "ZzZzZz")
|
Test 'annotate_object' for a WAF rule# Copyright 2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from botocore.stub import Stubber
from touchdown import frontends
from touchdown.core import goals, workspace
from touchdown.core.map import SerialMap
class TestBucketDescribe(unittest.TestCase):
def setUp(self):
self.workspace = workspace.Workspace()
self.aws = self.workspace.add_aws(access_key_id='dummy', secret_access_key='dummy', region='eu-west-1')
self.goal = goals.create(
"apply",
self.workspace,
frontends.ConsoleFrontend(interactive=False),
map=SerialMap
)
def test_annotate_object(self):
rule = self.aws.add_rule(name="myrule")
desc = self.goal.get_service(rule, "describe")
stub = Stubber(desc.client)
stub.add_response(
'get_rule',
{'Rule': {
'RuleId': 'ZzZzZz',
'Predicates': [],
}},
{'RuleId': 'ZzZzZz'},
)
with stub:
obj = desc.annotate_object({
"RuleId": "ZzZzZz"
})
self.assertEqual(obj["RuleId"], "ZzZzZz")
|
<commit_before><commit_msg>Test 'annotate_object' for a WAF rule<commit_after># Copyright 2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from botocore.stub import Stubber
from touchdown import frontends
from touchdown.core import goals, workspace
from touchdown.core.map import SerialMap
class TestBucketDescribe(unittest.TestCase):
def setUp(self):
self.workspace = workspace.Workspace()
self.aws = self.workspace.add_aws(access_key_id='dummy', secret_access_key='dummy', region='eu-west-1')
self.goal = goals.create(
"apply",
self.workspace,
frontends.ConsoleFrontend(interactive=False),
map=SerialMap
)
def test_annotate_object(self):
rule = self.aws.add_rule(name="myrule")
desc = self.goal.get_service(rule, "describe")
stub = Stubber(desc.client)
stub.add_response(
'get_rule',
{'Rule': {
'RuleId': 'ZzZzZz',
'Predicates': [],
}},
{'RuleId': 'ZzZzZz'},
)
with stub:
obj = desc.annotate_object({
"RuleId": "ZzZzZz"
})
self.assertEqual(obj["RuleId"], "ZzZzZz")
|
|
394dfe501ea20d0b4437c99eb856cf024a9ec0d8
|
volunteers/management/commands/import_init_data.py
|
volunteers/management/commands/import_init_data.py
|
from django.core.management.base import BaseCommand #, CommandError
from volunteers.models import Edition
class Command(BaseCommand):
def handle(self, *args, **options):
Edition.init_generic_tasks()
|
Add management command for init data import
|
Add management command for init data import
|
Python
|
agpl-3.0
|
FOSDEM/volunteers,jrial/fosdem-volunteers,jrial/fosdem-volunteers,FOSDEM/volunteers,FOSDEM/volunteers,jrial/fosdem-volunteers,FOSDEM/volunteers,jrial/fosdem-volunteers
|
Add management command for init data import
|
from django.core.management.base import BaseCommand #, CommandError
from volunteers.models import Edition
class Command(BaseCommand):
def handle(self, *args, **options):
Edition.init_generic_tasks()
|
<commit_before><commit_msg>Add management command for init data import<commit_after>
|
from django.core.management.base import BaseCommand #, CommandError
from volunteers.models import Edition
class Command(BaseCommand):
def handle(self, *args, **options):
Edition.init_generic_tasks()
|
Add management command for init data importfrom django.core.management.base import BaseCommand #, CommandError
from volunteers.models import Edition
class Command(BaseCommand):
def handle(self, *args, **options):
Edition.init_generic_tasks()
|
<commit_before><commit_msg>Add management command for init data import<commit_after>from django.core.management.base import BaseCommand #, CommandError
from volunteers.models import Edition
class Command(BaseCommand):
def handle(self, *args, **options):
Edition.init_generic_tasks()
|
|
f02132a0d60bfe37febe4dd42fb17a5dd96c4698
|
test/functional/abc_mining_basic.py
|
test/functional/abc_mining_basic.py
|
#!/usr/bin/env python3
# Copyright (c) 2020 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Tests for Bitcoin ABC mining RPCs
"""
from test_framework.messages import (
COIN,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than_or_equal,
)
from decimal import Decimal
AXION_ACTIVATION_TIME = 2000000600
class AbcMiningRPCTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [[
'-enableminerfund',
'-axionactivationtime={}'.format(AXION_ACTIVATION_TIME),
], []]
def run_test(self):
node = self.nodes[0]
address = node.get_deterministic_priv_key().address
# Move MTP forward to axion activation
node.setmocktime(AXION_ACTIVATION_TIME)
node.generatetoaddress(6, address)
assert_equal(
node.getblockchaininfo()['mediantime'],
AXION_ACTIVATION_TIME)
# Assert the results of getblocktemplate have expected values. Keys not
# in 'expected' are not checked.
def assert_getblocktemplate(expected):
blockTemplate = node.getblocktemplate()
for key, value in expected.items():
assert_equal(blockTemplate[key], value)
def get_best_coinbase():
return node.getblock(node.getbestblockhash(), 2)['tx'][0]
coinbase = get_best_coinbase()
assert_equal(len(coinbase['vout']), 1)
block_reward = coinbase['vout'][0]['value']
# We don't need to test all fields in getblocktemplate since many of
# them are covered in mining_basic.py
assert_equal(node.getmempoolinfo()['size'], 0)
assert_getblocktemplate({
# Although the coinbase value need not necessarily be the same as
# the last block due to halvings and fees, we know this to be true
# since we are not crossing a halving boundary and there are no
# transactions in the mempool.
'coinbasevalue': block_reward * COIN,
})
# First block with the new rules
node.generatetoaddress(1, address)
# We expect the coinbase to have multiple outputs now
coinbase = get_best_coinbase()
assert_greater_than_or_equal(len(coinbase['vout']), 2)
total = Decimal()
for o in coinbase['vout']:
total += o['value']
assert_equal(total, block_reward)
assert_getblocktemplate({
# Again, we assume the coinbase value is the same as prior blocks.
'coinbasevalue': block_reward * COIN,
})
if __name__ == '__main__':
AbcMiningRPCTest().main()
|
Add a test for ABC-specific getblocktemplate behavior
|
Add a test for ABC-specific getblocktemplate behavior
Summary:
First step in T1014
As we modify behavior in getblocktemplate, we really need better test coverage.
D7201 (f8360b5e981) is one such change. This patch tests that behavior specifically
but this test can be expanded to cover other changes that we may have made or will
make in the future.
Test Plan:
Before D7201, this test should fail
```
git checkout f8360b5e981^
git cherry-pick this-patch
test_runner.py abc_mining_basic
```
But the test passes today:
```
git checkout this-patch
test_runner.py abc_mining_basic
test_runner.py --with-axionactivation abc_mining_basic
```
Reviewers: #bitcoin_abc, Fabien, deadalnix
Reviewed By: #bitcoin_abc, Fabien, deadalnix
Subscribers: Fabien
Differential Revision: https://reviews.bitcoinabc.org/D7230
|
Python
|
mit
|
Bitcoin-ABC/bitcoin-abc,Bitcoin-ABC/bitcoin-abc,Bitcoin-ABC/bitcoin-abc,Bitcoin-ABC/bitcoin-abc,Bitcoin-ABC/bitcoin-abc,Bitcoin-ABC/bitcoin-abc,Bitcoin-ABC/bitcoin-abc,Bitcoin-ABC/bitcoin-abc,Bitcoin-ABC/bitcoin-abc,Bitcoin-ABC/bitcoin-abc,Bitcoin-ABC/bitcoin-abc,Bitcoin-ABC/bitcoin-abc,Bitcoin-ABC/bitcoin-abc
|
Add a test for ABC-specific getblocktemplate behavior
Summary:
First step in T1014
As we modify behavior in getblocktemplate, we really need better test coverage.
D7201 (f8360b5e981) is one such change. This patch tests that behavior specifically
but this test can be expanded to cover other changes that we may have made or will
make in the future.
Test Plan:
Before D7201, this test should fail
```
git checkout f8360b5e981^
git cherry-pick this-patch
test_runner.py abc_mining_basic
```
But the test passes today:
```
git checkout this-patch
test_runner.py abc_mining_basic
test_runner.py --with-axionactivation abc_mining_basic
```
Reviewers: #bitcoin_abc, Fabien, deadalnix
Reviewed By: #bitcoin_abc, Fabien, deadalnix
Subscribers: Fabien
Differential Revision: https://reviews.bitcoinabc.org/D7230
|
#!/usr/bin/env python3
# Copyright (c) 2020 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Tests for Bitcoin ABC mining RPCs
"""
from test_framework.messages import (
COIN,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than_or_equal,
)
from decimal import Decimal
AXION_ACTIVATION_TIME = 2000000600
class AbcMiningRPCTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [[
'-enableminerfund',
'-axionactivationtime={}'.format(AXION_ACTIVATION_TIME),
], []]
def run_test(self):
node = self.nodes[0]
address = node.get_deterministic_priv_key().address
# Move MTP forward to axion activation
node.setmocktime(AXION_ACTIVATION_TIME)
node.generatetoaddress(6, address)
assert_equal(
node.getblockchaininfo()['mediantime'],
AXION_ACTIVATION_TIME)
# Assert the results of getblocktemplate have expected values. Keys not
# in 'expected' are not checked.
def assert_getblocktemplate(expected):
blockTemplate = node.getblocktemplate()
for key, value in expected.items():
assert_equal(blockTemplate[key], value)
def get_best_coinbase():
return node.getblock(node.getbestblockhash(), 2)['tx'][0]
coinbase = get_best_coinbase()
assert_equal(len(coinbase['vout']), 1)
block_reward = coinbase['vout'][0]['value']
# We don't need to test all fields in getblocktemplate since many of
# them are covered in mining_basic.py
assert_equal(node.getmempoolinfo()['size'], 0)
assert_getblocktemplate({
# Although the coinbase value need not necessarily be the same as
# the last block due to halvings and fees, we know this to be true
# since we are not crossing a halving boundary and there are no
# transactions in the mempool.
'coinbasevalue': block_reward * COIN,
})
# First block with the new rules
node.generatetoaddress(1, address)
# We expect the coinbase to have multiple outputs now
coinbase = get_best_coinbase()
assert_greater_than_or_equal(len(coinbase['vout']), 2)
total = Decimal()
for o in coinbase['vout']:
total += o['value']
assert_equal(total, block_reward)
assert_getblocktemplate({
# Again, we assume the coinbase value is the same as prior blocks.
'coinbasevalue': block_reward * COIN,
})
if __name__ == '__main__':
AbcMiningRPCTest().main()
|
<commit_before><commit_msg>Add a test for ABC-specific getblocktemplate behavior
Summary:
First step in T1014
As we modify behavior in getblocktemplate, we really need better test coverage.
D7201 (f8360b5e981) is one such change. This patch tests that behavior specifically
but this test can be expanded to cover other changes that we may have made or will
make in the future.
Test Plan:
Before D7201, this test should fail
```
git checkout f8360b5e981^
git cherry-pick this-patch
test_runner.py abc_mining_basic
```
But the test passes today:
```
git checkout this-patch
test_runner.py abc_mining_basic
test_runner.py --with-axionactivation abc_mining_basic
```
Reviewers: #bitcoin_abc, Fabien, deadalnix
Reviewed By: #bitcoin_abc, Fabien, deadalnix
Subscribers: Fabien
Differential Revision: https://reviews.bitcoinabc.org/D7230<commit_after>
|
#!/usr/bin/env python3
# Copyright (c) 2020 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Tests for Bitcoin ABC mining RPCs
"""
from test_framework.messages import (
COIN,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than_or_equal,
)
from decimal import Decimal
AXION_ACTIVATION_TIME = 2000000600
class AbcMiningRPCTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [[
'-enableminerfund',
'-axionactivationtime={}'.format(AXION_ACTIVATION_TIME),
], []]
def run_test(self):
node = self.nodes[0]
address = node.get_deterministic_priv_key().address
# Move MTP forward to axion activation
node.setmocktime(AXION_ACTIVATION_TIME)
node.generatetoaddress(6, address)
assert_equal(
node.getblockchaininfo()['mediantime'],
AXION_ACTIVATION_TIME)
# Assert the results of getblocktemplate have expected values. Keys not
# in 'expected' are not checked.
def assert_getblocktemplate(expected):
blockTemplate = node.getblocktemplate()
for key, value in expected.items():
assert_equal(blockTemplate[key], value)
def get_best_coinbase():
return node.getblock(node.getbestblockhash(), 2)['tx'][0]
coinbase = get_best_coinbase()
assert_equal(len(coinbase['vout']), 1)
block_reward = coinbase['vout'][0]['value']
# We don't need to test all fields in getblocktemplate since many of
# them are covered in mining_basic.py
assert_equal(node.getmempoolinfo()['size'], 0)
assert_getblocktemplate({
# Although the coinbase value need not necessarily be the same as
# the last block due to halvings and fees, we know this to be true
# since we are not crossing a halving boundary and there are no
# transactions in the mempool.
'coinbasevalue': block_reward * COIN,
})
# First block with the new rules
node.generatetoaddress(1, address)
# We expect the coinbase to have multiple outputs now
coinbase = get_best_coinbase()
assert_greater_than_or_equal(len(coinbase['vout']), 2)
total = Decimal()
for o in coinbase['vout']:
total += o['value']
assert_equal(total, block_reward)
assert_getblocktemplate({
# Again, we assume the coinbase value is the same as prior blocks.
'coinbasevalue': block_reward * COIN,
})
if __name__ == '__main__':
AbcMiningRPCTest().main()
|
Add a test for ABC-specific getblocktemplate behavior
Summary:
First step in T1014
As we modify behavior in getblocktemplate, we really need better test coverage.
D7201 (f8360b5e981) is one such change. This patch tests that behavior specifically
but this test can be expanded to cover other changes that we may have made or will
make in the future.
Test Plan:
Before D7201, this test should fail
```
git checkout f8360b5e981^
git cherry-pick this-patch
test_runner.py abc_mining_basic
```
But the test passes today:
```
git checkout this-patch
test_runner.py abc_mining_basic
test_runner.py --with-axionactivation abc_mining_basic
```
Reviewers: #bitcoin_abc, Fabien, deadalnix
Reviewed By: #bitcoin_abc, Fabien, deadalnix
Subscribers: Fabien
Differential Revision: https://reviews.bitcoinabc.org/D7230#!/usr/bin/env python3
# Copyright (c) 2020 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Tests for Bitcoin ABC mining RPCs
"""
from test_framework.messages import (
COIN,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than_or_equal,
)
from decimal import Decimal
AXION_ACTIVATION_TIME = 2000000600
class AbcMiningRPCTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [[
'-enableminerfund',
'-axionactivationtime={}'.format(AXION_ACTIVATION_TIME),
], []]
def run_test(self):
node = self.nodes[0]
address = node.get_deterministic_priv_key().address
# Move MTP forward to axion activation
node.setmocktime(AXION_ACTIVATION_TIME)
node.generatetoaddress(6, address)
assert_equal(
node.getblockchaininfo()['mediantime'],
AXION_ACTIVATION_TIME)
# Assert the results of getblocktemplate have expected values. Keys not
# in 'expected' are not checked.
def assert_getblocktemplate(expected):
blockTemplate = node.getblocktemplate()
for key, value in expected.items():
assert_equal(blockTemplate[key], value)
def get_best_coinbase():
return node.getblock(node.getbestblockhash(), 2)['tx'][0]
coinbase = get_best_coinbase()
assert_equal(len(coinbase['vout']), 1)
block_reward = coinbase['vout'][0]['value']
# We don't need to test all fields in getblocktemplate since many of
# them are covered in mining_basic.py
assert_equal(node.getmempoolinfo()['size'], 0)
assert_getblocktemplate({
# Although the coinbase value need not necessarily be the same as
# the last block due to halvings and fees, we know this to be true
# since we are not crossing a halving boundary and there are no
# transactions in the mempool.
'coinbasevalue': block_reward * COIN,
})
# First block with the new rules
node.generatetoaddress(1, address)
# We expect the coinbase to have multiple outputs now
coinbase = get_best_coinbase()
assert_greater_than_or_equal(len(coinbase['vout']), 2)
total = Decimal()
for o in coinbase['vout']:
total += o['value']
assert_equal(total, block_reward)
assert_getblocktemplate({
# Again, we assume the coinbase value is the same as prior blocks.
'coinbasevalue': block_reward * COIN,
})
if __name__ == '__main__':
AbcMiningRPCTest().main()
|
<commit_before><commit_msg>Add a test for ABC-specific getblocktemplate behavior
Summary:
First step in T1014
As we modify behavior in getblocktemplate, we really need better test coverage.
D7201 (f8360b5e981) is one such change. This patch tests that behavior specifically
but this test can be expanded to cover other changes that we may have made or will
make in the future.
Test Plan:
Before D7201, this test should fail
```
git checkout f8360b5e981^
git cherry-pick this-patch
test_runner.py abc_mining_basic
```
But the test passes today:
```
git checkout this-patch
test_runner.py abc_mining_basic
test_runner.py --with-axionactivation abc_mining_basic
```
Reviewers: #bitcoin_abc, Fabien, deadalnix
Reviewed By: #bitcoin_abc, Fabien, deadalnix
Subscribers: Fabien
Differential Revision: https://reviews.bitcoinabc.org/D7230<commit_after>#!/usr/bin/env python3
# Copyright (c) 2020 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Tests for Bitcoin ABC mining RPCs
"""
from test_framework.messages import (
COIN,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than_or_equal,
)
from decimal import Decimal
AXION_ACTIVATION_TIME = 2000000600
class AbcMiningRPCTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [[
'-enableminerfund',
'-axionactivationtime={}'.format(AXION_ACTIVATION_TIME),
], []]
def run_test(self):
node = self.nodes[0]
address = node.get_deterministic_priv_key().address
# Move MTP forward to axion activation
node.setmocktime(AXION_ACTIVATION_TIME)
node.generatetoaddress(6, address)
assert_equal(
node.getblockchaininfo()['mediantime'],
AXION_ACTIVATION_TIME)
# Assert the results of getblocktemplate have expected values. Keys not
# in 'expected' are not checked.
def assert_getblocktemplate(expected):
blockTemplate = node.getblocktemplate()
for key, value in expected.items():
assert_equal(blockTemplate[key], value)
def get_best_coinbase():
return node.getblock(node.getbestblockhash(), 2)['tx'][0]
coinbase = get_best_coinbase()
assert_equal(len(coinbase['vout']), 1)
block_reward = coinbase['vout'][0]['value']
# We don't need to test all fields in getblocktemplate since many of
# them are covered in mining_basic.py
assert_equal(node.getmempoolinfo()['size'], 0)
assert_getblocktemplate({
# Although the coinbase value need not necessarily be the same as
# the last block due to halvings and fees, we know this to be true
# since we are not crossing a halving boundary and there are no
# transactions in the mempool.
'coinbasevalue': block_reward * COIN,
})
# First block with the new rules
node.generatetoaddress(1, address)
# We expect the coinbase to have multiple outputs now
coinbase = get_best_coinbase()
assert_greater_than_or_equal(len(coinbase['vout']), 2)
total = Decimal()
for o in coinbase['vout']:
total += o['value']
assert_equal(total, block_reward)
assert_getblocktemplate({
# Again, we assume the coinbase value is the same as prior blocks.
'coinbasevalue': block_reward * COIN,
})
if __name__ == '__main__':
AbcMiningRPCTest().main()
|
|
01a27d35b6d14d6e5c59646442c22e0d1f98c0cf
|
examples/ultracoldNeutralPlasma.py
|
examples/ultracoldNeutralPlasma.py
|
import ucilib.Sim as Sim
import ucilib.BorisUpdater as BorisUpdater
import numpy as np
# Some helpful constants.
fund_charge = 1.602176565e-19
# Mass of Be^+ ions.
ion_mass = 8.9465 * 1.673e-27
# Create a simulation with n particles.
n = 10000
s = Sim.Sim()
s.ptcls.set_nptcls(n)
# 1/e radius of cloud.
s.ptcls.rmax = 2.0e-4
s.ptcls.init_ptcls(charge = fund_charge, mass = ion_mass)
# Turn the first n/2 particles into electrons by setting their mass and
# charge.
s.ptcls.q()[:(n/2)] = -fund_charge * np.ones(n/2)
s.ptcls.m()[:(n/2)] = 9.1e-31 * np.ones(n/2)
# Finally we set the updater.
s.updater = BorisUpdater.BorisUpdater(s.ctx, s.queue)
|
Add an example simulation setup for an ultracold neutral plasma.
|
Add an example simulation setup for an ultracold neutral plasma.
|
Python
|
mit
|
Tech-XCorp/ultracold-ions,Tech-XCorp/ultracold-ions,hosseinsadeghi/ultracold-ions,hosseinsadeghi/ultracold-ions
|
Add an example simulation setup for an ultracold neutral plasma.
|
import ucilib.Sim as Sim
import ucilib.BorisUpdater as BorisUpdater
import numpy as np
# Some helpful constants.
fund_charge = 1.602176565e-19
# Mass of Be^+ ions.
ion_mass = 8.9465 * 1.673e-27
# Create a simulation with n particles.
n = 10000
s = Sim.Sim()
s.ptcls.set_nptcls(n)
# 1/e radius of cloud.
s.ptcls.rmax = 2.0e-4
s.ptcls.init_ptcls(charge = fund_charge, mass = ion_mass)
# Turn the first n/2 particles into electrons by setting their mass and
# charge.
s.ptcls.q()[:(n/2)] = -fund_charge * np.ones(n/2)
s.ptcls.m()[:(n/2)] = 9.1e-31 * np.ones(n/2)
# Finally we set the updater.
s.updater = BorisUpdater.BorisUpdater(s.ctx, s.queue)
|
<commit_before><commit_msg>Add an example simulation setup for an ultracold neutral plasma.<commit_after>
|
import ucilib.Sim as Sim
import ucilib.BorisUpdater as BorisUpdater
import numpy as np
# Some helpful constants.
fund_charge = 1.602176565e-19
# Mass of Be^+ ions.
ion_mass = 8.9465 * 1.673e-27
# Create a simulation with n particles.
n = 10000
s = Sim.Sim()
s.ptcls.set_nptcls(n)
# 1/e radius of cloud.
s.ptcls.rmax = 2.0e-4
s.ptcls.init_ptcls(charge = fund_charge, mass = ion_mass)
# Turn the first n/2 particles into electrons by setting their mass and
# charge.
s.ptcls.q()[:(n/2)] = -fund_charge * np.ones(n/2)
s.ptcls.m()[:(n/2)] = 9.1e-31 * np.ones(n/2)
# Finally we set the updater.
s.updater = BorisUpdater.BorisUpdater(s.ctx, s.queue)
|
Add an example simulation setup for an ultracold neutral plasma.import ucilib.Sim as Sim
import ucilib.BorisUpdater as BorisUpdater
import numpy as np
# Some helpful constants.
fund_charge = 1.602176565e-19
# Mass of Be^+ ions.
ion_mass = 8.9465 * 1.673e-27
# Create a simulation with n particles.
n = 10000
s = Sim.Sim()
s.ptcls.set_nptcls(n)
# 1/e radius of cloud.
s.ptcls.rmax = 2.0e-4
s.ptcls.init_ptcls(charge = fund_charge, mass = ion_mass)
# Turn the first n/2 particles into electrons by setting their mass and
# charge.
s.ptcls.q()[:(n/2)] = -fund_charge * np.ones(n/2)
s.ptcls.m()[:(n/2)] = 9.1e-31 * np.ones(n/2)
# Finally we set the updater.
s.updater = BorisUpdater.BorisUpdater(s.ctx, s.queue)
|
<commit_before><commit_msg>Add an example simulation setup for an ultracold neutral plasma.<commit_after>import ucilib.Sim as Sim
import ucilib.BorisUpdater as BorisUpdater
import numpy as np
# Some helpful constants.
fund_charge = 1.602176565e-19
# Mass of Be^+ ions.
ion_mass = 8.9465 * 1.673e-27
# Create a simulation with n particles.
n = 10000
s = Sim.Sim()
s.ptcls.set_nptcls(n)
# 1/e radius of cloud.
s.ptcls.rmax = 2.0e-4
s.ptcls.init_ptcls(charge = fund_charge, mass = ion_mass)
# Turn the first n/2 particles into electrons by setting their mass and
# charge.
s.ptcls.q()[:(n/2)] = -fund_charge * np.ones(n/2)
s.ptcls.m()[:(n/2)] = 9.1e-31 * np.ones(n/2)
# Finally we set the updater.
s.updater = BorisUpdater.BorisUpdater(s.ctx, s.queue)
|