| column | type | min length | max length |
|---|---|---|---|
| commit | string | 40 | 40 |
| old_file | string | 4 | 118 |
| new_file | string | 4 | 118 |
| old_contents | string | 0 | 2.94k |
| new_contents | string | 1 | 4.43k |
| subject | string | 15 | 444 |
| message | string | 16 | 3.45k |
| lang | string (1 class) | n/a | n/a |
| license | string (13 classes) | n/a | n/a |
| repos | string | 5 | 43.2k |
| prompt | string | 17 | 4.58k |
| response | string | 1 | 4.43k |
| prompt_tagged | string | 58 | 4.62k |
| response_tagged | string | 1 | 4.43k |
| text | string | 132 | 7.29k |
| text_tagged | string | 173 | 7.33k |
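The last six columns are built from the base fields by concatenation. A minimal sketch of that mapping, inferred from the dataset's sample rows; the function name and the dict-style row access are illustrative, not part of the dataset's own tooling:

```python
def derive_columns(row):
    """Rebuild the derived columns of one row from its base fields.

    Illustrative sketch only: the mapping is inferred from the sample rows,
    not taken from the dataset's build script.
    """
    prompt = row["old_contents"] + row["message"]
    response = row["new_contents"]
    prompt_tagged = ("<commit_before>" + row["old_contents"] +
                     "<commit_msg>" + row["message"] + "<commit_after>")
    response_tagged = row["new_contents"]
    return {
        "prompt": prompt,
        "response": response,
        "prompt_tagged": prompt_tagged,
        "response_tagged": response_tagged,
        "text": prompt + response,
        "text_tagged": prompt_tagged + response_tagged,
    }
```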
41795bf65f6d834007c7f352fd079084f5ed940f
|
calc.py
|
calc.py
|
# -*- coding: utf-8 -*-
def add(x, y):
    """
    Return the result of adding the arguments x and y.

    >>> add(2, 3)
    5
    """
    return x + y
|
Implement sample module and doctest
|
Implement sample module and doctest
|
Python
|
mit
|
raimon49/python-local-wheels-sample
|
41ca7f51bc169dee1e371143615f2f0ae4880523
|
examples/defining_new_state_relation.py
|
examples/defining_new_state_relation.py
|
import numpy as np
import matplotlib.pyplot as plt
from math import log

from rsfmodel import rsf


# This is really just the Ruina relation, but let's pretend we invented it!
# We'll inherit attributes from rsf.StateRelation, but you wouldn't have to.
# It does provide velocity contribution calculation for us though!
class MyStateRelation(rsf.StateRelation):
    # Need to provide a steady state calculation method
    def _set_steady_state(self, system):
        self.state = self.Dc / system.vref

    def evolve_state(self, system):
        if self.state is None:
            self._set_steady_state(system)
        return -1 * (system.v * self.state / self.Dc) * log(system.v * self.state / self.Dc)


model = rsf.Model()

# Set model initial conditions
model.mu0 = 0.6  # Friction initial (at the reference velocity)
model.a = 0.01  # Empirical coefficient for the direct effect
model.k = 1e-3  # Normalized System stiffness (friction/micron)
model.v = 1.  # Initial slider velocity, generally is vlp(t=0)
model.vref = 1.  # Reference velocity, generally vlp(t=0)

state1 = MyStateRelation(model)
state1.b = 0.005  # Empirical coefficient for the evolution effect
state1.Dc = 10.  # Critical slip distance

model.state_relations = [state1]  # Which state relation we want to use

# We want to solve for 40 seconds at 100Hz
model.time = np.arange(0, 40.01, 0.01)

# We want to slide at 1 um/s for 10 s, then at 10 um/s for 31
lp_velocity = np.ones_like(model.time)
lp_velocity[10*100:] = 10.  # Velocity after 10 seconds is 10 um/s

# Set the model load point velocity, must be same shape as model.time
model.loadpoint_velocity = lp_velocity

# Run the model!
model.solve()

# Make the phase plot
rsf.phasePlot(model)

# Make a plot in displacement
rsf.dispPlot(model)

# Make a plot in time
rsf.timePlot(model)
|
Add example of how to define your own state relation
|
Add example of how to define your own state relation
|
Python
|
mit
|
jrleeman/rsfmodel
|
d8491825d38b6b9b393723467fd50c41be8e610f
|
bluebottle/events/migrations/0015_auto_20200226_0838.py
|
bluebottle/events/migrations/0015_auto_20200226_0838.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2020-02-26 07:38
from __future__ import unicode_literals

from datetime import datetime

from timezonefinder import TimezoneFinder
import pytz

from django.db import migrations
from django.utils import timezone

tf = TimezoneFinder()


def set_timezone(apps, schema_editor):
    Event = apps.get_model('events', 'Event')
    for event in Event.objects.filter(start__isnull=False, location__isnull=False):
        tz_name = tf.timezone_at(
            lng=event.location.position.x,
            lat=event.location.position.y
        )
        tz = pytz.timezone(tz_name)
        start = event.start.astimezone(timezone.get_current_timezone())
        event.start = tz.localize(
            datetime(
                start.year,
                start.month,
                start.day,
                start.hour,
                start.minute,
            )
        )
        event.save()


class Migration(migrations.Migration):

    dependencies = [
        ('events', '0014_auto_20200217_1107'),
    ]

    operations = [
        migrations.RunPython(set_timezone)
    ]
|
Add migration to set start time to local timezone
|
Add migration to set start time to local timezone
|
Python
|
bsd-3-clause
|
onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle
|
|
50e50b3d3b98b9a3222194a8d3797ec9a9c91551
|
python-omega-client/sample_usage.py
|
python-omega-client/sample_usage.py
|
# coding: utf-8
from omega_client import OmegaClient
clt = OmegaClient('http://offlineforward.dataman-inc.net', 'admin@shurenyun.com', 'Dataman1234')
print clt.get_clusters()
print clt.get_cluster(630)
print clt.get_node_identifier(630)
print clt.post_nodes(630, id='83ec44c13e2a482aa4645713d3857ff6', name='test_node')
print clt.get_apps(630)
print clt.get_app(627, 750)
|
Add one sample usage file.
|
Add one sample usage file.
|
Python
|
apache-2.0
|
Dataman-Cloud/omega-client
|
9029ebbefa019c462d8bf7228517c7767d221e46
|
tests/test_feeds.py
|
tests/test_feeds.py
|
import pytest

from django.core.urlresolvers import reverse

from name.models import Name, Location

pytestmark = pytest.mark.django_db


def test_feed_has_georss_namespace(client):
    response = client.get(reverse('name_feed'))
    assert 'xmlns:georss' in response.content


def test_feed_response_is_application_xml(client):
    response = client.get(reverse('name_feed'))
    assert response['Content-Type'] == 'application/xml'


def test_feed_item_has_location(client):
    name = Name.objects.create(name="Test", name_type=0)
    Location.objects.create(
        status=0,
        latitude=33.210241,
        longitude=-97.148857,
        belong_to_name=name)
    response = client.get(reverse('name_feed'))
    assert name.location_set.current_location.geo_point() in response.content
|
Add tests for additional functionality provided by NameAtomFeedType.
|
Add tests for additional functionality provided by NameAtomFeedType.
|
Python
|
bsd-3-clause
|
unt-libraries/django-name,damonkelley/django-name,unt-libraries/django-name,unt-libraries/django-name,damonkelley/django-name,damonkelley/django-name
|
c78c4b4bd56453fe1f3a7db71222c12336c2dcf5
|
future/tests/test_str_is_unicode.py
|
future/tests/test_str_is_unicode.py
|
from __future__ import absolute_import

from future import str_is_unicode

import unittest


class TestIterators(unittest.TestCase):
    def test_str(self):
        self.assertIsNot(str, bytes)            # Py2: assertIsNot only in 2.7
        self.assertEqual(str('blah'), u'blah')  # Py3.3 and Py2 only

unittest.main()
|
Add tests for str_is_unicode module
|
Add tests for str_is_unicode module
|
Python
|
mit
|
michaelpacer/python-future,michaelpacer/python-future,krischer/python-future,QuLogic/python-future,QuLogic/python-future,PythonCharmers/python-future,PythonCharmers/python-future,krischer/python-future
|
8bf20ed375dba6caef2095f175863c2953daa67e
|
tests/utils_test.py
|
tests/utils_test.py
|
import datetime
import json
import unittest

from clippings.utils import DatetimeJSONEncoder

DATE = datetime.datetime(2016, 1, 2, 3, 4, 5)
DATE_STRING = "2016-01-02T03:04:05"


class DatetimeJSONEncoderTest(unittest.TestCase):

    def test_datetime_encoder_format(self):
        dictionary = {"now": DATE}
        expected_json_string = json.dumps({"now": DATE_STRING})
        json_string = json.dumps(dictionary, cls=DatetimeJSONEncoder)
        self.assertEqual(expected_json_string, json_string)

    def test_datetime_encoder_typeerror(self):
        undumpable_dictionary = {"set": set()}
        # Ensure we let the parent raise TypeError
        with self.assertRaises(TypeError):
            json_string = json.dumps(undumpable_dictionary, cls=DatetimeJSONEncoder)
|
Add basic test for DatetimeJSONEncoder
|
Add basic test for DatetimeJSONEncoder
|
Python
|
mit
|
samueldg/clippings
|
6e718a103c1a820125a50cd80a67fac6c810aa87
|
CodeFights/isSumConsecutive2.py
|
CodeFights/isSumConsecutive2.py
|
#!/usr/local/bin/python
# Code Fights Is Sum Consecutive 2 Problem


def isSumConsecutive2(n):
    count = 0
    nums = list(range(1, n))
    for i in range(len(nums)):
        for j in range(i + 1, len(nums)):
            tmp = sum(nums[i:j])
            if tmp == n:
                count += 1
            if tmp > n:
                break
    return count


def main():
    tests = [
        # [9, 2],
        # [8, 0],
        [15, 3]
    ]
    for t in tests:
        res = isSumConsecutive2(t[0])
        ans = t[1]
        if ans == res:
            print("PASSED: isSumConsecutive2({}) returned {}"
                  .format(t[0], res))
        else:
            print("FAILED: isSumConsecutive2({}) returned {}, answer: {}"
                  .format(t[0], res, ans))


if __name__ == '__main__':
    main()
|
Solve Code Fights is sum consecutive 2 problem
|
Solve Code Fights is sum consecutive 2 problem
|
Python
|
mit
|
HKuz/Test_Code
|
178f3d84310c7c6caabb93260c962e0663713b87
|
st2common/tests/unit/test_util_compact.py
|
st2common/tests/unit/test_util_compact.py
|
# -*- coding: utf-8 -*-
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest2

from st2common.util.compat import to_ascii

__all__ = [
    'CompatUtilsTestCase'
]


class CompatUtilsTestCase(unittest2.TestCase):
    def test_to_ascii(self):
        expected_values = [
            ('already ascii', 'already ascii'),
            (u'foo', 'foo'),
            ('٩(̾●̮̮̃̾•̃̾)۶', '()'),
            ('\xd9\xa9', '')
        ]

        for input_value, expected_value in expected_values:
            result = to_ascii(input_value)
            self.assertEqual(result, expected_value)
|
Add tests for to_ascii function.
|
Add tests for to_ascii function.
|
Python
|
apache-2.0
|
Plexxi/st2,StackStorm/st2,StackStorm/st2,nzlosh/st2,nzlosh/st2,Plexxi/st2,nzlosh/st2,Plexxi/st2,nzlosh/st2,Plexxi/st2,StackStorm/st2,StackStorm/st2
|
332c99e06505e084fb094c43ffd01ff58d53366b
|
utcdatetime/tests/test_time.py
|
utcdatetime/tests/test_time.py
|
import utcdatetime
from nose.tools import assert_equal
import datetime

TEST_CASES = [
    (
        utcdatetime.utcdatetime(2015, 5, 11, 16, 43, 10, 45),
        datetime.time(16, 43, 10, 45)
    ),
]


def test_time_method():
    for utc_dt, expected_time in TEST_CASES:
        yield _assert_time_equals, utc_dt, expected_time


def _assert_time_equals(utc_dt, expected_time):
    got = utc_dt.time()
    assert_equal(expected_time, got)
|
Add test for utcdatetime.time() method
|
Add test for utcdatetime.time() method
|
Python
|
mit
|
paulfurley/python-utcdatetime,paulfurley/python-utcdatetime
|
4dd04ac5e74c1eaa341f4360a9d931ac6308288f
|
bin/cover_title.py
|
bin/cover_title.py
|
#!/usr/bin/env python

import sys
import os
import yaml
import re

file = sys.argv[1]
metafile = re.sub("-.*$", ".yml", file)
project = os.environ["PROJECT"]

metadata = open(metafile, 'r').read()
yamldata = yaml.load(metadata)

if "title" in yamldata:
    title = yamldata["title"]
else:
    title = "ERROR: No meta Data"

words = title.split(" ")

max = 0
for word in words:
    len = word.__len__()
    if len > max:
        max = len

lines = [ "" ]
for word in words:
    if lines[-1].__len__() == 0:
        lines[-1] = word
    elif lines[-1].__len__() + word.__len__() <= max + 2:
        lines[-1] += " " + word
    else:
        lines.append(word)

for line in lines:
    print(line)
|
Add script for breaking titles into lines
|
Add script for breaking titles into lines
|
Python
|
agpl-3.0
|
alerque/casile,alerque/casile,alerque/casile,alerque/casile,alerque/casile
|
859a9aa684b793a31dbf0b1f8e559d5cd40a152e
|
tests/props_test.py
|
tests/props_test.py
|
from fixture import GeneratorTest
from google.appengine.ext import testbed, ndb


class PropsTest(GeneratorTest):

    def testLotsaModelsGenerated(self):
        for klass in self.klasses:
            k = klass._get_kind()
            assert ndb.Model._lookup_model(k) == klass, klass
|
from fixture import GeneratorTest
from google.appengine.ext import testbed, ndb
import gaend.generator as generator
import re


class PropsTest(GeneratorTest):

    def testEntityToPropsAndBack(self):
        for klass in self.klasses:
            # Create entity1 of this klass
            kind = klass._get_kind()
            # 'BlobPropertyRepeatedModel' -> 'Blob'
            prop_name = re.match(r'(.+)Property', kind).group(1)
            prop_name = prop_name[0].lower() + prop_name[1:]
            prop_klass = klass._properties[prop_name + 'Property'].__class__
            prop_type = generator.PROPERTIES[prop_klass]
            for v in generator.VALUES[prop_type]:
                print v
            # Convert entity1 to props form
            # Create entity2 derived from props
            # Assure that first entity1 is equal to entity2
|
Enumerate through choices of property values
|
Enumerate through choices of property values
|
Python
|
mit
|
samedhi/gaend,talkiq/gaend,talkiq/gaend,samedhi/gaend
|
6ba3fe75f5939a58aee0f3835139b46f1ea8b46f
|
config/trace_pox_l2_multi.py
|
config/trace_pox_l2_multi.py
|
from config.experiment_config_lib import ControllerConfig
from sts.topology import StarTopology, BufferedPatchPanel, MeshTopology, GridTopology, BinaryLeafTreeTopology
from sts.controller_manager import UserSpaceControllerPatchPanel
from sts.control_flow.fuzzer import Fuzzer
from sts.control_flow.interactive import Interactive
from sts.input_traces.input_logger import InputLogger
from sts.simulation_state import SimulationConfig
from sts.happensbefore.hb_logger import HappensBeforeLogger
from config.application_events import AppCircuitPusher

# Use POX as our controller
start_cmd = ('''./pox.py --verbose '''
             '''openflow.discovery forwarding.l2_multi '''
             '''openflow.of_01 --address=__address__ --port=__port__ ''')

controllers = [ControllerConfig(start_cmd, cwd="pox/")]
topology_class = StarTopology
topology_params = "num_hosts=3"
# topology_class = MeshTopology
# topology_params = "num_switches=5"

# Where should the output files be written to
results_dir = "traces/pox_l2_multi-star3-traffic-failures"

apps = None

# include all defaults
simulation_config = SimulationConfig(controller_configs=controllers,
                                     topology_class=topology_class,
                                     topology_params=topology_params,
                                     patch_panel_class=BufferedPatchPanel,
                                     controller_patch_panel_class=UserSpaceControllerPatchPanel,
                                     dataplane_trace=None,
                                     snapshot_service=None,
                                     multiplex_sockets=False,
                                     violation_persistence_threshold=None,
                                     kill_controllers_on_exit=True,
                                     interpose_on_controllers=False,
                                     ignore_interposition=False,
                                     hb_logger_class=HappensBeforeLogger,
                                     hb_logger_params=results_dir,
                                     apps=apps)

# Manual, interactive mode
# control_flow = Interactive(simulation_config, input_logger=InputLogger())

control_flow = Fuzzer(simulation_config,
                      input_logger=InputLogger(),
                      initialization_rounds=20,
                      send_all_to_all=False,
                      check_interval=1,
                      delay=0.1,
                      halt_on_violation=True,
                      # invariant_check_name="check_everything",
                      invariant_check_name="InvariantChecker.check_liveness",
                      apps=apps)
|
Add config for POX l2_multi traces
|
Add config for POX l2_multi traces
|
Python
|
apache-2.0
|
jmiserez/sts,jmiserez/sts
|
8f41b0df8fa62b9bca031defe6eb91fae0219e56
|
writeboards/urls.py
|
writeboards/urls.py
|
from django.conf.urls.defaults import *
from models import Writeboard
writeboard_list_dict = {
'queryset': Writeboard.objects.all(),
}
urlpatterns = patterns('',
(r'$','django.views.generic.list_detail.object_list',
writeboard_list_dict),
)
|
Add writeboard list via generic views
|
Add writeboard list via generic views
|
Python
|
mit
|
rizumu/django-paste-organizer
|
Add writeboard list via generic views
|
from django.conf.urls.defaults import *
from models import Writeboard
writeboard_list_dict = {
'queryset': Writeboard.objects.all(),
}
urlpatterns = patterns('',
(r'$','django.views.generic.list_detail.object_list',
writeboard_list_dict),
)
|
<commit_before><commit_msg>Add writeboard list via generic views <commit_after>
|
from django.conf.urls.defaults import *
from models import Writeboard
writeboard_list_dict = {
'queryset': Writeboard.objects.all(),
}
urlpatterns = patterns('',
(r'$','django.views.generic.list_detail.object_list',
writeboard_list_dict),
)
|
Add writeboard list via generic views from django.conf.urls.defaults import *
from models import Writeboard
writeboard_list_dict = {
'queryset': Writeboard.objects.all(),
}
urlpatterns = patterns('',
(r'$','django.views.generic.list_detail.object_list',
writeboard_list_dict),
)
|
<commit_before><commit_msg>Add writeboard list via generic views <commit_after>from django.conf.urls.defaults import *
from models import Writeboard
writeboard_list_dict = {
'queryset': Writeboard.objects.all(),
}
urlpatterns = patterns('',
(r'$','django.views.generic.list_detail.object_list',
writeboard_list_dict),
)
|
|
162d7f14cbc3f705b018669448eda369473a6b5a
|
tools/get_digest.py
|
tools/get_digest.py
|
import sys
import hashlib
FILE_BUFFER_SIZE = 4096
def get_digest(file_path, digest_func='md5'):
digester = getattr(hashlib, digest_func, None)
if digester is None:
raise ValueError('Unknown digest method: ' + digest_func)
h = digester()
with open(file_path, 'rb') as f:
while True:
buffer_ = f.read(FILE_BUFFER_SIZE)
if buffer_ == '':
break
h.update(buffer_)
return h.hexdigest()
if __name__ == "__main__":
file_path = sys.argv[1]
print 'Digest of %s = %s' % (file_path, get_digest(file_path))
|
Add utility script to get a file's digest, useful for debugging purpose
|
Add utility script to get a file's digest, useful for debugging purpose
|
Python
|
lgpl-2.1
|
DirkHoffmann/nuxeo-drive,arameshkumar/base-nuxeo-drive,arameshkumar/nuxeo-drive,arameshkumar/nuxeo-drive,DirkHoffmann/nuxeo-drive,loopingz/nuxeo-drive,DirkHoffmann/nuxeo-drive,arameshkumar/base-nuxeo-drive,loopingz/nuxeo-drive,loopingz/nuxeo-drive,DirkHoffmann/nuxeo-drive,IsaacYangSLA/nuxeo-drive,rsoumyassdi/nuxeo-drive,arameshkumar/nuxeo-drive,ssdi-drive/nuxeo-drive,loopingz/nuxeo-drive,rsoumyassdi/nuxeo-drive,IsaacYangSLA/nuxeo-drive,DirkHoffmann/nuxeo-drive,ssdi-drive/nuxeo-drive,IsaacYangSLA/nuxeo-drive,arameshkumar/nuxeo-drive,IsaacYangSLA/nuxeo-drive,IsaacYangSLA/nuxeo-drive,ssdi-drive/nuxeo-drive,arameshkumar/base-nuxeo-drive,loopingz/nuxeo-drive,rsoumyassdi/nuxeo-drive,arameshkumar/base-nuxeo-drive,rsoumyassdi/nuxeo-drive
|
Add utility script to get a file's digest, useful for debugging purpose
|
import sys
import hashlib
FILE_BUFFER_SIZE = 4096
def get_digest(file_path, digest_func='md5'):
digester = getattr(hashlib, digest_func, None)
if digester is None:
raise ValueError('Unknown digest method: ' + digest_func)
h = digester()
with open(file_path, 'rb') as f:
while True:
buffer_ = f.read(FILE_BUFFER_SIZE)
if buffer_ == '':
break
h.update(buffer_)
return h.hexdigest()
if __name__ == "__main__":
file_path = sys.argv[1]
print 'Digest of %s = %s' % (file_path, get_digest(file_path))
|
<commit_before><commit_msg>Add utility script to get a file's digest, useful for debugging purpose<commit_after>
|
import sys
import hashlib
FILE_BUFFER_SIZE = 4096
def get_digest(file_path, digest_func='md5'):
digester = getattr(hashlib, digest_func, None)
if digester is None:
raise ValueError('Unknown digest method: ' + digest_func)
h = digester()
with open(file_path, 'rb') as f:
while True:
buffer_ = f.read(FILE_BUFFER_SIZE)
if buffer_ == '':
break
h.update(buffer_)
return h.hexdigest()
if __name__ == "__main__":
file_path = sys.argv[1]
print 'Digest of %s = %s' % (file_path, get_digest(file_path))
|
Add utility script to get a file's digest, useful for debugging purposeimport sys
import hashlib
FILE_BUFFER_SIZE = 4096
def get_digest(file_path, digest_func='md5'):
digester = getattr(hashlib, digest_func, None)
if digester is None:
raise ValueError('Unknown digest method: ' + digest_func)
h = digester()
with open(file_path, 'rb') as f:
while True:
buffer_ = f.read(FILE_BUFFER_SIZE)
if buffer_ == '':
break
h.update(buffer_)
return h.hexdigest()
if __name__ == "__main__":
file_path = sys.argv[1]
print 'Digest of %s = %s' % (file_path, get_digest(file_path))
|
<commit_before><commit_msg>Add utility script to get a file's digest, useful for debugging purpose<commit_after>import sys
import hashlib
FILE_BUFFER_SIZE = 4096
def get_digest(file_path, digest_func='md5'):
digester = getattr(hashlib, digest_func, None)
if digester is None:
raise ValueError('Unknown digest method: ' + digest_func)
h = digester()
with open(file_path, 'rb') as f:
while True:
buffer_ = f.read(FILE_BUFFER_SIZE)
if buffer_ == '':
break
h.update(buffer_)
return h.hexdigest()
if __name__ == "__main__":
file_path = sys.argv[1]
print 'Digest of %s = %s' % (file_path, get_digest(file_path))
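For comparison, a minimal Python 3 sketch of the same chunked-digest idea; the function name, chunk size and use of hashlib.new below are illustrative rather than part of the committed script.
import hashlib
def get_digest_py3(file_path, digest_func='md5', chunk_size=4096):
    # hashlib.new raises ValueError for unknown algorithm names.
    h = hashlib.new(digest_func)
    with open(file_path, 'rb') as f:
        # In Python 3 a binary read returns b'' at end of file.
        for chunk in iter(lambda: f.read(chunk_size), b''):
            h.update(chunk)
    return h.hexdigest()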
|
|
b98c6deaa713504a06a01f3397b42b0d310a7fb4
|
build/changelog.py
|
build/changelog.py
|
#!/usr/bin/env python
"""
changelog.py helps generate the CHANGELOG.md message for a particular release.
"""
import argparse
import subprocess
import shlex
import re
def run(cmd, *args, **kwargs):
return subprocess.check_output(shlex.split(cmd), *args, **kwargs)
def get_commit_ids(from_commit, to_commit):
cmd = "git log --format=%H --no-merges {from_commit}..{to_commit}"
commit_ids = run(cmd.format(from_commit=from_commit,
to_commit=to_commit)).splitlines()
return commit_ids
def get_commit_message(commit_id):
cmd = "git log --format=%B --max-count=1 {commit_id}".format(
commit_id=commit_id)
return run(cmd)
def fixes_issue_id(commit_message):
match = re.search(r"Fixes #(\d+)", commit_message)
if match:
return match.group(1)
def get_subject(commit_message):
return commit_message.splitlines()[0]
def get_changelog_message(commit_message):
issue_id = fixes_issue_id(commit_message)
if issue_id:
subject = get_subject(commit_message)
return "Fixes", "{subject} (#{issue_id})".format(subject=subject, issue_id=issue_id)
return None, get_subject(commit_message)
def get_latest_tag():
cmd = "git describe --tags --first-parent"
return run(cmd).split('-')[0]
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("from_version", nargs="?",
default=get_latest_tag(), help="start of changes")
parser.add_argument("to_commit", nargs="?",
default="HEAD", help="end of changes")
return parser.parse_args()
def main():
args = parse_args()
changelog = {}
for commit_id in get_commit_ids(args.from_version, args.to_commit):
commit_message = get_commit_message(commit_id)
group, line = get_changelog_message(commit_message)
changelog.setdefault(group, []).append(line)
if "Fixes" in changelog:
print "### Fixes"
print ""
for line in changelog["Fixes"]:
print "- {}".format(line)
print ""
if None in changelog:
print "### Miscellaneous"
print ""
for line in changelog[None]:
print "- {}".format(line)
if __name__ == "__main__":
main()
|
Add script to help generate CHANGELOG.md
|
Add script to help generate CHANGELOG.md
|
Python
|
apache-2.0
|
timothyhinrichs/opa,tsandall/opa,open-policy-agent/opa,tsandall/opa,open-policy-agent/opa,Eva-xiaohui-luo/opa,open-policy-agent/opa,timothyhinrichs/opa,open-policy-agent/opa,Eva-xiaohui-luo/opa,tsandall/opa,open-policy-agent/opa,timothyhinrichs/opa,Eva-xiaohui-luo/opa,tsandall/opa,open-policy-agent/opa,timothyhinrichs/opa,tsandall/opa,tsandall/opa,timothyhinrichs/opa
|
Add script to help generate CHANGELOG.md
|
#!/usr/bin/env python
"""
changelog.py helps generate the CHANGELOG.md message for a particular release.
"""
import argparse
import subprocess
import shlex
import re
def run(cmd, *args, **kwargs):
return subprocess.check_output(shlex.split(cmd), *args, **kwargs)
def get_commit_ids(from_commit, to_commit):
cmd = "git log --format=%H --no-merges {from_commit}..{to_commit}"
commit_ids = run(cmd.format(from_commit=from_commit,
to_commit=to_commit)).splitlines()
return commit_ids
def get_commit_message(commit_id):
cmd = "git log --format=%B --max-count=1 {commit_id}".format(
commit_id=commit_id)
return run(cmd)
def fixes_issue_id(commit_message):
match = re.search(r"Fixes #(\d+)", commit_message)
if match:
return match.group(1)
def get_subject(commit_message):
return commit_message.splitlines()[0]
def get_changelog_message(commit_message):
issue_id = fixes_issue_id(commit_message)
if issue_id:
subject = get_subject(commit_message)
return "Fixes", "{subject} (#{issue_id})".format(subject=subject, issue_id=issue_id)
return None, get_subject(commit_message)
def get_latest_tag():
cmd = "git describe --tags --first-parent"
return run(cmd).split('-')[0]
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("from_version", nargs="?",
default=get_latest_tag(), help="start of changes")
parser.add_argument("to_commit", nargs="?",
default="HEAD", help="end of changes")
return parser.parse_args()
def main():
args = parse_args()
changelog = {}
for commit_id in get_commit_ids(args.from_version, args.to_commit):
commit_message = get_commit_message(commit_id)
group, line = get_changelog_message(commit_message)
changelog.setdefault(group, []).append(line)
if "Fixes" in changelog:
print "### Fixes"
print ""
for line in changelog["Fixes"]:
print "- {}".format(line)
print ""
if None in changelog:
print "### Miscellaneous"
print ""
for line in changelog[None]:
print "- {}".format(line)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add script to help generate CHANGELOG.md<commit_after>
|
#!/usr/bin/env python
"""
changelog.py helps generate the CHANGELOG.md message for a particular release.
"""
import argparse
import subprocess
import shlex
import re
def run(cmd, *args, **kwargs):
return subprocess.check_output(shlex.split(cmd), *args, **kwargs)
def get_commit_ids(from_commit, to_commit):
cmd = "git log --format=%H --no-merges {from_commit}..{to_commit}"
commit_ids = run(cmd.format(from_commit=from_commit,
to_commit=to_commit)).splitlines()
return commit_ids
def get_commit_message(commit_id):
cmd = "git log --format=%B --max-count=1 {commit_id}".format(
commit_id=commit_id)
return run(cmd)
def fixes_issue_id(commit_message):
match = re.search(r"Fixes #(\d+)", commit_message)
if match:
return match.group(1)
def get_subject(commit_message):
return commit_message.splitlines()[0]
def get_changelog_message(commit_message):
issue_id = fixes_issue_id(commit_message)
if issue_id:
subject = get_subject(commit_message)
return "Fixes", "{subject} (#{issue_id})".format(subject=subject, issue_id=issue_id)
return None, get_subject(commit_message)
def get_latest_tag():
cmd = "git describe --tags --first-parent"
return run(cmd).split('-')[0]
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("from_version", nargs="?",
default=get_latest_tag(), help="start of changes")
parser.add_argument("to_commit", nargs="?",
default="HEAD", help="end of changes")
return parser.parse_args()
def main():
args = parse_args()
changelog = {}
for commit_id in get_commit_ids(args.from_version, args.to_commit):
commit_message = get_commit_message(commit_id)
group, line = get_changelog_message(commit_message)
changelog.setdefault(group, []).append(line)
if "Fixes" in changelog:
print "### Fixes"
print ""
for line in changelog["Fixes"]:
print "- {}".format(line)
print ""
if None in changelog:
print "### Miscellaneous"
print ""
for line in changelog[None]:
print "- {}".format(line)
if __name__ == "__main__":
main()
|
Add script to help generate CHANGELOG.md#!/usr/bin/env python
"""
changelog.py helps generate the CHANGELOG.md message for a particular release.
"""
import argparse
import subprocess
import shlex
import re
def run(cmd, *args, **kwargs):
return subprocess.check_output(shlex.split(cmd), *args, **kwargs)
def get_commit_ids(from_commit, to_commit):
cmd = "git log --format=%H --no-merges {from_commit}..{to_commit}"
commit_ids = run(cmd.format(from_commit=from_commit,
to_commit=to_commit)).splitlines()
return commit_ids
def get_commit_message(commit_id):
cmd = "git log --format=%B --max-count=1 {commit_id}".format(
commit_id=commit_id)
return run(cmd)
def fixes_issue_id(commit_message):
match = re.search(r"Fixes #(\d+)", commit_message)
if match:
return match.group(1)
def get_subject(commit_message):
return commit_message.splitlines()[0]
def get_changelog_message(commit_message):
issue_id = fixes_issue_id(commit_message)
if issue_id:
subject = get_subject(commit_message)
return "Fixes", "{subject} (#{issue_id})".format(subject=subject, issue_id=issue_id)
return None, get_subject(commit_message)
def get_latest_tag():
cmd = "git describe --tags --first-parent"
return run(cmd).split('-')[0]
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("from_version", nargs="?",
default=get_latest_tag(), help="start of changes")
parser.add_argument("to_commit", nargs="?",
default="HEAD", help="end of changes")
return parser.parse_args()
def main():
args = parse_args()
changelog = {}
for commit_id in get_commit_ids(args.from_version, args.to_commit):
commit_message = get_commit_message(commit_id)
group, line = get_changelog_message(commit_message)
changelog.setdefault(group, []).append(line)
if "Fixes" in changelog:
print "### Fixes"
print ""
for line in changelog["Fixes"]:
print "- {}".format(line)
print ""
if None in changelog:
print "### Miscellaneous"
print ""
for line in changelog[None]:
print "- {}".format(line)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add script to help generate CHANGELOG.md<commit_after>#!/usr/bin/env python
"""
changelog.py helps generate the CHANGELOG.md message for a particular release.
"""
import argparse
import subprocess
import shlex
import re
def run(cmd, *args, **kwargs):
return subprocess.check_output(shlex.split(cmd), *args, **kwargs)
def get_commit_ids(from_commit, to_commit):
cmd = "git log --format=%H --no-merges {from_commit}..{to_commit}"
commit_ids = run(cmd.format(from_commit=from_commit,
to_commit=to_commit)).splitlines()
return commit_ids
def get_commit_message(commit_id):
cmd = "git log --format=%B --max-count=1 {commit_id}".format(
commit_id=commit_id)
return run(cmd)
def fixes_issue_id(commit_message):
match = re.search(r"Fixes #(\d+)", commit_message)
if match:
return match.group(1)
def get_subject(commit_message):
return commit_message.splitlines()[0]
def get_changelog_message(commit_message):
issue_id = fixes_issue_id(commit_message)
if issue_id:
subject = get_subject(commit_message)
return "Fixes", "{subject} (#{issue_id})".format(subject=subject, issue_id=issue_id)
return None, get_subject(commit_message)
def get_latest_tag():
cmd = "git describe --tags --first-parent"
return run(cmd).split('-')[0]
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("from_version", nargs="?",
default=get_latest_tag(), help="start of changes")
parser.add_argument("to_commit", nargs="?",
default="HEAD", help="end of changes")
return parser.parse_args()
def main():
args = parse_args()
changelog = {}
for commit_id in get_commit_ids(args.from_version, args.to_commit):
commit_message = get_commit_message(commit_id)
group, line = get_changelog_message(commit_message)
changelog.setdefault(group, []).append(line)
if "Fixes" in changelog:
print "### Fixes"
print ""
for line in changelog["Fixes"]:
print "- {}".format(line)
print ""
if None in changelog:
print "### Miscellaneous"
print ""
for line in changelog[None]:
print "- {}".format(line)
if __name__ == "__main__":
main()
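The grouping hinges on the Fixes pattern: a commit whose body contains 'Fixes #<n>' is routed to the Fixes section with the issue number appended, everything else falls into Miscellaneous. A small illustrative check of that regular expression (the message text here is made up):
import re
message = "Improve error handling\n\nFixes #123"
match = re.search(r"Fixes #(\d+)", message)
print(match.group(1))  # prints '123', so this commit would be grouped under Fixes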
|
|
cb149a64cf969edef79528f052f96bc4a847a11c
|
bin/run_benchmark.py
|
bin/run_benchmark.py
|
import datetime
import itertools
import os
import subprocess
# Modify parameters here
out_directory = datetime.datetime.now().strftime('benchmark_%Y-%m-%d_%H-%M-%S')
dimension = 3
size = 50
ppc = 1
temperature = 0.0
iterations = 1
representations = ["SoA", "AoS"]
storages = ["unordered", "ordered"]
# add other combinations here
combination_keys = ["-r", "-e"]
combination_values = list(itertools.product(representations, storages))
# End of parameters
# Enumerate all combinations of parameters and run
if not os.path.exists(out_directory):
os.makedirs(out_directory)
args_base = ("benchmark", "-d", str(dimension), "-g", str(size), "-p", str(ppc), "-t", str(temperature), "-i", str(iterations))
for i in range(0, len(combination_values)):
file_name = ""
args_combination = ()
for j in range(0, len(combination_values[i])):
args_combination += (combination_keys[j], combination_values[i][j])
file_name += combination_values[i][j] + "_"
args = args_base + args_combination
popen = subprocess.Popen(args, stdout=subprocess.PIPE, universal_newlines=True)
popen.wait()
file_name = file_name[:-1] + ".txt"
f = open(os.path.join(out_directory, file_name), "w")
f.write(str(popen.stdout.read()))
|
Add a script to enumerate configurations for benchmarking.
|
Add a script to enumerate configurations for benchmarking.
|
Python
|
mit
|
pictools/pica,pictools/pica,pictools/pica
|
Add a script to enumerate configurations for benchmarking.
|
import datetime
import itertools
import os
import subprocess
# Modify parameters here
out_directory = datetime.datetime.now().strftime('benchmark_%Y-%m-%d_%H-%M-%S')
dimension = 3
size = 50
ppc = 1
temperature = 0.0
iterations = 1
representations = ["SoA", "AoS"]
storages = ["unordered", "ordered"]
# add other combinations here
combination_keys = ["-r", "-e"]
combination_values = list(itertools.product(representations, storages))
# End of parameters
# Enumerate all combinations of parameters and run
if not os.path.exists(out_directory):
os.makedirs(out_directory)
args_base = ("benchmark", "-d", str(dimension), "-g", str(size), "-p", str(ppc), "-t", str(temperature), "-i", str(iterations))
for i in range(0, len(combination_values)):
file_name = ""
args_combination = ()
for j in range(0, len(combination_values[i])):
args_combination += (combination_keys[j], combination_values[i][j])
file_name += combination_values[i][j] + "_"
args = args_base + args_combination
popen = subprocess.Popen(args, stdout=subprocess.PIPE, universal_newlines=True)
popen.wait()
file_name = file_name[:-1] + ".txt"
f = open(os.path.join(out_directory, file_name), "w")
f.write(str(popen.stdout.read()))
|
<commit_before><commit_msg>Add a script to enumerate configurations for benchmarking.<commit_after>
|
import datetime
import itertools
import os
import subprocess
# Modify parameters here
out_directory = datetime.datetime.now().strftime('benchmark_%Y-%m-%d_%H-%M-%S')
dimension = 3
size = 50
ppc = 1
temperature = 0.0
iterations = 1
representations = ["SoA", "AoS"]
storages = ["unordered", "ordered"]
# add other combinations here
combination_keys = ["-r", "-e"]
combination_values = list(itertools.product(representations, storages))
# End of parameters
# Enumerate all combinations of parameters and run
if not os.path.exists(out_directory):
os.makedirs(out_directory)
args_base = ("benchmark", "-d", str(dimension), "-g", str(size), "-p", str(ppc), "-t", str(temperature), "-i", str(iterations))
for i in range(0, len(combination_values)):
file_name = ""
args_combination = ()
for j in range(0, len(combination_values[i])):
args_combination += (combination_keys[j], combination_values[i][j])
file_name += combination_values[i][j] + "_"
args = args_base + args_combination
popen = subprocess.Popen(args, stdout=subprocess.PIPE, universal_newlines=True)
popen.wait()
file_name = file_name[:-1] + ".txt"
f = open(os.path.join(out_directory, file_name), "w")
f.write(str(popen.stdout.read()))
|
Add a script to enumerate configurations for benchmarking.import datetime
import itertools
import os
import subprocess
# Modify parameters here
out_directory = datetime.datetime.now().strftime('benchmark_%Y-%m-%d_%H-%M-%S')
dimension = 3
size = 50
ppc = 1
temperature = 0.0
iterations = 1
representations = ["SoA", "AoS"]
storages = ["unordered", "ordered"]
# add other combinations here
combination_keys = ["-r", "-e"]
combination_values = list(itertools.product(representations, storages))
# End of parameters
# Enumerate all combinations of parameters and run
if not os.path.exists(out_directory):
os.makedirs(out_directory)
args_base = ("benchmark", "-d", str(dimension), "-g", str(size), "-p", str(ppc), "-t", str(temperature), "-i", str(iterations))
for i in range(0, len(combination_values)):
file_name = ""
args_combination = ()
for j in range(0, len(combination_values[i])):
args_combination += (combination_keys[j], combination_values[i][j])
file_name += combination_values[i][j] + "_"
args = args_base + args_combination
popen = subprocess.Popen(args, stdout=subprocess.PIPE, universal_newlines=True)
popen.wait()
file_name = file_name[:-1] + ".txt"
f = open(os.path.join(out_directory, file_name), "w")
f.write(str(popen.stdout.read()))
|
<commit_before><commit_msg>Add a script to enumerate configurations for benchmarking.<commit_after>import datetime
import itertools
import os
import subprocess
# Modify parameters here
out_directory = datetime.datetime.now().strftime('benchmark_%Y-%m-%d_%H-%M-%S')
dimension = 3
size = 50
ppc = 1
temperature = 0.0
iterations = 1
representations = ["SoA", "AoS"]
storages = ["unordered", "ordered"]
# add other combinations here
combination_keys = ["-r", "-e"]
combination_values = list(itertools.product(representations, storages))
# End of parameters
# Enumerate all combinations of parameters and run
if not os.path.exists(out_directory):
os.makedirs(out_directory)
args_base = ("benchmark", "-d", str(dimension), "-g", str(size), "-p", str(ppc), "-t", str(temperature), "-i", str(iterations))
for i in range(0, len(combination_values)):
file_name = ""
args_combination = ()
for j in range(0, len(combination_values[i])):
args_combination += (combination_keys[j], combination_values[i][j])
file_name += combination_values[i][j] + "_"
args = args_base + args_combination
popen = subprocess.Popen(args, stdout=subprocess.PIPE, universal_newlines=True)
popen.wait()
file_name = file_name[:-1] + ".txt"
f = open(os.path.join(out_directory, file_name), "w")
f.write(str(popen.stdout.read()))
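To make the enumeration concrete: itertools.product over the two representations and two storages yields four combinations, and each value is paired with its flag before being appended to the base command. A short illustrative trace (the names mirror the script above):
import itertools
representations = ["SoA", "AoS"]
storages = ["unordered", "ordered"]
combination_keys = ["-r", "-e"]
for combo in itertools.product(representations, storages):
    args = ()
    for key, value in zip(combination_keys, combo):
        args += (key, value)
    print(args)
# ('-r', 'SoA', '-e', 'unordered'), ('-r', 'SoA', '-e', 'ordered'),
# ('-r', 'AoS', '-e', 'unordered'), ('-r', 'AoS', '-e', 'ordered')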
|
|
5a97d88326e9d365afed7ed798720d97ba6fcc97
|
tests/test_compat.py
|
tests/test_compat.py
|
import unittest
from mock import Mock, patch
from collectd_haproxy import compat
class CompatTests(unittest.TestCase):
@patch.object(compat, "PY3", False)
def test_iteritems_python2_uses_iteritems(self):
dictionary = Mock()
self.assertEqual(
compat.iteritems(dictionary),
dictionary.iteritems.return_value
)
@patch.object(compat, "PY3", True)
def test_iteritems_python3_uses_items(self):
dictionary = Mock()
self.assertEqual(
compat.iteritems(dictionary),
dictionary.items.return_value
)
@patch.object(compat, "PY3", True)
def test_coerce_long_python3_uses_int(self):
self.assertEqual(compat.coerce_long("123"), int("123"))
|
Add tests for the compat module.
|
Add tests for the compat module.
|
Python
|
mit
|
wglass/collectd-haproxy
|
Add tests for the compat module.
|
import unittest
from mock import Mock, patch
from collectd_haproxy import compat
class CompatTests(unittest.TestCase):
@patch.object(compat, "PY3", False)
def test_iteritems_python2_uses_iteritems(self):
dictionary = Mock()
self.assertEqual(
compat.iteritems(dictionary),
dictionary.iteritems.return_value
)
@patch.object(compat, "PY3", True)
def test_iteritems_python3_uses_items(self):
dictionary = Mock()
self.assertEqual(
compat.iteritems(dictionary),
dictionary.items.return_value
)
@patch.object(compat, "PY3", True)
def test_coerce_long_python3_uses_int(self):
self.assertEqual(compat.coerce_long("123"), int("123"))
|
<commit_before><commit_msg>Add tests for the compat module.<commit_after>
|
import unittest
from mock import Mock, patch
from collectd_haproxy import compat
class CompatTests(unittest.TestCase):
@patch.object(compat, "PY3", False)
def test_iteritems_python2_uses_iteritems(self):
dictionary = Mock()
self.assertEqual(
compat.iteritems(dictionary),
dictionary.iteritems.return_value
)
@patch.object(compat, "PY3", True)
def test_iteritems_python3_uses_items(self):
dictionary = Mock()
self.assertEqual(
compat.iteritems(dictionary),
dictionary.items.return_value
)
@patch.object(compat, "PY3", True)
def test_coerce_long_python3_uses_int(self):
self.assertEqual(compat.coerce_long("123"), int("123"))
|
Add tests for the compat module.import unittest
from mock import Mock, patch
from collectd_haproxy import compat
class CompatTests(unittest.TestCase):
@patch.object(compat, "PY3", False)
def test_iteritems_python2_uses_iteritems(self):
dictionary = Mock()
self.assertEqual(
compat.iteritems(dictionary),
dictionary.iteritems.return_value
)
@patch.object(compat, "PY3", True)
def test_iteritems_python3_uses_items(self):
dictionary = Mock()
self.assertEqual(
compat.iteritems(dictionary),
dictionary.items.return_value
)
@patch.object(compat, "PY3", True)
def test_coerce_long_python3_uses_int(self):
self.assertEqual(compat.coerce_long("123"), int("123"))
|
<commit_before><commit_msg>Add tests for the compat module.<commit_after>import unittest
from mock import Mock, patch
from collectd_haproxy import compat
class CompatTests(unittest.TestCase):
@patch.object(compat, "PY3", False)
def test_iteritems_python2_uses_iteritems(self):
dictionary = Mock()
self.assertEqual(
compat.iteritems(dictionary),
dictionary.iteritems.return_value
)
@patch.object(compat, "PY3", True)
def test_iteritems_python3_uses_items(self):
dictionary = Mock()
self.assertEqual(
compat.iteritems(dictionary),
dictionary.items.return_value
)
@patch.object(compat, "PY3", True)
def test_coerce_long_python3_uses_int(self):
self.assertEqual(compat.coerce_long("123"), int("123"))
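The collectd_haproxy.compat module itself is not part of this record; a minimal sketch that these three tests would pass against could look like the following, offered purely as an assumption for illustration and not as the project's actual implementation.
import sys
PY3 = sys.version_info[0] == 3
def iteritems(dictionary):
    # The tests patch PY3, so the flag must be read at call time.
    if PY3:
        return dictionary.items()
    return dictionary.iteritems()
def coerce_long(value):
    if PY3:
        return int(value)
    return long(value)  # Python 2 branch only; 'long' does not exist on Python 3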
|
|
7f4e15c3bdc9e53f15670d88538d8a9723532ceb
|
incubation/parse_subtitles_py/print_subtitles.py
|
incubation/parse_subtitles_py/print_subtitles.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Jérémie DECOCK (http://www.jdhp.org)
# http://en.wikipedia.org/wiki/SubRip
# http://forum.doom9.org/showthread.php?p=470941#post470941
#
# srt format:
# Subtitle number
# Start time --> End time
# Text of subtitle (one or more lines)
# Blank line
#
# The time format used is
# hours:minutes:seconds,milliseconds
import argparse
import os
LINES_IGNORED = ("www.addic7ed.com\n",
"Synced by YYeTs, corrected by gloriabg\n",
"Subtitles downloaded from www.OpenSubtitles.org\n",
"Sync and corrections by n17t01\n")
WORDS_IGNORED = ("<u>", "<i>", "<b>", "</u>", "</i>", "</b>")
def main():
# PARSE OPTIONS #######################################
parser = argparse.ArgumentParser(description='...')
parser.add_argument('filenames', nargs='+', metavar='FILE', help='file to read')
args = parser.parse_args()
# PROCESS FILES #######################################
for filename in args.filenames:
print(os.linesep, (8 * "*"), filename, (8 * "*"), os.linesep)
fd = open(filename)
read_mode = "srt_number"
for line in fd.readlines():
if read_mode == "srt_number":
# TODO if... else error
read_mode = "srt_time"
elif read_mode == "srt_time":
# TODO if... else error
read_mode = "srt_text"
elif line in ("\n", "\r", "\r\n"):
# TODO if... else error
read_mode = "srt_number"
elif line not in LINES_IGNORED:
for word in line.split():
if word not in WORDS_IGNORED:
print(word, end=' ')
print("")
fd.close()
if __name__ == "__main__":
main()
|
Add a project in the 'incubator'.
|
Add a project in the 'incubator'.
|
Python
|
mit
|
jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets
|
Add a project in the 'incubator'.
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Jérémie DECOCK (http://www.jdhp.org)
# http://en.wikipedia.org/wiki/SubRip
# http://forum.doom9.org/showthread.php?p=470941#post470941
#
# srt format:
# Subtitle number
# Start time --> End time
# Text of subtitle (one or more lines)
# Blank line
#
# The time format used is
# hours:minutes:seconds,milliseconds
import argparse
import os
LINES_IGNORED = ("www.addic7ed.com\n",
"Synced by YYeTs, corrected by gloriabg\n",
"Subtitles downloaded from www.OpenSubtitles.org\n",
"Sync and corrections by n17t01\n")
WORDS_IGNORED = ("<u>", "<i>", "<b>", "</u>", "</i>", "</b>")
def main():
# PARSE OPTIONS #######################################
parser = argparse.ArgumentParser(description='...')
parser.add_argument('filenames', nargs='+', metavar='FILE', help='file to read')
args = parser.parse_args()
# PROCESS FILES #######################################
for filename in args.filenames:
print(os.linesep, (8 * "*"), filename, (8 * "*"), os.linesep)
fd = open(filename)
read_mode = "srt_number"
for line in fd.readlines():
if read_mode == "srt_number":
# TODO if... else error
read_mode = "srt_time"
elif read_mode == "srt_time":
# TODO if... else error
read_mode = "srt_text"
elif line in ("\n", "\r", "\r\n"):
# TODO if... else error
read_mode = "srt_number"
elif line not in LINES_IGNORED:
for word in line.split():
if word not in WORDS_IGNORED:
print(word, end=' ')
print("")
fd.close()
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add a project in the 'incubator'.<commit_after>
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Jérémie DECOCK (http://www.jdhp.org)
# http://en.wikipedia.org/wiki/SubRip
# http://forum.doom9.org/showthread.php?p=470941#post470941
#
# srt format:
# Subtitle number
# Start time --> End time
# Text of subtitle (one or more lines)
# Blank line
#
# The time format used is
# hours:minutes:seconds,milliseconds
import argparse
import os
LINES_IGNORED = ("www.addic7ed.com\n",
"Synced by YYeTs, corrected by gloriabg\n",
"Subtitles downloaded from www.OpenSubtitles.org\n",
"Sync and corrections by n17t01\n")
WORDS_IGNORED = ("<u>", "<i>", "<b>", "</u>", "</i>", "</b>")
def main():
# PARSE OPTIONS #######################################
parser = argparse.ArgumentParser(description='...')
parser.add_argument('filenames', nargs='+', metavar='FILE', help='file to read')
args = parser.parse_args()
# PROCESS FILES #######################################
for filename in args.filenames:
print(os.linesep, (8 * "*"), filename, (8 * "*"), os.linesep)
fd = open(filename)
read_mode = "srt_number"
for line in fd.readlines():
if read_mode == "srt_number":
# TODO if... else error
read_mode = "srt_time"
elif read_mode == "srt_time":
# TODO if... else error
read_mode = "srt_text"
elif line in ("\n", "\r", "\r\n"):
# TODO if... else error
read_mode = "srt_number"
elif line not in LINES_IGNORED:
for word in line.split():
if word not in WORDS_IGNORED:
print(word, end=' ')
print("")
fd.close()
if __name__ == "__main__":
main()
|
Add a project in the 'incubator'.#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Jérémie DECOCK (http://www.jdhp.org)
# http://en.wikipedia.org/wiki/SubRip
# http://forum.doom9.org/showthread.php?p=470941#post470941
#
# srt format:
# Subtitle number
# Start time --> End time
# Text of subtitle (one or more lines)
# Blank line
#
# The time format used is
# hours:minutes:seconds,milliseconds
import argparse
import os
LINES_IGNORED = ("www.addic7ed.com\n",
"Synced by YYeTs, corrected by gloriabg\n",
"Subtitles downloaded from www.OpenSubtitles.org\n",
"Sync and corrections by n17t01\n")
WORDS_IGNORED = ("<u>", "<i>", "<b>", "</u>", "</i>", "</b>")
def main():
# PARSE OPTIONS #######################################
parser = argparse.ArgumentParser(description='...')
parser.add_argument('filenames', nargs='+', metavar='FILE', help='file to read')
args = parser.parse_args()
# PROCESS FILES #######################################
for filename in args.filenames:
print(os.linesep, (8 * "*"), filename, (8 * "*"), os.linesep)
fd = open(filename)
read_mode = "srt_number"
for line in fd.readlines():
if read_mode == "srt_number":
# TODO if... else error
read_mode = "srt_time"
elif read_mode == "srt_time":
# TODO if... else error
read_mode = "srt_text"
elif line in ("\n", "\r", "\r\n"):
# TODO if... else error
read_mode = "srt_number"
elif line not in LINES_IGNORED:
for word in line.split():
if word not in WORDS_IGNORED:
print(word, end=' ')
print("")
fd.close()
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add a project in the 'incubator'.<commit_after>#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Jérémie DECOCK (http://www.jdhp.org)
# http://en.wikipedia.org/wiki/SubRip
# http://forum.doom9.org/showthread.php?p=470941#post470941
#
# srt format:
# Subtitle number
# Start time --> End time
# Text of subtitle (one or more lines)
# Blank line
#
# The time format used is
# hours:minutes:seconds,milliseconds
import argparse
import os
LINES_IGNORED = ("www.addic7ed.com\n",
"Synced by YYeTs, corrected by gloriabg\n",
"Subtitles downloaded from www.OpenSubtitles.org\n",
"Sync and corrections by n17t01\n")
WORDS_IGNORED = ("<u>", "<i>", "<b>", "</u>", "</i>", "</b>")
def main():
# PARSE OPTIONS #######################################
parser = argparse.ArgumentParser(description='...')
parser.add_argument('filenames', nargs='+', metavar='FILE', help='file to read')
args = parser.parse_args()
# PROCESS FILES #######################################
for filename in args.filenames:
print(os.linesep, (8 * "*"), filename, (8 * "*"), os.linesep)
fd = open(filename)
read_mode = "srt_number"
for line in fd.readlines():
if read_mode == "srt_number":
# TODO if... else error
read_mode = "srt_time"
elif read_mode == "srt_time":
# TODO if... else error
read_mode = "srt_text"
elif line in ("\n", "\r", "\r\n"):
# TODO if... else error
read_mode = "srt_number"
elif line not in LINES_IGNORED:
for word in line.split():
if word not in WORDS_IGNORED:
print(word, end=' ')
print("")
fd.close()
if __name__ == "__main__":
main()
|
|
8404143e9335374979d028ffabbc9f7369a11e80
|
admin/genkwh_remesa_cobrament_cron.py
|
admin/genkwh_remesa_cobrament_cron.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from erppeek import Client
import configdb
import datetime
'''
Script que agafa les inversions en esborrany i genera les factures de cobrament i les afegeix a la remesa.
python admin/genkwh_remesa_cobrament_cron.py
'''
def crear_remesa_generation(O):
gen = O.GenerationkwhInvestment
wiz_gen = O.WizardGenerationkwhInvestmentPayment
inv_to_do = gen.search([('draft','=',True)])
for inv in inv_to_do:
wiz_gen.do_payment(inv)
#INIT
O = Client(**configdb.erppeek)
crear_remesa_generation(O)
# vim: et ts=4 sw=4
|
Add script to do investment generation payment order
|
Add script to do investment generation payment order
|
Python
|
agpl-3.0
|
Som-Energia/invoice-janitor
|
Add script to do investment generation payment order
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from erppeek import Client
import configdb
import datetime
'''
Script que agafa les inversions en esborrany i genera les factures de cobrament i les afegeix a la remesa.
python admin/genkwh_remesa_cobrament_cron.py
'''
def crear_remesa_generation(O):
gen = O.GenerationkwhInvestment
wiz_gen = O.WizardGenerationkwhInvestmentPayment
inv_to_do = gen.search([('draft','=',True)])
for inv in inv_to_do:
wiz_gen.do_payment(inv)
#INIT
O = Client(**configdb.erppeek)
crear_remesa_generation(O)
# vim: et ts=4 sw=4
|
<commit_before><commit_msg>Add script to do investment generation payment order<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from erppeek import Client
import configdb
import datetime
'''
Script que agafa les inversions en esborrany i genera les factures de cobrament i les afegeix a la remesa.
python admin/genkwh_remesa_cobrament_cron.py
'''
def crear_remesa_generation(O):
gen = O.GenerationkwhInvestment
wiz_gen = O.WizardGenerationkwhInvestmentPayment
inv_to_do = gen.search([('draft','=',True)])
for inv in inv_to_do:
wiz_gen.do_payment(inv)
#INIT
O = Client(**configdb.erppeek)
crear_remesa_generation(O)
# vim: et ts=4 sw=4
|
Add script to do investment generation payment order#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from erppeek import Client
import configdb
import datetime
'''
Script que agafa les inversions en esborrany i genera les factures de cobrament i les afegeix a la remesa.
python admin/genkwh_remesa_cobrament_cron.py
'''
def crear_remesa_generation(O):
gen = O.GenerationkwhInvestment
wiz_gen = O.WizardGenerationkwhInvestmentPayment
inv_to_do = gen.search([('draft','=',True)])
for inv in inv_to_do:
wiz_gen.do_payment(inv)
#INIT
O = Client(**configdb.erppeek)
crear_remesa_generation(O)
# vim: et ts=4 sw=4
|
<commit_before><commit_msg>Add script to do investment generation payment order<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from erppeek import Client
import configdb
import datetime
'''
Script que agafa les inversions en esborrany i genera les factures de cobrament i les afegeix a la remesa.
python admin/genkwh_remesa_cobrament_cron.py
'''
def crear_remesa_generation(O):
gen = O.GenerationkwhInvestment
wiz_gen = O.WizardGenerationkwhInvestmentPayment
inv_to_do = gen.search([('draft','=',True)])
for inv in inv_to_do:
wiz_gen.do_payment(inv)
#INIT
O = Client(**configdb.erppeek)
crear_remesa_generation(O)
# vim: et ts=4 sw=4
|
|
a53670aaf5440d1f4a0306cbbaacf8aae4baef26
|
extract_exif.py
|
extract_exif.py
|
#!/usr/bin/env python
import sys,exifread
def main():
if len(sys.argv) != 2:
print("Usage: %s <JPG>" % sys.argv[0])
sys.exit(1)
JPG = sys.argv[1]
jpgfh = open(JPG, 'rb')
tags = exifread.process_file(jpgfh)
dttag = 'EXIF DateTimeOriginal'
dtsubtag = 'EXIF SubSecTimeOriginal'
if dttag in tags:
print(tags[dttag])
if dtsubtag in tags:
print(tags[dtsubtag])
jpgfh.close()
if __name__ == '__main__':
main()
|
Add a EXIF extract utility. Later the EXIF "EXIF DateTimeOriginal" and "EXIF SubsecTimeOriginal" will be used to identify the same pic by taken time and subsec.
|
Add a EXIF extract utility. Later the EXIF "EXIF DateTimeOriginal" and
"EXIF SubsecTimeOriginal" will be used to identify the same pic by
taken time and subsec.
|
Python
|
apache-2.0
|
feifeijs/find_the_same_file
|
Add a EXIF extract utility. Later the EXIF "EXIF DateTimeOriginal" and
"EXIF SubsecTimeOriginal" will be used to identify the same pic by
taken time and subsec.
|
#!/usr/bin/env python
import sys,exifread
def main():
if len(sys.argv) != 2:
print("Usage: %s <JPG>" % sys.argv[0])
sys.exit(1)
JPG = sys.argv[1]
jpgfh = open(JPG, 'rb')
tags = exifread.process_file(jpgfh)
dttag = 'EXIF DateTimeOriginal'
dtsubtag = 'EXIF SubSecTimeOriginal'
if dttag in tags:
print(tags[dttag])
if dtsubtag in tags:
print(tags[dtsubtag])
jpgfh.close()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a EXIF extract utility. Later the EXIF "EXIF DateTimeOriginal" and
"EXIF SubsecTimeOriginal" will be used to identify the same pic by
taken time and subsec.<commit_after>
|
#!/usr/bin/env python
import sys,exifread
def main():
if len(sys.argv) != 2:
print("Usage: %s <JPG>" % sys.argv[0])
sys.exit(1)
JPG = sys.argv[1]
jpgfh = open(JPG, 'rb')
tags = exifread.process_file(jpgfh)
dttag = 'EXIF DateTimeOriginal'
dtsubtag = 'EXIF SubSecTimeOriginal'
if dttag in tags:
print(tags[dttag])
if dtsubtag in tags:
print(tags[dtsubtag])
jpgfh.close()
if __name__ == '__main__':
main()
|
Add a EXIF extract utility. Later the EXIF "EXIF DateTimeOriginal" and
"EXIF SubsecTimeOriginal" will be used to identify the same pic by
taken time and subsec.#!/usr/bin/env python
import sys,exifread
def main():
if len(sys.argv) != 2:
print("Usage: %s <JPG>" % sys.argv[0])
sys.exit(1)
JPG = sys.argv[1]
jpgfh = open(JPG, 'rb')
tags = exifread.process_file(jpgfh)
dttag = 'EXIF DateTimeOriginal'
dtsubtag = 'EXIF SubSecTimeOriginal'
if dttag in tags:
print(tags[dttag])
if dtsubtag in tags:
print(tags[dtsubtag])
jpgfh.close()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a EXIF extract utility. Later the EXIF "EXIF DateTimeOriginal" and
"EXIF SubsecTimeOriginal" will be used to identify the same pic by
taken time and subsec.<commit_after>#!/usr/bin/env python
import sys,exifread
def main():
if len(sys.argv) != 2:
print("Usage: %s <JPG>" % sys.argv[0])
sys.exit(1)
JPG = sys.argv[1]
jpgfh = open(JPG, 'rb')
tags = exifread.process_file(jpgfh)
dttag = 'EXIF DateTimeOriginal'
dtsubtag = 'EXIF SubSecTimeOriginal'
if dttag in tags:
print(tags[dttag])
if dtsubtag in tags:
print(tags[dtsubtag])
jpgfh.close()
if __name__ == '__main__':
main()
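The commit message says the two tags will later identify the same picture by capture time plus sub-second value; one possible way to turn the tags read above into such a key is sketched below (the function name and the fallback values are illustrative assumptions):
def capture_key(tags):
    # exifread returns tag objects whose str() is the printable value,
    # e.g. '2015:06:01 12:30:05' and '123'.
    dt = str(tags.get('EXIF DateTimeOriginal', ''))
    subsec = str(tags.get('EXIF SubSecTimeOriginal', '0'))
    return (dt, subsec)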
|
|
464abf7c047471ce31ef701d896e5be8077fd269
|
call_by_object.py
|
call_by_object.py
|
#!/usr/bin/env python
# _*_ coding: utf-8 _*_
u""" From first entry of https://github.com/taizilongxu/interview_python.
Conditional call by object in python.
Add id() function to illustrate whether we are referring to the same or different objects in action.
This is to show that Python is using pass by object with different behaviours for mutable or immutable
parameters, which is different from call by value or call by reference.
"""
def call_by_object_immutable(a):
u""" immutable example of integer argument.
Python creates new immutable object in function namespace.
"""
a = 2
print "a in function id = {}".format(id(a))
def call_by_object_mutable(a):
u""" http://effbot.org/zone/call-by-object.htm mutable example of list argument. """
a.append(2)
print "a in function id = {}".format(id(a))
if __name__ == "__main__":
immu_a = 1
print "before calling immu_a = {}, id = {}".format(immu_a, id(immu_a))
call_by_object_immutable(immu_a)
print "after calling immu_a = {}, id = {}".format(immu_a, id(immu_a))
mu_a = [1, 2, 3]
print "before calling mu_a = {}, id = {}".format(mu_a, id(mu_a))
call_by_object_mutable(mu_a)
print "after calling mu_a = {}, id = {}".format(mu_a, id(mu_a))
|
Add call by object illustration example with object ids.
|
Add call by object illustration example with object ids.
Signed-off-by: SJ Huang <55a36c562e010d4b156739b1c231e1aa17113c8e@gmail.com>
|
Python
|
apache-2.0
|
sjh/python
|
Add call by object illustration example with object ids.
Signed-off-by: SJ Huang <55a36c562e010d4b156739b1c231e1aa17113c8e@gmail.com>
|
#!/usr/bin/env python
# _*_ coding: utf-8 _*_
u""" From first entry of https://github.com/taizilongxu/interview_python.
Conditional call by object in python.
Add id() function to illustrate whether we are referring to the same or different objects in action.
This is to show that Python is using pass by object with different behaviours for mutable or immutable
parameters, which is different from call by value or call by reference.
"""
def call_by_object_immutable(a):
u""" immutable example of integer argument.
Python creates new immutable object in function namespace.
"""
a = 2
print "a in function id = {}".format(id(a))
def call_by_object_mutable(a):
u""" http://effbot.org/zone/call-by-object.htm mutable example of list argument. """
a.append(2)
print "a in function id = {}".format(id(a))
if __name__ == "__main__":
immu_a = 1
print "before calling immu_a = {}, id = {}".format(immu_a, id(immu_a))
call_by_object_immutable(immu_a)
print "after calling immu_a = {}, id = {}".format(immu_a, id(immu_a))
mu_a = [1, 2, 3]
print "before calling mu_a = {}, id = {}".format(mu_a, id(mu_a))
call_by_object_mutable(mu_a)
print "after calling mu_a = {}, id = {}".format(mu_a, id(mu_a))
|
<commit_before><commit_msg>Add call by object illustration example with object ids.
Signed-off-by: SJ Huang <55a36c562e010d4b156739b1c231e1aa17113c8e@gmail.com><commit_after>
|
#!/usr/bin/env python
# _*_ coding: utf-8 _*_
u""" From first entry of https://github.com/taizilongxu/interview_python.
Conditional call by object in python.
Add id() function to illustrate whether we are referring to the same or different objects in action.
This is to show that Python is using pass by object with different behaviours for mutable or immutable
parameters, which is different from call by value or call by reference.
"""
def call_by_object_immutable(a):
u""" immutable example of integer argument.
Python creates new immutable object in function namespace.
"""
a = 2
print "a in function id = {}".format(id(a))
def call_by_object_mutable(a):
u""" http://effbot.org/zone/call-by-object.htm mutable example of list argument. """
a.append(2)
print "a in function id = {}".format(id(a))
if __name__ == "__main__":
immu_a = 1
print "before calling immu_a = {}, id = {}".format(immu_a, id(immu_a))
call_by_object_immutable(immu_a)
print "after calling immu_a = {}, id = {}".format(immu_a, id(immu_a))
mu_a = [1, 2, 3]
print "before calling mu_a = {}, id = {}".format(mu_a, id(mu_a))
call_by_object_mutable(mu_a)
print "after calling mu_a = {}, id = {}".format(mu_a, id(mu_a))
|
Add call by object illustration example with object ids.
Signed-off-by: SJ Huang <55a36c562e010d4b156739b1c231e1aa17113c8e@gmail.com>#!/usr/bin/env python
# _*_ coding: utf-8 _*_
u""" From first entry of https://github.com/taizilongxu/interview_python.
Conditional call by object in python.
Add id() function to illustrate whether we are referring to the same or different objects in action.
This is to show that Python is using pass by object with different behaviours for mutable or immutable
parameters, which is different from call by value or call by reference.
"""
def call_by_object_immutable(a):
u""" immutable example of integer argument.
Python creates new immutable object in function namespace.
"""
a = 2
print "a in function id = {}".format(id(a))
def call_by_object_mutable(a):
u""" http://effbot.org/zone/call-by-object.htm mutable example of list argument. """
a.append(2)
print "a in function id = {}".format(id(a))
if __name__ == "__main__":
immu_a = 1
print "before calling immu_a = {}, id = {}".format(immu_a, id(immu_a))
call_by_object_immutable(immu_a)
print "after calling immu_a = {}, id = {}".format(immu_a, id(immu_a))
mu_a = [1, 2, 3]
print "before calling mu_a = {}, id = {}".format(mu_a, id(mu_a))
call_by_object_mutable(mu_a)
print "after calling mu_a = {}, id = {}".format(mu_a, id(mu_a))
|
<commit_before><commit_msg>Add call by object illustration example with object ids.
Signed-off-by: SJ Huang <55a36c562e010d4b156739b1c231e1aa17113c8e@gmail.com><commit_after>#!/usr/bin/env python
# _*_ coding: utf-8 _*_
u""" From first entry of https://github.com/taizilongxu/interview_python.
Conditional call by object in python.
Add id() function to illustrate whether we are referring to the same or different objects in action.
This is to show that Python is using pass by object with different behaviours for mutable or immutable
parameters, which is different from call by value or call by reference.
"""
def call_by_object_immutable(a):
u""" immutable example of integer argument.
Python creates new immutable object in function namespace.
"""
a = 2
print "a in function id = {}".format(id(a))
def call_by_object_mutable(a):
u""" http://effbot.org/zone/call-by-object.htm mutable example of list argument. """
a.append(2)
print "a in function id = {}".format(id(a))
if __name__ == "__main__":
immu_a = 1
print "before calling immu_a = {}, id = {}".format(immu_a, id(immu_a))
call_by_object_immutable(immu_a)
print "after calling immu_a = {}, id = {}".format(immu_a, id(immu_a))
mu_a = [1, 2, 3]
print "before calling mu_a = {}, id = {}".format(mu_a, id(mu_a))
call_by_object_mutable(mu_a)
print "after calling mu_a = {}, id = {}".format(mu_a, id(mu_a))
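The point of the id() calls above: rebinding an int inside a function only creates a new local binding, while list.append mutates the very object the caller passed in. A compact Python 3 illustration of the same behaviour, with arbitrary names:
def rebind(x):
    x = 2            # new local binding; the caller's variable is untouched
def mutate(items):
    items.append(2)  # the shared object is changed in place
n, lst = 1, [1]
rebind(n)
mutate(lst)
print(n, lst)        # prints: 1 [1, 2]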
|
|
912322e083751ce13ec0a9b1469bc3c9eef8f39c
|
pombola/core/migrations/0002_add_related_name.py
|
pombola/core/migrations/0002_add_related_name.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='identifier',
name='content_type',
field=models.ForeignKey(related_name='pombola_identifier_set', to='contenttypes.ContentType'),
preserve_default=True,
),
]
|
Add corresponding migration for the Identifier related_name conflict
|
Add corresponding migration for the Identifier related_name conflict
|
Python
|
agpl-3.0
|
mysociety/pombola,mysociety/pombola,mysociety/pombola,mysociety/pombola,mysociety/pombola,mysociety/pombola
|
Add corresponding migration for the Identifier related_name conflict
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='identifier',
name='content_type',
field=models.ForeignKey(related_name='pombola_identifier_set', to='contenttypes.ContentType'),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add corresponding migration for the Identifier related_name conflict<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='identifier',
name='content_type',
field=models.ForeignKey(related_name='pombola_identifier_set', to='contenttypes.ContentType'),
preserve_default=True,
),
]
|
Add corresponding migration for the Identifier related_name conflict# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='identifier',
name='content_type',
field=models.ForeignKey(related_name='pombola_identifier_set', to='contenttypes.ContentType'),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add corresponding migration for the Identifier related_name conflict<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='identifier',
name='content_type',
field=models.ForeignKey(related_name='pombola_identifier_set', to='contenttypes.ContentType'),
preserve_default=True,
),
]
|
|
ab639b991e9b44f533adf44eebbec20d62f560f6
|
run_time/src/gae_server_test/test_web_handler.py
|
run_time/src/gae_server_test/test_web_handler.py
|
import webapp2
import unittest
import webtest
from gae_server.incremental_fonts import IncrementalFonts
class AppTest(unittest.TestCase):
def setUp(self):
# Create a WSGI application.
app = webapp2.WSGIApplication([('/', IncrementalFonts)])
# Wrap the app with WebTest’s TestApp.
self.testapp = webtest.TestApp(app)
# Test the handler.
def testIncrementalFonts(self):
response = self.testapp.get('/')
self.assertEqual(response.status_int, 200)
self.assertEqual(response.normal_body, 'Hello World!')
self.assertEqual(response.content_type, 'text/plain')
|
Test for server handler is added
|
Test for server handler is added
|
Python
|
apache-2.0
|
bstell/TachyFont,bstell/TachyFont,googlefonts/TachyFont,googlei18n/TachyFont,moyogo/tachyfont,googlei18n/TachyFont,googlefonts/TachyFont,moyogo/tachyfont,googlefonts/TachyFont,bstell/TachyFont,googlefonts/TachyFont,bstell/TachyFont,googlei18n/TachyFont,moyogo/tachyfont,googlei18n/TachyFont,moyogo/tachyfont,bstell/TachyFont,googlefonts/TachyFont,moyogo/tachyfont,googlei18n/TachyFont
|
Test for server handler is added
|
import webapp2
import unittest
import webtest
from gae_server.incremental_fonts import IncrementalFonts
class AppTest(unittest.TestCase):
def setUp(self):
# Create a WSGI application.
app = webapp2.WSGIApplication([('/', IncrementalFonts)])
# Wrap the app with WebTest’s TestApp.
self.testapp = webtest.TestApp(app)
# Test the handler.
def testIncrementalFonts(self):
response = self.testapp.get('/')
self.assertEqual(response.status_int, 200)
self.assertEqual(response.normal_body, 'Hello World!')
self.assertEqual(response.content_type, 'text/plain')
|
<commit_before><commit_msg>Test for server handler is added<commit_after>
|
import webapp2
import unittest
import webtest
from gae_server.incremental_fonts import IncrementalFonts
class AppTest(unittest.TestCase):
def setUp(self):
# Create a WSGI application.
app = webapp2.WSGIApplication([('/', IncrementalFonts)])
# Wrap the app with WebTest’s TestApp.
self.testapp = webtest.TestApp(app)
# Test the handler.
def testIncrementalFonts(self):
response = self.testapp.get('/')
self.assertEqual(response.status_int, 200)
self.assertEqual(response.normal_body, 'Hello World!')
self.assertEqual(response.content_type, 'text/plain')
|
Test for server handler is addedimport webapp2
import unittest
import webtest
from gae_server.incremental_fonts import IncrementalFonts
class AppTest(unittest.TestCase):
def setUp(self):
# Create a WSGI application.
app = webapp2.WSGIApplication([('/', IncrementalFonts)])
# Wrap the app with WebTest’s TestApp.
self.testapp = webtest.TestApp(app)
# Test the handler.
def testIncrementalFonts(self):
response = self.testapp.get('/')
self.assertEqual(response.status_int, 200)
self.assertEqual(response.normal_body, 'Hello World!')
self.assertEqual(response.content_type, 'text/plain')
|
<commit_before><commit_msg>Test for server handler is added<commit_after>import webapp2
import unittest
import webtest
from gae_server.incremental_fonts import IncrementalFonts
class AppTest(unittest.TestCase):
def setUp(self):
# Create a WSGI application.
app = webapp2.WSGIApplication([('/', IncrementalFonts)])
# Wrap the app with WebTest’s TestApp.
self.testapp = webtest.TestApp(app)
# Test the handler.
def testIncrementalFonts(self):
response = self.testapp.get('/')
self.assertEqual(response.status_int, 200)
self.assertEqual(response.normal_body, 'Hello World!')
self.assertEqual(response.content_type, 'text/plain')
|
|
e40c295967e8d0b1a190c173dedebefe9eb89462
|
Python/66_PlusOne.py
|
Python/66_PlusOne.py
|
class Solution(object):
def plusOne(self, digits):
"""
:type digits: List[int]
:rtype: List[int]
"""
digits[len(digits)-1] += 1
if digits[len(digits)-1] < 10:
return digits
for i in xrange(len(digits)-1,0,-1):
if digits[i] == 10:
digits[i] = 0
digits[i-1] += 1
else:
break
if digits[0] == 10:
digits[0] = 0
digits = [1] + digits
return digits
digits = [0,9,7,9]
print Solution().plusOne(digits)
|
Add solution for 66 Plus One.
|
Add solution for 66 Plus One.
|
Python
|
mit
|
comicxmz001/LeetCode,comicxmz001/LeetCode
|
Add solution for 66 Plus One.
|
class Solution(object):
def plusOne(self, digits):
"""
:type digits: List[int]
:rtype: List[int]
"""
digits[len(digits)-1] += 1
if digits[len(digits)-1] < 10:
return digits
for i in xrange(len(digits)-1,0,-1):
if digits[i] == 10:
digits[i] = 0
digits[i-1] += 1
else:
break
if digits[0] == 10:
digits[0] = 0
digits = [1] + digits
return digits
digits = [0,9,7,9]
print Solution().plusOne(digits)
|
<commit_before><commit_msg>Add solution for 66 Plus One.<commit_after>
|
class Solution(object):
def plusOne(self, digits):
"""
:type digits: List[int]
:rtype: List[int]
"""
digits[len(digits)-1] += 1
if digits[len(digits)-1] < 10:
return digits
for i in xrange(len(digits)-1,0,-1):
if digits[i] == 10:
digits[i] = 0
digits[i-1] += 1
else:
break
if digits[0] == 10:
digits[0] = 0
digits = [1] + digits
return digits
digits = [0,9,7,9]
print Solution().plusOne(digits)
|
Add solution for 66 Plus One.class Solution(object):
def plusOne(self, digits):
"""
:type digits: List[int]
:rtype: List[int]
"""
digits[len(digits)-1] += 1
if digits[len(digits)-1] < 10:
return digits
for i in xrange(len(digits)-1,0,-1):
if digits[i] == 10:
digits[i] = 0
digits[i-1] += 1
else:
break
if digits[0] == 10:
digits[0] = 0
digits = [1] + digits
return digits
digits = [0,9,7,9]
print Solution().plusOne(digits)
|
<commit_before><commit_msg>Add solution for 66 Plus One.<commit_after>class Solution(object):
def plusOne(self, digits):
"""
:type digits: List[int]
:rtype: List[int]
"""
digits[len(digits)-1] += 1
if digits[len(digits)-1] < 10:
return digits
for i in xrange(len(digits)-1,0,-1):
if digits[i] == 10:
digits[i] = 0
digits[i-1] += 1
else:
break
if digits[0] == 10:
digits[0] = 0
digits = [1] + digits
return digits
digits = [0,9,7,9]
print Solution().plusOne(digits)
|
|
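A quick way to sanity-check the carry handling in the solution above is to compare it against plain integer arithmetic for inputs without leading zeros. A small sketch with a hypothetical helper, not part of the original submission:

def plus_one_reference(digits):
    # Join the digits into an int, add one, then split back into digits.
    n = int("".join(map(str, digits))) + 1
    return [int(c) for c in str(n)]

# Agrees with the list-based solution on carry-heavy cases.
assert plus_one_reference([9, 9, 9]) == [1, 0, 0, 0]
assert plus_one_reference([1, 2, 3]) == [1, 2, 4]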
56e5eee44edfc1aae719a12eb5638316237fc55a
|
Python/twitterBot.py
|
Python/twitterBot.py
|
import tweepy
import markovify
import sys, random, time
ck = ''
cs = ''
ak = ''
ase = ''
auth = tweepy.OAuthHandler(ck, cs)
auth.set_access_token(ak, ase)
api = tweepy.API(auth)
# print(api.statuses_lookup(100))
# print(api.me())
# Python 3's built-in open() cannot read URLs, so fetch the dialogue files over HTTP.
from urllib.request import urlopen
with urlopen('https://raw.githubusercontent.com/gastonstat/StarWars/master/Text_files/EpisodeIV_dialogues.txt') as f:
    ep4 = f.read().decode('utf-8')
with urlopen('https://raw.githubusercontent.com/gastonstat/StarWars/master/Text_files/EpisodeV_dialogues.txt') as f:
    ep5 = f.read().decode('utf-8')
with urlopen('https://raw.githubusercontent.com/gastonstat/StarWars/master/Text_files/EpisodeVI_dialogues.txt') as f:
    ep6 = f.read().decode('utf-8')
ep4lines = ep4.split('\n')
ep5lines = ep5.split('\n')
ep6lines = ep6.split('\n')
mep4 = markovify.Text(ep4)
mep5 = markovify.Text(ep5)
mep6 = markovify.Text(ep6)
model1 = markovify.combine([mep4, mep5], [1, 1])
model2 = markovify.combine([model1, mep6], [2, 1])
for i in range(3):
print(model2.make_short_sentence(280))
# api.update_status(model2.make_short_sentence(280))
|
Add a Twitter bot that creates dialog from Star Wars text using Markov models
|
Add a Twitter bot that creates dialog from Star Wars text using Markov models
|
Python
|
mit
|
mckennapsean/code-examples,mckennapsean/code-examples,mckennapsean/code-examples,mckennapsean/code-examples,mckennapsean/code-examples,mckennapsean/code-examples,mckennapsean/code-examples
|
Add a Twitter bot that creates dialog from Star Wars text using Markov models
|
import tweepy
import markovify
import sys, random, time
ck = ''
cs = ''
ak = ''
ase = ''
auth = tweepy.OAuthHandler(ck, cs)
auth.set_access_token(ak, ase)
api = tweepy.API(auth)
# print(api.statuses_lookup(100))
# print(api.me())
# Python 3's built-in open() cannot read URLs, so fetch the dialogue files over HTTP.
from urllib.request import urlopen
with urlopen('https://raw.githubusercontent.com/gastonstat/StarWars/master/Text_files/EpisodeIV_dialogues.txt') as f:
    ep4 = f.read().decode('utf-8')
with urlopen('https://raw.githubusercontent.com/gastonstat/StarWars/master/Text_files/EpisodeV_dialogues.txt') as f:
    ep5 = f.read().decode('utf-8')
with urlopen('https://raw.githubusercontent.com/gastonstat/StarWars/master/Text_files/EpisodeVI_dialogues.txt') as f:
    ep6 = f.read().decode('utf-8')
ep4lines = ep4.split('\n')
ep5lines = ep5.split('\n')
ep6lines = ep6.split('\n')
mep4 = markovify.Text(ep4)
mep5 = markovify.Text(ep5)
mep6 = markovify.Text(ep6)
model1 = markovify.combine([mep4, mep5], [1, 1])
model2 = markovify.combine([model1, mep6], [2, 1])
for i in range(3):
print(model2.make_short_sentence(280))
# api.update_status(model2.make_short_sentence(280))
|
<commit_before><commit_msg>Add a Twitter bot that creates dialog from Star Wars text using Markov models<commit_after>
|
import tweepy
import markovify
import sys, random, time
ck = ''
cs = ''
ak = ''
ase = ''
auth = tweepy.OAuthHandler(ck, cs)
auth.set_access_token(ak, ase)
api = tweepy.API(auth)
# print(api.statuses_lookup(100))
# print(api.me())
# Python 3's built-in open() cannot read URLs, so fetch the dialogue files over HTTP.
from urllib.request import urlopen
with urlopen('https://raw.githubusercontent.com/gastonstat/StarWars/master/Text_files/EpisodeIV_dialogues.txt') as f:
    ep4 = f.read().decode('utf-8')
with urlopen('https://raw.githubusercontent.com/gastonstat/StarWars/master/Text_files/EpisodeV_dialogues.txt') as f:
    ep5 = f.read().decode('utf-8')
with urlopen('https://raw.githubusercontent.com/gastonstat/StarWars/master/Text_files/EpisodeVI_dialogues.txt') as f:
    ep6 = f.read().decode('utf-8')
ep4lines = ep4.split('\n')
ep5lines = ep5.split('\n')
ep6lines = ep6.split('\n')
mep4 = markovify.Text(ep4)
mep5 = markovify.Text(ep5)
mep6 = markovify.Text(ep6)
model1 = markovify.combine([mep4, mep5], [1, 1])
model2 = markovify.combine([model1, mep6], [2, 1])
for i in range(3):
print(model2.make_short_sentence(280))
# api.update_status(model2.make_short_sentence(280))
|
Add a Twitter bot that creates dialog from Star Wars text using Markov modelsimport tweepy
import markovify
import sys, random, time
ck = ''
cs = ''
ak = ''
ase = ''
auth = tweepy.OAuthHandler(ck, cs)
auth.set_access_token(ak, ase)
api = tweepy.API(auth)
# print(api.statuses_lookup(100))
# print(api.me())
# Python 3's built-in open() cannot read URLs, so fetch the dialogue files over HTTP.
from urllib.request import urlopen
with urlopen('https://raw.githubusercontent.com/gastonstat/StarWars/master/Text_files/EpisodeIV_dialogues.txt') as f:
    ep4 = f.read().decode('utf-8')
with urlopen('https://raw.githubusercontent.com/gastonstat/StarWars/master/Text_files/EpisodeV_dialogues.txt') as f:
    ep5 = f.read().decode('utf-8')
with urlopen('https://raw.githubusercontent.com/gastonstat/StarWars/master/Text_files/EpisodeVI_dialogues.txt') as f:
    ep6 = f.read().decode('utf-8')
ep4lines = ep4.split('\n')
ep5lines = ep5.split('\n')
ep6lines = ep6.split('\n')
mep4 = markovify.Text(ep4)
mep5 = markovify.Text(ep5)
mep6 = markovify.Text(ep6)
model1 = markovify.combine([mep4, mep5], [1, 1])
model2 = markovify.combine([model1, mep6], [2, 1])
for i in range(3):
print(model2.make_short_sentence(280))
# api.update_status(model2.make_short_sentence(280))
|
<commit_before><commit_msg>Add a Twitter bot that creates dialog from Star Wars text using Markov models<commit_after>import tweepy
import markovify
import sys, random, time
ck = ''
cs = ''
ak = ''
ase = ''
auth = tweepy.OAuthHandler(ck, cs)
auth.set_access_token(ak, ase)
api = tweepy.API(auth)
# print(api.statuses_lookup(100))
# print(api.me())
# Python 3's built-in open() cannot read URLs, so fetch the dialogue files over HTTP.
from urllib.request import urlopen
with urlopen('https://raw.githubusercontent.com/gastonstat/StarWars/master/Text_files/EpisodeIV_dialogues.txt') as f:
    ep4 = f.read().decode('utf-8')
with urlopen('https://raw.githubusercontent.com/gastonstat/StarWars/master/Text_files/EpisodeV_dialogues.txt') as f:
    ep5 = f.read().decode('utf-8')
with urlopen('https://raw.githubusercontent.com/gastonstat/StarWars/master/Text_files/EpisodeVI_dialogues.txt') as f:
    ep6 = f.read().decode('utf-8')
ep4lines = ep4.split('\n')
ep5lines = ep5.split('\n')
ep6lines = ep6.split('\n')
mep4 = markovify.Text(ep4)
mep5 = markovify.Text(ep5)
mep6 = markovify.Text(ep6)
model1 = markovify.combine([mep4, mep5], [1, 1])
model2 = markovify.combine([model1, mep6], [2, 1])
for i in range(3):
print(model2.make_short_sentence(280))
# api.update_status(model2.make_short_sentence(280))
|
|
c0955ea64452808d97f4cd741ea6eb6fc1eaee20
|
tests/test_character.py
|
tests/test_character.py
|
import npc
class TestCreation:
"""Test different instantiation behaviors"""
def test_dict(self):
char = npc.Character({"name": ["hello"]})
assert char["name"] == ["hello"]
def test_kwargs(self):
char = npc.Character(name=["hello"])
assert char["name"] == ["hello"]
def test_both(self):
char = npc.Character(attributes={"name": ["hello"], "profession": ["tailor"]}, name=["nope"])
assert char["name"] == ["nope"]
assert char["profession"] == ["tailor"]
class TestGetFirst:
def test_normal(self):
char = npc.Character(name=["hello", "goodbye"])
assert char.get_first("name") == "hello"
def test_desc(self):
char = npc.Character(description="Fee Fie Foe Fum")
assert char.get_first("description") == "Fee Fie Foe Fum"
def test_not_present(self):
char = npc.Character()
assert char.get_first("nope") == None
class TestGetRemaining:
def test_normal(self):
char = npc.Character(name=["hello", "goodbye"])
assert char.get_remaining("name") == ["goodbye"]
def test_desc(self):
char = npc.Character(description="Fee Fie Foe Fum")
assert char.get_remaining("description") == "Fee Fie Foe Fum"
def test_not_present(self):
char = npc.Character()
assert char.get_remaining("nope") == []
class TestAppend:
def test_normal(self):
char = npc.Character()
char.append("title", "The Stern")
assert char["title"] == ["The Stern"]
def test_desc(self):
char = npc.Character()
char.append("description", "Hello hello")
char.append("description", " baby, you called")
assert char["description"] == "Hello hello baby, you called"
def test_append_rank():
char = npc.Character()
char.append_rank("Knights of the Round Table", "Dancer")
assert char["rank"]["Knights of the Round Table"] == ["Dancer"]
|
Add tests for Character class
|
Add tests for Character class
|
Python
|
mit
|
aurule/npc,aurule/npc
|
Add tests for Character class
|
import npc
class TestCreation:
"""Test different instantiation behaviors"""
def test_dict(self):
char = npc.Character({"name": ["hello"]})
assert char["name"] == ["hello"]
def test_kwargs(self):
char = npc.Character(name=["hello"])
assert char["name"] == ["hello"]
def test_both(self):
char = npc.Character(attributes={"name": ["hello"], "profession": ["tailor"]}, name=["nope"])
assert char["name"] == ["nope"]
assert char["profession"] == ["tailor"]
class TestGetFirst:
def test_normal(self):
char = npc.Character(name=["hello", "goodbye"])
assert char.get_first("name") == "hello"
def test_desc(self):
char = npc.Character(description="Fee Fie Foe Fum")
assert char.get_first("description") == "Fee Fie Foe Fum"
def test_not_present(self):
char = npc.Character()
assert char.get_first("nope") == None
class TestGetRemaining:
def test_normal(self):
char = npc.Character(name=["hello", "goodbye"])
assert char.get_remaining("name") == ["goodbye"]
def test_desc(self):
char = npc.Character(description="Fee Fie Foe Fum")
assert char.get_remaining("description") == "Fee Fie Foe Fum"
def test_not_present(self):
char = npc.Character()
assert char.get_remaining("nope") == []
class TestAppend:
def test_normal(self):
char = npc.Character()
char.append("title", "The Stern")
assert char["title"] == ["The Stern"]
def test_desc(self):
char = npc.Character()
char.append("description", "Hello hello")
char.append("description", " baby, you called")
assert char["description"] == "Hello hello baby, you called"
def test_append_rank():
char = npc.Character()
char.append_rank("Knights of the Round Table", "Dancer")
assert char["rank"]["Knights of the Round Table"] == ["Dancer"]
|
<commit_before><commit_msg>Add tests for Character class<commit_after>
|
import npc
class TestCreation:
"""Test different instantiation behaviors"""
def test_dict(self):
char = npc.Character({"name": ["hello"]})
assert char["name"] == ["hello"]
def test_kwargs(self):
char = npc.Character(name=["hello"])
assert char["name"] == ["hello"]
def test_both(self):
char = npc.Character(attributes={"name": ["hello"], "profession": ["tailor"]}, name=["nope"])
assert char["name"] == ["nope"]
assert char["profession"] == ["tailor"]
class TestGetFirst:
def test_normal(self):
char = npc.Character(name=["hello", "goodbye"])
assert char.get_first("name") == "hello"
def test_desc(self):
char = npc.Character(description="Fee Fie Foe Fum")
assert char.get_first("description") == "Fee Fie Foe Fum"
def test_not_present(self):
char = npc.Character()
assert char.get_first("nope") == None
class TestGetRemaining:
def test_normal(self):
char = npc.Character(name=["hello", "goodbye"])
assert char.get_remaining("name") == ["goodbye"]
def test_desc(self):
char = npc.Character(description="Fee Fie Foe Fum")
assert char.get_remaining("description") == "Fee Fie Foe Fum"
def test_not_present(self):
char = npc.Character()
assert char.get_remaining("nope") == []
class TestAppend:
def test_normal(self):
char = npc.Character()
char.append("title", "The Stern")
assert char["title"] == ["The Stern"]
def test_desc(self):
char = npc.Character()
char.append("description", "Hello hello")
char.append("description", " baby, you called")
assert char["description"] == "Hello hello baby, you called"
def test_append_rank():
char = npc.Character()
char.append_rank("Knights of the Round Table", "Dancer")
assert char["rank"]["Knights of the Round Table"] == ["Dancer"]
|
Add tests for Character classimport npc
class TestCreation:
"""Test different instantiation behaviors"""
def test_dict(self):
char = npc.Character({"name": ["hello"]})
assert char["name"] == ["hello"]
def test_kwargs(self):
char = npc.Character(name=["hello"])
assert char["name"] == ["hello"]
def test_both(self):
char = npc.Character(attributes={"name": ["hello"], "profession": ["tailor"]}, name=["nope"])
assert char["name"] == ["nope"]
assert char["profession"] == ["tailor"]
class TestGetFirst:
def test_normal(self):
char = npc.Character(name=["hello", "goodbye"])
assert char.get_first("name") == "hello"
def test_desc(self):
char = npc.Character(description="Fee Fie Foe Fum")
assert char.get_first("description") == "Fee Fie Foe Fum"
def test_not_present(self):
char = npc.Character()
assert char.get_first("nope") == None
class TestGetRemaining:
def test_normal(self):
char = npc.Character(name=["hello", "goodbye"])
assert char.get_remaining("name") == ["goodbye"]
def test_desc(self):
char = npc.Character(description="Fee Fie Foe Fum")
assert char.get_remaining("description") == "Fee Fie Foe Fum"
def test_not_present(self):
char = npc.Character()
assert char.get_remaining("nope") == []
class TestAppend:
def test_normal(self):
char = npc.Character()
char.append("title", "The Stern")
assert char["title"] == ["The Stern"]
def test_desc(self):
char = npc.Character()
char.append("description", "Hello hello")
char.append("description", " baby, you called")
assert char["description"] == "Hello hello baby, you called"
def test_append_rank():
char = npc.Character()
char.append_rank("Knights of the Round Table", "Dancer")
assert char["rank"]["Knights of the Round Table"] == ["Dancer"]
|
<commit_before><commit_msg>Add tests for Character class<commit_after>import npc
class TestCreation:
"""Test different instantiation behaviors"""
def test_dict(self):
char = npc.Character({"name": ["hello"]})
assert char["name"] == ["hello"]
def test_kwargs(self):
char = npc.Character(name=["hello"])
assert char["name"] == ["hello"]
def test_both(self):
char = npc.Character(attributes={"name": ["hello"], "profession": ["tailor"]}, name=["nope"])
assert char["name"] == ["nope"]
assert char["profession"] == ["tailor"]
class TestGetFirst:
def test_normal(self):
char = npc.Character(name=["hello", "goodbye"])
assert char.get_first("name") == "hello"
def test_desc(self):
char = npc.Character(description="Fee Fie Foe Fum")
assert char.get_first("description") == "Fee Fie Foe Fum"
def test_not_present(self):
char = npc.Character()
assert char.get_first("nope") == None
class TestGetRemaining:
def test_normal(self):
char = npc.Character(name=["hello", "goodbye"])
assert char.get_remaining("name") == ["goodbye"]
def test_desc(self):
char = npc.Character(description="Fee Fie Foe Fum")
assert char.get_remaining("description") == "Fee Fie Foe Fum"
def test_not_present(self):
char = npc.Character()
assert char.get_remaining("nope") == []
class TestAppend:
def test_normal(self):
char = npc.Character()
char.append("title", "The Stern")
assert char["title"] == ["The Stern"]
def test_desc(self):
char = npc.Character()
char.append("description", "Hello hello")
char.append("description", " baby, you called")
assert char["description"] == "Hello hello baby, you called"
def test_append_rank():
char = npc.Character()
char.append_rank("Knights of the Round Table", "Dancer")
assert char["rank"]["Knights of the Round Table"] == ["Dancer"]
|
|
9b2a2f7aeb0f24c6c5e7cbb474cf377a89dd48d6
|
evaluation/collectStatistics.py
|
evaluation/collectStatistics.py
|
import packages.project as project
import packages.primitive as primitive
import packages.utils as utils
import packages.processing
import packages.io
import argparse
from matplotlib import pyplot as plt
import matplotlib.mlab as mlab
import numpy as np
from scipy.stats import norm,kstest,skewtest,kurtosistest,normaltest
compareToGt = False
itmin=0
itmax=3
################################################################################
## Command line parsing
parser = argparse.ArgumentParser(description='Collect full run statistics.')
parser.add_argument('projectdir')
args = parser.parse_args()
projectdir = args.projectdir
if projectdir[-1] == '/':
projectdir = projectdir[:-1]
projectname = projectdir.split('/')[-1]
cloudfile = projectdir+'/cloud.ply'
cloud = packages.io.readPointCloudFromPly(cloudfile)
print 'Processing project ', projectname
sigma_ref = 1.
if compareToGt:
projectfile = projectdir+'/gt/'+projectname+'.prj'
gtlinesfile = projectdir+'/gt/primitives.csv'
gtassignfile = projectdir+'/gt/points_primitives.csv'
project = project.PyProject(projectfile)
gtlines = primitive.readPrimitivesFromFile(gtlinesfile)
gtassign = packages.io.readPointAssignementFromFiles(gtassignfile)
gtlines = packages.processing.removeUnassignedPrimitives(gtlines, gtassign)
gtassign = packages.processing.removeUnassignedPoint(gtlines, gtassign)
############################################################################
## Process noise
sigma_ref = project.kernels[0].stdev()
gtDistrib = utils.distanceToPrimitives(cloud, gtassign, gtlines)
print "Ground truth sigma ", np.sqrt(np.var(gtDistrib)) / sigma_ref
for it in range(itmin, itmax):
print "Processing iteration",it
linesfile_it = projectdir+'/primitives_merged_it'+str(it)+'.csv'
assignfile_it = projectdir+'/points_primitives_it'+str(it)+'.csv'
################################################################################
## Reading input files
lines_it = primitive.readPrimitivesFromFile(linesfile_it)
assign_it = packages.io.readPointAssignementFromFiles(assignfile_it)
lines_it = packages.processing.removeUnassignedPrimitives(lines_it, assign_it)
assign_it = packages.processing.removeUnassignedPoint(lines_it, assign_it)
################################################################################
## Process noise
# Compute the distance between each point and its assigned primitive
distrib_it = utils.distanceToPrimitives(cloud, assign_it, lines_it)
print "Estimated sigma ", np.sqrt(np.var(distrib_it)) / sigma_ref
|
Add a new script processing different iterations of a run and computing statistics
|
Add a new script processing different iterations of a run and computing statistics
|
Python
|
apache-2.0
|
amonszpart/globOpt,amonszpart/globOpt,amonszpart/globOpt,NUAAXXY/globOpt,NUAAXXY/globOpt,NUAAXXY/globOpt,NUAAXXY/globOpt,NUAAXXY/globOpt,amonszpart/globOpt,amonszpart/globOpt,amonszpart/globOpt,NUAAXXY/globOpt
|
Add a new script processing different iterations of a run and computing statistics
|
import packages.project as project
import packages.primitive as primitive
import packages.utils as utils
import packages.processing
import packages.io
import argparse
from matplotlib import pyplot as plt
import matplotlib.mlab as mlab
import numpy as np
from scipy.stats import norm,kstest,skewtest,kurtosistest,normaltest
compareToGt = False
itmin=0
itmax=3
################################################################################
## Command line parsing
parser = argparse.ArgumentParser(description='Collect full run statistics.')
parser.add_argument('projectdir')
args = parser.parse_args()
projectdir = args.projectdir
if projectdir[-1] == '/':
projectdir = projectdir[:-1]
projectname = projectdir.split('/')[-1]
cloudfile = projectdir+'/cloud.ply'
cloud = packages.io.readPointCloudFromPly(cloudfile)
print 'Processing project ', projectname
sigma_ref = 1.
if compareToGt:
projectfile = projectdir+'/gt/'+projectname+'.prj'
gtlinesfile = projectdir+'/gt/primitives.csv'
gtassignfile = projectdir+'/gt/points_primitives.csv'
project = project.PyProject(projectfile)
gtlines = primitive.readPrimitivesFromFile(gtlinesfile)
gtassign = packages.io.readPointAssignementFromFiles(gtassignfile)
gtlines = packages.processing.removeUnassignedPrimitives(gtlines, gtassign)
gtassign = packages.processing.removeUnassignedPoint(gtlines, gtassign)
############################################################################
## Process noise
sigma_ref = project.kernels[0].stdev()
gtDistrib = utils.distanceToPrimitives(cloud, gtassign, gtlines)
print "Ground truth sigma ", np.sqrt(np.var(gtDistrib)) / sigma_ref
for it in range(itmin, itmax):
print "Processing iteration",it
linesfile_it = projectdir+'/primitives_merged_it'+str(it)+'.csv'
assignfile_it = projectdir+'/points_primitives_it'+str(it)+'.csv'
################################################################################
## Reading input files
lines_it = primitive.readPrimitivesFromFile(linesfile_it)
assign_it = packages.io.readPointAssignementFromFiles(assignfile_it)
lines_it = packages.processing.removeUnassignedPrimitives(lines_it, assign_it)
assign_it = packages.processing.removeUnassignedPoint(lines_it, assign_it)
################################################################################
## Process noise
# Compute the distance between each point and its assigned primitive
distrib_it = utils.distanceToPrimitives(cloud, assign_it, lines_it)
print "Estimated sigma ", np.sqrt(np.var(distrib_it)) / sigma_ref
|
<commit_before><commit_msg>Add a new script processing different iterations of a run and computing statistics<commit_after>
|
import packages.project as project
import packages.primitive as primitive
import packages.utils as utils
import packages.processing
import packages.io
import argparse
from matplotlib import pyplot as plt
import matplotlib.mlab as mlab
import numpy as np
from scipy.stats import norm,kstest,skewtest,kurtosistest,normaltest
compareToGt = False
itmin=0
itmax=3
################################################################################
## Command line parsing
parser = argparse.ArgumentParser(description='Collect full run statistics.')
parser.add_argument('projectdir')
args = parser.parse_args()
projectdir = args.projectdir
if projectdir[-1] == '/':
projectdir = projectdir[:-1]
projectname = projectdir.split('/')[-1]
cloudfile = projectdir+'/cloud.ply'
cloud = packages.io.readPointCloudFromPly(cloudfile)
print 'Processing project ', projectname
sigma_ref = 1.
if compareToGt:
projectfile = projectdir+'/gt/'+projectname+'.prj'
gtlinesfile = projectdir+'/gt/primitives.csv'
gtassignfile = projectdir+'/gt/points_primitives.csv'
project = project.PyProject(projectfile)
gtlines = primitive.readPrimitivesFromFile(gtlinesfile)
gtassign = packages.io.readPointAssignementFromFiles(gtassignfile)
gtlines = packages.processing.removeUnassignedPrimitives(gtlines, gtassign)
gtassign = packages.processing.removeUnassignedPoint(gtlines, gtassign)
############################################################################
## Process noise
sigma_ref = project.kernels[0].stdev()
gtDistrib = utils.distanceToPrimitives(cloud, gtassign, gtlines)
print "Ground truth sigma ", np.sqrt(np.var(gtDistrib)) / sigma_ref
for it in range(itmin, itmax):
print "Processing iteration",it
linesfile_it = projectdir+'/primitives_merged_it'+str(it)+'.csv'
assignfile_it = projectdir+'/points_primitives_it'+str(it)+'.csv'
################################################################################
## Reading input files
lines_it = primitive.readPrimitivesFromFile(linesfile_it)
assign_it = packages.io.readPointAssignementFromFiles(assignfile_it)
lines_it = packages.processing.removeUnassignedPrimitives(lines_it, assign_it)
assign_it = packages.processing.removeUnassignedPoint(lines_it, assign_it)
################################################################################
## Process noise
# Compute the distance between each point and its assigned primitive
distrib_it = utils.distanceToPrimitives(cloud, assign_it, lines_it)
print "Estimated sigma ", np.sqrt(np.var(distrib_it)) / sigma_ref
|
Add a new script processing different iterations of a run and computing statisticsimport packages.project as project
import packages.primitive as primitive
import packages.utils as utils
import packages.processing
import packages.io
import argparse
from matplotlib import pyplot as plt
import matplotlib.mlab as mlab
import numpy as np
from scipy.stats import norm,kstest,skewtest,kurtosistest,normaltest
compareToGt = False
itmin=0
itmax=3
################################################################################
## Command line parsing
parser = argparse.ArgumentParser(description='Collect full run statistics.')
parser.add_argument('projectdir')
args = parser.parse_args()
projectdir = args.projectdir
if projectdir[-1] == '/':
projectdir = projectdir[:-1]
projectname = projectdir.split('/')[-1]
cloudfile = projectdir+'/cloud.ply'
cloud = packages.io.readPointCloudFromPly(cloudfile)
print 'Processing project ', projectname
sigma_ref = 1.
if compareToGt:
projectfile = projectdir+'/gt/'+projectname+'.prj'
gtlinesfile = projectdir+'/gt/primitives.csv'
gtassignfile = projectdir+'/gt/points_primitives.csv'
project = project.PyProject(projectfile)
gtlines = primitive.readPrimitivesFromFile(gtlinesfile)
gtassign = packages.io.readPointAssignementFromFiles(gtassignfile)
gtlines = packages.processing.removeUnassignedPrimitives(gtlines, gtassign)
gtassign = packages.processing.removeUnassignedPoint(gtlines, gtassign)
############################################################################
## Process noise
sigma_ref = project.kernels[0].stdev()
gtDistrib = utils.distanceToPrimitives(cloud, gtassign, gtlines)
print "Ground truth sigma ", np.sqrt(np.var(gtDistrib)) / sigma_ref
for it in range(itmin, itmax):
print "Processing iteration",it
linesfile_it = projectdir+'/primitives_merged_it'+str(it)+'.csv'
assignfile_it = projectdir+'/points_primitives_it'+str(it)+'.csv'
################################################################################
## Reading input files
lines_it = primitive.readPrimitivesFromFile(linesfile_it)
assign_it = packages.io.readPointAssignementFromFiles(assignfile_it)
lines_it = packages.processing.removeUnassignedPrimitives(lines_it, assign_it)
assign_it = packages.processing.removeUnassignedPoint(lines_it, assign_it)
################################################################################
## Process noise
# Compute the distance between each point and its assigned primitive
distrib_it = utils.distanceToPrimitives(cloud, assign_it, lines_it)
print "Estimated sigma ", np.sqrt(np.var(distrib_it)) / sigma_ref
|
<commit_before><commit_msg>Add a new script processing different iterations of a run and computing statistics<commit_after>import packages.project as project
import packages.primitive as primitive
import packages.utils as utils
import packages.processing
import packages.io
import argparse
from matplotlib import pyplot as plt
import matplotlib.mlab as mlab
import numpy as np
from scipy.stats import norm,kstest,skewtest,kurtosistest,normaltest
compareToGt = False
itmin=0
itmax=3
################################################################################
## Command line parsing
parser = argparse.ArgumentParser(description='Collect full run statistics.')
parser.add_argument('projectdir')
args = parser.parse_args()
projectdir = args.projectdir
if projectdir[-1] == '/':
projectdir = projectdir[:-1]
projectname = projectdir.split('/')[-1]
cloudfile = projectdir+'/cloud.ply'
cloud = packages.io.readPointCloudFromPly(cloudfile)
print 'Processing project ', projectname
sigma_ref = 1.
if compareToGt:
projectfile = projectdir+'/gt/'+projectname+'.prj'
gtlinesfile = projectdir+'/gt/primitives.csv'
gtassignfile = projectdir+'/gt/points_primitives.csv'
project = project.PyProject(projectfile)
gtlines = primitive.readPrimitivesFromFile(gtlinesfile)
gtassign = packages.io.readPointAssignementFromFiles(gtassignfile)
gtlines = packages.processing.removeUnassignedPrimitives(gtlines, gtassign)
gtassign = packages.processing.removeUnassignedPoint(gtlines, gtassign)
############################################################################
## Process noise
sigma_ref = project.kernels[0].stdev()
gtDistrib = utils.distanceToPrimitives(cloud, gtassign, gtlines)
print "Ground truth sigma ", np.sqrt(np.var(gtDistrib)) / sigma_ref
for it in range(itmin, itmax):
print "Processing iteration",it
linesfile_it = projectdir+'/primitives_merged_it'+str(it)+'.csv'
assignfile_it = projectdir+'/points_primitives_it'+str(it)+'.csv'
################################################################################
## Reading input files
lines_it = primitive.readPrimitivesFromFile(linesfile_it)
assign_it = packages.io.readPointAssignementFromFiles(assignfile_it)
lines_it = packages.processing.removeUnassignedPrimitives(lines_it, assign_it)
assign_it = packages.processing.removeUnassignedPoint(lines_it, assign_it)
################################################################################
## Process noise
# Compute the distance between each point and its assigned primitive
distrib_it = utils.distanceToPrimitives(cloud, assign_it, lines_it)
print "Estimated sigma ", np.sqrt(np.var(distrib_it)) / sigma_ref
|
|
07eb99ca0fa82266dd183195ef5d03f4b0457d59
|
tests/test_utilities.py
|
tests/test_utilities.py
|
"""test_eniric.py"""
import pytest
import numpy as np
from eniric.utilities import get_spectrum_name, wav_selector
# Test using hypothesis
from hypothesis import given, example
import hypothesis.strategies as st
def test_get_spectrum_name():
""" """
test = ("PHOENIX-ACES_spectra/Z-0.0/lte02800-4.50"
"-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes_wave.dat")
assert get_spectrum_name("M6") == test
test_alpha = ("PHOENIX-ACES_spectra/Z-0.0.Alpha=+0.20/"
"lte02600-6.00-0.0.Alpha=+0.20.PHOENIX-ACES-AGSS-COND-2011-HiRes_wave.dat")
assert get_spectrum_name("M9", logg=6, alpha=0.2) == test_alpha
test_pos_feh = ("PHOENIX-ACES_spectra/Z+0.5/"
"lte03500-0.00+0.5.PHOENIX-ACES-AGSS-COND-2011-HiRes_wave.dat")
assert get_spectrum_name("M3", logg=0, feh=0.5, alpha=0.0) == test_pos_feh
# Catch Spectrum not implemented error
with pytest.raises(NotImplementedError):
get_spectrum_name("K1") # Stellar type not added
with pytest.raises(NotImplementedError):
get_spectrum_name("MO") # Miss spelled M0
def test_org_name():
""" Test org flag of get_spectrum_name, suposed to be temporary."""
test_org = "PHOENIX-ACES_spectra/lte03900-4.50-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes_wave.dat"
assert get_spectrum_name("M0", org=True) == test_org
@given(st.lists(st.floats()), st.floats(), st.floats(), st.floats())
def test_wav_selector(x, y, wav_min, wav_max):
"""Test some properties of wavelength selector"""
y = [xi + y for xi in x] # just to make y different
x1, y1 = wav_selector(x, y, wav_min, wav_max)
# All values in selected should be less than the max and greater
# than the min value.
assert all(x1 >= wav_min)
assert all(x1 <= wav_max)
assert len(x1) == len(y1)
assert isinstance(x1, np.ndarray)
assert isinstance(y1, np.ndarray)
|
Add some working utility tests
|
Add some working utility tests
Former-commit-id: 96e85547f79640d7c12fe0cbea9ed351396759f8 [formerly 7608385cbf13efc6342d0dcb295474eba71842d8] [formerly 91473ba3c3015cabd2c15fb4d679a355687adb10 [formerly 66526a15c595e72f014a13bf692996b624acede9]]
Former-commit-id: a01daec4a10620e2dad12e133c23bf1959d34e4d [formerly bd5a11be808ae09d012430baa1af9e6c7dad0815]
Former-commit-id: db8bf07c7454cb9a2e9d7b6d6616be66e393110d
|
Python
|
mit
|
jason-neal/eniric,jason-neal/eniric
|
Add some working utility tests
Former-commit-id: 96e85547f79640d7c12fe0cbea9ed351396759f8 [formerly 7608385cbf13efc6342d0dcb295474eba71842d8] [formerly 91473ba3c3015cabd2c15fb4d679a355687adb10 [formerly 66526a15c595e72f014a13bf692996b624acede9]]
Former-commit-id: a01daec4a10620e2dad12e133c23bf1959d34e4d [formerly bd5a11be808ae09d012430baa1af9e6c7dad0815]
Former-commit-id: db8bf07c7454cb9a2e9d7b6d6616be66e393110d
|
"""test_eniric.py"""
import pytest
import numpy as np
from eniric.utilities import get_spectrum_name, wav_selector
# Test using hypothesis
from hypothesis import given, example
import hypothesis.strategies as st
def test_get_spectrum_name():
""" """
test = ("PHOENIX-ACES_spectra/Z-0.0/lte02800-4.50"
"-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes_wave.dat")
assert get_spectrum_name("M6") == test
test_alpha = ("PHOENIX-ACES_spectra/Z-0.0.Alpha=+0.20/"
"lte02600-6.00-0.0.Alpha=+0.20.PHOENIX-ACES-AGSS-COND-2011-HiRes_wave.dat")
assert get_spectrum_name("M9", logg=6, alpha=0.2) == test_alpha
test_pos_feh = ("PHOENIX-ACES_spectra/Z+0.5/"
"lte03500-0.00+0.5.PHOENIX-ACES-AGSS-COND-2011-HiRes_wave.dat")
assert get_spectrum_name("M3", logg=0, feh=0.5, alpha=0.0) == test_pos_feh
# Catch Spectrum not implemented error
with pytest.raises(NotImplementedError):
get_spectrum_name("K1") # Stellar type not added
with pytest.raises(NotImplementedError):
get_spectrum_name("MO") # Miss spelled M0
def test_org_name():
""" Test org flag of get_spectrum_name, suposed to be temporary."""
test_org = "PHOENIX-ACES_spectra/lte03900-4.50-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes_wave.dat"
assert get_spectrum_name("M0", org=True) == test_org
@given(st.lists(st.floats()), st.floats(), st.floats(), st.floats())
def test_wav_selector(x, y, wav_min, wav_max):
"""Test some properties of wavelength selector"""
y = [xi + y for xi in x] # just to make y different
x1, y1 = wav_selector(x, y, wav_min, wav_max)
# All values in selected should be less than the max and greater
# than the min value.
assert all(x1 >= wav_min)
assert all(x1 <= wav_max)
assert len(x1) == len(y1)
assert isinstance(x1, np.ndarray)
assert isinstance(y1, np.ndarray)
|
<commit_before><commit_msg>Add some working utility tests
Former-commit-id: 96e85547f79640d7c12fe0cbea9ed351396759f8 [formerly 7608385cbf13efc6342d0dcb295474eba71842d8] [formerly 91473ba3c3015cabd2c15fb4d679a355687adb10 [formerly 66526a15c595e72f014a13bf692996b624acede9]]
Former-commit-id: a01daec4a10620e2dad12e133c23bf1959d34e4d [formerly bd5a11be808ae09d012430baa1af9e6c7dad0815]
Former-commit-id: db8bf07c7454cb9a2e9d7b6d6616be66e393110d<commit_after>
|
"""test_eniric.py"""
import pytest
import numpy as np
from eniric.utilities import get_spectrum_name, wav_selector
# Test using hypothesis
from hypothesis import given, example
import hypothesis.strategies as st
def test_get_spectrum_name():
""" """
test = ("PHOENIX-ACES_spectra/Z-0.0/lte02800-4.50"
"-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes_wave.dat")
assert get_spectrum_name("M6") == test
test_alpha = ("PHOENIX-ACES_spectra/Z-0.0.Alpha=+0.20/"
"lte02600-6.00-0.0.Alpha=+0.20.PHOENIX-ACES-AGSS-COND-2011-HiRes_wave.dat")
assert get_spectrum_name("M9", logg=6, alpha=0.2) == test_alpha
test_pos_feh = ("PHOENIX-ACES_spectra/Z+0.5/"
"lte03500-0.00+0.5.PHOENIX-ACES-AGSS-COND-2011-HiRes_wave.dat")
assert get_spectrum_name("M3", logg=0, feh=0.5, alpha=0.0) == test_pos_feh
# Catch Spectrum not implemented error
with pytest.raises(NotImplementedError):
get_spectrum_name("K1") # Stellar type not added
with pytest.raises(NotImplementedError):
get_spectrum_name("MO") # Miss spelled M0
def test_org_name():
""" Test org flag of get_spectrum_name, suposed to be temporary."""
test_org = "PHOENIX-ACES_spectra/lte03900-4.50-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes_wave.dat"
assert get_spectrum_name("M0", org=True) == test_org
@given(st.lists(st.floats()), st.floats(), st.floats(), st.floats())
def test_wav_selector(x, y, wav_min, wav_max):
"""Test some properties of wavelength selector"""
y = [xi + y for xi in x] # just to make y different
x1, y1 = wav_selector(x, y, wav_min, wav_max)
# All values in selected should be less than the max and greater
# than the min value.
assert all(x1 >= wav_min)
assert all(x1 <= wav_max)
assert len(x1) == len(y1)
assert isinstance(x1, np.ndarray)
assert isinstance(y1, np.ndarray)
|
Add some working utility tests
Former-commit-id: 96e85547f79640d7c12fe0cbea9ed351396759f8 [formerly 7608385cbf13efc6342d0dcb295474eba71842d8] [formerly 91473ba3c3015cabd2c15fb4d679a355687adb10 [formerly 66526a15c595e72f014a13bf692996b624acede9]]
Former-commit-id: a01daec4a10620e2dad12e133c23bf1959d34e4d [formerly bd5a11be808ae09d012430baa1af9e6c7dad0815]
Former-commit-id: db8bf07c7454cb9a2e9d7b6d6616be66e393110d"""test_eniric.py"""
import pytest
import numpy as np
from eniric.utilities import get_spectrum_name, wav_selector
# Test using hypothesis
from hypothesis import given, example
import hypothesis.strategies as st
def test_get_spectrum_name():
""" """
test = ("PHOENIX-ACES_spectra/Z-0.0/lte02800-4.50"
"-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes_wave.dat")
assert get_spectrum_name("M6") == test
test_alpha = ("PHOENIX-ACES_spectra/Z-0.0.Alpha=+0.20/"
"lte02600-6.00-0.0.Alpha=+0.20.PHOENIX-ACES-AGSS-COND-2011-HiRes_wave.dat")
assert get_spectrum_name("M9", logg=6, alpha=0.2) == test_alpha
test_pos_feh = ("PHOENIX-ACES_spectra/Z+0.5/"
"lte03500-0.00+0.5.PHOENIX-ACES-AGSS-COND-2011-HiRes_wave.dat")
assert get_spectrum_name("M3", logg=0, feh=0.5, alpha=0.0) == test_pos_feh
# Catch Spectrum not implemented error
with pytest.raises(NotImplementedError):
get_spectrum_name("K1") # Stellar type not added
with pytest.raises(NotImplementedError):
get_spectrum_name("MO") # Miss spelled M0
def test_org_name():
""" Test org flag of get_spectrum_name, suposed to be temporary."""
test_org = "PHOENIX-ACES_spectra/lte03900-4.50-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes_wave.dat"
assert get_spectrum_name("M0", org=True) == test_org
@given(st.lists(st.floats()), st.floats(), st.floats(), st.floats())
def test_wav_selector(x, y, wav_min, wav_max):
"""Test some properties of wavelength selector"""
y = [xi + y for xi in x] # just to make y different
x1, y1 = wav_selector(x, y, wav_min, wav_max)
# All values in selected should be less than the max and greater
# than the min value.
assert all(x1 >= wav_min)
assert all(x1 <= wav_max)
assert len(x1) == len(y1)
assert isinstance(x1, np.ndarray)
assert isinstance(y1, np.ndarray)
|
<commit_before><commit_msg>Add some working utility tests
Former-commit-id: 96e85547f79640d7c12fe0cbea9ed351396759f8 [formerly 7608385cbf13efc6342d0dcb295474eba71842d8] [formerly 91473ba3c3015cabd2c15fb4d679a355687adb10 [formerly 66526a15c595e72f014a13bf692996b624acede9]]
Former-commit-id: a01daec4a10620e2dad12e133c23bf1959d34e4d [formerly bd5a11be808ae09d012430baa1af9e6c7dad0815]
Former-commit-id: db8bf07c7454cb9a2e9d7b6d6616be66e393110d<commit_after>"""test_eniric.py"""
import pytest
import numpy as np
from eniric.utilities import get_spectrum_name, wav_selector
# Test using hypothesis
from hypothesis import given, example
import hypothesis.strategies as st
def test_get_spectrum_name():
""" """
test = ("PHOENIX-ACES_spectra/Z-0.0/lte02800-4.50"
"-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes_wave.dat")
assert get_spectrum_name("M6") == test
test_alpha = ("PHOENIX-ACES_spectra/Z-0.0.Alpha=+0.20/"
"lte02600-6.00-0.0.Alpha=+0.20.PHOENIX-ACES-AGSS-COND-2011-HiRes_wave.dat")
assert get_spectrum_name("M9", logg=6, alpha=0.2) == test_alpha
test_pos_feh = ("PHOENIX-ACES_spectra/Z+0.5/"
"lte03500-0.00+0.5.PHOENIX-ACES-AGSS-COND-2011-HiRes_wave.dat")
assert get_spectrum_name("M3", logg=0, feh=0.5, alpha=0.0) == test_pos_feh
# Catch Spectrum not implemented error
with pytest.raises(NotImplementedError):
get_spectrum_name("K1") # Stellar type not added
with pytest.raises(NotImplementedError):
get_spectrum_name("MO") # Miss spelled M0
def test_org_name():
""" Test org flag of get_spectrum_name, suposed to be temporary."""
test_org = "PHOENIX-ACES_spectra/lte03900-4.50-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes_wave.dat"
assert get_spectrum_name("M0", org=True) == test_org
@given(st.lists(st.floats()), st.floats(), st.floats(), st.floats())
def test_wav_selector(x, y, wav_min, wav_max):
"""Test some properties of wavelength selector"""
y = [xi + y for xi in x] # just to make y different
x1, y1 = wav_selector(x, y, wav_min, wav_max)
# All values in selected should be less than the max and greater
# than the min value.
assert all(x1 >= wav_min)
assert all(x1 <= wav_max)
assert len(x1) == len(y1)
assert isinstance(x1, np.ndarray)
assert isinstance(y1, np.ndarray)
|
|
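One caveat for the property test above: st.floats() generates NaN and infinity by default, so wav_min and wav_max can take values for which the bounds checks are not meaningful. A common guard, shown here as a sketch and not part of the original tests, is to constrain the strategies to finite floats:

import hypothesis.strategies as st

# Finite values keep the wav_min/wav_max comparisons well defined.
finite_floats = st.floats(allow_nan=False, allow_infinity=False)

# The decorator would then read:
# @given(st.lists(finite_floats), finite_floats, finite_floats, finite_floats)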
835a4b21728e999ff2ef730f69afed34c7c3b98f
|
nb_train.py
|
nb_train.py
|
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.externals import joblib
print "Grabbing data..."
training_text_collection_f = open("training_text_collection.pkl", "rb")
training_text_collection = joblib.load(training_text_collection_f)
training_text_collection_f.close()
training_targets_f = open("training_targets.pkl", "rb")
training_targets = joblib.load(training_targets_f)
training_targets_f.close()
print("Vectorizing data...")
vectorizer = TfidfVectorizer(analyzer = "word", \
tokenizer = None, \
preprocessor = None, \
stop_words = "english", \
max_features = 2500, \
min_df = 5, \
max_df = 0.4)
train_tfidf = vectorizer.fit_transform(training_text_collection)
save_vect = open("nb_tfidf_vect.pkl", "wb")
joblib.dump(vectorizer, save_vect)
save_vect.close()
clf = MultinomialNB().fit(train_tfidf, training_targets)
save_clf = open("nb_clf.pkl", "wb")
joblib.dump(clf, save_clf)
save_clf.close()
|
Create Naive Bayes vectorizer and classifier pickles
|
Create Naive Bayes vectorizer and classifier pickles
|
Python
|
mit
|
npentella/CuriousCorpus,npentella/CuriousCorpus,npentella/CuriousCorpus
|
Create Naive Bayes vectorizer and classifier pickles
|
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.externals import joblib
print "Grabbing data..."
training_text_collection_f = open("training_text_collection.pkl", "rb")
training_text_collection = joblib.load(training_text_collection_f)
training_text_collection_f.close()
training_targets_f = open("training_targets.pkl", "rb")
training_targets = joblib.load(training_targets_f)
training_targets_f.close()
print("Vectorizing data...")
vectorizer = TfidfVectorizer(analyzer = "word", \
tokenizer = None, \
preprocessor = None, \
stop_words = "english", \
max_features = 2500, \
min_df = 5, \
max_df = 0.4)
train_tfidf = vectorizer.fit_transform(training_text_collection)
save_vect = open("nb_tfidf_vect.pkl", "wb")
joblib.dump(vectorizer, save_vect)
save_vect.close()
clf = MultinomialNB().fit(train_tfidf, training_targets)
save_clf = open("nb_clf.pkl", "wb")
joblib.dump(clf, save_clf)
save_clf.close()
|
<commit_before><commit_msg>Create Naive Bayes vectorizer and classifier pickles<commit_after>
|
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.externals import joblib
print "Grabbing data..."
training_text_collection_f = open("training_text_collection.pkl", "rb")
training_text_collection = joblib.load(training_text_collection_f)
training_text_collection_f.close()
training_targets_f = open("training_targets.pkl", "rb")
training_targets = joblib.load(training_targets_f)
training_targets_f.close()
print("Vectorizing data...")
vectorizer = TfidfVectorizer(analyzer = "word", \
tokenizer = None, \
preprocessor = None, \
stop_words = "english", \
max_features = 2500, \
min_df = 5, \
max_df = 0.4)
train_tfidf = vectorizer.fit_transform(training_text_collection)
save_vect = open("nb_tfidf_vect.pkl", "wb")
joblib.dump(vectorizer, save_vect)
save_vect.close()
clf = MultinomialNB().fit(train_tfidf, training_targets)
save_clf = open("nb_clf.pkl", "wb")
joblib.dump(clf, save_clf)
save_clf.close()
|
Create Naive Bayes vectorizer and classifier picklesfrom sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.externals import joblib
print "Grabbing data..."
training_text_collection_f = open("training_text_collection.pkl", "rb")
training_text_collection = joblib.load(training_text_collection_f)
training_text_collection_f.close()
training_targets_f = open("training_targets.pkl", "rb")
training_targets = joblib.load(training_targets_f)
training_targets_f.close()
print("Vectorizing data...")
vectorizer = TfidfVectorizer(analyzer = "word", \
tokenizer = None, \
preprocessor = None, \
stop_words = "english", \
max_features = 2500, \
min_df = 5, \
max_df = 0.4)
train_tfidf = vectorizer.fit_transform(training_text_collection)
save_vect = open("nb_tfidf_vect.pkl", "wb")
joblib.dump(vectorizer, save_vect)
save_vect.close()
clf = MultinomialNB().fit(train_tfidf, training_targets)
save_clf = open("nb_clf.pkl", "wb")
joblib.dump(clf, save_clf)
save_clf.close()
|
<commit_before><commit_msg>Create Naive Bayes vectorizer and classifier pickles<commit_after>from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.externals import joblib
print "Grabbing data..."
training_text_collection_f = open("training_text_collection.pkl", "rb")
training_text_collection = joblib.load(training_text_collection_f)
training_text_collection_f.close()
training_targets_f = open("training_targets.pkl", "rb")
training_targets = joblib.load(training_targets_f)
training_targets_f.close()
print("Vectorizing data...")
vectorizer = TfidfVectorizer(analyzer = "word", \
tokenizer = None, \
preprocessor = None, \
stop_words = "english", \
max_features = 2500, \
min_df = 5, \
max_df = 0.4)
train_tfidf = vectorizer.fit_transform(training_text_collection)
save_vect = open("nb_tfidf_vect.pkl", "wb")
joblib.dump(vectorizer, save_vect)
save_vect.close()
clf = MultinomialNB().fit(train_tfidf, training_targets)
save_clf = open("nb_clf.pkl", "wb")
joblib.dump(clf, save_clf)
save_clf.close()
|
|
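Because the TF-IDF vectorizer and the classifier above are pickled separately, they have to be loaded and kept in sync by hand at prediction time. One common alternative, sketched below with the same training variables rather than as a drop-in replacement for the script, is to bundle both steps in a single scikit-learn Pipeline and persist one object:

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import make_pipeline
from sklearn.externals import joblib

# One fitted object carries both the TF-IDF vocabulary and the classifier.
text_clf = make_pipeline(
    TfidfVectorizer(stop_words="english", max_features=2500, min_df=5, max_df=0.4),
    MultinomialNB())
text_clf.fit(training_text_collection, training_targets)
joblib.dump(text_clf, "nb_pipeline.pkl")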
950aa44a405b0ca6d057d2583d39f06409ca2c0c
|
contrib/performance/event_delete.py
|
contrib/performance/event_delete.py
|
"""
Benchmark a server's handling of event deletion.
"""
from itertools import count
from urllib2 import HTTPDigestAuthHandler
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.web.client import Agent
from twisted.web.http_headers import Headers
from httpauth import AuthHandlerAgent
from httpclient import StringProducer
from benchlib import initialize, sample
from event import makeEvent
@inlineCallbacks
def measure(host, port, dtrace, attendeeCount, samples):
user = password = "user01"
root = "/"
principal = "/"
calendar = "event-deletion-benchmark"
authinfo = HTTPDigestAuthHandler()
authinfo.add_password(
realm="Test Realm",
uri="http://%s:%d/" % (host, port),
user=user,
passwd=password)
agent = AuthHandlerAgent(Agent(reactor), authinfo)
# Set up the calendar first
yield initialize(agent, host, port, user, password, root, principal, calendar)
# An infinite stream of VEVENTs to PUT to the server.
events = ((i, makeEvent(i, attendeeCount)) for i in count(2))
# Create enough events to delete
uri = 'http://%s:%d/calendars/__uids__/%s/%s/foo-%%d.ics' % (
host, port, user, calendar)
headers = Headers({"content-type": ["text/calendar"]})
urls = []
for i, body in events:
urls.append(uri % (i,))
yield agent.request(
'PUT', urls[-1], headers, StringProducer(body))
if len(urls) == samples:
break
# Now delete them all
samples = yield sample(
dtrace, samples,
agent, (('DELETE', url) for url in urls).next)
returnValue(samples)
|
Add a benchmark for event deletion
|
Add a benchmark for event deletion
git-svn-id: 81e381228600e5752b80483efd2b45b26c451ea2@6238 e27351fd-9f3e-4f54-a53b-843176b1656c
|
Python
|
apache-2.0
|
trevor/calendarserver,trevor/calendarserver,trevor/calendarserver
|
Add a benchmark for event deletion
git-svn-id: 81e381228600e5752b80483efd2b45b26c451ea2@6238 e27351fd-9f3e-4f54-a53b-843176b1656c
|
"""
Benchmark a server's handling of event deletion.
"""
from itertools import count
from urllib2 import HTTPDigestAuthHandler
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.web.client import Agent
from twisted.web.http_headers import Headers
from httpauth import AuthHandlerAgent
from httpclient import StringProducer
from benchlib import initialize, sample
from event import makeEvent
@inlineCallbacks
def measure(host, port, dtrace, attendeeCount, samples):
user = password = "user01"
root = "/"
principal = "/"
calendar = "event-deletion-benchmark"
authinfo = HTTPDigestAuthHandler()
authinfo.add_password(
realm="Test Realm",
uri="http://%s:%d/" % (host, port),
user=user,
passwd=password)
agent = AuthHandlerAgent(Agent(reactor), authinfo)
# Set up the calendar first
yield initialize(agent, host, port, user, password, root, principal, calendar)
# An infinite stream of VEVENTs to PUT to the server.
events = ((i, makeEvent(i, attendeeCount)) for i in count(2))
# Create enough events to delete
uri = 'http://%s:%d/calendars/__uids__/%s/%s/foo-%%d.ics' % (
host, port, user, calendar)
headers = Headers({"content-type": ["text/calendar"]})
urls = []
for i, body in events:
urls.append(uri % (i,))
yield agent.request(
'PUT', urls[-1], headers, StringProducer(body))
if len(urls) == samples:
break
# Now delete them all
samples = yield sample(
dtrace, samples,
agent, (('DELETE', url) for url in urls).next)
returnValue(samples)
|
<commit_before><commit_msg>Add a benchmark for event deletion
git-svn-id: 81e381228600e5752b80483efd2b45b26c451ea2@6238 e27351fd-9f3e-4f54-a53b-843176b1656c<commit_after>
|
"""
Benchmark a server's handling of event deletion.
"""
from itertools import count
from urllib2 import HTTPDigestAuthHandler
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.web.client import Agent
from twisted.web.http_headers import Headers
from httpauth import AuthHandlerAgent
from httpclient import StringProducer
from benchlib import initialize, sample
from event import makeEvent
@inlineCallbacks
def measure(host, port, dtrace, attendeeCount, samples):
user = password = "user01"
root = "/"
principal = "/"
calendar = "event-deletion-benchmark"
authinfo = HTTPDigestAuthHandler()
authinfo.add_password(
realm="Test Realm",
uri="http://%s:%d/" % (host, port),
user=user,
passwd=password)
agent = AuthHandlerAgent(Agent(reactor), authinfo)
# Set up the calendar first
yield initialize(agent, host, port, user, password, root, principal, calendar)
# An infinite stream of VEVENTs to PUT to the server.
events = ((i, makeEvent(i, attendeeCount)) for i in count(2))
# Create enough events to delete
uri = 'http://%s:%d/calendars/__uids__/%s/%s/foo-%%d.ics' % (
host, port, user, calendar)
headers = Headers({"content-type": ["text/calendar"]})
urls = []
for i, body in events:
urls.append(uri % (i,))
yield agent.request(
'PUT', urls[-1], headers, StringProducer(body))
if len(urls) == samples:
break
# Now delete them all
samples = yield sample(
dtrace, samples,
agent, (('DELETE', url) for url in urls).next)
returnValue(samples)
|
Add a benchmark for event deletion
git-svn-id: 81e381228600e5752b80483efd2b45b26c451ea2@6238 e27351fd-9f3e-4f54-a53b-843176b1656c
"""
Benchmark a server's handling of event deletion.
"""
from itertools import count
from urllib2 import HTTPDigestAuthHandler
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.web.client import Agent
from twisted.web.http_headers import Headers
from httpauth import AuthHandlerAgent
from httpclient import StringProducer
from benchlib import initialize, sample
from event import makeEvent
@inlineCallbacks
def measure(host, port, dtrace, attendeeCount, samples):
user = password = "user01"
root = "/"
principal = "/"
calendar = "event-deletion-benchmark"
authinfo = HTTPDigestAuthHandler()
authinfo.add_password(
realm="Test Realm",
uri="http://%s:%d/" % (host, port),
user=user,
passwd=password)
agent = AuthHandlerAgent(Agent(reactor), authinfo)
# Set up the calendar first
yield initialize(agent, host, port, user, password, root, principal, calendar)
# An infinite stream of VEVENTs to PUT to the server.
events = ((i, makeEvent(i, attendeeCount)) for i in count(2))
# Create enough events to delete
uri = 'http://%s:%d/calendars/__uids__/%s/%s/foo-%%d.ics' % (
host, port, user, calendar)
headers = Headers({"content-type": ["text/calendar"]})
urls = []
for i, body in events:
urls.append(uri % (i,))
yield agent.request(
'PUT', urls[-1], headers, StringProducer(body))
if len(urls) == samples:
break
# Now delete them all
samples = yield sample(
dtrace, samples,
agent, (('DELETE', url) for url in urls).next)
returnValue(samples)
|
<commit_before><commit_msg>Add a benchmark for event deletion
git-svn-id: 81e381228600e5752b80483efd2b45b26c451ea2@6238 e27351fd-9f3e-4f54-a53b-843176b1656c<commit_after>
"""
Benchmark a server's handling of event deletion.
"""
from itertools import count
from urllib2 import HTTPDigestAuthHandler
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.web.client import Agent
from twisted.web.http_headers import Headers
from httpauth import AuthHandlerAgent
from httpclient import StringProducer
from benchlib import initialize, sample
from event import makeEvent
@inlineCallbacks
def measure(host, port, dtrace, attendeeCount, samples):
user = password = "user01"
root = "/"
principal = "/"
calendar = "event-deletion-benchmark"
authinfo = HTTPDigestAuthHandler()
authinfo.add_password(
realm="Test Realm",
uri="http://%s:%d/" % (host, port),
user=user,
passwd=password)
agent = AuthHandlerAgent(Agent(reactor), authinfo)
# Set up the calendar first
yield initialize(agent, host, port, user, password, root, principal, calendar)
# An infinite stream of VEVENTs to PUT to the server.
events = ((i, makeEvent(i, attendeeCount)) for i in count(2))
# Create enough events to delete
uri = 'http://%s:%d/calendars/__uids__/%s/%s/foo-%%d.ics' % (
host, port, user, calendar)
headers = Headers({"content-type": ["text/calendar"]})
urls = []
for i, body in events:
urls.append(uri % (i,))
yield agent.request(
'PUT', urls[-1], headers, StringProducer(body))
if len(urls) == samples:
break
# Now delete them all
samples = yield sample(
dtrace, samples,
agent, (('DELETE', url) for url in urls).next)
returnValue(samples)
|
|
6719988bc77b4e66fe438edd2d00fc619cc2adfb
|
proselint/checks/misc/symbols.py
|
proselint/checks/misc/symbols.py
|
# -*- coding: utf-8 -*-
"""MSC110: Symbols.
---
layout: post
error_code: MSC110
source: SublimeLinter-annotations
source_url: http://bit.ly/16Q7H41
title: symbols
date: 2014-06-10 12:31:19
categories: writing
---
Symbols.
"""
from proselint.tools import blacklist, memoize
@memoize
def check(text):
err = "MSC110"
msg = u"Incorrent use of symbols in {}."
symbols = [
"\$[\d]* ?(?:dollars|usd|us dollars)"
]
return blacklist(text, symbols, err, msg)
|
Add check for symbol usage
|
Add check for symbol usage
|
Python
|
bsd-3-clause
|
amperser/proselint,jstewmon/proselint,jstewmon/proselint,amperser/proselint,jstewmon/proselint,amperser/proselint,amperser/proselint,amperser/proselint
|
Add check for symbol usage
|
# -*- coding: utf-8 -*-
"""MSC110: Symbols.
---
layout: post
error_code: MSC110
source: SublimeLinter-annotations
source_url: http://bit.ly/16Q7H41
title: symbols
date: 2014-06-10 12:31:19
categories: writing
---
Symbols.
"""
from proselint.tools import blacklist, memoize
@memoize
def check(text):
err = "MSC110"
msg = u"Incorrent use of symbols in {}."
symbols = [
"\$[\d]* ?(?:dollars|usd|us dollars)"
]
return blacklist(text, symbols, err, msg)
|
<commit_before><commit_msg>Add check for symbol usage<commit_after>
|
# -*- coding: utf-8 -*-
"""MSC110: Symbols.
---
layout: post
error_code: MSC110
source: SublimeLinter-annotations
source_url: http://bit.ly/16Q7H41
title: symbols
date: 2014-06-10 12:31:19
categories: writing
---
Symbols.
"""
from proselint.tools import blacklist, memoize
@memoize
def check(text):
err = "MSC110"
msg = u"Incorrent use of symbols in {}."
symbols = [
"\$[\d]* ?(?:dollars|usd|us dollars)"
]
return blacklist(text, symbols, err, msg)
|
Add check for symbol usage# -*- coding: utf-8 -*-
"""MSC110: Symbols.
---
layout: post
error_code: MSC110
source: SublimeLinter-annotations
source_url: http://bit.ly/16Q7H41
title: symbols
date: 2014-06-10 12:31:19
categories: writing
---
Symbols.
"""
from proselint.tools import blacklist, memoize
@memoize
def check(text):
err = "MSC110"
msg = u"Incorrent use of symbols in {}."
symbols = [
"\$[\d]* ?(?:dollars|usd|us dollars)"
]
return blacklist(text, symbols, err, msg)
|
<commit_before><commit_msg>Add check for symbol usage<commit_after># -*- coding: utf-8 -*-
"""MSC110: Symbols.
---
layout: post
error_code: MSC110
source: SublimeLinter-annotations
source_url: http://bit.ly/16Q7H41
title: symbols
date: 2014-06-10 12:31:19
categories: writing
---
Symbols.
"""
from proselint.tools import blacklist, memoize
@memoize
def check(text):
err = "MSC110"
msg = u"Incorrent use of symbols in {}."
symbols = [
"\$[\d]* ?(?:dollars|usd|us dollars)"
]
return blacklist(text, symbols, err, msg)
|
|
f947d72281c7909c1d51399b06c8ebb9671da68d
|
build/generate_standalone_timeline_view.py
|
build/generate_standalone_timeline_view.py
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import parse_deps
import sys
import os
srcdir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../src"))
js_warning_message = """/**
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
* WARNING: This file is generated by generate_standalone_timeline_view.py
*
* Do not edit directly.
*/
"""
css_warning_message = """/**
/* Copyright (c) 2012 The Chromium Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file. */
* WARNING: This file is generated by generate_standalone_timeline_view.py
*
* Do not edit directly.
*/
"""
def generate_css(filenames):
load_sequence = parse_deps.calc_load_sequence(filenames)
style_sheet_chunks = [css_warning_message, '\n']
for module in load_sequence:
for style_sheet in module.style_sheets:
style_sheet_chunks.append("""%s\n""" % style_sheet.timeline_view)
return ''.join(style_sheet_chunks)
def generate_js(filenames):
load_sequence = parse_deps.calc_load_sequence(filenames)
js_chunks = [js_warning_message, '\n']
js_chunks.append("window.FLATTENED = {};\n")
for module in load_sequence:
js_chunks.append( "window.FLATTENED['%s'] = true;\n" % module.name)
for module in load_sequence:
js_chunks.append(module.timeline_view)
js_chunks.append("\n")
return ''.join(js_chunks)
def main(args):
parser = optparse.OptionParser()
parser.add_option("--js", dest="js_file",
help="Where to place generated javascript file")
parser.add_option("--css", dest="css_file",
help="Where to place generated css file")
options, args = parser.parse_args(args)
if not options.js_file and not options.css_file:
print "Must specify one, or both of --js and --css"
return 1
input_filenames = [os.path.join(srcdir, f)
for f in ['base.js', 'timeline_view.js']]
if options.js_file:
with open(options.js_file, 'w') as f:
f.write(generate_js(input_filenames))
if options.css_file:
with open(options.css_file, 'w') as f:
f.write(generate_css(input_filenames))
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
Add script to generate a standalone timeline view.
|
Add script to generate a standalone timeline view.
TBR=jgennis@google.com
Review URL: https://codereview.appspot.com/6497071
git-svn-id: 3a56fcae908c7e16d23cb53443ea4795ac387cf2@146 0e6d7f2b-9903-5b78-7403-59d27f066143
|
Python
|
bsd-3-clause
|
bpsinc-native/src_third_party_trace-viewer,bpsinc-native/src_third_party_trace-viewer,bpsinc-native/src_third_party_trace-viewer,bpsinc-native/src_third_party_trace-viewer
|
Add script to generate a standalone timeline view.
TBR=jgennis@google.com
Review URL: https://codereview.appspot.com/6497071
git-svn-id: 3a56fcae908c7e16d23cb53443ea4795ac387cf2@146 0e6d7f2b-9903-5b78-7403-59d27f066143
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import parse_deps
import sys
import os
srcdir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../src"))
js_warning_message = """/**
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
* WARNING: This file is generated by generate_standalone_timeline_view.py
*
* Do not edit directly.
*/
"""
css_warning_message = """/**
/* Copyright (c) 2012 The Chromium Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file. */
* WARNING: This file is generated by generate_standalone_timeline_view.py
*
* Do not edit directly.
*/
"""
def generate_css(filenames):
load_sequence = parse_deps.calc_load_sequence(filenames)
style_sheet_chunks = [css_warning_message, '\n']
for module in load_sequence:
for style_sheet in module.style_sheets:
style_sheet_chunks.append("""%s\n""" % style_sheet.timeline_view)
return ''.join(style_sheet_chunks)
def generate_js(filenames):
load_sequence = parse_deps.calc_load_sequence(filenames)
js_chunks = [js_warning_message, '\n']
js_chunks.append("window.FLATTENED = {};\n")
for module in load_sequence:
js_chunks.append( "window.FLATTENED['%s'] = true;\n" % module.name)
for module in load_sequence:
js_chunks.append(module.timeline_view)
js_chunks.append("\n")
return ''.join(js_chunks)
def main(args):
parser = optparse.OptionParser()
parser.add_option("--js", dest="js_file",
help="Where to place generated javascript file")
parser.add_option("--css", dest="css_file",
help="Where to place generated css file")
options, args = parser.parse_args(args)
if not options.js_file and not options.css_file:
print "Must specify one, or both of --js and --css"
return 1
input_filenames = [os.path.join(srcdir, f)
for f in ['base.js', 'timeline_view.js']]
if options.js_file:
with open(options.js_file, 'w') as f:
f.write(generate_js(input_filenames))
if options.css_file:
with open(options.css_file, 'w') as f:
f.write(generate_css(input_filenames))
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
<commit_before><commit_msg>Add script to generate a standalone timeline view.
TBR=jgennis@google.com
Review URL: https://codereview.appspot.com/6497071
git-svn-id: 3a56fcae908c7e16d23cb53443ea4795ac387cf2@146 0e6d7f2b-9903-5b78-7403-59d27f066143<commit_after>
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import parse_deps
import sys
import os
srcdir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../src"))
js_warning_message = """/**
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
* WARNING: This file is generated by generate_standalone_timeline_view.py
*
* Do not edit directly.
*/
"""
css_warning_message = """/**
/* Copyright (c) 2012 The Chromium Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file. */
* WARNING: This file is generated by generate_standalone_timeline_view.py
*
* Do not edit directly.
*/
"""
def generate_css(filenames):
load_sequence = parse_deps.calc_load_sequence(filenames)
style_sheet_chunks = [css_warning_message, '\n']
for module in load_sequence:
for style_sheet in module.style_sheets:
style_sheet_chunks.append("""%s\n""" % style_sheet.timeline_view)
return ''.join(style_sheet_chunks)
def generate_js(filenames):
load_sequence = parse_deps.calc_load_sequence(filenames)
js_chunks = [js_warning_message, '\n']
js_chunks.append("window.FLATTENED = {};\n")
for module in load_sequence:
js_chunks.append( "window.FLATTENED['%s'] = true;\n" % module.name)
for module in load_sequence:
js_chunks.append(module.timeline_view)
js_chunks.append("\n")
return ''.join(js_chunks)
def main(args):
parser = optparse.OptionParser()
parser.add_option("--js", dest="js_file",
help="Where to place generated javascript file")
parser.add_option("--css", dest="css_file",
help="Where to place generated css file")
options, args = parser.parse_args(args)
if not options.js_file and not options.css_file:
print "Must specify one, or both of --js and --css"
return 1
input_filenames = [os.path.join(srcdir, f)
for f in ['base.js', 'timeline_view.js']]
if options.js_file:
with open(options.js_file, 'w') as f:
f.write(generate_js(input_filenames))
if options.css_file:
with open(options.css_file, 'w') as f:
f.write(generate_css(input_filenames))
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
Add script to generate a standalone timeline view.
TBR=jgennis@google.com
Review URL: https://codereview.appspot.com/6497071
git-svn-id: 3a56fcae908c7e16d23cb53443ea4795ac387cf2@146 0e6d7f2b-9903-5b78-7403-59d27f066143#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import parse_deps
import sys
import os
srcdir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../src"))
js_warning_message = """/**
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
* WARNING: This file is generated by generate_standalone_timeline_view.py
*
* Do not edit directly.
*/
"""
css_warning_message = """/**
/* Copyright (c) 2012 The Chromium Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file. */
* WARNING: This file is generated by generate_standalone_timeline_view.py
*
* Do not edit directly.
*/
"""
def generate_css(filenames):
load_sequence = parse_deps.calc_load_sequence(filenames)
style_sheet_chunks = [css_warning_message, '\n']
for module in load_sequence:
for style_sheet in module.style_sheets:
style_sheet_chunks.append("""%s\n""" % style_sheet.timeline_view)
return ''.join(style_sheet_chunks)
def generate_js(filenames):
load_sequence = parse_deps.calc_load_sequence(filenames)
js_chunks = [js_warning_message, '\n']
js_chunks.append("window.FLATTENED = {};\n")
for module in load_sequence:
js_chunks.append( "window.FLATTENED['%s'] = true;\n" % module.name)
for module in load_sequence:
js_chunks.append(module.timeline_view)
js_chunks.append("\n")
return ''.join(js_chunks)
def main(args):
parser = optparse.OptionParser()
parser.add_option("--js", dest="js_file",
help="Where to place generated javascript file")
parser.add_option("--css", dest="css_file",
help="Where to place generated css file")
options, args = parser.parse_args(args)
if not options.js_file and not options.css_file:
print "Must specify one, or both of --js and --css"
return 1
input_filenames = [os.path.join(srcdir, f)
for f in ['base.js', 'timeline_view.js']]
if options.js_file:
with open(options.js_file, 'w') as f:
f.write(generate_js(input_filenames))
if options.css_file:
with open(options.css_file, 'w') as f:
f.write(generate_css(input_filenames))
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
<commit_before><commit_msg>Add script to generate a standalone timeline view.
TBR=jgennis@google.com
Review URL: https://codereview.appspot.com/6497071
git-svn-id: 3a56fcae908c7e16d23cb53443ea4795ac387cf2@146 0e6d7f2b-9903-5b78-7403-59d27f066143<commit_after>#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import parse_deps
import sys
import os
srcdir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../src"))
js_warning_message = """/**
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
* WARNING: This file is generated by generate_standalone_timeline_view.py
*
* Do not edit directly.
*/
"""
css_warning_message = """/**
/* Copyright (c) 2012 The Chromium Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file. */
* WARNING: This file is generated by generate_standalone_timeline_view.py
*
* Do not edit directly.
*/
"""
def generate_css(filenames):
load_sequence = parse_deps.calc_load_sequence(filenames)
style_sheet_chunks = [css_warning_message, '\n']
for module in load_sequence:
for style_sheet in module.style_sheets:
style_sheet_chunks.append("""%s\n""" % style_sheet.timeline_view)
return ''.join(style_sheet_chunks)
def generate_js(filenames):
load_sequence = parse_deps.calc_load_sequence(filenames)
js_chunks = [js_warning_message, '\n']
js_chunks.append("window.FLATTENED = {};\n")
for module in load_sequence:
js_chunks.append( "window.FLATTENED['%s'] = true;\n" % module.name)
for module in load_sequence:
js_chunks.append(module.timeline_view)
js_chunks.append("\n")
return ''.join(js_chunks)
def main(args):
parser = optparse.OptionParser()
parser.add_option("--js", dest="js_file",
help="Where to place generated javascript file")
parser.add_option("--css", dest="css_file",
help="Where to place generated css file")
options, args = parser.parse_args(args)
if not options.js_file and not options.css_file:
print "Must specify one, or both of --js and --css"
return 1
input_filenames = [os.path.join(srcdir, f)
for f in ['base.js', 'timeline_view.js']]
if options.js_file:
with open(options.js_file, 'w') as f:
f.write(generate_js(input_filenames))
if options.css_file:
with open(options.css_file, 'w') as f:
f.write(generate_css(input_filenames))
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
|
9c77a1fd037e2a59cba57b5492671f32ffcb5409
|
clodius/cli/utils.py
|
clodius/cli/utils.py
|
import math
def get_tile_box(zoom, x, y):
"""convert Google-style Mercator tile coordinate to
(minlat, maxlat, minlng, maxlng) bounding box"""
minlng, minlat = get_lng_lat_from_tile_pos(zoom, x, y)
maxlng, maxlat = get_lng_lat_from_tile_pos(zoom, x + 1, y + 1)
return (minlng, maxlng, minlat, maxlat)
def get_lng_lat_from_tile_pos(zoom, x, y):
"""convert Google-style Mercator tile coordinate to
(lng, lat) of top-left corner of tile"""
# "map-centric" latitude, in radians:
lat_rad = math.pi - 2*math.pi*y/(2**zoom)
# true latitude:
lat_rad = gudermannian(lat_rad)
lat = lat_rad * 180.0 / math.pi
# longitude maps linearly to map, so we simply scale:
lng = -180.0 + 360.0*x/(2**zoom)
return (lng, lat)
def get_tile_pos_from_lng_lat(lng, lat, zoom):
"""convert lng/lat to Google-style Mercator tile coordinate (x, y)
at the given zoom level"""
lat_rad = lat * math.pi / 180.0
# "map-centric" latitude, in radians:
lat_rad = inv_gudermannian(lat_rad)
x = 2**zoom * (lng + 180.0) / 360.0
y = 2**zoom * (math.pi - lat_rad) / (2 * math.pi)
return (x, y)
def gudermannian(x):
return 2*math.atan(math.exp(x)) - math.pi/2
def inv_gudermannian(y):
return math.log(math.tan((y + math.pi/2) / 2))
|
Add utility function for converting lng-lat to tile positions
|
Add utility function for converting lng-lat to tile positions
|
Python
|
mit
|
hms-dbmi/clodius,hms-dbmi/clodius
|
Add utility function for converting lng-lat to tile positions
|
import math
def get_tile_box(zoom, x, y):
"""convert Google-style Mercator tile coordinate to
(minlat, maxlat, minlng, maxlng) bounding box"""
minlng, minlat = get_lng_lat_from_tile_pos(zoom, x, y)
maxlng, maxlat = get_lng_lat_from_tile_pos(zoom, x + 1, y + 1)
return (minlng, maxlng, minlat, maxlat)
def get_lng_lat_from_tile_pos(zoom, x, y):
"""convert Google-style Mercator tile coordinate to
(lng, lat) of top-left corner of tile"""
# "map-centric" latitude, in radians:
lat_rad = math.pi - 2*math.pi*y/(2**zoom)
# true latitude:
lat_rad = gudermannian(lat_rad)
lat = lat_rad * 180.0 / math.pi
# longitude maps linearly to map, so we simply scale:
lng = -180.0 + 360.0*x/(2**zoom)
return (lng, lat)
def get_tile_pos_from_lng_lat(lng, lat, zoom):
"""convert lng/lat to Google-style Mercator tile coordinate (x, y)
at the given zoom level"""
lat_rad = lat * math.pi / 180.0
# "map-centric" latitude, in radians:
lat_rad = inv_gudermannian(lat_rad)
x = 2**zoom * (lng + 180.0) / 360.0
y = 2**zoom * (math.pi - lat_rad) / (2 * math.pi)
return (x, y)
def gudermannian(x):
return 2*math.atan(math.exp(x)) - math.pi/2
def inv_gudermannian(y):
return math.log(math.tan((y + math.pi/2) / 2))
|
<commit_before><commit_msg>Add utility function for converting lng-lat to tile positions<commit_after>
|
import math
def get_tile_box(zoom, x, y):
"""convert Google-style Mercator tile coordinate to
(minlat, maxlat, minlng, maxlng) bounding box"""
minlng, minlat = get_lng_lat_from_tile_pos(zoom, x, y)
maxlng, maxlat = get_lng_lat_from_tile_pos(zoom, x + 1, y + 1)
return (minlng, maxlng, minlat, maxlat)
def get_lng_lat_from_tile_pos(zoom, x, y):
"""convert Google-style Mercator tile coordinate to
(lng, lat) of top-left corner of tile"""
# "map-centric" latitude, in radians:
lat_rad = math.pi - 2*math.pi*y/(2**zoom)
# true latitude:
lat_rad = gudermannian(lat_rad)
lat = lat_rad * 180.0 / math.pi
# longitude maps linearly to map, so we simply scale:
lng = -180.0 + 360.0*x/(2**zoom)
return (lng, lat)
def get_tile_pos_from_lng_lat(lng, lat, zoom):
"""convert lng/lat to Google-style Mercator tile coordinate (x, y)
at the given zoom level"""
lat_rad = lat * math.pi / 180.0
# "map-centric" latitude, in radians:
lat_rad = inv_gudermannian(lat_rad)
x = 2**zoom * (lng + 180.0) / 360.0
y = 2**zoom * (math.pi - lat_rad) / (2 * math.pi)
return (x, y)
def gudermannian(x):
return 2*math.atan(math.exp(x)) - math.pi/2
def inv_gudermannian(y):
return math.log(math.tan((y + math.pi/2) / 2))
|
Add utility function for converting lng-lat to tile positionsimport math
def get_tile_box(zoom, x, y):
"""convert Google-style Mercator tile coordinate to
(minlat, maxlat, minlng, maxlng) bounding box"""
minlng, minlat = get_lng_lat_from_tile_pos(zoom, x, y)
maxlng, maxlat = get_lng_lat_from_tile_pos(zoom, x + 1, y + 1)
return (minlng, maxlng, minlat, maxlat)
def get_lng_lat_from_tile_pos(zoom, x, y):
"""convert Google-style Mercator tile coordinate to
(lng, lat) of top-left corner of tile"""
# "map-centric" latitude, in radians:
lat_rad = math.pi - 2*math.pi*y/(2**zoom)
# true latitude:
lat_rad = gudermannian(lat_rad)
lat = lat_rad * 180.0 / math.pi
# longitude maps linearly to map, so we simply scale:
lng = -180.0 + 360.0*x/(2**zoom)
return (lng, lat)
def get_tile_pos_from_lng_lat(lng, lat, zoom):
"""convert lng/lat to Google-style Mercator tile coordinate (x, y)
at the given zoom level"""
lat_rad = lat * math.pi / 180.0
# "map-centric" latitude, in radians:
lat_rad = inv_gudermannian(lat_rad)
x = 2**zoom * (lng + 180.0) / 360.0
y = 2**zoom * (math.pi - lat_rad) / (2 * math.pi)
return (x, y)
def gudermannian(x):
return 2*math.atan(math.exp(x)) - math.pi/2
def inv_gudermannian(y):
return math.log(math.tan((y + math.pi/2) / 2))
|
<commit_before><commit_msg>Add utility function for converting lng-lat to tile positions<commit_after>import math
def get_tile_box(zoom, x, y):
"""convert Google-style Mercator tile coordinate to
(minlat, maxlat, minlng, maxlng) bounding box"""
minlng, minlat = get_lng_lat_from_tile_pos(zoom, x, y)
maxlng, maxlat = get_lng_lat_from_tile_pos(zoom, x + 1, y + 1)
return (minlng, maxlng, minlat, maxlat)
def get_lng_lat_from_tile_pos(zoom, x, y):
"""convert Google-style Mercator tile coordinate to
(lng, lat) of top-left corner of tile"""
# "map-centric" latitude, in radians:
lat_rad = math.pi - 2*math.pi*y/(2**zoom)
# true latitude:
lat_rad = gudermannian(lat_rad)
lat = lat_rad * 180.0 / math.pi
# longitude maps linearly to map, so we simply scale:
lng = -180.0 + 360.0*x/(2**zoom)
return (lng, lat)
def get_tile_pos_from_lng_lat(lng, lat, zoom):
"""convert lng/lat to Google-style Mercator tile coordinate (x, y)
at the given zoom level"""
lat_rad = lat * math.pi / 180.0
# "map-centric" latitude, in radians:
lat_rad = inv_gudermannian(lat_rad)
x = 2**zoom * (lng + 180.0) / 360.0
y = 2**zoom * (math.pi - lat_rad) / (2 * math.pi)
return (x, y)
def gudermannian(x):
return 2*math.atan(math.exp(x)) - math.pi/2
def inv_gudermannian(y):
return math.log(math.tan((y + math.pi/2) / 2))
|
|
64094025914de15107c450557b301b8a4dd9d9f9
|
tests/test_volume.py
|
tests/test_volume.py
|
from farmfs.volume import *
from itertools import permutations
import re
def produce_mismatches():
""" Helper function to produce pairs of paths which have lexographical/path order mismatches"""
letters = list("abc/+")
paths = filter(lambda p: re.search("//", p) is None, map(lambda p: "/"+p, map(lambda s: reduce(lambda x,y:x+y, s), permutations(letters, 5))))
combos = list(combinations(paths,2))
mismatches = filter(lambda (x,y): bool(x<y) != bool(Path(x) < Path(y)), combos)
return mismatches
def test_mismatches_possible():
assert len(produce_mismatches()) > 0
def makeLink(path):
return {"path": path, "csum": "00000000000000000000000000000000", "type": "link"}
def test_tree_diff_order():
name_a = "/a/+b"
name_b = "/a+/b"
path_a = Path(name_a)
path_b = Path(name_b)
link_a = makeLink(name_a)
link_b = makeLink(name_b)
left = KeySnapshot([link_a], "left", None)
right = KeySnapshot([link_b], "right", None)
root = Path("/")
diff = tree_diff(left, right)
paths = map(lambda change: change.path(root), diff)
assert paths == [path_a, path_b]
|
Add tests for lex order issue.
|
Add tests for lex order issue.
|
Python
|
mit
|
andrewguy9/farmfs,andrewguy9/farmfs
|
Add tests for lex order issue.
|
from farmfs.volume import *
from itertools import permutations
import re
def produce_mismatches():
""" Helper function to produce pairs of paths which have lexographical/path order mismatches"""
letters = list("abc/+")
paths = filter(lambda p: re.search("//", p) is None, map(lambda p: "/"+p, map(lambda s: reduce(lambda x,y:x+y, s), permutations(letters, 5))))
combos = list(combinations(paths,2))
mismatches = filter(lambda (x,y): bool(x<y) != bool(Path(x) < Path(y)), combos)
return mismatches
def test_mismatches_possible():
assert len(produce_mismatches()) > 0
def makeLink(path):
return {"path": path, "csum": "00000000000000000000000000000000", "type": "link"}
def test_tree_diff_order():
name_a = "/a/+b"
name_b = "/a+/b"
path_a = Path(name_a)
path_b = Path(name_b)
link_a = makeLink(name_a)
link_b = makeLink(name_b)
left = KeySnapshot([link_a], "left", None)
right = KeySnapshot([link_b], "right", None)
root = Path("/")
diff = tree_diff(left, right)
paths = map(lambda change: change.path(root), diff)
assert paths == [path_a, path_b]
|
<commit_before><commit_msg>Add tests for lex order issue.<commit_after>
|
from farmfs.volume import *
from itertools import permutations
import re
def produce_mismatches():
""" Helper function to produce pairs of paths which have lexographical/path order mismatches"""
letters = list("abc/+")
paths = filter(lambda p: re.search("//", p) is None, map(lambda p: "/"+p, map(lambda s: reduce(lambda x,y:x+y, s), permutations(letters, 5))))
combos = list(combinations(paths,2))
mismatches = filter(lambda (x,y): bool(x<y) != bool(Path(x) < Path(y)), combos)
return mismatches
def test_mismatches_possible():
assert len(produce_mismatches()) > 0
def makeLink(path):
return {"path": path, "csum": "00000000000000000000000000000000", "type": "link"}
def test_tree_diff_order():
name_a = "/a/+b"
name_b = "/a+/b"
path_a = Path(name_a)
path_b = Path(name_b)
link_a = makeLink(name_a)
link_b = makeLink(name_b)
left = KeySnapshot([link_a], "left", None)
right = KeySnapshot([link_b], "right", None)
root = Path("/")
diff = tree_diff(left, right)
paths = map(lambda change: change.path(root), diff)
assert paths == [path_a, path_b]
|
Add tests for lex order issue.from farmfs.volume import *
from itertools import permutations
import re
def produce_mismatches():
""" Helper function to produce pairs of paths which have lexographical/path order mismatches"""
letters = list("abc/+")
paths = filter(lambda p: re.search("//", p) is None, map(lambda p: "/"+p, map(lambda s: reduce(lambda x,y:x+y, s), permutations(letters, 5))))
combos = list(combinations(paths,2))
mismatches = filter(lambda (x,y): bool(x<y) != bool(Path(x) < Path(y)), combos)
return mismatches
def test_mismatches_possible():
assert len(produce_mismatches()) > 0
def makeLink(path):
return {"path": path, "csum": "00000000000000000000000000000000", "type": "link"}
def test_tree_diff_order():
name_a = "/a/+b"
name_b = "/a+/b"
path_a = Path(name_a)
path_b = Path(name_b)
link_a = makeLink(name_a)
link_b = makeLink(name_b)
left = KeySnapshot([link_a], "left", None)
right = KeySnapshot([link_b], "right", None)
root = Path("/")
diff = tree_diff(left, right)
paths = map(lambda change: change.path(root), diff)
assert paths == [path_a, path_b]
|
<commit_before><commit_msg>Add tests for lex order issue.<commit_after>from farmfs.volume import *
from itertools import permutations
import re
def produce_mismatches():
""" Helper function to produce pairs of paths which have lexographical/path order mismatches"""
letters = list("abc/+")
paths = filter(lambda p: re.search("//", p) is None, map(lambda p: "/"+p, map(lambda s: reduce(lambda x,y:x+y, s), permutations(letters, 5))))
combos = list(combinations(paths,2))
mismatches = filter(lambda (x,y): bool(x<y) != bool(Path(x) < Path(y)), combos)
return mismatches
def test_mismatches_possible():
assert len(produce_mismatches()) > 0
def makeLink(path):
return {"path": path, "csum": "00000000000000000000000000000000", "type": "link"}
def test_tree_diff_order():
name_a = "/a/+b"
name_b = "/a+/b"
path_a = Path(name_a)
path_b = Path(name_b)
link_a = makeLink(name_a)
link_b = makeLink(name_b)
left = KeySnapshot([link_a], "left", None)
right = KeySnapshot([link_b], "right", None)
root = Path("/")
diff = tree_diff(left, right)
paths = map(lambda change: change.path(root), diff)
assert paths == [path_a, path_b]
|
|
e65bc2b46eacca05720370d000564bc4a51de223
|
seleniumbase/common/unobfuscate.py
|
seleniumbase/common/unobfuscate.py
|
"""
Unobfuscates an encrypted string/password into a plaintext string/password.
Usage:
python unobfuscate.py
Then enter the encrypted string/password.
The result is a plaintext string/password.
Works the same as obfuscate.py, but doesn't mask the input.
"""
from seleniumbase.common import encryption
import time
def main():
try:
# Python 2 has the raw_input() method. Python 3 does not.
input_method = raw_input # noqa: ignore=F821
except:
input_method = input # Using Python 3
try:
while(1):
code = input_method(
'\nEnter obfuscated/encrypted string: (CTRL-C to exit):\n')
print("\nHere is the unobfuscated string/password:")
time.sleep(0.07)
print(encryption.decrypt(code))
time.sleep(0.21)
except:
print("\nExiting...\n")
if __name__ == "__main__":
main()
|
Add the user interface to reverse string obfuscation
|
Add the user interface to reverse string obfuscation
|
Python
|
mit
|
mdmintz/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase,mdmintz/seleniumspot,seleniumbase/SeleniumBase,mdmintz/seleniumspot
|
Add the user interface to reverse string obfuscation
|
"""
Unobfuscates an encrypted string/password into a plaintext string/password.
Usage:
python unobfuscate.py
Then enter the encrypted string/password.
The result is a plaintext string/password.
Works the same as obfuscate.py, but doesn't mask the input.
"""
from seleniumbase.common import encryption
import time
def main():
try:
# Python 2 has the raw_input() method. Python 3 does not.
input_method = raw_input # noqa: ignore=F821
except:
input_method = input # Using Python 3
try:
while(1):
code = input_method(
'\nEnter obfuscated/encrypted string: (CTRL-C to exit):\n')
print("\nHere is the unobfuscated string/password:")
time.sleep(0.07)
print(encryption.decrypt(code))
time.sleep(0.21)
except:
print("\nExiting...\n")
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add the user interface to reverse string obfuscation<commit_after>
|
"""
Unobfuscates an encrypted string/password into a plaintext string/password.
Usage:
python unobfuscate.py
Then enter the encrypted string/password.
The result is a plaintext string/password.
Works the same as obfuscate.py, but doesn't mask the input.
"""
from seleniumbase.common import encryption
import time
def main():
try:
# Python 2 has the raw_input() method. Python 3 does not.
input_method = raw_input # noqa: ignore=F821
except:
input_method = input # Using Python 3
try:
while(1):
code = input_method(
'\nEnter obfuscated/encrypted string: (CTRL-C to exit):\n')
print("\nHere is the unobfuscated string/password:")
time.sleep(0.07)
print(encryption.decrypt(code))
time.sleep(0.21)
except:
print("\nExiting...\n")
if __name__ == "__main__":
main()
|
Add the user interface to reverse string obfuscation"""
Unobfuscates an encrypted string/password into a plaintext string/password.
Usage:
python unobfuscate.py
Then enter the encrypted string/password.
The result is a plaintext string/password.
Works the same as obfuscate.py, but doesn't mask the input.
"""
from seleniumbase.common import encryption
import time
def main():
try:
# Python 2 has the raw_input() method. Python 3 does not.
input_method = raw_input # noqa: ignore=F821
except:
input_method = input # Using Python 3
try:
while(1):
code = input_method(
'\nEnter obfuscated/encrypted string: (CTRL-C to exit):\n')
print("\nHere is the unobfuscated string/password:")
time.sleep(0.07)
print(encryption.decrypt(code))
time.sleep(0.21)
except:
print("\nExiting...\n")
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add the user interface to reverse string obfuscation<commit_after>"""
Unobfuscates an encrypted string/password into a plaintext string/password.
Usage:
python unobfuscate.py
Then enter the encrypted string/password.
The result is a plaintext string/password.
Works the same as obfuscate.py, but doesn't mask the input.
"""
from seleniumbase.common import encryption
import time
def main():
try:
# Python 2 has the raw_input() method. Python 3 does not.
input_method = raw_input # noqa: ignore=F821
except:
input_method = input # Using Python 3
try:
while(1):
code = input_method(
'\nEnter obfuscated/encrypted string: (CTRL-C to exit):\n')
print("\nHere is the unobfuscated string/password:")
time.sleep(0.07)
print(encryption.decrypt(code))
time.sleep(0.21)
except:
print("\nExiting...\n")
if __name__ == "__main__":
main()
|
|
0f30fb878ae0c493eb117e0b27a33937bb90e52c
|
examples/use_socket.py
|
examples/use_socket.py
|
"""A basic example of using hug.use.Socket to return data from raw sockets"""
import hug
import socket
import struct
import time
http_socket = hug.use.Socket(connect_to=('www.google.com', 80), proto='tcp', pool=4, timeout=10.0)
ntp_service = hug.use.Socket(connect_to=('127.0.0.1', 123), proto='udp', pool=4, timeout=10.0)
EPOCH_START = 2208988800
@hug.get()
def get_time():
"""Get time from a locally running NTP server"""
time_request = '\x1b' + 47 * '\0'
now = struct.unpack("!12I", ntp_service.request(time_request, timeout=5.0).data.read())[10]
return time.ctime(now - EPOCH_START)
@hug.get()
def reverse_http_proxy(length:int=100):
"""Simple reverse http proxy function that returns data/html from another http server (via sockets)
only drawback is the peername is static, and currently does not support being changed.
Example: curl localhost:8000/reverse_http_proxy?length=400"""
http_request = """
GET / HTTP/1.0\r\n\r\n
Host: www.google.com\r\n\r\n
\r\n\r\n
"""
return http_socket.request(http_request, timeout=5.0).data.read()[0:length]
|
Add example code for using hug.use.Socket with udp and tcp
|
Add example code for using hug.use.Socket with udp and tcp
|
Python
|
mit
|
MuhammadAlkarouri/hug,MuhammadAlkarouri/hug,timothycrosley/hug,MuhammadAlkarouri/hug,timothycrosley/hug,timothycrosley/hug
|
Add example code for using hug.use.Socket with udp and tcp
|
"""A basic example of using hug.use.Socket to return data from raw sockets"""
import hug
import socket
import struct
import time
http_socket = hug.use.Socket(connect_to=('www.google.com', 80), proto='tcp', pool=4, timeout=10.0)
ntp_service = hug.use.Socket(connect_to=('127.0.0.1', 123), proto='udp', pool=4, timeout=10.0)
EPOCH_START = 2208988800
@hug.get()
def get_time():
"""Get time from a locally running NTP server"""
time_request = '\x1b' + 47 * '\0'
now = struct.unpack("!12I", ntp_service.request(time_request, timeout=5.0).data.read())[10]
return time.ctime(now - EPOCH_START)
@hug.get()
def reverse_http_proxy(length:int=100):
"""Simple reverse http proxy function that returns data/html from another http server (via sockets)
only drawback is the peername is static, and currently does not support being changed.
Example: curl localhost:8000/reverse_http_proxy?length=400"""
http_request = """
GET / HTTP/1.0\r\n\r\n
Host: www.google.com\r\n\r\n
\r\n\r\n
"""
return http_socket.request(http_request, timeout=5.0).data.read()[0:length]
|
<commit_before><commit_msg>Add example code for using hug.use.Socket with udp and tcp<commit_after>
|
"""A basic example of using hug.use.Socket to return data from raw sockets"""
import hug
import socket
import struct
import time
http_socket = hug.use.Socket(connect_to=('www.google.com', 80), proto='tcp', pool=4, timeout=10.0)
ntp_service = hug.use.Socket(connect_to=('127.0.0.1', 123), proto='udp', pool=4, timeout=10.0)
EPOCH_START = 2208988800
@hug.get()
def get_time():
"""Get time from a locally running NTP server"""
time_request = '\x1b' + 47 * '\0'
now = struct.unpack("!12I", ntp_service.request(time_request, timeout=5.0).data.read())[10]
return time.ctime(now - EPOCH_START)
@hug.get()
def reverse_http_proxy(length:int=100):
"""Simple reverse http proxy function that returns data/html from another http server (via sockets)
only drawback is the peername is static, and currently does not support being changed.
Example: curl localhost:8000/reverse_http_proxy?length=400"""
http_request = """
GET / HTTP/1.0\r\n\r\n
Host: www.google.com\r\n\r\n
\r\n\r\n
"""
return http_socket.request(http_request, timeout=5.0).data.read()[0:length]
|
Add example code for using hug.use.Socket with udp and tcp"""A basic example of using hug.use.Socket to return data from raw sockets"""
import hug
import socket
import struct
import time
http_socket = hug.use.Socket(connect_to=('www.google.com', 80), proto='tcp', pool=4, timeout=10.0)
ntp_service = hug.use.Socket(connect_to=('127.0.0.1', 123), proto='udp', pool=4, timeout=10.0)
EPOCH_START = 2208988800
@hug.get()
def get_time():
"""Get time from a locally running NTP server"""
time_request = '\x1b' + 47 * '\0'
now = struct.unpack("!12I", ntp_service.request(time_request, timeout=5.0).data.read())[10]
return time.ctime(now - EPOCH_START)
@hug.get()
def reverse_http_proxy(length:int=100):
"""Simple reverse http proxy function that returns data/html from another http server (via sockets)
only drawback is the peername is static, and currently does not support being changed.
Example: curl localhost:8000/reverse_http_proxy?length=400"""
http_request = """
GET / HTTP/1.0\r\n\r\n
Host: www.google.com\r\n\r\n
\r\n\r\n
"""
return http_socket.request(http_request, timeout=5.0).data.read()[0:length]
|
<commit_before><commit_msg>Add example code for using hug.use.Socket with udp and tcp<commit_after>"""A basic example of using hug.use.Socket to return data from raw sockets"""
import hug
import socket
import struct
import time
http_socket = hug.use.Socket(connect_to=('www.google.com', 80), proto='tcp', pool=4, timeout=10.0)
ntp_service = hug.use.Socket(connect_to=('127.0.0.1', 123), proto='udp', pool=4, timeout=10.0)
EPOCH_START = 2208988800
@hug.get()
def get_time():
"""Get time from a locally running NTP server"""
time_request = '\x1b' + 47 * '\0'
now = struct.unpack("!12I", ntp_service.request(time_request, timeout=5.0).data.read())[10]
return time.ctime(now - EPOCH_START)
@hug.get()
def reverse_http_proxy(length:int=100):
"""Simple reverse http proxy function that returns data/html from another http server (via sockets)
only drawback is the peername is static, and currently does not support being changed.
Example: curl localhost:8000/reverse_http_proxy?length=400"""
http_request = """
GET / HTTP/1.0\r\n\r\n
Host: www.google.com\r\n\r\n
\r\n\r\n
"""
return http_socket.request(http_request, timeout=5.0).data.read()[0:length]
|
|
f297b67920f7406c6fbe4bbfabf99c7bcb5a8d05
|
find_unimplemented_attacks.py
|
find_unimplemented_attacks.py
|
#!/usr/bin/env python3
"""
This script is meant to help discover attack types/damages that have not been
added to Pinobot unixmain.patch.
Example:
./find_unimplemented_attacks.py nethack/include/monattk.h pinobot/patch/unixmain.patch
"""
import re
import sys
AT_RE = re.compile('.*(AT|AD)_([0-9A-Za-z]+).*')
if __name__ == '__main__':
seen_ats1 = set()
seen_ats2 = set()
with open(sys.argv[1], 'rt') as f:
for line in f:
m = AT_RE.match(line)
if not m:
continue
seen_ats1.add(f'{m.group(1)}_{m.group(2)}')
with open(sys.argv[2], 'rt') as f:
for line in f:
m = AT_RE.match(line)
if not m:
continue
seen_ats2.add(f'{m.group(1)}_{m.group(2)}')
# What attacks/damages are missing?
missing = seen_ats1 - seen_ats2
# Go through the first file again, but if it contains a missing entry then
# print the entire line.
with open(sys.argv[1], 'rt') as f:
for line in f:
for m in missing:
if m in line:
print(line.rstrip('\n'))
break
|
Add the tool that lists all unimplemented AD_/AT_ flags.
|
Add the tool that lists all unimplemented AD_/AT_ flags.
|
Python
|
mit
|
UnNetHack/pinobot,UnNetHack/pinobot
|
Add the tool that lists all unimplemented AD_/AT_ flags.
|
#!/usr/bin/env python3
"""
This script is meant to help discover attack types/damages that have not been
added to Pinobot unixmain.patch.
Example:
./find_unimplemented_attacks.py nethack/include/monattk.h pinobot/patch/unixmain.patch
"""
import re
import sys
AT_RE = re.compile('.*(AT|AD)_([0-9A-Za-z]+).*')
if __name__ == '__main__':
seen_ats1 = set()
seen_ats2 = set()
with open(sys.argv[1], 'rt') as f:
for line in f:
m = AT_RE.match(line)
if not m:
continue
seen_ats1.add(f'{m.group(1)}_{m.group(2)}')
with open(sys.argv[2], 'rt') as f:
for line in f:
m = AT_RE.match(line)
if not m:
continue
seen_ats2.add(f'{m.group(1)}_{m.group(2)}')
# What attacks/damages are missing?
missing = seen_ats1 - seen_ats2
# Go through the first file again, but if it contains a missing entry then
# print the entire line.
with open(sys.argv[1], 'rt') as f:
for line in f:
for m in missing:
if m in line:
print(line.rstrip('\n'))
break
|
<commit_before><commit_msg>Add the tool that lists all unimplemented AD_/AT_ flags.<commit_after>
|
#!/usr/bin/env python3
"""
This script is meant to help discover attack types/damages that have not been
added to Pinobot unixmain.patch.
Example:
./find_unimplemented_attacks.py nethack/include/monattk.h pinobot/patch/unixmain.patch
"""
import re
import sys
AT_RE = re.compile('.*(AT|AD)_([0-9A-Za-z]+).*')
if __name__ == '__main__':
seen_ats1 = set()
seen_ats2 = set()
with open(sys.argv[1], 'rt') as f:
for line in f:
m = AT_RE.match(line)
if not m:
continue
seen_ats1.add(f'{m.group(1)}_{m.group(2)}')
with open(sys.argv[2], 'rt') as f:
for line in f:
m = AT_RE.match(line)
if not m:
continue
seen_ats2.add(f'{m.group(1)}_{m.group(2)}')
# What attacks/damages are missing?
missing = seen_ats1 - seen_ats2
# Go through the first file again, but if it contains a missing entry then
# print the entire line.
with open(sys.argv[1], 'rt') as f:
for line in f:
for m in missing:
if m in line:
print(line.rstrip('\n'))
break
|
Add the tool that lists all unimplemented AD_/AT_ flags.#!/usr/bin/env python3
"""
This script is meant to help discover attack types/damages that have not been
added to Pinobot unixmain.patch.
Example:
./find_unimplemented_attacks.py nethack/include/monattk.h pinobot/patch/unixmain.patch
"""
import re
import sys
AT_RE = re.compile('.*(AT|AD)_([0-9A-Za-z]+).*')
if __name__ == '__main__':
seen_ats1 = set()
seen_ats2 = set()
with open(sys.argv[1], 'rt') as f:
for line in f:
m = AT_RE.match(line)
if not m:
continue
seen_ats1.add(f'{m.group(1)}_{m.group(2)}')
with open(sys.argv[2], 'rt') as f:
for line in f:
m = AT_RE.match(line)
if not m:
continue
seen_ats2.add(f'{m.group(1)}_{m.group(2)}')
# What attacks/damages are missing?
missing = seen_ats1 - seen_ats2
# Go through the first file again, but if it contains a missing entry then
# print the entire line.
with open(sys.argv[1], 'rt') as f:
for line in f:
for m in missing:
if m in line:
print(line.rstrip('\n'))
break
|
<commit_before><commit_msg>Add the tool that lists all unimplemented AD_/AT_ flags.<commit_after>#!/usr/bin/env python3
"""
This script is meant to help discover attack types/damages that have not been
added to Pinobot unixmain.patch.
Example:
./find_unimplemented_attacks.py nethack/include/monattk.h pinobot/patch/unixmain.patch
"""
import re
import sys
AT_RE = re.compile('.*(AT|AD)_([0-9A-Za-z]+).*')
if __name__ == '__main__':
seen_ats1 = set()
seen_ats2 = set()
with open(sys.argv[1], 'rt') as f:
for line in f:
m = AT_RE.match(line)
if not m:
continue
seen_ats1.add(f'{m.group(1)}_{m.group(2)}')
with open(sys.argv[2], 'rt') as f:
for line in f:
m = AT_RE.match(line)
if not m:
continue
seen_ats2.add(f'{m.group(1)}_{m.group(2)}')
# What attacks/damages are missing?
missing = seen_ats1 - seen_ats2
# Go through the first file again, but if it contains a missing entry then
# print the entire line.
with open(sys.argv[1], 'rt') as f:
for line in f:
for m in missing:
if m in line:
print(line.rstrip('\n'))
break
|
|
209774dfff05f0716cccca61bd8baa7eb456badf
|
apps/news/tests.py
|
apps/news/tests.py
|
from django import test
from django.http import QueryDict
from mock import patch
from news.models import Subscriber
from news.tasks import SET
class UserTest(test.TestCase):
@patch('news.views.update_user')
def test_user_set(self, update_user):
"""If the user view is sent a POST request, it should attempt to update
the user's info.
"""
subscriber = Subscriber(email='test@example.com', token='asdf')
subscriber.save()
self.client.post('/news/user/asdf/', {'fake': 'data'})
update_user.assert_called_with(QueryDict('fake=data'),
'test@example.com', SET, True)
|
Add test for user view.
|
Add test for user view.
|
Python
|
mpl-2.0
|
glogiotatidis/basket,pmclanahan/basket,meandavejustice/basket,glogiotatidis/basket,pmclanahan/basket,meandavejustice/basket,glogiotatidis/basket
|
Add test for user view.
|
from django import test
from django.http import QueryDict
from mock import patch
from news.models import Subscriber
from news.tasks import SET
class UserTest(test.TestCase):
@patch('news.views.update_user')
def test_user_set(self, update_user):
"""If the user view is sent a POST request, it should attempt to update
the user's info.
"""
subscriber = Subscriber(email='test@example.com', token='asdf')
subscriber.save()
self.client.post('/news/user/asdf/', {'fake': 'data'})
update_user.assert_called_with(QueryDict('fake=data'),
'test@example.com', SET, True)
|
<commit_before><commit_msg>Add test for user view.<commit_after>
|
from django import test
from django.http import QueryDict
from mock import patch
from news.models import Subscriber
from news.tasks import SET
class UserTest(test.TestCase):
@patch('news.views.update_user')
def test_user_set(self, update_user):
"""If the user view is sent a POST request, it should attempt to update
the user's info.
"""
subscriber = Subscriber(email='test@example.com', token='asdf')
subscriber.save()
self.client.post('/news/user/asdf/', {'fake': 'data'})
update_user.assert_called_with(QueryDict('fake=data'),
'test@example.com', SET, True)
|
Add test for user view.from django import test
from django.http import QueryDict
from mock import patch
from news.models import Subscriber
from news.tasks import SET
class UserTest(test.TestCase):
@patch('news.views.update_user')
def test_user_set(self, update_user):
"""If the user view is sent a POST request, it should attempt to update
the user's info.
"""
subscriber = Subscriber(email='test@example.com', token='asdf')
subscriber.save()
self.client.post('/news/user/asdf/', {'fake': 'data'})
update_user.assert_called_with(QueryDict('fake=data'),
'test@example.com', SET, True)
|
<commit_before><commit_msg>Add test for user view.<commit_after>from django import test
from django.http import QueryDict
from mock import patch
from news.models import Subscriber
from news.tasks import SET
class UserTest(test.TestCase):
@patch('news.views.update_user')
def test_user_set(self, update_user):
"""If the user view is sent a POST request, it should attempt to update
the user's info.
"""
subscriber = Subscriber(email='test@example.com', token='asdf')
subscriber.save()
self.client.post('/news/user/asdf/', {'fake': 'data'})
update_user.assert_called_with(QueryDict('fake=data'),
'test@example.com', SET, True)
|
|
a8b6e3fd52796b17f5fd287e82364406f23461e0
|
scripts/svmlight_sortcols.py
|
scripts/svmlight_sortcols.py
|
from sys import argv
from operator import itemgetter
if __name__ == "__main__":
if (len(argv) != 3):
print("Usage: " + argv[0] + " <input.svm> <output.svm>")
print("Example:")
print("input.svm:")
print("1 24:1 12:1 55:1")
print("0 84:1 82:1 15:1")
print("...")
print("output.svm:")
print("1 12:1 24:1 55:1")
print("0 15:1 82:1 84:1")
print("...")
exit(1)
input = argv[1]
output = argv[2]
with open(input, "r") as f:
for line in f:
splits = line.split()
target = splits[0]
features = splits[1:]
d = {int(a[0]):int(a[1]) for a in [feat.split(':') for feat in features]}
dsorted = sorted(d.iteritems(), key=itemgetter(0), reverse=False)
with open(output, "a") as g:
g.write(target + ' ')
for (i,j) in dsorted:
g.write(str(i)+':'+str(j)+' ')
g.write('\n')
|
Add helper script to sort svmlight files by column keys.
|
Add helper script to sort svmlight files by column keys.
|
Python
|
apache-2.0
|
YzPaul3/h2o-3,bospetersen/h2o-3,ChristosChristofidis/h2o-3,mrgloom/h2o-3,kyoren/https-github.com-h2oai-h2o-3,h2oai/h2o-3,michalkurka/h2o-3,junwucs/h2o-3,h2oai/h2o-3,nilbody/h2o-3,mathemage/h2o-3,kyoren/https-github.com-h2oai-h2o-3,mrgloom/h2o-3,tarasane/h2o-3,pchmieli/h2o-3,weaver-viii/h2o-3,ChristosChristofidis/h2o-3,junwucs/h2o-3,ChristosChristofidis/h2o-3,printedheart/h2o-3,michalkurka/h2o-3,bospetersen/h2o-3,datachand/h2o-3,h2oai/h2o-dev,tarasane/h2o-3,weaver-viii/h2o-3,PawarPawan/h2o-v3,kyoren/https-github.com-h2oai-h2o-3,pchmieli/h2o-3,spennihana/h2o-3,brightchen/h2o-3,nilbody/h2o-3,pchmieli/h2o-3,YzPaul3/h2o-3,nilbody/h2o-3,bospetersen/h2o-3,brightchen/h2o-3,kyoren/https-github.com-h2oai-h2o-3,mrgloom/h2o-3,PawarPawan/h2o-v3,michalkurka/h2o-3,michalkurka/h2o-3,ChristosChristofidis/h2o-3,brightchen/h2o-3,PawarPawan/h2o-v3,printedheart/h2o-3,datachand/h2o-3,PawarPawan/h2o-v3,madmax983/h2o-3,michalkurka/h2o-3,h2oai/h2o-3,mrgloom/h2o-3,nilbody/h2o-3,michalkurka/h2o-3,bospetersen/h2o-3,h2oai/h2o-dev,kyoren/https-github.com-h2oai-h2o-3,printedheart/h2o-3,michalkurka/h2o-3,brightchen/h2o-3,spennihana/h2o-3,weaver-viii/h2o-3,h2oai/h2o-3,printedheart/h2o-3,YzPaul3/h2o-3,datachand/h2o-3,h2oai/h2o-3,junwucs/h2o-3,tarasane/h2o-3,spennihana/h2o-3,h2oai/h2o-dev,h2oai/h2o-dev,datachand/h2o-3,YzPaul3/h2o-3,printedheart/h2o-3,mathemage/h2o-3,nilbody/h2o-3,kyoren/https-github.com-h2oai-h2o-3,brightchen/h2o-3,YzPaul3/h2o-3,tarasane/h2o-3,tarasane/h2o-3,weaver-viii/h2o-3,jangorecki/h2o-3,pchmieli/h2o-3,datachand/h2o-3,PawarPawan/h2o-v3,weaver-viii/h2o-3,jangorecki/h2o-3,madmax983/h2o-3,kyoren/https-github.com-h2oai-h2o-3,nilbody/h2o-3,spennihana/h2o-3,h2oai/h2o-dev,junwucs/h2o-3,mathemage/h2o-3,weaver-viii/h2o-3,bospetersen/h2o-3,tarasane/h2o-3,printedheart/h2o-3,bospetersen/h2o-3,YzPaul3/h2o-3,madmax983/h2o-3,jangorecki/h2o-3,h2oai/h2o-dev,mrgloom/h2o-3,nilbody/h2o-3,mathemage/h2o-3,tarasane/h2o-3,jangorecki/h2o-3,YzPaul3/h2o-3,madmax983/h2o-3,ChristosChristofidis/h2o-3,brightchen/h2o-3,mathemage/h2o-3,pchmieli/h2o-3,bospetersen/h2o-3,junwucs/h2o-3,printedheart/h2o-3,ChristosChristofidis/h2o-3,spennihana/h2o-3,datachand/h2o-3,spennihana/h2o-3,junwucs/h2o-3,jangorecki/h2o-3,mrgloom/h2o-3,h2oai/h2o-3,brightchen/h2o-3,h2oai/h2o-3,PawarPawan/h2o-v3,pchmieli/h2o-3,spennihana/h2o-3,madmax983/h2o-3,pchmieli/h2o-3,h2oai/h2o-3,jangorecki/h2o-3,mrgloom/h2o-3,h2oai/h2o-dev,mathemage/h2o-3,datachand/h2o-3,madmax983/h2o-3,madmax983/h2o-3,mathemage/h2o-3,junwucs/h2o-3,jangorecki/h2o-3,weaver-viii/h2o-3,ChristosChristofidis/h2o-3,PawarPawan/h2o-v3
|
Add helper script to sort svmlight files by column keys.
|
from sys import argv
from operator import itemgetter
if __name__ == "__main__":
if (len(argv) != 3):
print("Usage: " + argv[0] + " <input.svm> <output.svm>")
print("Example:")
print("input.svm:")
print("1 24:1 12:1 55:1")
print("0 84:1 82:1 15:1")
print("...")
print("output.svm:")
print("1 12:1 24:1 55:1")
print("0 15:1 82:1 84:1")
print("...")
exit(1)
input = argv[1]
output = argv[2]
with open(input, "r") as f:
for line in f:
splits = line.split()
target = splits[0]
features = splits[1:]
d = {int(a[0]):int(a[1]) for a in [feat.split(':') for feat in features]}
dsorted = sorted(d.iteritems(), key=itemgetter(0), reverse=False)
with open(output, "a") as g:
g.write(target + ' ')
for (i,j) in dsorted:
g.write(str(i)+':'+str(j)+' ')
g.write('\n')
|
<commit_before><commit_msg>Add helper script to sort svmlight files by column keys.<commit_after>
|
from sys import argv
from operator import itemgetter
if __name__ == "__main__":
if (len(argv) != 3):
print("Usage: " + argv[0] + " <input.svm> <output.svm>")
print("Example:")
print("input.svm:")
print("1 24:1 12:1 55:1")
print("0 84:1 82:1 15:1")
print("...")
print("output.svm:")
print("1 12:1 24:1 55:1")
print("0 15:1 82:1 84:1")
print("...")
exit(1)
input = argv[1]
output = argv[2]
with open(input, "r") as f:
for line in f:
splits = line.split()
target = splits[0]
features = splits[1:]
d = {int(a[0]):int(a[1]) for a in [feat.split(':') for feat in features]}
dsorted = sorted(d.iteritems(), key=itemgetter(0), reverse=False)
with open(output, "a") as g:
g.write(target + ' ')
for (i,j) in dsorted:
g.write(str(i)+':'+str(j)+' ')
g.write('\n')
|
Add helper script to sort svmlight files by column keys.from sys import argv
from operator import itemgetter
if __name__ == "__main__":
if (len(argv) != 3):
print("Usage: " + argv[0] + " <input.svm> <output.svm>")
print("Example:")
print("input.svm:")
print("1 24:1 12:1 55:1")
print("0 84:1 82:1 15:1")
print("...")
print("output.svm:")
print("1 12:1 24:1 55:1")
print("0 15:1 82:1 84:1")
print("...")
exit(1)
input = argv[1]
output = argv[2]
with open(input, "r") as f:
for line in f:
splits = line.split()
target = splits[0]
features = splits[1:]
d = {int(a[0]):int(a[1]) for a in [feat.split(':') for feat in features]}
dsorted = sorted(d.iteritems(), key=itemgetter(0), reverse=False)
with open(output, "a") as g:
g.write(target + ' ')
for (i,j) in dsorted:
g.write(str(i)+':'+str(j)+' ')
g.write('\n')
|
<commit_before><commit_msg>Add helper script to sort svmlight files by column keys.<commit_after>from sys import argv
from operator import itemgetter
if __name__ == "__main__":
if (len(argv) != 3):
print("Usage: " + argv[0] + " <input.svm> <output.svm>")
print("Example:")
print("input.svm:")
print("1 24:1 12:1 55:1")
print("0 84:1 82:1 15:1")
print("...")
print("output.svm:")
print("1 12:1 24:1 55:1")
print("0 15:1 82:1 84:1")
print("...")
exit(1)
input = argv[1]
output = argv[2]
with open(input, "r") as f:
for line in f:
splits = line.split()
target = splits[0]
features = splits[1:]
d = {int(a[0]):int(a[1]) for a in [feat.split(':') for feat in features]}
dsorted = sorted(d.iteritems(), key=itemgetter(0), reverse=False)
with open(output, "a") as g:
g.write(target + ' ')
for (i,j) in dsorted:
g.write(str(i)+':'+str(j)+' ')
g.write('\n')
|
|
3316c68bdd61cc817f558e1fdd5862f64fd80bbb
|
spreadflow_core/test/test_config.py
|
spreadflow_core/test/test_config.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
from tempfile import NamedTemporaryFile
from unittest import TestCase
from spreadflow_core.config import config_eval
from spreadflow_core.flow import Flowmap
class ConfigTestCase(TestCase):
def test_config_eval(self):
with NamedTemporaryFile(delete=False) as tmpfile:
tmpfile.write(b'from spreadflow_core.script import *')
flowmap = config_eval(tmpfile.name)
os.unlink(tmpfile.name)
self.assertIsInstance(flowmap, Flowmap)
|
Add test case for config_eval
|
Add test case for config_eval
|
Python
|
mit
|
znerol/spreadflow-core,spreadflow/spreadflow-core
|
Add test case for config_eval
|
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
from tempfile import NamedTemporaryFile
from unittest import TestCase
from spreadflow_core.config import config_eval
from spreadflow_core.flow import Flowmap
class ConfigTestCase(TestCase):
def test_config_eval(self):
with NamedTemporaryFile(delete=False) as tmpfile:
tmpfile.write(b'from spreadflow_core.script import *')
flowmap = config_eval(tmpfile.name)
os.unlink(tmpfile.name)
self.assertIsInstance(flowmap, Flowmap)
|
<commit_before><commit_msg>Add test case for config_eval<commit_after>
|
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
from tempfile import NamedTemporaryFile
from unittest import TestCase
from spreadflow_core.config import config_eval
from spreadflow_core.flow import Flowmap
class ConfigTestCase(TestCase):
def test_config_eval(self):
with NamedTemporaryFile(delete=False) as tmpfile:
tmpfile.write(b'from spreadflow_core.script import *')
flowmap = config_eval(tmpfile.name)
os.unlink(tmpfile.name)
self.assertIsInstance(flowmap, Flowmap)
|
Add test case for config_evalfrom __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
from tempfile import NamedTemporaryFile
from unittest import TestCase
from spreadflow_core.config import config_eval
from spreadflow_core.flow import Flowmap
class ConfigTestCase(TestCase):
def test_config_eval(self):
with NamedTemporaryFile(delete=False) as tmpfile:
tmpfile.write(b'from spreadflow_core.script import *')
flowmap = config_eval(tmpfile.name)
os.unlink(tmpfile.name)
self.assertIsInstance(flowmap, Flowmap)
|
<commit_before><commit_msg>Add test case for config_eval<commit_after>from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
from tempfile import NamedTemporaryFile
from unittest import TestCase
from spreadflow_core.config import config_eval
from spreadflow_core.flow import Flowmap
class ConfigTestCase(TestCase):
def test_config_eval(self):
with NamedTemporaryFile(delete=False) as tmpfile:
tmpfile.write(b'from spreadflow_core.script import *')
flowmap = config_eval(tmpfile.name)
os.unlink(tmpfile.name)
self.assertIsInstance(flowmap, Flowmap)
|
|
03f4ab8b7ab39f39b3584a1a7ce68db0f8b92cd4
|
tests/test_init.py
|
tests/test_init.py
|
# coding: utf-8
from __future__ import unicode_literals
import unittest
from textwrap import dedent
from conllu import parse, parse_tree
from conllu.compat import text
class TestParse(unittest.TestCase):
def test_multiple_sentences(self):
data = dedent("""\
1 The the DET DT Definite=Def|PronType=Art 4 det _ _
2 dog dog NOUN NN Number=Sing 5 nmod _ SpaceAfter=No
3 . . PUNCT . _ 5 punct _ _
1 The the DET DT Definite=Def|PronType=Art 4 det _ _
2 dog dog NOUN NN Number=Sing 5 nmod _ SpaceAfter=No
3 . . PUNCT . _ 5 punct _ _
""")
self.assertEqual(
text(parse(data)),
"[TokenList<The, dog, .>, TokenList<The, dog, .>]"
)
|
Add test for parsing multiple sentences.
|
Add test for parsing multiple sentences.
|
Python
|
mit
|
EmilStenstrom/conllu
|
Add test for parsing multiple sentences.
|
# coding: utf-8
from __future__ import unicode_literals
import unittest
from textwrap import dedent
from conllu import parse, parse_tree
from conllu.compat import text
class TestParse(unittest.TestCase):
def test_multiple_sentences(self):
data = dedent("""\
1 The the DET DT Definite=Def|PronType=Art 4 det _ _
2 dog dog NOUN NN Number=Sing 5 nmod _ SpaceAfter=No
3 . . PUNCT . _ 5 punct _ _
1 The the DET DT Definite=Def|PronType=Art 4 det _ _
2 dog dog NOUN NN Number=Sing 5 nmod _ SpaceAfter=No
3 . . PUNCT . _ 5 punct _ _
""")
self.assertEqual(
text(parse(data)),
"[TokenList<The, dog, .>, TokenList<The, dog, .>]"
)
|
<commit_before><commit_msg>Add test for parsing multiple sentences.<commit_after>
|
# coding: utf-8
from __future__ import unicode_literals
import unittest
from textwrap import dedent
from conllu import parse, parse_tree
from conllu.compat import text
class TestParse(unittest.TestCase):
def test_multiple_sentences(self):
data = dedent("""\
1 The the DET DT Definite=Def|PronType=Art 4 det _ _
2 dog dog NOUN NN Number=Sing 5 nmod _ SpaceAfter=No
3 . . PUNCT . _ 5 punct _ _
1 The the DET DT Definite=Def|PronType=Art 4 det _ _
2 dog dog NOUN NN Number=Sing 5 nmod _ SpaceAfter=No
3 . . PUNCT . _ 5 punct _ _
""")
self.assertEqual(
text(parse(data)),
"[TokenList<The, dog, .>, TokenList<The, dog, .>]"
)
|
Add test for parsing multiple sentences.# coding: utf-8
from __future__ import unicode_literals
import unittest
from textwrap import dedent
from conllu import parse, parse_tree
from conllu.compat import text
class TestParse(unittest.TestCase):
def test_multiple_sentences(self):
data = dedent("""\
1 The the DET DT Definite=Def|PronType=Art 4 det _ _
2 dog dog NOUN NN Number=Sing 5 nmod _ SpaceAfter=No
3 . . PUNCT . _ 5 punct _ _
1 The the DET DT Definite=Def|PronType=Art 4 det _ _
2 dog dog NOUN NN Number=Sing 5 nmod _ SpaceAfter=No
3 . . PUNCT . _ 5 punct _ _
""")
self.assertEqual(
text(parse(data)),
"[TokenList<The, dog, .>, TokenList<The, dog, .>]"
)
|
<commit_before><commit_msg>Add test for parsing multiple sentences.<commit_after># coding: utf-8
from __future__ import unicode_literals
import unittest
from textwrap import dedent
from conllu import parse, parse_tree
from conllu.compat import text
class TestParse(unittest.TestCase):
def test_multiple_sentences(self):
data = dedent("""\
1 The the DET DT Definite=Def|PronType=Art 4 det _ _
2 dog dog NOUN NN Number=Sing 5 nmod _ SpaceAfter=No
3 . . PUNCT . _ 5 punct _ _
1 The the DET DT Definite=Def|PronType=Art 4 det _ _
2 dog dog NOUN NN Number=Sing 5 nmod _ SpaceAfter=No
3 . . PUNCT . _ 5 punct _ _
""")
self.assertEqual(
text(parse(data)),
"[TokenList<The, dog, .>, TokenList<The, dog, .>]"
)
|
|
3c5760c089e5222b968bb41925838db1ecaaba3b
|
python/bte_gen.py
|
python/bte_gen.py
|
#Parser of Baka-Tsuki ePUB generator page
#python3
import requests, re
from bs4 import BeautifulSoup
class bte_gen(BeautifulSoup):
def __init__(self, query):
self.query = query
self.result = dict()
self.bteUrl = "http://ln.m-chan.org/v3/"
self.bteGenHead = requests.get(self.bteUrl)
self.bteGenHead.raise_for_status()
super().__init__(self.bteGenHead.text)
self.matches = self.find_all("a", text=re.compile(query, re.IGNORECASE))
def perform_search(self):
for match in self.matches:
listLinks = []
bteGenMatchContent = requests.get(self.bteUrl + match['href'])
listBooksSoup = BeautifulSoup(bteGenMatchContent.text)
listBooks = listBooksSoup.find_all("tr")
listBooks.pop(0) #pop header
for book in listBooks:
#volume_number = book.td.find_next("td").string
volume_number = book.td.next_sibling.next_sibling
volume_name = volume_number.next_sibling.next_sibling
volume_text = "Volume"
if volume_number.string:
volume_text+= " {}".format(volume_number.string)
if volume_name.string:
volume_text+= " {}".format(volume_name.string)
listLinks.append("{0}: {1}{2}".format(volume_text, self.bteUrl, book.td.a['href']))
if listLinks: self.result[match.string] = listLinks
return any(listLinks)
def print_findings(self):
for key, value in self.result.items():
print(key)
for val in value:
print(val)
|
Add baka-tsuki epub generator parser
|
Add baka-tsuki epub generator parser
|
Python
|
mit
|
DoumanAsh/collectionScripts,DoumanAsh/collectionScripts,DoumanAsh/collectionScripts
|
Add baka-tsuki epub generator parser
|
#Parser of Baka-Tsuki ePUB generator page
#python3
import requests, re
from bs4 import BeautifulSoup
class bte_gen(BeautifulSoup):
def __init__(self, query):
self.query = query
self.result = dict()
self.bteUrl = "http://ln.m-chan.org/v3/"
self.bteGenHead = requests.get(self.bteUrl)
self.bteGenHead.raise_for_status()
super().__init__(self.bteGenHead.text)
self.matches = self.find_all("a", text=re.compile(query, re.IGNORECASE))
def perform_search(self):
for match in self.matches:
listLinks = []
bteGenMatchContent = requests.get(self.bteUrl + match['href'])
listBooksSoup = BeautifulSoup(bteGenMatchContent.text)
listBooks = listBooksSoup.find_all("tr")
listBooks.pop(0) #pop header
for book in listBooks:
#volume_number = book.td.find_next("td").string
volume_number = book.td.next_sibling.next_sibling
volume_name = volume_number.next_sibling.next_sibling
volume_text = "Volume"
if volume_number.string:
volume_text+= " {}".format(volume_number.string)
if volume_name.string:
volume_text+= " {}".format(volume_name.string)
listLinks.append("{0}: {1}{2}".format(volume_text, self.bteUrl, book.td.a['href']))
if listLinks: self.result[match.string] = listLinks
return any(listLinks)
def print_findings(self):
for key, value in self.result.items():
print(key)
for val in value:
print(val)
|
<commit_before><commit_msg>Add baka-tsuki epub generator parser<commit_after>
|
#Parser of Baka-Tsuki ePUB generator page
#python3
import requests, re
from bs4 import BeautifulSoup
class bte_gen(BeautifulSoup):
def __init__(self, query):
self.query = query
self.result = dict()
self.bteUrl = "http://ln.m-chan.org/v3/"
self.bteGenHead = requests.get(self.bteUrl)
self.bteGenHead.raise_for_status()
super().__init__(self.bteGenHead.text)
self.matches = self.find_all("a", text=re.compile(query, re.IGNORECASE))
def perform_search(self):
for match in self.matches:
listLinks = []
bteGenMatchContent = requests.get(self.bteUrl + match['href'])
listBooksSoup = BeautifulSoup(bteGenMatchContent.text)
listBooks = listBooksSoup.find_all("tr")
listBooks.pop(0) #pop header
for book in listBooks:
#volume_number = book.td.find_next("td").string
volume_number = book.td.next_sibling.next_sibling
volume_name = volume_number.next_sibling.next_sibling
volume_text = "Volume"
if volume_number.string:
volume_text+= " {}".format(volume_number.string)
if volume_name.string:
volume_text+= " {}".format(volume_name.string)
listLinks.append("{0}: {1}{2}".format(volume_text, self.bteUrl, book.td.a['href']))
if listLinks: self.result[match.string] = listLinks
return any(listLinks)
def print_findings(self):
for key, value in self.result.items():
print(key)
for val in value:
print(val)
|
Add baka-tsuki epub generator parser#Parser of Baka-Tsuki ePUB generator page
#python3
import requests, re
from bs4 import BeautifulSoup
class bte_gen(BeautifulSoup):
def __init__(self, query):
self.query = query
self.result = dict()
self.bteUrl = "http://ln.m-chan.org/v3/"
self.bteGenHead = requests.get(self.bteUrl)
self.bteGenHead.raise_for_status()
super().__init__(self.bteGenHead.text)
self.matches = self.find_all("a", text=re.compile(query, re.IGNORECASE))
def perform_search(self):
for match in self.matches:
listLinks = []
bteGenMatchContent = requests.get(self.bteUrl + match['href'])
listBooksSoup = BeautifulSoup(bteGenMatchContent.text)
listBooks = listBooksSoup.find_all("tr")
listBooks.pop(0) #pop header
for book in listBooks:
#volume_number = book.td.find_next("td").string
volume_number = book.td.next_sibling.next_sibling
volume_name = volume_number.next_sibling.next_sibling
volume_text = "Volume"
if volume_number.string:
volume_text+= " {}".format(volume_number.string)
if volume_name.string:
volume_text+= " {}".format(volume_name.string)
listLinks.append("{0}: {1}{2}".format(volume_text, self.bteUrl, book.td.a['href']))
if listLinks: self.result[match.string] = listLinks
return any(listLinks)
def print_findings(self):
for key, value in self.result.items():
print(key)
for val in value:
print(val)
|
<commit_before><commit_msg>Add baka-tsuki epub generator parser<commit_after>#Parser of Baka-Tsuki ePUB generator page
#python3
import requests, re
from bs4 import BeautifulSoup
class bte_gen(BeautifulSoup):
def __init__(self, query):
self.query = query
self.result = dict()
self.bteUrl = "http://ln.m-chan.org/v3/"
self.bteGenHead = requests.get(self.bteUrl)
self.bteGenHead.raise_for_status()
super().__init__(self.bteGenHead.text)
self.matches = self.find_all("a", text=re.compile(query, re.IGNORECASE))
def perform_search(self):
for match in self.matches:
listLinks = []
bteGenMatchContent = requests.get(self.bteUrl + match['href'])
listBooksSoup = BeautifulSoup(bteGenMatchContent.text)
listBooks = listBooksSoup.find_all("tr")
listBooks.pop(0) #pop header
for book in listBooks:
#volume_number = book.td.find_next("td").string
volume_number = book.td.next_sibling.next_sibling
volume_name = volume_number.next_sibling.next_sibling
volume_text = "Volume"
if volume_number.string:
volume_text+= " {}".format(volume_number.string)
if volume_name.string:
volume_text+= " {}".format(volume_name.string)
listLinks.append("{0}: {1}{2}".format(volume_text, self.bteUrl, book.td.a['href']))
if listLinks: self.result[match.string] = listLinks
return any(listLinks)
def print_findings(self):
for key, value in self.result.items():
print(key)
for val in value:
print(val)
|
|
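A minimal usage sketch for the bte_gen parser in the record above, assuming the Baka-Tsuki generator site at ln.m-chan.org is still reachable and that the query matches at least one listed title; the query string itself is an invented example.

# Hypothetical query string; any title listed on the generator index page would do
searcher = bte_gen("Mahouka")
if searcher.perform_search():
    searcher.print_findings()
else:
    print("No ePUB volumes found for:", searcher.query)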
c9a97f15da4963884d13e654f901b331dae7886a
|
sqlobject/tests/test_class_hash.py
|
sqlobject/tests/test_class_hash.py
|
from sqlobject import *
from sqlobject.tests.dbtest import *
########################################
# Test hashing a column instance
########################################
class ClassHashTest(SQLObject):
name = StringCol(length=50, alternateID=True, dbName='name_col')
def test_class_hash():
setupClass(ClassHashTest)
ClassHashTest(name='bob')
conn = ClassHashTest._connection
b = ClassHashTest.byName('bob')
hashed = hash(b)
b.expire()
b = ClassHashTest.byName('bob')
assert hash(b) == hashed
|
Add simple test case for hash implementation
|
Add simple test case for hash implementation
|
Python
|
lgpl-2.1
|
drnlm/sqlobject,drnlm/sqlobject,sqlobject/sqlobject,sqlobject/sqlobject
|
Add simple test case for hash implementation
|
from sqlobject import *
from sqlobject.tests.dbtest import *
########################################
# Test hashing a column instance
########################################
class ClassHashTest(SQLObject):
name = StringCol(length=50, alternateID=True, dbName='name_col')
def test_class_hash():
setupClass(ClassHashTest)
ClassHashTest(name='bob')
conn = ClassHashTest._connection
b = ClassHashTest.byName('bob')
hashed = hash(b)
b.expire()
b = ClassHashTest.byName('bob')
assert hash(b) == hashed
|
<commit_before><commit_msg>Add simple test case for hash implementation<commit_after>
|
from sqlobject import *
from sqlobject.tests.dbtest import *
########################################
# Test hashing a column instance
########################################
class ClassHashTest(SQLObject):
name = StringCol(length=50, alternateID=True, dbName='name_col')
def test_class_hash():
setupClass(ClassHashTest)
ClassHashTest(name='bob')
conn = ClassHashTest._connection
b = ClassHashTest.byName('bob')
hashed = hash(b)
b.expire()
b = ClassHashTest.byName('bob')
assert hash(b) == hashed
|
Add simple test case for hash implementationfrom sqlobject import *
from sqlobject.tests.dbtest import *
########################################
# Test hashing a column instance
########################################
class ClassHashTest(SQLObject):
name = StringCol(length=50, alternateID=True, dbName='name_col')
def test_class_hash():
setupClass(ClassHashTest)
ClassHashTest(name='bob')
conn = ClassHashTest._connection
b = ClassHashTest.byName('bob')
hashed = hash(b)
b.expire()
b = ClassHashTest.byName('bob')
assert hash(b) == hashed
|
<commit_before><commit_msg>Add simple test case for hash implementation<commit_after>from sqlobject import *
from sqlobject.tests.dbtest import *
########################################
# Test hashing a column instance
########################################
class ClassHashTest(SQLObject):
name = StringCol(length=50, alternateID=True, dbName='name_col')
def test_class_hash():
setupClass(ClassHashTest)
ClassHashTest(name='bob')
conn = ClassHashTest._connection
b = ClassHashTest.byName('bob')
hashed = hash(b)
b.expire()
b = ClassHashTest.byName('bob')
assert hash(b) == hashed
|
|
93fa68985c211d43098b60a5b6409db8ae29c3de
|
aiozk/test/test_treecache.py
|
aiozk/test/test_treecache.py
|
import asyncio
import uuid
from .base import ZKBase
from ..exc import NoNode
class TestTreeCache(ZKBase):
async def setUp(self):
await super().setUp()
for attrname in ['basenode', 'node1', 'node2', 'subnode1', 'subnode2', 'subnode3']:
setattr(self, attrname, uuid.uuid4().hex)
for attrname in ['data1', 'data2', 'data3']:
setattr(self, attrname, uuid.uuid4().hex.encode())
self.basepath = '/{}'.format(self.basenode)
self.path1 = '{}/{}'.format(self.basepath, self.node1)
self.path2 = '{}/{}'.format(self.basepath, self.node2)
self.subpath1 = '{}/{}'.format(self.path2, self.subnode1)
self.subpath2 = '{}/{}'.format(self.path2, self.subnode2)
self.subpath3 = '{}/{}'.format(self.subpath1, self.subnode3)
await self.c.create(self.basepath)
await self.c.create(self.path1, self.data1)
await self.c.create(self.path2)
await self.c.create(self.subpath1, self.data2)
await self.c.create(self.subpath2)
await self.c.create(self.subpath3, self.data3)
async def tearDown(self):
await self.c.deleteall(self.basepath)
await super().tearDown()
async def test_cache(self):
cache = self.c.recipes.TreeCache(self.basenode)
cache.set_client(self.c)
await cache.start()
expected = {
self.node1: self.data1,
self.node2: {
self.subnode1: {
self.subnode3: self.data3
},
self.subnode2: None
}
}
self.assertDictEqual(cache.as_dict(), expected)
# we can't see this one in the dict:
assert getattr(getattr(cache.root, self.node2), self.subnode1).value == self.data2
newnode = uuid.uuid4().hex
newdata = [uuid.uuid4().hex.encode() for i in range(3)]
await self.c.create('{}/{}'.format(self.basepath, newnode), newdata[0]) # add node
await self.c.set_data(self.path1, newdata[1]) # change data
await self.c.set_data(self.subpath2, newdata[2]) # set data
await self.c.delete(self.subpath3) # delete node
await asyncio.sleep(0.1)
expected[newnode] = newdata[0]
expected[self.node1] = newdata[1]
expected[self.node2][self.subnode2] = newdata[2]
expected[self.node2][self.subnode1] = self.data2 # this one is now exposed
self.assertDictEqual(cache.as_dict(), expected)
await cache.stop()
|
Test for the TreeCache recipe
|
Test for the TreeCache recipe
|
Python
|
mit
|
tipsi/aiozk,tipsi/aiozk
|
Test for the TreeCache recipe
|
import asyncio
import uuid
from .base import ZKBase
from ..exc import NoNode
class TestTreeCache(ZKBase):
async def setUp(self):
await super().setUp()
for attrname in ['basenode', 'node1', 'node2', 'subnode1', 'subnode2', 'subnode3']:
setattr(self, attrname, uuid.uuid4().hex)
for attrname in ['data1', 'data2', 'data3']:
setattr(self, attrname, uuid.uuid4().hex.encode())
self.basepath = '/{}'.format(self.basenode)
self.path1 = '{}/{}'.format(self.basepath, self.node1)
self.path2 = '{}/{}'.format(self.basepath, self.node2)
self.subpath1 = '{}/{}'.format(self.path2, self.subnode1)
self.subpath2 = '{}/{}'.format(self.path2, self.subnode2)
self.subpath3 = '{}/{}'.format(self.subpath1, self.subnode3)
await self.c.create(self.basepath)
await self.c.create(self.path1, self.data1)
await self.c.create(self.path2)
await self.c.create(self.subpath1, self.data2)
await self.c.create(self.subpath2)
await self.c.create(self.subpath3, self.data3)
async def tearDown(self):
await self.c.deleteall(self.basepath)
await super().tearDown()
async def test_cache(self):
cache = self.c.recipes.TreeCache(self.basenode)
cache.set_client(self.c)
await cache.start()
expected = {
self.node1: self.data1,
self.node2: {
self.subnode1: {
self.subnode3: self.data3
},
self.subnode2: None
}
}
self.assertDictEqual(cache.as_dict(), expected)
# we can't see this one in the dict:
assert getattr(getattr(cache.root, self.node2), self.subnode1).value == self.data2
newnode = uuid.uuid4().hex
newdata = [uuid.uuid4().hex.encode() for i in range(3)]
await self.c.create('{}/{}'.format(self.basepath, newnode), newdata[0]) # add node
await self.c.set_data(self.path1, newdata[1]) # change data
await self.c.set_data(self.subpath2, newdata[2]) # set data
await self.c.delete(self.subpath3) # delete node
await asyncio.sleep(0.1)
expected[newnode] = newdata[0]
expected[self.node1] = newdata[1]
expected[self.node2][self.subnode2] = newdata[2]
expected[self.node2][self.subnode1] = self.data2 # this one is now exposed
self.assertDictEqual(cache.as_dict(), expected)
await cache.stop()
|
<commit_before><commit_msg>Test for the TreeCache recipe<commit_after>
|
import asyncio
import uuid
from .base import ZKBase
from ..exc import NoNode
class TestTreeCache(ZKBase):
async def setUp(self):
await super().setUp()
for attrname in ['basenode', 'node1', 'node2', 'subnode1', 'subnode2', 'subnode3']:
setattr(self, attrname, uuid.uuid4().hex)
for attrname in ['data1', 'data2', 'data3']:
setattr(self, attrname, uuid.uuid4().hex.encode())
self.basepath = '/{}'.format(self.basenode)
self.path1 = '{}/{}'.format(self.basepath, self.node1)
self.path2 = '{}/{}'.format(self.basepath, self.node2)
self.subpath1 = '{}/{}'.format(self.path2, self.subnode1)
self.subpath2 = '{}/{}'.format(self.path2, self.subnode2)
self.subpath3 = '{}/{}'.format(self.subpath1, self.subnode3)
await self.c.create(self.basepath)
await self.c.create(self.path1, self.data1)
await self.c.create(self.path2)
await self.c.create(self.subpath1, self.data2)
await self.c.create(self.subpath2)
await self.c.create(self.subpath3, self.data3)
async def tearDown(self):
await self.c.deleteall(self.basepath)
await super().tearDown()
async def test_cache(self):
cache = self.c.recipes.TreeCache(self.basenode)
cache.set_client(self.c)
await cache.start()
expected = {
self.node1: self.data1,
self.node2: {
self.subnode1: {
self.subnode3: self.data3
},
self.subnode2: None
}
}
self.assertDictEqual(cache.as_dict(), expected)
# we can't see this one in the dict:
assert getattr(getattr(cache.root, self.node2), self.subnode1).value == self.data2
newnode = uuid.uuid4().hex
newdata = [uuid.uuid4().hex.encode() for i in range(3)]
await self.c.create('{}/{}'.format(self.basepath, newnode), newdata[0]) # add node
await self.c.set_data(self.path1, newdata[1]) # change data
await self.c.set_data(self.subpath2, newdata[2]) # set data
await self.c.delete(self.subpath3) # delete node
await asyncio.sleep(0.1)
expected[newnode] = newdata[0]
expected[self.node1] = newdata[1]
expected[self.node2][self.subnode2] = newdata[2]
expected[self.node2][self.subnode1] = self.data2 # this one is now exposed
self.assertDictEqual(cache.as_dict(), expected)
await cache.stop()
|
Test for the TreeCache recipeimport asyncio
import uuid
from .base import ZKBase
from ..exc import NoNode
class TestTreeCache(ZKBase):
async def setUp(self):
await super().setUp()
for attrname in ['basenode', 'node1', 'node2', 'subnode1', 'subnode2', 'subnode3']:
setattr(self, attrname, uuid.uuid4().hex)
for attrname in ['data1', 'data2', 'data3']:
setattr(self, attrname, uuid.uuid4().hex.encode())
self.basepath = '/{}'.format(self.basenode)
self.path1 = '{}/{}'.format(self.basepath, self.node1)
self.path2 = '{}/{}'.format(self.basepath, self.node2)
self.subpath1 = '{}/{}'.format(self.path2, self.subnode1)
self.subpath2 = '{}/{}'.format(self.path2, self.subnode2)
self.subpath3 = '{}/{}'.format(self.subpath1, self.subnode3)
await self.c.create(self.basepath)
await self.c.create(self.path1, self.data1)
await self.c.create(self.path2)
await self.c.create(self.subpath1, self.data2)
await self.c.create(self.subpath2)
await self.c.create(self.subpath3, self.data3)
async def tearDown(self):
await self.c.deleteall(self.basepath)
await super().tearDown()
async def test_cache(self):
cache = self.c.recipes.TreeCache(self.basenode)
cache.set_client(self.c)
await cache.start()
expected = {
self.node1: self.data1,
self.node2: {
self.subnode1: {
self.subnode3: self.data3
},
self.subnode2: None
}
}
self.assertDictEqual(cache.as_dict(), expected)
# we can't see this one in the dict:
assert getattr(getattr(cache.root, self.node2), self.subnode1).value == self.data2
newnode = uuid.uuid4().hex
newdata = [uuid.uuid4().hex.encode() for i in range(3)]
await self.c.create('{}/{}'.format(self.basepath, newnode), newdata[0]) # add node
await self.c.set_data(self.path1, newdata[1]) # change data
await self.c.set_data(self.subpath2, newdata[2]) # set data
await self.c.delete(self.subpath3) # delete node
await asyncio.sleep(0.1)
expected[newnode] = newdata[0]
expected[self.node1] = newdata[1]
expected[self.node2][self.subnode2] = newdata[2]
expected[self.node2][self.subnode1] = self.data2 # this one is now exposed
self.assertDictEqual(cache.as_dict(), expected)
await cache.stop()
|
<commit_before><commit_msg>Test for the TreeCache recipe<commit_after>import asyncio
import uuid
from .base import ZKBase
from ..exc import NoNode
class TestTreeCache(ZKBase):
async def setUp(self):
await super().setUp()
for attrname in ['basenode', 'node1', 'node2', 'subnode1', 'subnode2', 'subnode3']:
setattr(self, attrname, uuid.uuid4().hex)
for attrname in ['data1', 'data2', 'data3']:
setattr(self, attrname, uuid.uuid4().hex.encode())
self.basepath = '/{}'.format(self.basenode)
self.path1 = '{}/{}'.format(self.basepath, self.node1)
self.path2 = '{}/{}'.format(self.basepath, self.node2)
self.subpath1 = '{}/{}'.format(self.path2, self.subnode1)
self.subpath2 = '{}/{}'.format(self.path2, self.subnode2)
self.subpath3 = '{}/{}'.format(self.subpath1, self.subnode3)
await self.c.create(self.basepath)
await self.c.create(self.path1, self.data1)
await self.c.create(self.path2)
await self.c.create(self.subpath1, self.data2)
await self.c.create(self.subpath2)
await self.c.create(self.subpath3, self.data3)
async def tearDown(self):
await self.c.deleteall(self.basepath)
await super().tearDown()
async def test_cache(self):
cache = self.c.recipes.TreeCache(self.basenode)
cache.set_client(self.c)
await cache.start()
expected = {
self.node1: self.data1,
self.node2: {
self.subnode1: {
self.subnode3: self.data3
},
self.subnode2: None
}
}
self.assertDictEqual(cache.as_dict(), expected)
# we can't see this one in the dict:
assert getattr(getattr(cache.root, self.node2), self.subnode1).value == self.data2
newnode = uuid.uuid4().hex
newdata = [uuid.uuid4().hex.encode() for i in range(3)]
await self.c.create('{}/{}'.format(self.basepath, newnode), newdata[0]) # add node
await self.c.set_data(self.path1, newdata[1]) # change data
await self.c.set_data(self.subpath2, newdata[2]) # set data
await self.c.delete(self.subpath3) # delete node
await asyncio.sleep(0.1)
expected[newnode] = newdata[0]
expected[self.node1] = newdata[1]
expected[self.node2][self.subnode2] = newdata[2]
expected[self.node2][self.subnode1] = self.data2 # this one is now exposed
self.assertDictEqual(cache.as_dict(), expected)
await cache.stop()
|
|
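For context, a sketch of how the TreeCache recipe exercised by this test might be used outside the test harness; the connection string, the 'config' base node and the final close() call are assumptions, while the recipes.TreeCache, set_client, start, as_dict and stop calls mirror the test above.

import asyncio
import aiozk

async def watch_config():
    client = aiozk.ZKClient('localhost:2181')   # hypothetical ensemble address
    await client.start()
    cache = client.recipes.TreeCache('config')  # hypothetical base znode
    cache.set_client(client)
    await cache.start()
    print(cache.as_dict())  # nested dict mirroring the watched subtree, as asserted in the test
    await cache.stop()
    await client.close()    # assumed shutdown call

asyncio.run(watch_config())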
7369f9374c11f28853cba4ecdd351c88d7e23d79
|
nessusapi/utils.py
|
nessusapi/utils.py
|
import inspect
def multiton(cls):
"""
Class decorator to make a class a multiton.
That is, there will be only (at most) one object existing for a given set
of initialization parameters.
"""
instances = {}
def getinstance(*args, **kwargs):
key = _gen_key(cls, *args, **kwargs)
if key not in instances:
instances[key] = cls(*args, **kwargs)
return instances[key]
return getinstance
kwd_mark = (object(), ) # separate args and kwargs with a unique object
def _gen_key(cls, *args, **kwargs):
new_args, new_kwargs = _normalize_args(cls.__init__, *args, **kwargs)
key = new_args
if new_kwargs:
key += kwd_mark
sorted_items = sorted(new_kwargs.items())
for item in sorted_items:
key += item
return tuple(key)
def _normalize_args(func, *args, **kwargs):
try:
arg_names, _, _, arg_defaults = inspect.getargspec(func)
except AttributeError: # cls has no __init__
arg_names = ['self']
arg_defaults = ()
arg_names = arg_names[1:] # skip first arg (self)
if arg_defaults is None:
arg_defaults = ()
new_args = []
new_kwargs = {}
# match named args to names
for name, arg in zip(arg_names, args):
new_kwargs[name] = arg
# handle extra args from *
if len(args) > len(arg_names):
for arg in args[len(arg_names):]:
new_args.append(arg)
# or fill in default values
else:
for name, default in zip(arg_names[len(args):], arg_defaults):
new_kwargs[name] = default
# merge remaining **kwargs
new_kwargs.update(kwargs)
return new_args, new_kwargs
"""@multiton
class A():
def __init__(self, a, b=10, *args, **kwargs):
self.a = a
self.b = b
A(1)
A(1, 10)
A(1, b=10)
A(1, 10, 11, 12, 13)
A(1, 10, 11, 12, 13, d=14, e=15, f=16)
A(1, b=10, d=14, e=15, f=16)"""
|
Add util class: the multiton
|
Add util class: the multiton
|
Python
|
mit
|
sait-berkeley-infosec/pynessus-api
|
Add util class: the multiton
|
import inspect
def multiton(cls):
"""
Class decorator to make a class a multiton.
That is, there will be only (at most) one object existing for a given set
of initialization parameters.
"""
instances = {}
def getinstance(*args, **kwargs):
key = _gen_key(cls, *args, **kwargs)
if key not in instances:
instances[key] = cls(*args, **kwargs)
return instances[key]
return getinstance
kwd_mark = (object(), ) # separate args and kwargs with a unique object
def _gen_key(cls, *args, **kwargs):
new_args, new_kwargs = _normalize_args(cls.__init__, *args, **kwargs)
key = new_args
if new_kwargs:
key += kwd_mark
sorted_items = sorted(new_kwargs.items())
for item in sorted_items:
key += item
return tuple(key)
def _normalize_args(func, *args, **kwargs):
try:
arg_names, _, _, arg_defaults = inspect.getargspec(func)
except AttributeError: # cls has no __init__
arg_names = ['self']
arg_defaults = ()
arg_names = arg_names[1:] # skip first arg (self)
if arg_defaults is None:
arg_defaults = ()
new_args = []
new_kwargs = {}
# match named args to names
for name, arg in zip(arg_names, args):
new_kwargs[name] = arg
# handle extra args from *
if len(args) > len(arg_names):
for arg in args[len(arg_names):]:
new_args.append(arg)
# or fill in default values
else:
for name, default in zip(arg_names[len(args):], arg_defaults):
new_kwargs[name] = default
# merge remaining **kwargs
new_kwargs.update(kwargs)
return new_args, new_kwargs
"""@multiton
class A():
def __init__(self, a, b=10, *args, **kwargs):
self.a = a
self.b = b
A(1)
A(1, 10)
A(1, b=10)
A(1, 10, 11, 12, 13)
A(1, 10, 11, 12, 13, d=14, e=15, f=16)
A(1, b=10, d=14, e=15, f=16)"""
|
<commit_before><commit_msg>Add util class: the multiton<commit_after>
|
import inspect
def multiton(cls):
"""
Class decorator to make a class a multiton.
That is, there will be only (at most) one object existing for a given set
of initialization parameters.
"""
instances = {}
def getinstance(*args, **kwargs):
key = _gen_key(cls, *args, **kwargs)
if key not in instances:
instances[key] = cls(*args, **kwargs)
return instances[key]
return getinstance
kwd_mark = (object(), ) # separate args and kwargs with a unique object
def _gen_key(cls, *args, **kwargs):
new_args, new_kwargs = _normalize_args(cls.__init__, *args, **kwargs)
key = new_args
if new_kwargs:
key += kwd_mark
sorted_items = sorted(new_kwargs.items())
for item in sorted_items:
key += item
return tuple(key)
def _normalize_args(func, *args, **kwargs):
try:
arg_names, _, _, arg_defaults = inspect.getargspec(func)
except AttributeError: # cls has no __init__
arg_names = ['self']
arg_defaults = ()
arg_names = arg_names[1:] # skip first arg (self)
if arg_defaults is None:
arg_defaults = ()
new_args = []
new_kwargs = {}
# match named args to names
for name, arg in zip(arg_names, args):
new_kwargs[name] = arg
# handle extra args from *
if len(args) > len(arg_names):
for arg in args[len(arg_names):]:
new_args.append(arg)
# or fill in default values
else:
for name, default in zip(arg_names[len(args):], arg_defaults):
new_kwargs[name] = default
# merge remaining **kwargs
new_kwargs.update(kwargs)
return new_args, new_kwargs
"""@multiton
class A():
def __init__(self, a, b=10, *args, **kwargs):
self.a = a
self.b = b
A(1)
A(1, 10)
A(1, b=10)
A(1, 10, 11, 12, 13)
A(1, 10, 11, 12, 13, d=14, e=15, f=16)
A(1, b=10, d=14, e=15, f=16)"""
|
Add util class: the multitonimport inspect
def multiton(cls):
"""
Class decorator to make a class a multiton.
That is, there will be only (at most) one object existing for a given set
of initialization parameters.
"""
instances = {}
def getinstance(*args, **kwargs):
key = _gen_key(cls, *args, **kwargs)
if key not in instances:
instances[key] = cls(*args, **kwargs)
return instances[key]
return getinstance
kwd_mark = (object(), ) # separate args and kwargs with a unique object
def _gen_key(cls, *args, **kwargs):
new_args, new_kwargs = _normalize_args(cls.__init__, *args, **kwargs)
key = new_args
if new_kwargs:
key += kwd_mark
sorted_items = sorted(new_kwargs.items())
for item in sorted_items:
key += item
return tuple(key)
def _normalize_args(func, *args, **kwargs):
try:
arg_names, _, _, arg_defaults = inspect.getargspec(func)
except AttributeError: # cls has no __init__
arg_names = ['self']
arg_defaults = ()
arg_names = arg_names[1:] # skip first arg (self)
if arg_defaults is None:
arg_defaults = ()
new_args = []
new_kwargs = {}
# match named args to names
for name, arg in zip(arg_names, args):
new_kwargs[name] = arg
# handle extra args from *
if len(args) > len(arg_names):
for arg in args[len(arg_names):]:
new_args.append(arg)
# or fill in default values
else:
for name, default in zip(arg_names[len(args):], arg_defaults):
new_kwargs[name] = default
# merge remaining **kwargs
new_kwargs.update(kwargs)
return new_args, new_kwargs
"""@multiton
class A():
def __init__(self, a, b=10, *args, **kwargs):
self.a = a
self.b = b
A(1)
A(1, 10)
A(1, b=10)
A(1, 10, 11, 12, 13)
A(1, 10, 11, 12, 13, d=14, e=15, f=16)
A(1, b=10, d=14, e=15, f=16)"""
|
<commit_before><commit_msg>Add util class: the multiton<commit_after>import inspect
def multiton(cls):
"""
Class decorator to make a class a multiton.
That is, there will be only (at most) one object existing for a given set
of initialization parameters.
"""
instances = {}
def getinstance(*args, **kwargs):
key = _gen_key(cls, *args, **kwargs)
if key not in instances:
instances[key] = cls(*args, **kwargs)
return instances[key]
return getinstance
kwd_mark = (object(), ) # separate args and kwargs with a unique object
def _gen_key(cls, *args, **kwargs):
new_args, new_kwargs = _normalize_args(cls.__init__, *args, **kwargs)
key = new_args
if new_kwargs:
key += kwd_mark
sorted_items = sorted(new_kwargs.items())
for item in sorted_items:
key += item
return tuple(key)
def _normalize_args(func, *args, **kwargs):
try:
arg_names, _, _, arg_defaults = inspect.getargspec(func)
except AttributeError: # cls has no __init__
arg_names = ['self']
arg_defaults = ()
arg_names = arg_names[1:] # skip first arg (self)
if arg_defaults is None:
arg_defaults = ()
new_args = []
new_kwargs = {}
# match named args to names
for name, arg in zip(arg_names, args):
new_kwargs[name] = arg
# handle extra args from *
if len(args) > len(arg_names):
for arg in args[len(arg_names):]:
new_args.append(arg)
# or fill in default values
else:
for name, default in zip(arg_names[len(args):], arg_defaults):
new_kwargs[name] = default
# merge remaining **kwargs
new_kwargs.update(kwargs)
return new_args, new_kwargs
"""@multiton
class A():
def __init__(self, a, b=10, *args, **kwargs):
self.a = a
self.b = b
A(1)
A(1, 10)
A(1, b=10)
A(1, 10, 11, 12, 13)
A(1, 10, 11, 12, 13, d=14, e=15, f=16)
A(1, b=10, d=14, e=15, f=16)"""
|
|
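A small sketch of the behaviour the multiton decorator above aims for, turning the commented-out example at the end of the module into assertions; the Point class is hypothetical, and the identity guarantees only hold on Python versions that still provide inspect.getargspec (it was removed in Python 3.11).

@multiton
class Point:
    def __init__(self, x, y=0):
        self.x = x
        self.y = y

# Calls that normalize to the same init arguments return the same cached instance
assert Point(1, 2) is Point(1, 2)
assert Point(1, 2) is Point(1, y=2)   # positional and keyword forms normalize alike
assert Point(3) is Point(3, 0)        # omitted arguments fall back to their defaults
assert Point(1) is not Point(2)       # different arguments give distinct instances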
5d182ac50d87c3d46d5419f449b7c9db7d9b2133
|
kb/keyboard.py
|
kb/keyboard.py
|
import abc
from decimal import Decimal
from core import Key, Keyboard
class StandardKeyboard(Keyboard):
""" A StandardKeyboard is a keyboard with standard Cherry MX key sizes and spacings. (see: http://www.fentek-ind.com/images/CHERRY_MX_keyswitch.pdf)
"""
def __init__(self):
self.unit_height = 19.05
self.unit_width = 19.05
self.separator = '|'
self.comment = '-'
@property
def keys(self):
keys = set()
offset_y = Decimal(0)
for line in self.schema.splitlines():
# don't process lines starting with the comment prefix
if line.startswith(self.comment):
continue
offset_x = Decimal(0)
for key in line.strip(self.separator).split(self.separator):
keys.add(Key(x=offset_x, y=offset_y))
unit = len(key) * 0.25 + 1
offset_x = offset_x.add(Decimal(self.unit_width * unit))
offset_y = offset_y.add(Decimal(self.unit_height))
return keys
@abc.abstractproperty
def schema(self):
pass
|
Implement parsing in StandardKeyboard class
|
Implement parsing in StandardKeyboard class
|
Python
|
mit
|
Cyanogenoid/kb-project
|
Implement parsing in StandardKeyboard class
|
import abc
from decimal import Decimal
from core import Key, Keyboard
class StandardKeyboard(Keyboard):
""" A StandardKeyboard is a keyboard with standard Cherry MX key sizes and spacings. (see: http://www.fentek-ind.com/images/CHERRY_MX_keyswitch.pdf)
"""
def __init__(self):
self.unit_height = 19.05
self.unit_width = 19.05
self.separator = '|'
self.comment = '-'
@property
def keys(self):
keys = set()
offset_y = Decimal(0)
for line in self.schema.splitlines():
# don't process lines starting with the comment prefix
if line.startswith(self.comment):
continue
offset_x = Decimal(0)
for key in line.strip(self.separator).split(self.separator):
keys.add(Key(x=offset_x, y=offset_y))
unit = len(key) * 0.25 + 1
offset_x = offset_x.add(Decimal(self.unit_width * unit))
offset_y = offset_y.add(Decimal(self.unit_height))
return keys
@abc.abstractproperty
def schema(self):
pass
|
<commit_before><commit_msg>Implement parsing in StandardKeyboard class<commit_after>
|
import abc
from decimal import Decimal
from core import Key, Keyboard
class StandardKeyboard(Keyboard):
""" A StandardKeyboard is a keyboard with standard Cherry MX key sizes and spacings. (see: http://www.fentek-ind.com/images/CHERRY_MX_keyswitch.pdf)
"""
def __init__(self):
self.unit_height = 19.05
self.unit_width = 19.05
self.separator = '|'
self.comment = '-'
@property
def keys(self):
keys = set()
offset_y = Decimal(0)
for line in self.schema.splitlines():
# don't process lines starting with the comment prefix
if line.startswith(self.comment):
continue
offset_x = Decimal(0)
for key in line.strip(self.separator).split(self.separator):
keys.add(Key(x=offset_x, y=offset_y))
unit = len(key) * 0.25 + 1
offset_x = offset_x.add(Decimal(self.unit_width * unit))
offset_y = offset_y.add(Decimal(self.unit_height))
return keys
@abc.abstractproperty
def schema(self):
pass
|
Implement parsing in StandardKeyboard classimport abc
from decimal import Decimal
from core import Key, Keyboard
class StandardKeyboard(Keyboard):
""" A StandardKeyboard is a keyboard with standard Cherry MX key sizes and spacings. (see: http://www.fentek-ind.com/images/CHERRY_MX_keyswitch.pdf)
"""
def __init__(self):
self.unit_height = 19.05
self.unit_width = 19.05
self.separator = '|'
self.comment = '-'
@property
def keys(self):
keys = set()
offset_y = Decimal(0)
for line in self.schema.splitlines():
# don't process lines starting with the comment prefix
if line.startswith(self.comment):
continue
offset_x = Decimal(0)
for key in line.strip(self.separator).split(self.separator):
keys.add(Key(x=offset_x, y=offset_y))
unit = len(key) * 0.25 + 1
offset_x = offset_x.add(Decimal(self.unit_width * unit))
offset_y = offset_y.add(Decimal(self.unit_height))
return keys
@abc.abstractproperty
def schema(self):
pass
|
<commit_before><commit_msg>Implement parsing in StandardKeyboard class<commit_after>import abc
from decimal import Decimal
from core import Key, Keyboard
class StandardKeyboard(Keyboard):
""" A StandardKeyboard is a keyboard with standard Cherry MX key sizes and spacings. (see: http://www.fentek-ind.com/images/CHERRY_MX_keyswitch.pdf)
"""
def __init__(self):
self.unit_height = 19.05
self.unit_width = 19.05
self.separator = '|'
self.comment = '-'
@property
def keys(self):
keys = set()
offset_y = Decimal(0)
for line in self.schema.splitlines():
# don't process lines starting with the comment prefix
if line.startswith(self.comment):
continue
offset_x = Decimal(0)
for key in line.strip(self.separator).split(self.separator):
keys.add(Key(x=offset_x, y=offset_y))
unit = len(key) * 0.25 + 1
offset_x = offset_x.add(Decimal(self.unit_width * unit))
offset_y = offset_y.add(Decimal(self.unit_height))
return keys
@abc.abstractproperty
def schema(self):
pass
|
|
99b27c037f8072f027ef025d6c4940093ad2c006
|
tests/unit/modules/test_win_file.py
|
tests/unit/modules/test_win_file.py
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Shane Lee <slee@saltstack.com>`
'''
# Import Python Libs
from __future__ import absolute_import
import os
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
patch,
NO_MOCK,
NO_MOCK_REASON
)
# Import Salt Libs
import salt.modules.win_file as win_file
from salt.exceptions import CommandExecutionError
@skipIf(NO_MOCK, NO_MOCK_REASON)
class WinFileTestCase(TestCase, LoaderModuleMockMixin):
'''
Test cases for salt.modules.win_file
'''
FAKE_RET = {'fake': 'ret data'}
FAKE_PATH = os.sep.join(['C:', 'path', 'does', 'not', 'exist'])
def setup_loader_modules(self):
return {win_file: {}}
def test_issue_43328_stats(self):
'''
Make sure that an empty dictionary is returned if the file doesn't exist
'''
with patch('os.path.exists', return_value=False):
ret = win_file.stats(self.FAKE_PATH)
self.assertEqual(ret, {})
def test_issue_43328_check_perms_ret_passed(self):
'''
Make sure that ret is returned if the file doesn't exist and ret is
passed
'''
with patch('os.path.exists', return_value=False):
ret = win_file.check_perms(self.FAKE_PATH, ret=self.FAKE_RET)
self.assertEqual(ret, self.FAKE_RET)
def test_issue_43328_check_perms_no_ret(self):
'''
Make sure that a CommandExecutionError is raised if the file doesn't
exist and ret is NOT passed
'''
with patch('os.path.exists', return_value=False):
self.assertRaises(
CommandExecutionError, win_file.check_perms, self.FAKE_PATH)
|
Add tests to avoid future regression
|
Add tests to avoid future regression
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
Add tests to avoid future regression
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Shane Lee <slee@saltstack.com>`
'''
# Import Python Libs
from __future__ import absolute_import
import os
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
patch,
NO_MOCK,
NO_MOCK_REASON
)
# Import Salt Libs
import salt.modules.win_file as win_file
from salt.exceptions import CommandExecutionError
@skipIf(NO_MOCK, NO_MOCK_REASON)
class WinFileTestCase(TestCase, LoaderModuleMockMixin):
'''
Test cases for salt.modules.win_file
'''
FAKE_RET = {'fake': 'ret data'}
FAKE_PATH = os.sep.join(['C:', 'path', 'does', 'not', 'exist'])
def setup_loader_modules(self):
return {win_file: {}}
def test_issue_43328_stats(self):
'''
Make sure that an empty dictionary is returned if the file doesn't exist
'''
with patch('os.path.exists', return_value=False):
ret = win_file.stats(self.FAKE_PATH)
self.assertEqual(ret, {})
def test_issue_43328_check_perms_ret_passed(self):
'''
Make sure that ret is returned if the file doesn't exist and ret is
passed
'''
with patch('os.path.exists', return_value=False):
ret = win_file.check_perms(self.FAKE_PATH, ret=self.FAKE_RET)
self.assertEqual(ret, self.FAKE_RET)
def test_issue_43328_check_perms_no_ret(self):
'''
Make sure that a CommandExecutionError is raised if the file doesn't
exist and ret is NOT passed
'''
with patch('os.path.exists', return_value=False):
self.assertRaises(
CommandExecutionError, win_file.check_perms, self.FAKE_PATH)
|
<commit_before><commit_msg>Add tests to avoid future regression<commit_after>
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Shane Lee <slee@saltstack.com>`
'''
# Import Python Libs
from __future__ import absolute_import
import os
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
patch,
NO_MOCK,
NO_MOCK_REASON
)
# Import Salt Libs
import salt.modules.win_file as win_file
from salt.exceptions import CommandExecutionError
@skipIf(NO_MOCK, NO_MOCK_REASON)
class WinFileTestCase(TestCase, LoaderModuleMockMixin):
'''
Test cases for salt.modules.win_file
'''
FAKE_RET = {'fake': 'ret data'}
FAKE_PATH = os.sep.join(['C:', 'path', 'does', 'not', 'exist'])
def setup_loader_modules(self):
return {win_file: {}}
def test_issue_43328_stats(self):
'''
Make sure that an empty dictionary is returned if the file doesn't exist
'''
with patch('os.path.exists', return_value=False):
ret = win_file.stats(self.FAKE_PATH)
self.assertEqual(ret, {})
def test_issue_43328_check_perms_ret_passed(self):
'''
Make sure that ret is returned if the file doesn't exist and ret is
passed
'''
with patch('os.path.exists', return_value=False):
ret = win_file.check_perms(self.FAKE_PATH, ret=self.FAKE_RET)
self.assertEqual(ret, self.FAKE_RET)
def test_issue_43328_check_perms_no_ret(self):
'''
Make sure that a CommandExecutionError is raised if the file doesn't
exist and ret is NOT passed
'''
with patch('os.path.exists', return_value=False):
self.assertRaises(
CommandExecutionError, win_file.check_perms, self.FAKE_PATH)
|
Add tests to avoid future regression# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Shane Lee <slee@saltstack.com>`
'''
# Import Python Libs
from __future__ import absolute_import
import os
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
patch,
NO_MOCK,
NO_MOCK_REASON
)
# Import Salt Libs
import salt.modules.win_file as win_file
from salt.exceptions import CommandExecutionError
@skipIf(NO_MOCK, NO_MOCK_REASON)
class WinFileTestCase(TestCase, LoaderModuleMockMixin):
'''
Test cases for salt.modules.win_file
'''
FAKE_RET = {'fake': 'ret data'}
FAKE_PATH = os.sep.join(['C:', 'path', 'does', 'not', 'exist'])
def setup_loader_modules(self):
return {win_file: {}}
def test_issue_43328_stats(self):
'''
Make sure that an empty dictionary is returned if the file doesn't exist
'''
with patch('os.path.exists', return_value=False):
ret = win_file.stats(self.FAKE_PATH)
self.assertEqual(ret, {})
def test_issue_43328_check_perms_ret_passed(self):
'''
Make sure that ret is returned if the file doesn't exist and ret is
passed
'''
with patch('os.path.exists', return_value=False):
ret = win_file.check_perms(self.FAKE_PATH, ret=self.FAKE_RET)
self.assertEqual(ret, self.FAKE_RET)
def test_issue_43328_check_perms_no_ret(self):
'''
Make sure that a CommandExecutionError is raised if the file doesn't
exist and ret is NOT passed
'''
with patch('os.path.exists', return_value=False):
self.assertRaises(
CommandExecutionError, win_file.check_perms, self.FAKE_PATH)
|
<commit_before><commit_msg>Add tests to avoid future regression<commit_after># -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Shane Lee <slee@saltstack.com>`
'''
# Import Python Libs
from __future__ import absolute_import
import os
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
patch,
NO_MOCK,
NO_MOCK_REASON
)
# Import Salt Libs
import salt.modules.win_file as win_file
from salt.exceptions import CommandExecutionError
@skipIf(NO_MOCK, NO_MOCK_REASON)
class WinFileTestCase(TestCase, LoaderModuleMockMixin):
'''
Test cases for salt.modules.win_file
'''
FAKE_RET = {'fake': 'ret data'}
FAKE_PATH = os.sep.join(['C:', 'path', 'does', 'not', 'exist'])
def setup_loader_modules(self):
return {win_file: {}}
def test_issue_43328_stats(self):
'''
Make sure that an empty dictionary is returned if the file doesn't exist
'''
with patch('os.path.exists', return_value=False):
ret = win_file.stats(self.FAKE_PATH)
self.assertEqual(ret, {})
def test_issue_43328_check_perms_ret_passed(self):
'''
Make sure that ret is returned if the file doesn't exist and ret is
passed
'''
with patch('os.path.exists', return_value=False):
ret = win_file.check_perms(self.FAKE_PATH, ret=self.FAKE_RET)
self.assertEqual(ret, self.FAKE_RET)
def test_issue_43328_check_perms_no_ret(self):
'''
Make sure that a CommandExecutionError is raised if the file doesn't
exist and ret is NOT passed
'''
with patch('os.path.exists', return_value=False):
self.assertRaises(
CommandExecutionError, win_file.check_perms, self.FAKE_PATH)
|
|
ed6628411e74f95b5228da0c14f890119d5a8a77
|
ch7/profile_items.py
|
ch7/profile_items.py
|
'''
Listing 7.7: Profiling data partitioning
'''
import numpy as np
import pyopencl as cl
import pyopencl.array
import utility
NUM_INTS= 4096
NUM_ITEMS = 512
NUM_ITERATIONS = 2000
kernel_src = '''
__kernel void profile_items(__global int4 *x, int num_ints) {
int num_vectors = num_ints/(4 * get_global_size(0));
x += get_global_id(0) * num_vectors;
for(int i=0; i<num_vectors; i++) {
x[i] += 1;
x[i] *= 2;
x[i] /= 3;
}
}
'''
# Get device and context, create command queue and program
dev = utility.get_default_device()
context = cl.Context(devices=[dev], properties=None, dev_type=None, cache_dir=None)
# Create a command queue with the profiling flag enabled
queue = cl.CommandQueue(context, dev, properties=cl.command_queue_properties.PROFILING_ENABLE)
# Build program in the specified context using the kernel source code
prog = cl.Program(context, kernel_src)
try:
prog.build(options=['-Werror'], devices=[dev], cache_dir=None)
except:
print('Build log:')
print(prog.get_build_info(dev, cl.program_build_info.LOG))
raise
# Data
data = np.arange(start=0, stop=NUM_INTS, dtype=np.int32)
# Create input/output buffer
data_buff = cl.Buffer(context, cl.mem_flags.READ_WRITE | cl.mem_flags.COPY_HOST_PTR, hostbuf=data)
# Enqueue kernel (with argument specified directly)
global_size = (NUM_ITEMS,)
local_size = None
# Execute the kernel repeatedly using enqueue_read
total_time = 0.0
for i in range(NUM_ITERATIONS):
# Enqueue kernel
# __call__(queue, global_size, local_size, *args, global_offset=None, wait_for=None, g_times_l=False)
kernel_event = prog.profile_items(queue, global_size, local_size, data_buff, np.int32(NUM_INTS))
# Finish processing the queue and get profiling information
queue.finish()
total_time += kernel_event.profile.end - kernel_event.profile.start
# Print averaged results
print('Average time (ms): {}'.format(total_time / ( NUM_ITERATIONS * 1000)))
|
Add example from listing 7.7
|
Add example from listing 7.7
|
Python
|
mit
|
oysstu/pyopencl-in-action
|
Add example from listing 7.7
|
'''
Listing 7.7: Profiling data partitioning
'''
import numpy as np
import pyopencl as cl
import pyopencl.array
import utility
NUM_INTS= 4096
NUM_ITEMS = 512
NUM_ITERATIONS = 2000
kernel_src = '''
__kernel void profile_items(__global int4 *x, int num_ints) {
int num_vectors = num_ints/(4 * get_global_size(0));
x += get_global_id(0) * num_vectors;
for(int i=0; i<num_vectors; i++) {
x[i] += 1;
x[i] *= 2;
x[i] /= 3;
}
}
'''
# Get device and context, create command queue and program
dev = utility.get_default_device()
context = cl.Context(devices=[dev], properties=None, dev_type=None, cache_dir=None)
# Create a command queue with the profiling flag enabled
queue = cl.CommandQueue(context, dev, properties=cl.command_queue_properties.PROFILING_ENABLE)
# Build program in the specified context using the kernel source code
prog = cl.Program(context, kernel_src)
try:
prog.build(options=['-Werror'], devices=[dev], cache_dir=None)
except:
print('Build log:')
print(prog.get_build_info(dev, cl.program_build_info.LOG))
raise
# Data
data = np.arange(start=0, stop=NUM_INTS, dtype=np.int32)
# Create input/output buffer
data_buff = cl.Buffer(context, cl.mem_flags.READ_WRITE | cl.mem_flags.COPY_HOST_PTR, hostbuf=data)
# Enqueue kernel (with argument specified directly)
global_size = (NUM_ITEMS,)
local_size = None
# Execute the kernel repeatedly using enqueue_read
total_time = 0.0
for i in range(NUM_ITERATIONS):
# Enqueue kernel
# __call__(queue, global_size, local_size, *args, global_offset=None, wait_for=None, g_times_l=False)
kernel_event = prog.profile_items(queue, global_size, local_size, data_buff, np.int32(NUM_INTS))
# Finish processing the queue and get profiling information
queue.finish()
total_time += kernel_event.profile.end - kernel_event.profile.start
# Print averaged results
print('Average time (ms): {}'.format(total_time / ( NUM_ITERATIONS * 1000)))
|
<commit_before><commit_msg>Add example from listing 7.7<commit_after>
|
'''
Listing 7.7: Profiling data partitioning
'''
import numpy as np
import pyopencl as cl
import pyopencl.array
import utility
NUM_INTS= 4096
NUM_ITEMS = 512
NUM_ITERATIONS = 2000
kernel_src = '''
__kernel void profile_items(__global int4 *x, int num_ints) {
int num_vectors = num_ints/(4 * get_global_size(0));
x += get_global_id(0) * num_vectors;
for(int i=0; i<num_vectors; i++) {
x[i] += 1;
x[i] *= 2;
x[i] /= 3;
}
}
'''
# Get device and context, create command queue and program
dev = utility.get_default_device()
context = cl.Context(devices=[dev], properties=None, dev_type=None, cache_dir=None)
# Create a command queue with the profiling flag enabled
queue = cl.CommandQueue(context, dev, properties=cl.command_queue_properties.PROFILING_ENABLE)
# Build program in the specified context using the kernel source code
prog = cl.Program(context, kernel_src)
try:
prog.build(options=['-Werror'], devices=[dev], cache_dir=None)
except:
print('Build log:')
print(prog.get_build_info(dev, cl.program_build_info.LOG))
raise
# Data
data = np.arange(start=0, stop=NUM_INTS, dtype=np.int32)
# Create input/output buffer
data_buff = cl.Buffer(context, cl.mem_flags.READ_WRITE | cl.mem_flags.COPY_HOST_PTR, hostbuf=data)
# Enqueue kernel (with argument specified directly)
global_size = (NUM_ITEMS,)
local_size = None
# Execute the kernel repeatedly using enqueue_read
total_time = 0.0
for i in range(NUM_ITERATIONS):
# Enqueue kernel
# __call__(queue, global_size, local_size, *args, global_offset=None, wait_for=None, g_times_l=False)
kernel_event = prog.profile_items(queue, global_size, local_size, data_buff, np.int32(NUM_INTS))
# Finish processing the queue and get profiling information
queue.finish()
total_time += kernel_event.profile.end - kernel_event.profile.start
# Print averaged results
print('Average time (ms): {}'.format(total_time / ( NUM_ITERATIONS * 1000)))
|
Add example from listing 7.7'''
Listing 7.7: Profiling data partitioning
'''
import numpy as np
import pyopencl as cl
import pyopencl.array
import utility
NUM_INTS= 4096
NUM_ITEMS = 512
NUM_ITERATIONS = 2000
kernel_src = '''
__kernel void profile_items(__global int4 *x, int num_ints) {
int num_vectors = num_ints/(4 * get_global_size(0));
x += get_global_id(0) * num_vectors;
for(int i=0; i<num_vectors; i++) {
x[i] += 1;
x[i] *= 2;
x[i] /= 3;
}
}
'''
# Get device and context, create command queue and program
dev = utility.get_default_device()
context = cl.Context(devices=[dev], properties=None, dev_type=None, cache_dir=None)
# Create a command queue with the profiling flag enabled
queue = cl.CommandQueue(context, dev, properties=cl.command_queue_properties.PROFILING_ENABLE)
# Build program in the specified context using the kernel source code
prog = cl.Program(context, kernel_src)
try:
prog.build(options=['-Werror'], devices=[dev], cache_dir=None)
except:
print('Build log:')
print(prog.get_build_info(dev, cl.program_build_info.LOG))
raise
# Data
data = np.arange(start=0, stop=NUM_INTS, dtype=np.int32)
# Create input/output buffer
data_buff = cl.Buffer(context, cl.mem_flags.READ_WRITE | cl.mem_flags.COPY_HOST_PTR, hostbuf=data)
# Enqueue kernel (with argument specified directly)
global_size = (NUM_ITEMS,)
local_size = None
# Execute the kernel repeatedly using enqueue_read
total_time = 0.0
for i in range(NUM_ITERATIONS):
# Enqueue kernel
# __call__(queue, global_size, local_size, *args, global_offset=None, wait_for=None, g_times_l=False)
kernel_event = prog.profile_items(queue, global_size, local_size, data_buff, np.int32(NUM_INTS))
# Finish processing the queue and get profiling information
queue.finish()
total_time += kernel_event.profile.end - kernel_event.profile.start
# Print averaged results
print('Average time (ms): {}'.format(total_time / ( NUM_ITERATIONS * 1000)))
|
<commit_before><commit_msg>Add example from listing 7.7<commit_after>'''
Listing 7.7: Profiling data partitioning
'''
import numpy as np
import pyopencl as cl
import pyopencl.array
import utility
NUM_INTS= 4096
NUM_ITEMS = 512
NUM_ITERATIONS = 2000
kernel_src = '''
__kernel void profile_items(__global int4 *x, int num_ints) {
int num_vectors = num_ints/(4 * get_global_size(0));
x += get_global_id(0) * num_vectors;
for(int i=0; i<num_vectors; i++) {
x[i] += 1;
x[i] *= 2;
x[i] /= 3;
}
}
'''
# Get device and context, create command queue and program
dev = utility.get_default_device()
context = cl.Context(devices=[dev], properties=None, dev_type=None, cache_dir=None)
# Create a command queue with the profiling flag enabled
queue = cl.CommandQueue(context, dev, properties=cl.command_queue_properties.PROFILING_ENABLE)
# Build program in the specified context using the kernel source code
prog = cl.Program(context, kernel_src)
try:
prog.build(options=['-Werror'], devices=[dev], cache_dir=None)
except:
print('Build log:')
print(prog.get_build_info(dev, cl.program_build_info.LOG))
raise
# Data
data = np.arange(start=0, stop=NUM_INTS, dtype=np.int32)
# Create input/output buffer
data_buff = cl.Buffer(context, cl.mem_flags.READ_WRITE | cl.mem_flags.COPY_HOST_PTR, hostbuf=data)
# Enqueue kernel (with argument specified directly)
global_size = (NUM_ITEMS,)
local_size = None
# Execute the kernel repeatedly using enqueue_read
total_time = 0.0
for i in range(NUM_ITERATIONS):
# Enqueue kernel
# __call__(queue, global_size, local_size, *args, global_offset=None, wait_for=None, g_times_l=False)
kernel_event = prog.profile_items(queue, global_size, local_size, data_buff, np.int32(NUM_INTS))
# Finish processing the queue and get profiling information
queue.finish()
total_time += kernel_event.profile.end - kernel_event.profile.start
# Print averaged results
print('Average time (ms): {}'.format(total_time / ( NUM_ITERATIONS * 1000)))
|
|
8d8cccf12e19283b57a61f34127f0536940ef34e
|
docs/examples/ccd.py
|
docs/examples/ccd.py
|
"""Automatic derivation of CCD equations.
"""
import pickle
from pyspark import SparkConf, SparkContext
from sympy import IndexedBase, Rational
from drudge import PartHoleDrudge, CR, AN
conf = SparkConf().setAppName('CCSD-derivation')
ctx = SparkContext(conf=conf)
dr = PartHoleDrudge(ctx)
p = dr.names
c_ = dr.op[AN]
c_dag = dr.op[CR]
a, b = p.V_dumms[:2]
i, j = p.O_dumms[:2]
t = IndexedBase('t')
dr.set_dbbar_base(t, 2)
doubles = dr.sum(
(a, p.V), (b, p.V), (i, p.O), (j, p.O),
t[a, b, i, j] * c_dag[a] * c_dag[b] * c_[j] * c_[i]
)
curr = dr.ham
h_bar = dr.ham
for i in range(0, 4):
curr = (curr | doubles).simplify() * Rational(1, i + 1)
h_bar += curr
en_eqn = dr.eval_fermi_vev(h_bar)
proj = c_dag[i] * c_dag[j] * c_[b] * c_[a]
t2_eqn = dr.eval_fermi_vev(proj * h_bar)
with open('ccd_eqns.pickle') as fp:
pickle.dump([en_eqn, t2_eqn], fp)
|
Add example script for CCD theory
|
Add example script for CCD theory
Currently this script may or may not work.
|
Python
|
mit
|
tschijnmo/drudge,tschijnmo/drudge,tschijnmo/drudge
|
Add example script for CCD theory
Currently this script may or may not work.
|
"""Automatic derivation of CCD equations.
"""
import pickle
from pyspark import SparkConf, SparkContext
from sympy import IndexedBase, Rational
from drudge import PartHoleDrudge, CR, AN
conf = SparkConf().setAppName('CCSD-derivation')
ctx = SparkContext(conf=conf)
dr = PartHoleDrudge(ctx)
p = dr.names
c_ = dr.op[AN]
c_dag = dr.op[CR]
a, b = p.V_dumms[:2]
i, j = p.O_dumms[:2]
t = IndexedBase('t')
dr.set_dbbar_base(t, 2)
doubles = dr.sum(
(a, p.V), (b, p.V), (i, p.O), (j, p.O),
t[a, b, i, j] * c_dag[a] * c_dag[b] * c_[j] * c_[i]
)
curr = dr.ham
h_bar = dr.ham
for i in range(0, 4):
curr = (curr | doubles).simplify() * Rational(1, i + 1)
h_bar += curr
en_eqn = dr.eval_fermi_vev(h_bar)
proj = c_dag[i] * c_dag[j] * c_[b] * c_[a]
t2_eqn = dr.eval_fermi_vev(proj * h_bar)
with open('ccd_eqns.pickle') as fp:
pickle.dump([en_eqn, t2_eqn], fp)
|
<commit_before><commit_msg>Add example script for CCD theory
Currently this script may or may not work.<commit_after>
|
"""Automatic derivation of CCD equations.
"""
import pickle
from pyspark import SparkConf, SparkContext
from sympy import IndexedBase, Rational
from drudge import PartHoleDrudge, CR, AN
conf = SparkConf().setAppName('CCSD-derivation')
ctx = SparkContext(conf=conf)
dr = PartHoleDrudge(ctx)
p = dr.names
c_ = dr.op[AN]
c_dag = dr.op[CR]
a, b = p.V_dumms[:2]
i, j = p.O_dumms[:2]
t = IndexedBase('t')
dr.set_dbbar_base(t, 2)
doubles = dr.sum(
(a, p.V), (b, p.V), (i, p.O), (j, p.O),
t[a, b, i, j] * c_dag[a] * c_dag[b] * c_[j] * c_[i]
)
curr = dr.ham
h_bar = dr.ham
for i in range(0, 4):
curr = (curr | doubles).simplify() * Rational(1, i + 1)
h_bar += curr
en_eqn = dr.eval_fermi_vev(h_bar)
proj = c_dag[i] * c_dag[j] * c_[b] * c_[a]
t2_eqn = dr.eval_fermi_vev(proj * h_bar)
with open('ccd_eqns.pickle') as fp:
pickle.dump([en_eqn, t2_eqn], fp)
|
Add example script for CCD theory
Currently this script may or may not work."""Automatic derivation of CCD equations.
"""
import pickle
from pyspark import SparkConf, SparkContext
from sympy import IndexedBase, Rational
from drudge import PartHoleDrudge, CR, AN
conf = SparkConf().setAppName('CCSD-derivation')
ctx = SparkContext(conf=conf)
dr = PartHoleDrudge(ctx)
p = dr.names
c_ = dr.op[AN]
c_dag = dr.op[CR]
a, b = p.V_dumms[:2]
i, j = p.O_dumms[:2]
t = IndexedBase('t')
dr.set_dbbar_base(t, 2)
doubles = dr.sum(
(a, p.V), (b, p.V), (i, p.O), (j, p.O),
t[a, b, i, j] * c_dag[a] * c_dag[b] * c_[j] * c_[i]
)
curr = dr.ham
h_bar = dr.ham
for i in range(0, 4):
curr = (curr | doubles).simplify() * Rational(1, i + 1)
h_bar += curr
en_eqn = dr.eval_fermi_vev(h_bar)
proj = c_dag[i] * c_dag[j] * c_[b] * c_[a]
t2_eqn = dr.eval_fermi_vev(proj * h_bar)
with open('ccd_eqns.pickle') as fp:
pickle.dump([en_eqn, t2_eqn], fp)
|
<commit_before><commit_msg>Add example script for CCD theory
Currently this script may or may not work.<commit_after>"""Automatic derivation of CCD equations.
"""
import pickle
from pyspark import SparkConf, SparkContext
from sympy import IndexedBase, Rational
from drudge import PartHoleDrudge, CR, AN
conf = SparkConf().setAppName('CCSD-derivation')
ctx = SparkContext(conf=conf)
dr = PartHoleDrudge(ctx)
p = dr.names
c_ = dr.op[AN]
c_dag = dr.op[CR]
a, b = p.V_dumms[:2]
i, j = p.O_dumms[:2]
t = IndexedBase('t')
dr.set_dbbar_base(t, 2)
doubles = dr.sum(
(a, p.V), (b, p.V), (i, p.O), (j, p.O),
t[a, b, i, j] * c_dag[a] * c_dag[b] * c_[j] * c_[i]
)
curr = dr.ham
h_bar = dr.ham
for i in range(0, 4):
curr = (curr | doubles).simplify() * Rational(1, i + 1)
h_bar += curr
en_eqn = dr.eval_fermi_vev(h_bar)
proj = c_dag[i] * c_dag[j] * c_[b] * c_[a]
t2_eqn = dr.eval_fermi_vev(proj * h_bar)
with open('ccd_eqns.pickle') as fp:
pickle.dump([en_eqn, t2_eqn], fp)
|
|
42a92130fc9d6f3358bb03a7ab56cdc5f20eb4d1
|
tests/test_config.py
|
tests/test_config.py
|
import os
import pytest
from vrun import config
from vrun.compat import ConfigParser
@pytest.mark.parametrize('parts, result', [
(
['simple'],
['simple']
),
(
['multiple', 'simple'],
['multiple', 'simple']
),
(
['with', '"quotes"'],
['with', '"quotes"']
),
(
['"testing', 'quote', 'support"'],
['testing quote support']
),
(
["'testing", 'quote', "support'"],
['testing quote support']
),
(
['"testing', '\'quote', 'support"'],
['testing \'quote support']
),
(
['"testing', '\'quote\'', 'support"'],
['testing \'quote\' support']
),
(
['"testing', '\'quote', '\'support"'],
['testing \'quote \'support']
),
(
['""'],
['""']
),
(
['" ', ' "'],
[' ']
),
])
def test_quoted_combine(parts, result):
assert list(config.quoted_combine(parts)) == result
@pytest.mark.parametrize('parts', [
['"testing', '\'quote', '"support"'],
['" ', '""'],
['"test', '"ing'],
])
def test_quoted_combine_invalid(parts):
with pytest.raises(ValueError):
assert list(config.quoted_combine(parts))
@pytest.mark.parametrize('folder, result', [
('configtest', 'vrun.cfg'),
('configtest/vrun_ini', 'vrun.ini'),
('configtest/setup_cfg', 'setup.cfg'),
('configtest/setup_cfg_no_section', None),
])
def test_find_config(folder, result):
curpath = os.path.dirname(os.path.realpath(__file__))
cwd = os.path.join(curpath, folder)
if result:
assert config.find_config(cwd).endswith(result)
else:
assert config.find_config(cwd) == result
@pytest.mark.parametrize('folder, result', [
('configtest', 'vrun.cfg'),
('configtest/vrun_ini', 'vrun.ini'),
('configtest/setup_cfg', 'setup.cfg'),
])
def test_config_from_file(folder, result):
curpath = os.path.dirname(os.path.realpath(__file__))
cwd = os.path.join(curpath, folder)
config_file = config.find_config(cwd)
assert isinstance(config.config_from_file(config_file), ConfigParser)
|
Add tests for ancillary functions
|
Add tests for ancillary functions
|
Python
|
isc
|
bertjwregeer/vrun
|
Add tests for ancillary functions
|
import os
import pytest
from vrun import config
from vrun.compat import ConfigParser
@pytest.mark.parametrize('parts, result', [
(
['simple'],
['simple']
),
(
['multiple', 'simple'],
['multiple', 'simple']
),
(
['with', '"quotes"'],
['with', '"quotes"']
),
(
['"testing', 'quote', 'support"'],
['testing quote support']
),
(
["'testing", 'quote', "support'"],
['testing quote support']
),
(
['"testing', '\'quote', 'support"'],
['testing \'quote support']
),
(
['"testing', '\'quote\'', 'support"'],
['testing \'quote\' support']
),
(
['"testing', '\'quote', '\'support"'],
['testing \'quote \'support']
),
(
['""'],
['""']
),
(
['" ', ' "'],
[' ']
),
])
def test_quoted_combine(parts, result):
assert list(config.quoted_combine(parts)) == result
@pytest.mark.parametrize('parts', [
['"testing', '\'quote', '"support"'],
['" ', '""'],
['"test', '"ing'],
])
def test_quoted_combine_invalid(parts):
with pytest.raises(ValueError):
assert list(config.quoted_combine(parts))
@pytest.mark.parametrize('folder, result', [
('configtest', 'vrun.cfg'),
('configtest/vrun_ini', 'vrun.ini'),
('configtest/setup_cfg', 'setup.cfg'),
('configtest/setup_cfg_no_section', None),
])
def test_find_config(folder, result):
curpath = os.path.dirname(os.path.realpath(__file__))
cwd = os.path.join(curpath, folder)
if result:
assert config.find_config(cwd).endswith(result)
else:
assert config.find_config(cwd) == result
@pytest.mark.parametrize('folder, result', [
('configtest', 'vrun.cfg'),
('configtest/vrun_ini', 'vrun.ini'),
('configtest/setup_cfg', 'setup.cfg'),
])
def test_config_from_file(folder, result):
curpath = os.path.dirname(os.path.realpath(__file__))
cwd = os.path.join(curpath, folder)
config_file = config.find_config(cwd)
assert isinstance(config.config_from_file(config_file), ConfigParser)
|
<commit_before><commit_msg>Add tests for ancillary functions<commit_after>
|
import os
import pytest
from vrun import config
from vrun.compat import ConfigParser
@pytest.mark.parametrize('parts, result', [
(
['simple'],
['simple']
),
(
['multiple', 'simple'],
['multiple', 'simple']
),
(
['with', '"quotes"'],
['with', '"quotes"']
),
(
['"testing', 'quote', 'support"'],
['testing quote support']
),
(
["'testing", 'quote', "support'"],
['testing quote support']
),
(
['"testing', '\'quote', 'support"'],
['testing \'quote support']
),
(
['"testing', '\'quote\'', 'support"'],
['testing \'quote\' support']
),
(
['"testing', '\'quote', '\'support"'],
['testing \'quote \'support']
),
(
['""'],
['""']
),
(
['" ', ' "'],
[' ']
),
])
def test_quoted_combine(parts, result):
assert list(config.quoted_combine(parts)) == result
@pytest.mark.parametrize('parts', [
['"testing', '\'quote', '"support"'],
['" ', '""'],
['"test', '"ing'],
])
def test_quoted_combine_invalid(parts):
with pytest.raises(ValueError):
assert list(config.quoted_combine(parts))
@pytest.mark.parametrize('folder, result', [
('configtest', 'vrun.cfg'),
('configtest/vrun_ini', 'vrun.ini'),
('configtest/setup_cfg', 'setup.cfg'),
('configtest/setup_cfg_no_section', None),
])
def test_find_config(folder, result):
curpath = os.path.dirname(os.path.realpath(__file__))
cwd = os.path.join(curpath, folder)
if result:
assert config.find_config(cwd).endswith(result)
else:
assert config.find_config(cwd) == result
@pytest.mark.parametrize('folder, result', [
('configtest', 'vrun.cfg'),
('configtest/vrun_ini', 'vrun.ini'),
('configtest/setup_cfg', 'setup.cfg'),
])
def test_config_from_file(folder, result):
curpath = os.path.dirname(os.path.realpath(__file__))
cwd = os.path.join(curpath, folder)
config_file = config.find_config(cwd)
assert isinstance(config.config_from_file(config_file), ConfigParser)
|
Add tests for ancillary functionsimport os
import pytest
from vrun import config
from vrun.compat import ConfigParser
@pytest.mark.parametrize('parts, result', [
(
['simple'],
['simple']
),
(
['multiple', 'simple'],
['multiple', 'simple']
),
(
['with', '"quotes"'],
['with', '"quotes"']
),
(
['"testing', 'quote', 'support"'],
['testing quote support']
),
(
["'testing", 'quote', "support'"],
['testing quote support']
),
(
['"testing', '\'quote', 'support"'],
['testing \'quote support']
),
(
['"testing', '\'quote\'', 'support"'],
['testing \'quote\' support']
),
(
['"testing', '\'quote', '\'support"'],
['testing \'quote \'support']
),
(
['""'],
['""']
),
(
['" ', ' "'],
[' ']
),
])
def test_quoted_combine(parts, result):
assert list(config.quoted_combine(parts)) == result
@pytest.mark.parametrize('parts', [
['"testing', '\'quote', '"support"'],
['" ', '""'],
['"test', '"ing'],
])
def test_quoted_combine_invalid(parts):
with pytest.raises(ValueError):
assert list(config.quoted_combine(parts))
@pytest.mark.parametrize('folder, result', [
('configtest', 'vrun.cfg'),
('configtest/vrun_ini', 'vrun.ini'),
('configtest/setup_cfg', 'setup.cfg'),
('configtest/setup_cfg_no_section', None),
])
def test_find_config(folder, result):
curpath = os.path.dirname(os.path.realpath(__file__))
cwd = os.path.join(curpath, folder)
if result:
assert config.find_config(cwd).endswith(result)
else:
assert config.find_config(cwd) == result
@pytest.mark.parametrize('folder, result', [
('configtest', 'vrun.cfg'),
('configtest/vrun_ini', 'vrun.ini'),
('configtest/setup_cfg', 'setup.cfg'),
])
def test_config_from_file(folder, result):
curpath = os.path.dirname(os.path.realpath(__file__))
cwd = os.path.join(curpath, folder)
config_file = config.find_config(cwd)
assert isinstance(config.config_from_file(config_file), ConfigParser)
|
<commit_before><commit_msg>Add tests for ancillary functions<commit_after>import os
import pytest
from vrun import config
from vrun.compat import ConfigParser
@pytest.mark.parametrize('parts, result', [
(
['simple'],
['simple']
),
(
['multiple', 'simple'],
['multiple', 'simple']
),
(
['with', '"quotes"'],
['with', '"quotes"']
),
(
['"testing', 'quote', 'support"'],
['testing quote support']
),
(
["'testing", 'quote', "support'"],
['testing quote support']
),
(
['"testing', '\'quote', 'support"'],
['testing \'quote support']
),
(
['"testing', '\'quote\'', 'support"'],
['testing \'quote\' support']
),
(
['"testing', '\'quote', '\'support"'],
['testing \'quote \'support']
),
(
['""'],
['""']
),
(
['" ', ' "'],
[' ']
),
])
def test_quoted_combine(parts, result):
assert list(config.quoted_combine(parts)) == result
@pytest.mark.parametrize('parts', [
['"testing', '\'quote', '"support"'],
['" ', '""'],
['"test', '"ing'],
])
def test_quoted_combine_invalid(parts):
with pytest.raises(ValueError):
assert list(config.quoted_combine(parts))
@pytest.mark.parametrize('folder, result', [
('configtest', 'vrun.cfg'),
('configtest/vrun_ini', 'vrun.ini'),
('configtest/setup_cfg', 'setup.cfg'),
('configtest/setup_cfg_no_section', None),
])
def test_find_config(folder, result):
curpath = os.path.dirname(os.path.realpath(__file__))
cwd = os.path.join(curpath, folder)
if result:
assert config.find_config(cwd).endswith(result)
else:
assert config.find_config(cwd) == result
@pytest.mark.parametrize('folder, result', [
('configtest', 'vrun.cfg'),
('configtest/vrun_ini', 'vrun.ini'),
('configtest/setup_cfg', 'setup.cfg'),
])
def test_config_from_file(folder, result):
curpath = os.path.dirname(os.path.realpath(__file__))
cwd = os.path.join(curpath, folder)
config_file = config.find_config(cwd)
assert isinstance(config.config_from_file(config_file), ConfigParser)
|
|
5d990443a3157a1e8061e81d9bb21cfcde6a4d2b
|
server/rest/postgres_geojson.py
|
server/rest/postgres_geojson.py
|
import ast
from girder.api import access
from girder.api.describe import Description
from girder.api.rest import Resource
import psycopg2
# TODO: This will be changed with girder_db_items
def connect_to_gryphon(host="localhost",
port="5432",
user="username",
password="password",
dbname="gryphon"):
try:
conn = psycopg2.connect("dbname={} user={} host={} password={} port={}".format(dbname,
user,
host,
password,
port))
return conn
except:
print "I am unable to connect to {}".format(dbname)
class View(object):
def __init__(self, conn):
self._conn = conn
def generateQuery(self, filters):
q = []
for k in filters.keys():
_ = []
for v in ast.literal_eval(filters[k]):
_.append(""" "{}" = '{}' """.format(k, v))
q.append("(" + "or".join(_) + ")")
return "and".join(q)
def getDistinctValues(self, table, filters={}):
conn = self._conn
cur = conn.cursor()
base_query = 'SELECT DISTINCT "{}" from gryphonstates'.format(table)
if not filters:
query = base_query + ";"
else:
query = base_query + " where" + self.generateQuery(filters) + ";"
cur.execute(query)
field = sorted([i[0] for i in cur.fetchall()])
if not filters:
field.insert(0, "All")
return field
def filter(self, filters):
resp = {}
resp['NAME'] = self.getDistinctValues('NAME', filters)
resp['PRODUCTION_CATEGORY'] = self.getDistinctValues('PRODUCTION_CATEGORY', filters)
resp['CATEGORY'] = self.getDistinctValues('CATEGORY', filters)
resp['SUB_CATEGORY'] = self.getDistinctValues('SUB_CATEGORY', filters)
resp['DATA_DERIVATION'] = self.getDistinctValues('DATA_DERIVATION', filters)
return resp
class PostgresGeojson(Resource):
def __init__(self):
self.resourceName = 'minerva_postgres_geojson'
self.route('GET',(), self.postgresGeojson)
@access.user
def postgresGeojson(self, params):
conn = connect_to_gryphon()
view = View(conn)
return view.filter(params)
postgresGeojson.description = (
Description('Get geojson from postgres database')
.param('NAME', 'state name or all states', required=False,
dataType='list')
.param('PRODUCTION_CATEGORY', 'production category', required=False,
dataType='list')
.param('CATEGORY', 'category', required=False,
dataType='list')
.param('SUB_CATEGORY', 'category', required=False,
dataType='list')
.param('DATA_DERIVATION', 'data_derivation', required=False,
dataType='list')
)
|
Add the logic to filter views
|
Add the logic to filter views
|
Python
|
apache-2.0
|
Kitware/minerva,Kitware/minerva,Kitware/minerva
|
Add the logic to filter views
|
import ast
from girder.api import access
from girder.api.describe import Description
from girder.api.rest import Resource
import psycopg2
# TODO: This will be changed with girder_db_items
def connect_to_gryphon(host="localhost",
port="5432",
user="username",
password="password",
dbname="gryphon"):
try:
conn = psycopg2.connect("dbname={} user={} host={} password={} port={}".format(dbname,
user,
host,
password,
port))
return conn
except:
print "I am unable to connect to {}".format(dbname)
class View(object):
def __init__(self, conn):
self._conn = conn
def generateQuery(self, filters):
q = []
for k in filters.keys():
_ = []
for v in ast.literal_eval(filters[k]):
_.append(""" "{}" = '{}' """.format(k, v))
q.append("(" + "or".join(_) + ")")
return "and".join(q)
def getDistinctValues(self, table, filters={}):
conn = self._conn
cur = conn.cursor()
base_query = 'SELECT DISTINCT "{}" from gryphonstates'.format(table)
if not filters:
query = base_query + ";"
else:
query = base_query + " where" + self.generateQuery(filters) + ";"
cur.execute(query)
field = sorted([i[0] for i in cur.fetchall()])
if not filters:
field.insert(0, "All")
return field
def filter(self, filters):
resp = {}
resp['NAME'] = self.getDistinctValues('NAME', filters)
resp['PRODUCTION_CATEGORY'] = self.getDistinctValues('PRODUCTION_CATEGORY', filters)
resp['CATEGORY'] = self.getDistinctValues('CATEGORY', filters)
resp['SUB_CATEGORY'] = self.getDistinctValues('SUB_CATEGORY', filters)
resp['DATA_DERIVATION'] = self.getDistinctValues('DATA_DERIVATION', filters)
return resp
class PostgresGeojson(Resource):
def __init__(self):
self.resourceName = 'minerva_postgres_geojson'
self.route('GET',(), self.postgresGeojson)
@access.user
def postgresGeojson(self, params):
conn = connect_to_gryphon()
view = View(conn)
return view.filter(params)
postgresGeojson.description = (
Description('Get geojson from postgres database')
.param('NAME', 'state name or all states', required=False,
dataType='list')
.param('PRODUCTION_CATEGORY', 'production category', required=False,
dataType='list')
.param('CATEGORY', 'category', required=False,
dataType='list')
.param('SUB_CATEGORY', 'category', required=False,
dataType='list')
.param('DATA_DERIVATION', 'data_derivation', required=False,
dataType='list')
)
|
<commit_before><commit_msg>Add the logic to filter views<commit_after>
|
import ast
from girder.api import access
from girder.api.describe import Description
from girder.api.rest import Resource
import psycopg2
# TODO: This will be changed with girder_db_items
def connect_to_gryphon(host="localhost",
port="5432",
user="username",
password="password",
dbname="gryphon"):
try:
conn = psycopg2.connect("dbname={} user={} host={} password={} port={}".format(dbname,
user,
host,
password,
port))
return conn
except:
print "I am unable to connect to {}".format(dbname)
class View(object):
def __init__(self, conn):
self._conn = conn
def generateQuery(self, filters):
q = []
for k in filters.keys():
_ = []
for v in ast.literal_eval(filters[k]):
_.append(""" "{}" = '{}' """.format(k, v))
q.append("(" + "or".join(_) + ")")
return "and".join(q)
def getDistinctValues(self, table, filters={}):
conn = self._conn
cur = conn.cursor()
base_query = 'SELECT DISTINCT "{}" from gryphonstates'.format(table)
if not filters:
query = base_query + ";"
else:
query = base_query + " where" + self.generateQuery(filters) + ";"
cur.execute(query)
field = sorted([i[0] for i in cur.fetchall()])
if not filters:
field.insert(0, "All")
return field
def filter(self, filters):
resp = {}
resp['NAME'] = self.getDistinctValues('NAME', filters)
resp['PRODUCTION_CATEGORY'] = self.getDistinctValues('PRODUCTION_CATEGORY', filters)
resp['CATEGORY'] = self.getDistinctValues('CATEGORY', filters)
resp['SUB_CATEGORY'] = self.getDistinctValues('SUB_CATEGORY', filters)
resp['DATA_DERIVATION'] = self.getDistinctValues('DATA_DERIVATION', filters)
return resp
class PostgresGeojson(Resource):
def __init__(self):
self.resourceName = 'minerva_postgres_geojson'
self.route('GET',(), self.postgresGeojson)
@access.user
def postgresGeojson(self, params):
conn = connect_to_gryphon()
view = View(conn)
return view.filter(params)
postgresGeojson.description = (
Description('Get geojson from postgres database')
.param('NAME', 'state name or all states', required=False,
dataType='list')
.param('PRODUCTION_CATEGORY', 'production category', required=False,
dataType='list')
.param('CATEGORY', 'category', required=False,
dataType='list')
.param('SUB_CATEGORY', 'category', required=False,
dataType='list')
.param('DATA_DERIVATION', 'data_derivation', required=False,
dataType='list')
)
|
Add the logic to filter viewsimport ast
from girder.api import access
from girder.api.describe import Description
from girder.api.rest import Resource
import psycopg2
# TODO: This will be changed with girder_db_items
def connect_to_gryphon(host="localhost",
port="5432",
user="username",
password="password",
dbname="gryphon"):
try:
conn = psycopg2.connect("dbname={} user={} host={} password={} port={}".format(dbname,
user,
host,
password,
port))
return conn
except:
print "I am unable to connect to {}".format(dbname)
class View(object):
def __init__(self, conn):
self._conn = conn
def generateQuery(self, filters):
q = []
for k in filters.keys():
_ = []
for v in ast.literal_eval(filters[k]):
_.append(""" "{}" = '{}' """.format(k, v))
q.append("(" + "or".join(_) + ")")
return "and".join(q)
def getDistinctValues(self, table, filters={}):
conn = self._conn
cur = conn.cursor()
base_query = 'SELECT DISTINCT "{}" from gryphonstates'.format(table)
if not filters:
query = base_query + ";"
else:
query = base_query + " where" + self.generateQuery(filters) + ";"
cur.execute(query)
field = sorted([i[0] for i in cur.fetchall()])
if not filters:
field.insert(0, "All")
return field
def filter(self, filters):
resp = {}
resp['NAME'] = self.getDistinctValues('NAME', filters)
resp['PRODUCTION_CATEGORY'] = self.getDistinctValues('PRODUCTION_CATEGORY', filters)
resp['CATEGORY'] = self.getDistinctValues('CATEGORY', filters)
resp['SUB_CATEGORY'] = self.getDistinctValues('SUB_CATEGORY', filters)
resp['DATA_DERIVATION'] = self.getDistinctValues('DATA_DERIVATION', filters)
return resp
class PostgresGeojson(Resource):
def __init__(self):
self.resourceName = 'minerva_postgres_geojson'
self.route('GET',(), self.postgresGeojson)
@access.user
def postgresGeojson(self, params):
conn = connect_to_gryphon()
view = View(conn)
return view.filter(params)
postgresGeojson.description = (
Description('Get geojson from postgres database')
.param('NAME', 'state name or all states', required=False,
dataType='list')
.param('PRODUCTION_CATEGORY', 'production category', required=False,
dataType='list')
.param('CATEGORY', 'category', required=False,
dataType='list')
.param('SUB_CATEGORY', 'category', required=False,
dataType='list')
.param('DATA_DERIVATION', 'data_derivation', required=False,
dataType='list')
)
|
<commit_before><commit_msg>Add the logic to filter views<commit_after>import ast
from girder.api import access
from girder.api.describe import Description
from girder.api.rest import Resource
import psycopg2
# TODO: This will be changed with girder_db_items
def connect_to_gryphon(host="localhost",
port="5432",
user="username",
password="password",
dbname="gryphon"):
try:
conn = psycopg2.connect("dbname={} user={} host={} password={} port={}".format(dbname,
user,
host,
password,
port))
return conn
except:
print "I am unable to connect to {}".format(dbname)
class View(object):
def __init__(self, conn):
self._conn = conn
def generateQuery(self, filters):
q = []
for k in filters.keys():
_ = []
for v in ast.literal_eval(filters[k]):
_.append(""" "{}" = '{}' """.format(k, v))
q.append("(" + "or".join(_) + ")")
return "and".join(q)
def getDistinctValues(self, table, filters={}):
conn = self._conn
cur = conn.cursor()
base_query = 'SELECT DISTINCT "{}" from gryphonstates'.format(table)
if not filters:
query = base_query + ";"
else:
query = base_query + " where" + self.generateQuery(filters) + ";"
cur.execute(query)
field = sorted([i[0] for i in cur.fetchall()])
if not filters:
field.insert(0, "All")
return field
def filter(self, filters):
resp = {}
resp['NAME'] = self.getDistinctValues('NAME', filters)
resp['PRODUCTION_CATEGORY'] = self.getDistinctValues('PRODUCTION_CATEGORY', filters)
resp['CATEGORY'] = self.getDistinctValues('CATEGORY', filters)
resp['SUB_CATEGORY'] = self.getDistinctValues('SUB_CATEGORY', filters)
resp['DATA_DERIVATION'] = self.getDistinctValues('DATA_DERIVATION', filters)
return resp
class PostgresGeojson(Resource):
def __init__(self):
self.resourceName = 'minerva_postgres_geojson'
self.route('GET',(), self.postgresGeojson)
@access.user
def postgresGeojson(self, params):
conn = connect_to_gryphon()
view = View(conn)
return view.filter(params)
postgresGeojson.description = (
Description('Get geojson from postgres database')
.param('NAME', 'state name or all states', required=False,
dataType='list')
.param('PRODUCTION_CATEGORY', 'production category', required=False,
dataType='list')
.param('CATEGORY', 'category', required=False,
dataType='list')
.param('SUB_CATEGORY', 'category', required=False,
dataType='list')
.param('DATA_DERIVATION', 'data_derivation', required=False,
dataType='list')
)
|
|
4e132bbbcd8896885eb92b78c594dbd1dcfd9ee8
|
zuul/needsrecheck.py
|
zuul/needsrecheck.py
|
#!/usr/bin/python
# Copyright 2014 Rackspace Australia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# A simple script that draws graphs of zuul workload based on the output of
# the mysql reporter.
import ConfigParser
import datetime
import MySQLdb
def report():
config = ConfigParser.ConfigParser()
config.read('/etc/zuul/zuul.conf')
db = MySQLdb.connect(host=(config.get('mysql', 'host')
if config.has_option('mysql', 'host') else
'127.0.0.1'),
port=(int(config.get('mysql', 'port'))
if config.has_option('mysql', 'port') else
3306),
user=config.get('mysql', 'user'),
passwd=config.get('mysql', 'password'),
db=config.get('mysql', 'database'))
cursor = db.cursor(MySQLdb.cursors.DictCursor)
subcursor = db.cursor(MySQLdb.cursors.DictCursor)
cursor.execute('select * from %s where score=-1 and '
'datediff(date(now()), date(timestamp)) < 3;'
% config.get('mysql', 'table'))
for row in cursor:
subcursor.execute('select * from %s where score=1 and '
'number=%s and timestamp > "%s";'
% (config.get('mysql', 'table'),
row['number'], row['timestamp']))
if subcursor.rowcount > 0:
continue
print '%s,%s' % (row['number'], row['patchset'])
if __name__ == '__main__':
report()
|
Add a script to work out what to recheck after a TH failure
|
Add a script to work out what to recheck after a TH failure
|
Python
|
apache-2.0
|
rcbau/hacks,rcbau/hacks,rcbau/hacks
|
Add a script to work out what to recheck after a TH failure
|
#!/usr/bin/python
# Copyright 2014 Rackspace Australia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# A simple script that draws graphs of zuul workload based on the output of
# the mysql reporter.
import ConfigParser
import datetime
import MySQLdb
def report():
config = ConfigParser.ConfigParser()
config.read('/etc/zuul/zuul.conf')
db = MySQLdb.connect(host=(config.get('mysql', 'host')
if config.has_option('mysql', 'host') else
'127.0.0.1'),
port=(int(config.get('mysql', 'port'))
if config.has_option('mysql', 'port') else
3306),
user=config.get('mysql', 'user'),
passwd=config.get('mysql', 'password'),
db=config.get('mysql', 'database'))
cursor = db.cursor(MySQLdb.cursors.DictCursor)
subcursor = db.cursor(MySQLdb.cursors.DictCursor)
cursor.execute('select * from %s where score=-1 and '
'datediff(date(now()), date(timestamp)) < 3;'
% config.get('mysql', 'table'))
for row in cursor:
subcursor.execute('select * from %s where score=1 and '
'number=%s and timestamp > "%s";'
% (config.get('mysql', 'table'),
row['number'], row['timestamp']))
if subcursor.rowcount > 0:
continue
print '%s,%s' % (row['number'], row['patchset'])
if __name__ == '__main__':
report()
|
<commit_before><commit_msg>Add a script to work out what to recheck after a TH failure<commit_after>
|
#!/usr/bin/python
# Copyright 2014 Rackspace Australia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# A simple script that draws graphs of zuul workload based on the output of
# the mysql reporter.
import ConfigParser
import datetime
import MySQLdb
def report():
config = ConfigParser.ConfigParser()
config.read('/etc/zuul/zuul.conf')
db = MySQLdb.connect(host=(config.get('mysql', 'host')
if config.has_option('mysql', 'host') else
'127.0.0.1'),
port=(int(config.get('mysql', 'port'))
if config.has_option('mysql', 'port') else
3306),
user=config.get('mysql', 'user'),
passwd=config.get('mysql', 'password'),
db=config.get('mysql', 'database'))
cursor = db.cursor(MySQLdb.cursors.DictCursor)
subcursor = db.cursor(MySQLdb.cursors.DictCursor)
cursor.execute('select * from %s where score=-1 and '
'datediff(date(now()), date(timestamp)) < 3;'
% config.get('mysql', 'table'))
for row in cursor:
subcursor.execute('select * from %s where score=1 and '
'number=%s and timestamp > "%s";'
% (config.get('mysql', 'table'),
row['number'], row['timestamp']))
if subcursor.rowcount > 0:
continue
print '%s,%s' % (row['number'], row['patchset'])
if __name__ == '__main__':
report()
|
Add a script to work out what to recheck after a TH failure#!/usr/bin/python
# Copyright 2014 Rackspace Australia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# A simple script that draws graphs of zuul workload based on the output of
# the mysql reporter.
import ConfigParser
import datetime
import MySQLdb
def report():
config = ConfigParser.ConfigParser()
config.read('/etc/zuul/zuul.conf')
db = MySQLdb.connect(host=(config.get('mysql', 'host')
if config.has_option('mysql', 'host') else
'127.0.0.1'),
port=(int(config.get('mysql', 'port'))
if config.has_option('mysql', 'port') else
3306),
user=config.get('mysql', 'user'),
passwd=config.get('mysql', 'password'),
db=config.get('mysql', 'database'))
cursor = db.cursor(MySQLdb.cursors.DictCursor)
subcursor = db.cursor(MySQLdb.cursors.DictCursor)
cursor.execute('select * from %s where score=-1 and '
'datediff(date(now()), date(timestamp)) < 3;'
% config.get('mysql', 'table'))
for row in cursor:
subcursor.execute('select * from %s where score=1 and '
'number=%s and timestamp > "%s";'
% (config.get('mysql', 'table'),
row['number'], row['timestamp']))
if subcursor.rowcount > 0:
continue
print '%s,%s' % (row['number'], row['patchset'])
if __name__ == '__main__':
report()
|
<commit_before><commit_msg>Add a script to work out what to recheck after a TH failure<commit_after>#!/usr/bin/python
# Copyright 2014 Rackspace Australia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# A simple script that draws graphs of zuul workload based on the output of
# the mysql reporter.
import ConfigParser
import datetime
import MySQLdb
def report():
config = ConfigParser.ConfigParser()
config.read('/etc/zuul/zuul.conf')
db = MySQLdb.connect(host=(config.get('mysql', 'host')
if config.has_option('mysql', 'host') else
'127.0.0.1'),
port=(int(config.get('mysql', 'port'))
if config.has_option('mysql', 'port') else
3306),
user=config.get('mysql', 'user'),
passwd=config.get('mysql', 'password'),
db=config.get('mysql', 'database'))
cursor = db.cursor(MySQLdb.cursors.DictCursor)
subcursor = db.cursor(MySQLdb.cursors.DictCursor)
cursor.execute('select * from %s where score=-1 and '
'datediff(date(now()), date(timestamp)) < 3;'
% config.get('mysql', 'table'))
for row in cursor:
subcursor.execute('select * from %s where score=1 and '
'number=%s and timestamp > "%s";'
% (config.get('mysql', 'table'),
row['number'], row['timestamp']))
if subcursor.rowcount > 0:
continue
print '%s,%s' % (row['number'], row['patchset'])
if __name__ == '__main__':
report()
|
|
6b0eaf125309d89bf9223d5a2da0a938b4e70da5
|
feature_extractor.py
|
feature_extractor.py
|
import numpy as np
import re
def extract_train():
return extract('dataset/train.txt')
def extract_test():
return extract('dataset/test.txt')
def extract(file):
input_file = open(file)
traindata = input_file.readlines()
features = []
targets = []
for line in traindata:
formatted_line = line.replace("\n", "")
target_i = formatted_line.split(" ")[1]
feature_i = re.sub(r"(\d+):", "", formatted_line).split(" ")[2:]
targets.append(target_i)
features.append(feature_i)
return (np.array(features).astype(np.float), np.array(targets).astype(np.int))
|
Implement feature extractor from data set files
|
Implement feature extractor from data set files
|
Python
|
mit
|
trein/quora-classifier
|
Implement feature extractor from data set files
|
import numpy as np
import re
def extract_train():
return extract('dataset/train.txt')
def extract_test():
return extract('dataset/test.txt')
def extract(file):
input_file = open(file)
traindata = input_file.readlines()
features = []
targets = []
for line in traindata:
formatted_line = line.replace("\n", "")
target_i = formatted_line.split(" ")[1]
feature_i = re.sub(r"(\d+):", "", formatted_line).split(" ")[2:]
targets.append(target_i)
features.append(feature_i)
return (np.array(features).astype(np.float), np.array(targets).astype(np.int))
|
<commit_before><commit_msg>Implement feature extractor from data set files<commit_after>
|
import numpy as np
import re
def extract_train():
return extract('dataset/train.txt')
def extract_test():
return extract('dataset/test.txt')
def extract(file):
input_file = open(file)
traindata = input_file.readlines()
features = []
targets = []
for line in traindata:
formatted_line = line.replace("\n", "")
target_i = formatted_line.split(" ")[1]
feature_i = re.sub(r"(\d+):", "", formatted_line).split(" ")[2:]
targets.append(target_i)
features.append(feature_i)
return (np.array(features).astype(np.float), np.array(targets).astype(np.int))
|
Implement feature extractor from data set filesimport numpy as np
import re
def extract_train():
return extract('dataset/train.txt')
def extract_test():
return extract('dataset/test.txt')
def extract(file):
input_file = open(file)
traindata = input_file.readlines()
features = []
targets = []
for line in traindata:
formatted_line = line.replace("\n", "")
target_i = formatted_line.split(" ")[1]
feature_i = re.sub(r"(\d+):", "", formatted_line).split(" ")[2:]
targets.append(target_i)
features.append(feature_i)
return (np.array(features).astype(np.float), np.array(targets).astype(np.int))
|
<commit_before><commit_msg>Implement feature extractor from data set files<commit_after>import numpy as np
import re
def extract_train():
return extract('dataset/train.txt')
def extract_test():
return extract('dataset/test.txt')
def extract(file):
input_file = open(file)
traindata = input_file.readlines()
features = []
targets = []
for line in traindata:
formatted_line = line.replace("\n", "")
target_i = formatted_line.split(" ")[1]
feature_i = re.sub(r"(\d+):", "", formatted_line).split(" ")[2:]
targets.append(target_i)
features.append(feature_i)
return (np.array(features).astype(np.float), np.array(targets).astype(np.int))
|
|
8552542f6e23f886bae467f96e847b00327fa164
|
scripts/ci/guideline_check.py
|
scripts/ci/guideline_check.py
|
#!/usr/bin/env python3
# SPDX-License-Identifier: Apache-2.0
# Copyright (c) 2021 Intel Corporation
import os
import sh
import argparse
import re
from unidiff import PatchSet
if "ZEPHYR_BASE" not in os.environ:
exit("$ZEPHYR_BASE environment variable undefined.")
repository_path = os.environ['ZEPHYR_BASE']
sh_special_args = {
'_tty_out': False,
'_cwd': repository_path
}
coccinelle_scripts = ["/scripts/coccinelle/reserved_names.cocci",
"/scripts/coccinelle/same_identifier.cocci",
"/scripts/coccinelle/identifier_length.cocci",
]
def parse_coccinelle(contents: str, violations: dict):
reg = re.compile("([a-zA-Z0-9/]*\\.[ch]:[0-9]*)(:[0-9\\-]*: )(.*)")
for line in contents.split("\n"):
r = reg.match(line)
if r:
f = r.group(1)
if f in violations:
violations[f].append(r.group(3))
else:
violations[r.group(1)] = [r.group(3)]
def parse_args():
parser = argparse.ArgumentParser(
description="Check if change requires full twister")
parser.add_argument('-c', '--commits', default=None,
help="Commit range in the form: a..b")
return parser.parse_args()
def main():
args = parse_args()
if not args.commits:
exit("missing commit range")
commit = sh.git("diff", args.commits, **sh_special_args)
patch_set = PatchSet(commit)
zephyr_base = os.getenv("ZEPHYR_BASE")
violations = {}
numViolations = 0
for f in patch_set:
if not f.path.endswith(".c") and not f.path.endswith(".h") or not os.path.exists(zephyr_base + "/" + f.path):
continue
for script in coccinelle_scripts:
script_path = os.getenv("ZEPHYR_BASE") + "/" + script
cocci = sh.coccicheck(
"--mode=report",
"--cocci=" +
script_path,
f.path,
**sh_special_args)
parse_coccinelle(cocci, violations)
for hunk in f:
for line in hunk:
if line.is_added:
violation = "{}:{}".format(f.path, line.target_line_no)
if violation in violations:
numViolations += 1
print(
"{}:{}".format(
violation, "\t\n".join(
violations[violation])))
return numViolations
if __name__ == "__main__":
ret = main()
exit(ret)
|
Apply coccinelle scripts in git diffs
|
ci: Apply coccinelle scripts in git diffs
This scripts receives the same parameter of what_changed.py. And run
coccinelle scripts for code guideline compliance in the given git
commits. e.g: ./guideline_check.py --commits origin/master..HEAD
Signed-off-by: Flavio Ceolin <979b9165500b0741b9d0500e2efd74fc1547bff7@intel.com>
|
Python
|
apache-2.0
|
zephyrproject-rtos/zephyr,finikorg/zephyr,zephyrproject-rtos/zephyr,nashif/zephyr,nashif/zephyr,galak/zephyr,finikorg/zephyr,finikorg/zephyr,zephyrproject-rtos/zephyr,Vudentz/zephyr,zephyrproject-rtos/zephyr,galak/zephyr,nashif/zephyr,Vudentz/zephyr,zephyrproject-rtos/zephyr,galak/zephyr,nashif/zephyr,galak/zephyr,Vudentz/zephyr,galak/zephyr,Vudentz/zephyr,finikorg/zephyr,Vudentz/zephyr,nashif/zephyr,Vudentz/zephyr,finikorg/zephyr
|
ci: Apply coccinelle scripts in git diffs
This scripts receives the same parameter of what_changed.py. And run
coccinelle scripts for code guideline compliance in the given git
commits. e.g: ./guideline_check.py --commits origin/master..HEAD
Signed-off-by: Flavio Ceolin <979b9165500b0741b9d0500e2efd74fc1547bff7@intel.com>
|
#!/usr/bin/env python3
# SPDX-License-Identifier: Apache-2.0
# Copyright (c) 2021 Intel Corporation
import os
import sh
import argparse
import re
from unidiff import PatchSet
if "ZEPHYR_BASE" not in os.environ:
exit("$ZEPHYR_BASE environment variable undefined.")
repository_path = os.environ['ZEPHYR_BASE']
sh_special_args = {
'_tty_out': False,
'_cwd': repository_path
}
coccinelle_scripts = ["/scripts/coccinelle/reserved_names.cocci",
"/scripts/coccinelle/same_identifier.cocci",
"/scripts/coccinelle/identifier_length.cocci",
]
def parse_coccinelle(contents: str, violations: dict):
reg = re.compile("([a-zA-Z0-9/]*\\.[ch]:[0-9]*)(:[0-9\\-]*: )(.*)")
for line in contents.split("\n"):
r = reg.match(line)
if r:
f = r.group(1)
if f in violations:
violations[f].append(r.group(3))
else:
violations[r.group(1)] = [r.group(3)]
def parse_args():
parser = argparse.ArgumentParser(
description="Check if change requires full twister")
parser.add_argument('-c', '--commits', default=None,
help="Commit range in the form: a..b")
return parser.parse_args()
def main():
args = parse_args()
if not args.commits:
exit("missing commit range")
commit = sh.git("diff", args.commits, **sh_special_args)
patch_set = PatchSet(commit)
zephyr_base = os.getenv("ZEPHYR_BASE")
violations = {}
numViolations = 0
for f in patch_set:
if not f.path.endswith(".c") and not f.path.endswith(".h") or not os.path.exists(zephyr_base + "/" + f.path):
continue
for script in coccinelle_scripts:
script_path = os.getenv("ZEPHYR_BASE") + "/" + script
cocci = sh.coccicheck(
"--mode=report",
"--cocci=" +
script_path,
f.path,
**sh_special_args)
parse_coccinelle(cocci, violations)
for hunk in f:
for line in hunk:
if line.is_added:
violation = "{}:{}".format(f.path, line.target_line_no)
if violation in violations:
numViolations += 1
print(
"{}:{}".format(
violation, "\t\n".join(
violations[violation])))
return numViolations
if __name__ == "__main__":
ret = main()
exit(ret)
|
<commit_before><commit_msg>ci: Apply coccinelle scripts in git diffs
This scripts receives the same parameter of what_changed.py. And run
coccinelle scripts for code guideline compliance in the given git
commits. e.g: ./guideline_check.py --commits origin/master..HEAD
Signed-off-by: Flavio Ceolin <979b9165500b0741b9d0500e2efd74fc1547bff7@intel.com><commit_after>
|
#!/usr/bin/env python3
# SPDX-License-Identifier: Apache-2.0
# Copyright (c) 2021 Intel Corporation
import os
import sh
import argparse
import re
from unidiff import PatchSet
if "ZEPHYR_BASE" not in os.environ:
exit("$ZEPHYR_BASE environment variable undefined.")
repository_path = os.environ['ZEPHYR_BASE']
sh_special_args = {
'_tty_out': False,
'_cwd': repository_path
}
coccinelle_scripts = ["/scripts/coccinelle/reserved_names.cocci",
"/scripts/coccinelle/same_identifier.cocci",
"/scripts/coccinelle/identifier_length.cocci",
]
def parse_coccinelle(contents: str, violations: dict):
reg = re.compile("([a-zA-Z0-9/]*\\.[ch]:[0-9]*)(:[0-9\\-]*: )(.*)")
for line in contents.split("\n"):
r = reg.match(line)
if r:
f = r.group(1)
if f in violations:
violations[f].append(r.group(3))
else:
violations[r.group(1)] = [r.group(3)]
def parse_args():
parser = argparse.ArgumentParser(
description="Check if change requires full twister")
parser.add_argument('-c', '--commits', default=None,
help="Commit range in the form: a..b")
return parser.parse_args()
def main():
args = parse_args()
if not args.commits:
exit("missing commit range")
commit = sh.git("diff", args.commits, **sh_special_args)
patch_set = PatchSet(commit)
zephyr_base = os.getenv("ZEPHYR_BASE")
violations = {}
numViolations = 0
for f in patch_set:
if not f.path.endswith(".c") and not f.path.endswith(".h") or not os.path.exists(zephyr_base + "/" + f.path):
continue
for script in coccinelle_scripts:
script_path = os.getenv("ZEPHYR_BASE") + "/" + script
cocci = sh.coccicheck(
"--mode=report",
"--cocci=" +
script_path,
f.path,
**sh_special_args)
parse_coccinelle(cocci, violations)
for hunk in f:
for line in hunk:
if line.is_added:
violation = "{}:{}".format(f.path, line.target_line_no)
if violation in violations:
numViolations += 1
print(
"{}:{}".format(
violation, "\t\n".join(
violations[violation])))
return numViolations
if __name__ == "__main__":
ret = main()
exit(ret)
|
ci: Apply coccinelle scripts in git diffs
This scripts receives the same parameter of what_changed.py. And run
coccinelle scripts for code guideline compliance in the given git
commits. e.g: ./guideline_check.py --commits origin/master..HEAD
Signed-off-by: Flavio Ceolin <979b9165500b0741b9d0500e2efd74fc1547bff7@intel.com>#!/usr/bin/env python3
# SPDX-License-Identifier: Apache-2.0
# Copyright (c) 2021 Intel Corporation
import os
import sh
import argparse
import re
from unidiff import PatchSet
if "ZEPHYR_BASE" not in os.environ:
exit("$ZEPHYR_BASE environment variable undefined.")
repository_path = os.environ['ZEPHYR_BASE']
sh_special_args = {
'_tty_out': False,
'_cwd': repository_path
}
coccinelle_scripts = ["/scripts/coccinelle/reserved_names.cocci",
"/scripts/coccinelle/same_identifier.cocci",
"/scripts/coccinelle/identifier_length.cocci",
]
def parse_coccinelle(contents: str, violations: dict):
reg = re.compile("([a-zA-Z0-9/]*\\.[ch]:[0-9]*)(:[0-9\\-]*: )(.*)")
for line in contents.split("\n"):
r = reg.match(line)
if r:
f = r.group(1)
if f in violations:
violations[f].append(r.group(3))
else:
violations[r.group(1)] = [r.group(3)]
def parse_args():
parser = argparse.ArgumentParser(
description="Check if change requires full twister")
parser.add_argument('-c', '--commits', default=None,
help="Commit range in the form: a..b")
return parser.parse_args()
def main():
args = parse_args()
if not args.commits:
exit("missing commit range")
commit = sh.git("diff", args.commits, **sh_special_args)
patch_set = PatchSet(commit)
zephyr_base = os.getenv("ZEPHYR_BASE")
violations = {}
numViolations = 0
for f in patch_set:
if not f.path.endswith(".c") and not f.path.endswith(".h") or not os.path.exists(zephyr_base + "/" + f.path):
continue
for script in coccinelle_scripts:
script_path = os.getenv("ZEPHYR_BASE") + "/" + script
cocci = sh.coccicheck(
"--mode=report",
"--cocci=" +
script_path,
f.path,
**sh_special_args)
parse_coccinelle(cocci, violations)
for hunk in f:
for line in hunk:
if line.is_added:
violation = "{}:{}".format(f.path, line.target_line_no)
if violation in violations:
numViolations += 1
print(
"{}:{}".format(
violation, "\t\n".join(
violations[violation])))
return numViolations
if __name__ == "__main__":
ret = main()
exit(ret)
|
<commit_before><commit_msg>ci: Apply coccinelle scripts in git diffs
This scripts receives the same parameter of what_changed.py. And run
coccinelle scripts for code guideline compliance in the given git
commits. e.g: ./guideline_check.py --commits origin/master..HEAD
Signed-off-by: Flavio Ceolin <979b9165500b0741b9d0500e2efd74fc1547bff7@intel.com><commit_after>#!/usr/bin/env python3
# SPDX-License-Identifier: Apache-2.0
# Copyright (c) 2021 Intel Corporation
import os
import sh
import argparse
import re
from unidiff import PatchSet
if "ZEPHYR_BASE" not in os.environ:
exit("$ZEPHYR_BASE environment variable undefined.")
repository_path = os.environ['ZEPHYR_BASE']
sh_special_args = {
'_tty_out': False,
'_cwd': repository_path
}
coccinelle_scripts = ["/scripts/coccinelle/reserved_names.cocci",
"/scripts/coccinelle/same_identifier.cocci",
"/scripts/coccinelle/identifier_length.cocci",
]
def parse_coccinelle(contents: str, violations: dict):
reg = re.compile("([a-zA-Z0-9/]*\\.[ch]:[0-9]*)(:[0-9\\-]*: )(.*)")
for line in contents.split("\n"):
r = reg.match(line)
if r:
f = r.group(1)
if f in violations:
violations[f].append(r.group(3))
else:
violations[r.group(1)] = [r.group(3)]
def parse_args():
parser = argparse.ArgumentParser(
description="Check if change requires full twister")
parser.add_argument('-c', '--commits', default=None,
help="Commit range in the form: a..b")
return parser.parse_args()
def main():
args = parse_args()
if not args.commits:
exit("missing commit range")
commit = sh.git("diff", args.commits, **sh_special_args)
patch_set = PatchSet(commit)
zephyr_base = os.getenv("ZEPHYR_BASE")
violations = {}
numViolations = 0
for f in patch_set:
if not f.path.endswith(".c") and not f.path.endswith(".h") or not os.path.exists(zephyr_base + "/" + f.path):
continue
for script in coccinelle_scripts:
script_path = os.getenv("ZEPHYR_BASE") + "/" + script
cocci = sh.coccicheck(
"--mode=report",
"--cocci=" +
script_path,
f.path,
**sh_special_args)
parse_coccinelle(cocci, violations)
for hunk in f:
for line in hunk:
if line.is_added:
violation = "{}:{}".format(f.path, line.target_line_no)
if violation in violations:
numViolations += 1
print(
"{}:{}".format(
violation, "\t\n".join(
violations[violation])))
return numViolations
if __name__ == "__main__":
ret = main()
exit(ret)
|
|
67f13ee9a8cf04a863f867f6468b8b46da99b47a
|
rules/mpc.py
|
rules/mpc.py
|
import xyz
import os
import shutil
class Mpc(xyz.BuildProtocol):
pkg_name = 'mpc'
deps = ['gmp', 'mpfr']
def configure(self, builder, config):
builder.host_lib_configure(config=config)
rules = Mpc()
|
Add support for MPC package
|
Add support for MPC package
|
Python
|
mit
|
BreakawayConsulting/xyz
|
Add support for MPC package
|
import xyz
import os
import shutil
class Mpc(xyz.BuildProtocol):
pkg_name = 'mpc'
deps = ['gmp', 'mpfr']
def configure(self, builder, config):
builder.host_lib_configure(config=config)
rules = Mpc()
|
<commit_before><commit_msg>Add support for MPC package<commit_after>
|
import xyz
import os
import shutil
class Mpc(xyz.BuildProtocol):
pkg_name = 'mpc'
deps = ['gmp', 'mpfr']
def configure(self, builder, config):
builder.host_lib_configure(config=config)
rules = Mpc()
|
Add support for MPC packageimport xyz
import os
import shutil
class Mpc(xyz.BuildProtocol):
pkg_name = 'mpc'
deps = ['gmp', 'mpfr']
def configure(self, builder, config):
builder.host_lib_configure(config=config)
rules = Mpc()
|
<commit_before><commit_msg>Add support for MPC package<commit_after>import xyz
import os
import shutil
class Mpc(xyz.BuildProtocol):
pkg_name = 'mpc'
deps = ['gmp', 'mpfr']
def configure(self, builder, config):
builder.host_lib_configure(config=config)
rules = Mpc()
|
|
abe11541d94a185456a79286bb9e5800c44305c7
|
vote.py
|
vote.py
|
#!/usr/bin/python
import commands
counter =0
while counter <=100 :
#alocate new Elastic IP, and get the allocation id
(stauts,output) = commands.getstatusoutput("aws ec2 allocate-address")
allocation_id = output.split('\t') [0]
#associate the allocated ip to indicated ec2 instance
(status,output) = commands.getstatusoutput("aws ec2 associate-address --instance-id i-9afe2b90 --allocation-id "+allocation_id)
#Sleep for 5 seconds
(status,output) = commands.getstatusoutput("sleep 10")
#release allocated Elastic IP
(status,output) = commands.getstatusoutput("aws ec2 release-address --allocation-id " + allocation_id)
counter +=1
print counter
|
Add one script to use AWS CLI to allocate/associate/release EIP automatically.
|
Add one script to use AWS CLI to allocate/associate/release EIP automatically.
|
Python
|
mit
|
yuecong/tools,yuecong/tools,yuecong/tools,yuecong/tools
|
Add one script to use AWS CLI to allocate/associate/release EIP automatically.
|
#!/usr/bin/python
import commands
counter =0
while counter <=100 :
#alocate new Elastic IP, and get the allocation id
(stauts,output) = commands.getstatusoutput("aws ec2 allocate-address")
allocation_id = output.split('\t') [0]
#associate the allocated ip to indicated ec2 instance
(status,output) = commands.getstatusoutput("aws ec2 associate-address --instance-id i-9afe2b90 --allocation-id "+allocation_id)
#Sleep for 5 seconds
(status,output) = commands.getstatusoutput("sleep 10")
#release allocated Elastic IP
(status,output) = commands.getstatusoutput("aws ec2 release-address --allocation-id " + allocation_id)
counter +=1
print counter
|
<commit_before><commit_msg>Add one script to use AWS CLI to allocate/associate/release EIP automatically.<commit_after>
|
#!/usr/bin/python
import commands
counter =0
while counter <=100 :
#alocate new Elastic IP, and get the allocation id
(stauts,output) = commands.getstatusoutput("aws ec2 allocate-address")
allocation_id = output.split('\t') [0]
#associate the allocated ip to indicated ec2 instance
(status,output) = commands.getstatusoutput("aws ec2 associate-address --instance-id i-9afe2b90 --allocation-id "+allocation_id)
#Sleep for 5 seconds
(status,output) = commands.getstatusoutput("sleep 10")
#release allocated Elastic IP
(status,output) = commands.getstatusoutput("aws ec2 release-address --allocation-id " + allocation_id)
counter +=1
print counter
|
Add one script to use AWS CLI to allocate/associate/release EIP automatically.#!/usr/bin/python
import commands
counter =0
while counter <=100 :
#alocate new Elastic IP, and get the allocation id
(stauts,output) = commands.getstatusoutput("aws ec2 allocate-address")
allocation_id = output.split('\t') [0]
#associate the allocated ip to indicated ec2 instance
(status,output) = commands.getstatusoutput("aws ec2 associate-address --instance-id i-9afe2b90 --allocation-id "+allocation_id)
#Sleep for 5 seconds
(status,output) = commands.getstatusoutput("sleep 10")
#release allocated Elastic IP
(status,output) = commands.getstatusoutput("aws ec2 release-address --allocation-id " + allocation_id)
counter +=1
print counter
|
<commit_before><commit_msg>Add one script to use AWS CLI to allocate/associate/release EIP automatically.<commit_after>#!/usr/bin/python
import commands
counter =0
while counter <=100 :
#alocate new Elastic IP, and get the allocation id
(stauts,output) = commands.getstatusoutput("aws ec2 allocate-address")
allocation_id = output.split('\t') [0]
#associate the allocated ip to indicated ec2 instance
(status,output) = commands.getstatusoutput("aws ec2 associate-address --instance-id i-9afe2b90 --allocation-id "+allocation_id)
#Sleep for 5 seconds
(status,output) = commands.getstatusoutput("sleep 10")
#release allocated Elastic IP
(status,output) = commands.getstatusoutput("aws ec2 release-address --allocation-id " + allocation_id)
counter +=1
print counter
|
|
8f322fd8dab9447721e6e1bfbb1d8776f66b8740
|
stats_generator.py
|
stats_generator.py
|
#!/usr/bin/env python
from datetime import date, timedelta
import redis
def perdelta(start, end, delta):
curr = start
while curr < end:
yield curr
curr += delta
r = redis.Redis('localhost', 6334, db=1)
for result in perdelta(date(2015, 03, 01), date(2015, 12, 12), timedelta(days=1)):
val = r.zcard('{}_submissions'.format(result))
print('{},{}'.format(result, val))
|
Add a script to generate stats
|
Add a script to generate stats
|
Python
|
agpl-3.0
|
CIRCL/url-abuse,CIRCL/url-abuse,CIRCL/url-abuse,CIRCL/url-abuse
|
Add a script to generate stats
|
#!/usr/bin/env python
from datetime import date, timedelta
import redis
def perdelta(start, end, delta):
curr = start
while curr < end:
yield curr
curr += delta
r = redis.Redis('localhost', 6334, db=1)
for result in perdelta(date(2015, 03, 01), date(2015, 12, 12), timedelta(days=1)):
val = r.zcard('{}_submissions'.format(result))
print('{},{}'.format(result, val))
|
<commit_before><commit_msg>Add a script to generate stats<commit_after>
|
#!/usr/bin/env python
from datetime import date, timedelta
import redis
def perdelta(start, end, delta):
curr = start
while curr < end:
yield curr
curr += delta
r = redis.Redis('localhost', 6334, db=1)
for result in perdelta(date(2015, 03, 01), date(2015, 12, 12), timedelta(days=1)):
val = r.zcard('{}_submissions'.format(result))
print('{},{}'.format(result, val))
|
Add a script to generate stats#!/usr/bin/env python
from datetime import date, timedelta
import redis
def perdelta(start, end, delta):
curr = start
while curr < end:
yield curr
curr += delta
r = redis.Redis('localhost', 6334, db=1)
for result in perdelta(date(2015, 03, 01), date(2015, 12, 12), timedelta(days=1)):
val = r.zcard('{}_submissions'.format(result))
print('{},{}'.format(result, val))
|
<commit_before><commit_msg>Add a script to generate stats<commit_after>#!/usr/bin/env python
from datetime import date, timedelta
import redis
def perdelta(start, end, delta):
curr = start
while curr < end:
yield curr
curr += delta
r = redis.Redis('localhost', 6334, db=1)
for result in perdelta(date(2015, 03, 01), date(2015, 12, 12), timedelta(days=1)):
val = r.zcard('{}_submissions'.format(result))
print('{},{}'.format(result, val))
|
|
8baf08fd22a0e66734e927607aaab9b1a0bdd7f4
|
time-complexity/time_complexity.py
|
time-complexity/time_complexity.py
|
#Comparison of different time complexities.
#####################
#constant time - O(1)
#####################
def constant(n):
result = n * n
return result
##############################
#Logarithmic time - O(log(n))
##############################
def logarithmic(n):
result = 0
while n > 1:
n //= 2
result += 1
return result
###################
#Linear Time - O(n)
###################
def linear(n,A):
for i in range(n):
if A[i] == 0:
return 0
return 1
#########################
# Quadratic time - O(n^2)
#########################
def quadratic(n):
result = 0
for i in range(n):
for j in range(1, n):
result += 1
return result
|
Add time-complexity: basic python examples
|
Add time-complexity: basic python examples
|
Python
|
cc0-1.0
|
ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms
|
Add time-complexity: basic python examples
|
#Comparison of different time complexities.
#####################
#constant time - O(1)
#####################
def constant(n):
result = n * n
return result
##############################
#Logarithmic time - O(log(n))
##############################
def logarithmic(n):
result = 0
while n > 1:
n //= 2
result += 1
return result
###################
#Linear Time - O(n)
###################
def linear(n,A):
for i in range(n):
if A[i] == 0:
return 0
return 1
#########################
# Quadratic time - O(n^2)
#########################
def quadratic(n):
result = 0
for i in range(n):
for j in range(1, n):
result += 1
return result
|
<commit_before><commit_msg>Add time-complexity: basic python examples<commit_after>
|
#Comparison of different time complexities.
#####################
#constant time - O(1)
#####################
def constant(n):
result = n * n
return result
##############################
#Logarithmic time - O(log(n))
##############################
def logarithmic(n):
result = 0
while n > 1:
n //= 2
result += 1
return result
###################
#Linear Time - O(n)
###################
def linear(n,A):
for i in range(n):
if A[i] == 0:
return 0
return 1
#########################
# Quadratic time - O(n^2)
#########################
def quadratic(n):
result = 0
for i in range(n):
for j in range(1, n):
result += 1
return result
|
Add time-complexity: basic python examples#Comparison of different time complexities.
#####################
#constant time - O(1)
#####################
def constant(n):
result = n * n
return result
##############################
#Logarithmic time - O(log(n))
##############################
def logarithmic(n):
result = 0
while n > 1:
n //= 2
result += 1
return result
###################
#Linear Time - O(n)
###################
def linear(n,A):
for i in range(n):
if A[i] == 0:
return 0
return 1
#########################
# Quadratic time - O(n^2)
#########################
def quadratic(n):
result = 0
for i in range(n):
for j in range(1, n):
result += 1
return result
|
<commit_before><commit_msg>Add time-complexity: basic python examples<commit_after>#Comparison of different time complexities.
#####################
#constant time - O(1)
#####################
def constant(n):
result = n * n
return result
##############################
#Logarithmic time - O(log(n))
##############################
def logarithmic(n):
result = 0
while n > 1:
n //= 2
result += 1
return result
###################
#Linear Time - O(n)
###################
def linear(n,A):
for i in range(n):
if A[i] == 0:
return 0
return 1
#########################
# Quadratic time - O(n^2)
#########################
def quadratic(n):
result = 0
for i in range(n):
for j in range(1, n):
result += 1
return result
|
|
bc724fca4be4efa3cdcf78f9efb2da88e3dcac2c
|
tests/gengraphs.py
|
tests/gengraphs.py
|
import random
if __name__ == '__main__':
V, E = 10, 50
path = 'directed_{}_{}.yolo'.format(V, E)
title = 'YOLO_{}_{}'.format(V, E)
with open(path, 'w') as f:
f.write(title + '\n')
f.write(str(V) + '\n')
for _ in xrange(V):
rname = ''.join(map(chr, random.sample(xrange(65, 91), 5))) + '\n'
f.write(rname)
f.write(str(E) + '\n')
for _ in xrange(E):
start = random.randint(0, V-1)
end = random.randint(0, V-1)
typ = random.choice(['Road', 'Train', 'Plane'])
dist = random.uniform(100.0, 3000.0)
f.write('{} {} {} {}\n'.format(start, end, typ, dist))
|
Add a random directed graphs generator
|
Add a random directed graphs generator
|
Python
|
unlicense
|
Thooms/yolo-graphs,Thooms/yolo-graphs,Thooms/yolo-graphs
|
Add a random directed graphs generator
|
import random
if __name__ == '__main__':
V, E = 10, 50
path = 'directed_{}_{}.yolo'.format(V, E)
title = 'YOLO_{}_{}'.format(V, E)
with open(path, 'w') as f:
f.write(title + '\n')
f.write(str(V) + '\n')
for _ in xrange(V):
rname = ''.join(map(chr, random.sample(xrange(65, 91), 5))) + '\n'
f.write(rname)
f.write(str(E) + '\n')
for _ in xrange(E):
start = random.randint(0, V-1)
end = random.randint(0, V-1)
typ = random.choice(['Road', 'Train', 'Plane'])
dist = random.uniform(100.0, 3000.0)
f.write('{} {} {} {}\n'.format(start, end, typ, dist))
|
<commit_before><commit_msg>Add a random directed graphs generator<commit_after>
|
import random
if __name__ == '__main__':
V, E = 10, 50
path = 'directed_{}_{}.yolo'.format(V, E)
title = 'YOLO_{}_{}'.format(V, E)
with open(path, 'w') as f:
f.write(title + '\n')
f.write(str(V) + '\n')
for _ in xrange(V):
rname = ''.join(map(chr, random.sample(xrange(65, 91), 5))) + '\n'
f.write(rname)
f.write(str(E) + '\n')
for _ in xrange(E):
start = random.randint(0, V-1)
end = random.randint(0, V-1)
typ = random.choice(['Road', 'Train', 'Plane'])
dist = random.uniform(100.0, 3000.0)
f.write('{} {} {} {}\n'.format(start, end, typ, dist))
|
Add a random directed graphs generatorimport random
if __name__ == '__main__':
V, E = 10, 50
path = 'directed_{}_{}.yolo'.format(V, E)
title = 'YOLO_{}_{}'.format(V, E)
with open(path, 'w') as f:
f.write(title + '\n')
f.write(str(V) + '\n')
for _ in xrange(V):
rname = ''.join(map(chr, random.sample(xrange(65, 91), 5))) + '\n'
f.write(rname)
f.write(str(E) + '\n')
for _ in xrange(E):
start = random.randint(0, V-1)
end = random.randint(0, V-1)
typ = random.choice(['Road', 'Train', 'Plane'])
dist = random.uniform(100.0, 3000.0)
f.write('{} {} {} {}\n'.format(start, end, typ, dist))
|
<commit_before><commit_msg>Add a random directed graphs generator<commit_after>import random
if __name__ == '__main__':
V, E = 10, 50
path = 'directed_{}_{}.yolo'.format(V, E)
title = 'YOLO_{}_{}'.format(V, E)
with open(path, 'w') as f:
f.write(title + '\n')
f.write(str(V) + '\n')
for _ in xrange(V):
rname = ''.join(map(chr, random.sample(xrange(65, 91), 5))) + '\n'
f.write(rname)
f.write(str(E) + '\n')
for _ in xrange(E):
start = random.randint(0, V-1)
end = random.randint(0, V-1)
typ = random.choice(['Road', 'Train', 'Plane'])
dist = random.uniform(100.0, 3000.0)
f.write('{} {} {} {}\n'.format(start, end, typ, dist))
|
|
5f910c64dcea524c1e5c887e1e03cf98dcb7d885
|
scripts/migrate.py
|
scripts/migrate.py
|
import configparser
import os
import psycopg2.extras
SETTINGS_FILE_PATH = os.path.join(os.path.dirname(__file__), "../settings.ini")
config = configparser.ConfigParser()
config.read(SETTINGS_FILE_PATH)
username = config["DATABASE"]["Username"]
password = config["DATABASE"]["Password"]
name = config["DATABASE"]["Database"]
host = config["DATABASE"]["Host"]
port = config["DATABASE"]["Port"]
connection = psycopg2.connect(dbname=name, user=username, password=password, host=host, port=port)
with connection, connection.cursor(cursor_factory=psycopg2.extras.DictCursor) as cursor:
cursor.execute("SELECT * FROM topicdb.topicmap")
records = cursor.fetchall()
for record in records:
cursor.execute(
"INSERT INTO topicdb.user_topicmap (user_identifier, topicmap_identifier, user_name, owner, collaboration_mode) VALUES (%s, %s, %s, %s, %s)",
(record['user_identifier'],
record['identifier'],
'',
True,
'edit'))
|
Add user-topic maps migration script
|
Add user-topic maps migration script
|
Python
|
mit
|
brettkromkamp/topic_db
|
Add user-topic maps migration script
|
import configparser
import os
import psycopg2.extras
SETTINGS_FILE_PATH = os.path.join(os.path.dirname(__file__), "../settings.ini")
config = configparser.ConfigParser()
config.read(SETTINGS_FILE_PATH)
username = config["DATABASE"]["Username"]
password = config["DATABASE"]["Password"]
name = config["DATABASE"]["Database"]
host = config["DATABASE"]["Host"]
port = config["DATABASE"]["Port"]
connection = psycopg2.connect(dbname=name, user=username, password=password, host=host, port=port)
with connection, connection.cursor(cursor_factory=psycopg2.extras.DictCursor) as cursor:
cursor.execute("SELECT * FROM topicdb.topicmap")
records = cursor.fetchall()
for record in records:
cursor.execute(
"INSERT INTO topicdb.user_topicmap (user_identifier, topicmap_identifier, user_name, owner, collaboration_mode) VALUES (%s, %s, %s, %s, %s)",
(record['user_identifier'],
record['identifier'],
'',
True,
'edit'))
|
<commit_before><commit_msg>Add user-topic maps migration script<commit_after>
|
import configparser
import os
import psycopg2.extras
SETTINGS_FILE_PATH = os.path.join(os.path.dirname(__file__), "../settings.ini")
config = configparser.ConfigParser()
config.read(SETTINGS_FILE_PATH)
username = config["DATABASE"]["Username"]
password = config["DATABASE"]["Password"]
name = config["DATABASE"]["Database"]
host = config["DATABASE"]["Host"]
port = config["DATABASE"]["Port"]
connection = psycopg2.connect(dbname=name, user=username, password=password, host=host, port=port)
with connection, connection.cursor(cursor_factory=psycopg2.extras.DictCursor) as cursor:
cursor.execute("SELECT * FROM topicdb.topicmap")
records = cursor.fetchall()
for record in records:
cursor.execute(
"INSERT INTO topicdb.user_topicmap (user_identifier, topicmap_identifier, user_name, owner, collaboration_mode) VALUES (%s, %s, %s, %s, %s)",
(record['user_identifier'],
record['identifier'],
'',
True,
'edit'))
|
Add user-topic maps migration scriptimport configparser
import os
import psycopg2.extras
SETTINGS_FILE_PATH = os.path.join(os.path.dirname(__file__), "../settings.ini")
config = configparser.ConfigParser()
config.read(SETTINGS_FILE_PATH)
username = config["DATABASE"]["Username"]
password = config["DATABASE"]["Password"]
name = config["DATABASE"]["Database"]
host = config["DATABASE"]["Host"]
port = config["DATABASE"]["Port"]
connection = psycopg2.connect(dbname=name, user=username, password=password, host=host, port=port)
with connection, connection.cursor(cursor_factory=psycopg2.extras.DictCursor) as cursor:
cursor.execute("SELECT * FROM topicdb.topicmap")
records = cursor.fetchall()
for record in records:
cursor.execute(
"INSERT INTO topicdb.user_topicmap (user_identifier, topicmap_identifier, user_name, owner, collaboration_mode) VALUES (%s, %s, %s, %s, %s)",
(record['user_identifier'],
record['identifier'],
'',
True,
'edit'))
|
<commit_before><commit_msg>Add user-topic maps migration script<commit_after>import configparser
import os
import psycopg2.extras
SETTINGS_FILE_PATH = os.path.join(os.path.dirname(__file__), "../settings.ini")
config = configparser.ConfigParser()
config.read(SETTINGS_FILE_PATH)
username = config["DATABASE"]["Username"]
password = config["DATABASE"]["Password"]
name = config["DATABASE"]["Database"]
host = config["DATABASE"]["Host"]
port = config["DATABASE"]["Port"]
connection = psycopg2.connect(dbname=name, user=username, password=password, host=host, port=port)
with connection, connection.cursor(cursor_factory=psycopg2.extras.DictCursor) as cursor:
cursor.execute("SELECT * FROM topicdb.topicmap")
records = cursor.fetchall()
for record in records:
cursor.execute(
"INSERT INTO topicdb.user_topicmap (user_identifier, topicmap_identifier, user_name, owner, collaboration_mode) VALUES (%s, %s, %s, %s, %s)",
(record['user_identifier'],
record['identifier'],
'',
True,
'edit'))
|
|
5a1446e6bccf6b0f89f4c616df7e41072f20b160
|
scratchpad/map_test.py
|
scratchpad/map_test.py
|
#!/usr/bin/env python3
def map_range(x, in_min, in_max, out_min, out_max):
out_delta = out_max - out_min
in_delta = in_max - in_min
return (x - in_min) * out_delta / in_delta + out_min
def show_value(value):
print(map_range(value, 650, 978, 12, 18))
show_value(650)
show_value(978)
show_value(0)
show_value(1023)
|
Add sanity check for range map function
|
Add sanity check for range map function
|
Python
|
mit
|
gizmo-cda/g2x,gizmo-cda/g2x,gizmo-cda/g2x,thelonious/g2x,gizmo-cda/g2x,thelonious/g2x
|
Add sanity check for range map function
|
#!/usr/bin/env python3
def map_range(x, in_min, in_max, out_min, out_max):
out_delta = out_max - out_min
in_delta = in_max - in_min
return (x - in_min) * out_delta / in_delta + out_min
def show_value(value):
print(map_range(value, 650, 978, 12, 18))
show_value(650)
show_value(978)
show_value(0)
show_value(1023)
|
<commit_before><commit_msg>Add sanity check for range map function<commit_after>
|
#!/usr/bin/env python3
def map_range(x, in_min, in_max, out_min, out_max):
out_delta = out_max - out_min
in_delta = in_max - in_min
return (x - in_min) * out_delta / in_delta + out_min
def show_value(value):
print(map_range(value, 650, 978, 12, 18))
show_value(650)
show_value(978)
show_value(0)
show_value(1023)
|
Add sanity check for range map function#!/usr/bin/env python3
def map_range(x, in_min, in_max, out_min, out_max):
out_delta = out_max - out_min
in_delta = in_max - in_min
return (x - in_min) * out_delta / in_delta + out_min
def show_value(value):
print(map_range(value, 650, 978, 12, 18))
show_value(650)
show_value(978)
show_value(0)
show_value(1023)
|
<commit_before><commit_msg>Add sanity check for range map function<commit_after>#!/usr/bin/env python3
def map_range(x, in_min, in_max, out_min, out_max):
out_delta = out_max - out_min
in_delta = in_max - in_min
return (x - in_min) * out_delta / in_delta + out_min
def show_value(value):
print(map_range(value, 650, 978, 12, 18))
show_value(650)
show_value(978)
show_value(0)
show_value(1023)
|
|
07801e9ae8c90f9832f5436a22b0485c2312b78b
|
daphne/server.py
|
daphne/server.py
|
import logging
import time
from twisted.internet import reactor
from .http_protocol import HTTPFactory
logger = logging.getLogger(__name__)
class Server(object):
def __init__(self, channel_layer, host="127.0.0.1", port=8000, signal_handlers=True, action_logger=None):
self.channel_layer = channel_layer
self.host = host
self.port = port
self.signal_handlers = signal_handlers
self.action_logger = action_logger
def run(self):
self.factory = HTTPFactory(self.channel_layer, self.action_logger)
reactor.listenTCP(self.port, self.factory, interface=self.host)
reactor.callLater(0, self.backend_reader)
reactor.run(installSignalHandlers=self.signal_handlers)
def backend_reader(self):
"""
Run in a separate thread; reads messages from the backend.
"""
channels = self.factory.reply_channels()
# Quit if reactor is stopping
if not reactor.running:
logging.debug("Backend reader quitting due to reactor stop")
return
# Don't do anything if there's no channels to listen on
if channels:
channel, message = self.channel_layer.receive_many(channels, block=False)
if channel:
logging.debug("Server got message on %s", channel)
# Deal with the message
self.factory.dispatch_reply(channel, message)
reactor.callLater(0, self.backend_reader)
|
import logging
import time
from twisted.internet import reactor
from .http_protocol import HTTPFactory
logger = logging.getLogger(__name__)
class Server(object):
def __init__(self, channel_layer, host="127.0.0.1", port=8000, signal_handlers=True, action_logger=None):
self.channel_layer = channel_layer
self.host = host
self.port = port
self.signal_handlers = signal_handlers
self.action_logger = action_logger
def run(self):
self.factory = HTTPFactory(self.channel_layer, self.action_logger)
reactor.listenTCP(self.port, self.factory, interface=self.host)
reactor.callLater(0, self.backend_reader)
reactor.run(installSignalHandlers=self.signal_handlers)
def backend_reader(self):
"""
Run in a separate thread; reads messages from the backend.
"""
channels = self.factory.reply_channels()
delay = 0.3
# Quit if reactor is stopping
if not reactor.running:
logging.debug("Backend reader quitting due to reactor stop")
return
# Don't do anything if there's no channels to listen on
if channels:
delay = 0.05
channel, message = self.channel_layer.receive_many(channels, block=False)
if channel:
delay = 0
logging.debug("Server got message on %s", channel)
# Deal with the message
self.factory.dispatch_reply(channel, message)
reactor.callLater(delay, self.backend_reader)
|
Make daphne serving thread idle better
|
Make daphne serving thread idle better
|
Python
|
bsd-3-clause
|
django/daphne,maikhoepfel/daphne
|
import logging
import time
from twisted.internet import reactor
from .http_protocol import HTTPFactory
logger = logging.getLogger(__name__)
class Server(object):
def __init__(self, channel_layer, host="127.0.0.1", port=8000, signal_handlers=True, action_logger=None):
self.channel_layer = channel_layer
self.host = host
self.port = port
self.signal_handlers = signal_handlers
self.action_logger = action_logger
def run(self):
self.factory = HTTPFactory(self.channel_layer, self.action_logger)
reactor.listenTCP(self.port, self.factory, interface=self.host)
reactor.callLater(0, self.backend_reader)
reactor.run(installSignalHandlers=self.signal_handlers)
def backend_reader(self):
"""
Run in a separate thread; reads messages from the backend.
"""
channels = self.factory.reply_channels()
# Quit if reactor is stopping
if not reactor.running:
logging.debug("Backend reader quitting due to reactor stop")
return
# Don't do anything if there's no channels to listen on
if channels:
channel, message = self.channel_layer.receive_many(channels, block=False)
if channel:
logging.debug("Server got message on %s", channel)
# Deal with the message
self.factory.dispatch_reply(channel, message)
reactor.callLater(0, self.backend_reader)
Make daphne serving thread idle better
|
import logging
import time
from twisted.internet import reactor
from .http_protocol import HTTPFactory
logger = logging.getLogger(__name__)
class Server(object):
def __init__(self, channel_layer, host="127.0.0.1", port=8000, signal_handlers=True, action_logger=None):
self.channel_layer = channel_layer
self.host = host
self.port = port
self.signal_handlers = signal_handlers
self.action_logger = action_logger
def run(self):
self.factory = HTTPFactory(self.channel_layer, self.action_logger)
reactor.listenTCP(self.port, self.factory, interface=self.host)
reactor.callLater(0, self.backend_reader)
reactor.run(installSignalHandlers=self.signal_handlers)
def backend_reader(self):
"""
Run in a separate thread; reads messages from the backend.
"""
channels = self.factory.reply_channels()
delay = 0.3
# Quit if reactor is stopping
if not reactor.running:
logging.debug("Backend reader quitting due to reactor stop")
return
# Don't do anything if there's no channels to listen on
if channels:
delay = 0.05
channel, message = self.channel_layer.receive_many(channels, block=False)
if channel:
delay = 0
logging.debug("Server got message on %s", channel)
# Deal with the message
self.factory.dispatch_reply(channel, message)
reactor.callLater(delay, self.backend_reader)
|
<commit_before>import logging
import time
from twisted.internet import reactor
from .http_protocol import HTTPFactory
logger = logging.getLogger(__name__)
class Server(object):
def __init__(self, channel_layer, host="127.0.0.1", port=8000, signal_handlers=True, action_logger=None):
self.channel_layer = channel_layer
self.host = host
self.port = port
self.signal_handlers = signal_handlers
self.action_logger = action_logger
def run(self):
self.factory = HTTPFactory(self.channel_layer, self.action_logger)
reactor.listenTCP(self.port, self.factory, interface=self.host)
reactor.callLater(0, self.backend_reader)
reactor.run(installSignalHandlers=self.signal_handlers)
def backend_reader(self):
"""
Run in a separate thread; reads messages from the backend.
"""
channels = self.factory.reply_channels()
# Quit if reactor is stopping
if not reactor.running:
logging.debug("Backend reader quitting due to reactor stop")
return
# Don't do anything if there's no channels to listen on
if channels:
channel, message = self.channel_layer.receive_many(channels, block=False)
if channel:
logging.debug("Server got message on %s", channel)
# Deal with the message
self.factory.dispatch_reply(channel, message)
reactor.callLater(0, self.backend_reader)
<commit_msg>Make daphne serving thread idle better<commit_after>
|
import logging
import time
from twisted.internet import reactor
from .http_protocol import HTTPFactory
logger = logging.getLogger(__name__)
class Server(object):
def __init__(self, channel_layer, host="127.0.0.1", port=8000, signal_handlers=True, action_logger=None):
self.channel_layer = channel_layer
self.host = host
self.port = port
self.signal_handlers = signal_handlers
self.action_logger = action_logger
def run(self):
self.factory = HTTPFactory(self.channel_layer, self.action_logger)
reactor.listenTCP(self.port, self.factory, interface=self.host)
reactor.callLater(0, self.backend_reader)
reactor.run(installSignalHandlers=self.signal_handlers)
def backend_reader(self):
"""
Run in a separate thread; reads messages from the backend.
"""
channels = self.factory.reply_channels()
delay = 0.3
# Quit if reactor is stopping
if not reactor.running:
logging.debug("Backend reader quitting due to reactor stop")
return
# Don't do anything if there's no channels to listen on
if channels:
delay = 0.05
channel, message = self.channel_layer.receive_many(channels, block=False)
if channel:
delay = 0
logging.debug("Server got message on %s", channel)
# Deal with the message
self.factory.dispatch_reply(channel, message)
reactor.callLater(delay, self.backend_reader)
|
import logging
import time
from twisted.internet import reactor
from .http_protocol import HTTPFactory
logger = logging.getLogger(__name__)
class Server(object):
def __init__(self, channel_layer, host="127.0.0.1", port=8000, signal_handlers=True, action_logger=None):
self.channel_layer = channel_layer
self.host = host
self.port = port
self.signal_handlers = signal_handlers
self.action_logger = action_logger
def run(self):
self.factory = HTTPFactory(self.channel_layer, self.action_logger)
reactor.listenTCP(self.port, self.factory, interface=self.host)
reactor.callLater(0, self.backend_reader)
reactor.run(installSignalHandlers=self.signal_handlers)
def backend_reader(self):
"""
Run in a separate thread; reads messages from the backend.
"""
channels = self.factory.reply_channels()
# Quit if reactor is stopping
if not reactor.running:
logging.debug("Backend reader quitting due to reactor stop")
return
# Don't do anything if there's no channels to listen on
if channels:
channel, message = self.channel_layer.receive_many(channels, block=False)
if channel:
logging.debug("Server got message on %s", channel)
# Deal with the message
self.factory.dispatch_reply(channel, message)
reactor.callLater(0, self.backend_reader)
Make daphne serving thread idle betterimport logging
import time
from twisted.internet import reactor
from .http_protocol import HTTPFactory
logger = logging.getLogger(__name__)
class Server(object):
def __init__(self, channel_layer, host="127.0.0.1", port=8000, signal_handlers=True, action_logger=None):
self.channel_layer = channel_layer
self.host = host
self.port = port
self.signal_handlers = signal_handlers
self.action_logger = action_logger
def run(self):
self.factory = HTTPFactory(self.channel_layer, self.action_logger)
reactor.listenTCP(self.port, self.factory, interface=self.host)
reactor.callLater(0, self.backend_reader)
reactor.run(installSignalHandlers=self.signal_handlers)
def backend_reader(self):
"""
Run in a separate thread; reads messages from the backend.
"""
channels = self.factory.reply_channels()
delay = 0.3
# Quit if reactor is stopping
if not reactor.running:
logging.debug("Backend reader quitting due to reactor stop")
return
# Don't do anything if there's no channels to listen on
if channels:
delay = 0.05
channel, message = self.channel_layer.receive_many(channels, block=False)
if channel:
delay = 0
logging.debug("Server got message on %s", channel)
# Deal with the message
self.factory.dispatch_reply(channel, message)
reactor.callLater(delay, self.backend_reader)
|
<commit_before>import logging
import time
from twisted.internet import reactor
from .http_protocol import HTTPFactory
logger = logging.getLogger(__name__)
class Server(object):
def __init__(self, channel_layer, host="127.0.0.1", port=8000, signal_handlers=True, action_logger=None):
self.channel_layer = channel_layer
self.host = host
self.port = port
self.signal_handlers = signal_handlers
self.action_logger = action_logger
def run(self):
self.factory = HTTPFactory(self.channel_layer, self.action_logger)
reactor.listenTCP(self.port, self.factory, interface=self.host)
reactor.callLater(0, self.backend_reader)
reactor.run(installSignalHandlers=self.signal_handlers)
def backend_reader(self):
"""
Run in a separate thread; reads messages from the backend.
"""
channels = self.factory.reply_channels()
# Quit if reactor is stopping
if not reactor.running:
logging.debug("Backend reader quitting due to reactor stop")
return
# Don't do anything if there's no channels to listen on
if channels:
channel, message = self.channel_layer.receive_many(channels, block=False)
if channel:
logging.debug("Server got message on %s", channel)
# Deal with the message
self.factory.dispatch_reply(channel, message)
reactor.callLater(0, self.backend_reader)
<commit_msg>Make daphne serving thread idle better<commit_after>import logging
import time
from twisted.internet import reactor
from .http_protocol import HTTPFactory
logger = logging.getLogger(__name__)
class Server(object):
def __init__(self, channel_layer, host="127.0.0.1", port=8000, signal_handlers=True, action_logger=None):
self.channel_layer = channel_layer
self.host = host
self.port = port
self.signal_handlers = signal_handlers
self.action_logger = action_logger
def run(self):
self.factory = HTTPFactory(self.channel_layer, self.action_logger)
reactor.listenTCP(self.port, self.factory, interface=self.host)
reactor.callLater(0, self.backend_reader)
reactor.run(installSignalHandlers=self.signal_handlers)
def backend_reader(self):
"""
Run in a separate thread; reads messages from the backend.
"""
channels = self.factory.reply_channels()
delay = 0.3
# Quit if reactor is stopping
if not reactor.running:
logging.debug("Backend reader quitting due to reactor stop")
return
# Don't do anything if there's no channels to listen on
if channels:
delay = 0.05
channel, message = self.channel_layer.receive_many(channels, block=False)
if channel:
delay = 0
logging.debug("Server got message on %s", channel)
# Deal with the message
self.factory.dispatch_reply(channel, message)
reactor.callLater(delay, self.backend_reader)
|
2c025094ef9308ba2b1a5bfee224ddcaabbf8438
|
dbbot.py
|
dbbot.py
|
#!/usr/bin/env python
import sys
import optparse
import sqlite3
from datetime import datetime
from os.path import abspath, exists, join
from xml.etree import ElementTree
def main():
parser = _get_option_parser()
options = _get_validated_options(parser)
xml_tree = _get_xml_tree(options, parser)
root_attributes = _get_root_attributes(xml_tree)
db = RobotDatabase(options)
db.push(('INSERT INTO test_run (generated_at, generator) VALUES (?, ?)', root_attributes))
db.commit()
def _exit_with_help(parser, message=None):
if message:
sys.stderr.write('Error: %s\n\n' % message)
parser.print_help()
exit(1)
def _get_option_parser():
parser = optparse.OptionParser()
parser.add_option('--file', dest='file_path')
parser.add_option('--db', dest='db_file_path', default='results.db')
return parser
def _get_xml_tree(options, parser):
try:
xml_tree = ElementTree.parse(options.file_path)
except ElementTree.ParseError:
_exit_with_help(parser, 'Invalid XML file')
return xml_tree
def _get_formatted_timestamp(root_element):
generated_at = root_element.get('generated').split('.')[0]
return datetime.strptime(generated_at, '%Y%m%d %H:%M:%S')
def _get_root_attributes(xml_tree):
root_element = xml_tree.getroot()
return (
_get_formatted_timestamp(root_element),
root_element.get('generator')
)
def _get_validated_options(parser):
if len(sys.argv) < 2:
_exit_with_help(parser)
options, args = parser.parse_args()
if args:
_exit_with_help(parser)
if not exists(options.file_path):
_exit_with_help(parser, 'File not found')
return options
class RobotDatabase(object):
def __init__(self, options):
self.sql_statements = []
self.options = options
self._init_tables()
def _init_tables(self):
self.push(
'''CREATE TABLE if not exists test_run (id INTEGER PRIMARY KEY AUTOINCREMENT,
generated_at TEXT,
generator TEXT)''')
self.commit()
def push(self, *sql_statements):
for statement in sql_statements:
self.sql_statements.append(statement)
def commit(self):
connection = sqlite3.connect(self.options.db_file_path)
cursor = connection.cursor()
for statement in self.sql_statements:
if isinstance(statement, basestring):
cursor.execute(statement)
else:
cursor.execute(*statement)
connection.commit()
self.sql_statements = []
connection.close()
if __name__ == '__main__':
main()
|
Insert test run results to sqlite3 database
|
Insert test run results to sqlite3 database
|
Python
|
apache-2.0
|
robotframework/DbBot
|
Insert test run results to sqlite3 database
|
#!/usr/bin/env python
import sys
import optparse
import sqlite3
from datetime import datetime
from os.path import abspath, exists, join
from xml.etree import ElementTree
def main():
parser = _get_option_parser()
options = _get_validated_options(parser)
xml_tree = _get_xml_tree(options, parser)
root_attributes = _get_root_attributes(xml_tree)
db = RobotDatabase(options)
db.push(('INSERT INTO test_run (generated_at, generator) VALUES (?, ?)', root_attributes))
db.commit()
def _exit_with_help(parser, message=None):
if message:
sys.stderr.write('Error: %s\n\n' % message)
parser.print_help()
exit(1)
def _get_option_parser():
parser = optparse.OptionParser()
parser.add_option('--file', dest='file_path')
parser.add_option('--db', dest='db_file_path', default='results.db')
return parser
def _get_xml_tree(options, parser):
try:
xml_tree = ElementTree.parse(options.file_path)
except ElementTree.ParseError:
_exit_with_help(parser, 'Invalid XML file')
return xml_tree
def _get_formatted_timestamp(root_element):
generated_at = root_element.get('generated').split('.')[0]
return datetime.strptime(generated_at, '%Y%m%d %H:%M:%S')
def _get_root_attributes(xml_tree):
root_element = xml_tree.getroot()
return (
_get_formatted_timestamp(root_element),
root_element.get('generator')
)
def _get_validated_options(parser):
if len(sys.argv) < 2:
_exit_with_help(parser)
options, args = parser.parse_args()
if args:
_exit_with_help(parser)
if not exists(options.file_path):
_exit_with_help(parser, 'File not found')
return options
class RobotDatabase(object):
def __init__(self, options):
self.sql_statements = []
self.options = options
self._init_tables()
def _init_tables(self):
self.push(
'''CREATE TABLE if not exists test_run (id INTEGER PRIMARY KEY AUTOINCREMENT,
generated_at TEXT,
generator TEXT)''')
self.commit()
def push(self, *sql_statements):
for statement in sql_statements:
self.sql_statements.append(statement)
def commit(self):
connection = sqlite3.connect(self.options.db_file_path)
cursor = connection.cursor()
for statement in self.sql_statements:
if isinstance(statement, basestring):
cursor.execute(statement)
else:
cursor.execute(*statement)
connection.commit()
self.sql_statements = []
connection.close()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Insert test run results to sqlite3 database<commit_after>
|
#!/usr/bin/env python
import sys
import optparse
import sqlite3
from datetime import datetime
from os.path import abspath, exists, join
from xml.etree import ElementTree
def main():
parser = _get_option_parser()
options = _get_validated_options(parser)
xml_tree = _get_xml_tree(options, parser)
root_attributes = _get_root_attributes(xml_tree)
db = RobotDatabase(options)
db.push(('INSERT INTO test_run (generated_at, generator) VALUES (?, ?)', root_attributes))
db.commit()
def _exit_with_help(parser, message=None):
if message:
sys.stderr.write('Error: %s\n\n' % message)
parser.print_help()
exit(1)
def _get_option_parser():
parser = optparse.OptionParser()
parser.add_option('--file', dest='file_path')
parser.add_option('--db', dest='db_file_path', default='results.db')
return parser
def _get_xml_tree(options, parser):
try:
xml_tree = ElementTree.parse(options.file_path)
except ElementTree.ParseError:
_exit_with_help(parser, 'Invalid XML file')
return xml_tree
def _get_formatted_timestamp(root_element):
generated_at = root_element.get('generated').split('.')[0]
return datetime.strptime(generated_at, '%Y%m%d %H:%M:%S')
def _get_root_attributes(xml_tree):
root_element = xml_tree.getroot()
return (
_get_formatted_timestamp(root_element),
root_element.get('generator')
)
def _get_validated_options(parser):
if len(sys.argv) < 2:
_exit_with_help(parser)
options, args = parser.parse_args()
if args:
_exit_with_help(parser)
if not exists(options.file_path):
_exit_with_help(parser, 'File not found')
return options
class RobotDatabase(object):
def __init__(self, options):
self.sql_statements = []
self.options = options
self._init_tables()
def _init_tables(self):
self.push(
'''CREATE TABLE if not exists test_run (id INTEGER PRIMARY KEY AUTOINCREMENT,
generated_at TEXT,
generator TEXT)''')
self.commit()
def push(self, *sql_statements):
for statement in sql_statements:
self.sql_statements.append(statement)
def commit(self):
connection = sqlite3.connect(self.options.db_file_path)
cursor = connection.cursor()
for statement in self.sql_statements:
if isinstance(statement, basestring):
cursor.execute(statement)
else:
cursor.execute(*statement)
connection.commit()
self.sql_statements = []
connection.close()
if __name__ == '__main__':
main()
|
Insert test run results to sqlite3 database#!/usr/bin/env python
import sys
import optparse
import sqlite3
from datetime import datetime
from os.path import abspath, exists, join
from xml.etree import ElementTree
def main():
parser = _get_option_parser()
options = _get_validated_options(parser)
xml_tree = _get_xml_tree(options, parser)
root_attributes = _get_root_attributes(xml_tree)
db = RobotDatabase(options)
db.push(('INSERT INTO test_run (generated_at, generator) VALUES (?, ?)', root_attributes))
db.commit()
def _exit_with_help(parser, message=None):
if message:
sys.stderr.write('Error: %s\n\n' % message)
parser.print_help()
exit(1)
def _get_option_parser():
parser = optparse.OptionParser()
parser.add_option('--file', dest='file_path')
parser.add_option('--db', dest='db_file_path', default='results.db')
return parser
def _get_xml_tree(options, parser):
try:
xml_tree = ElementTree.parse(options.file_path)
except ElementTree.ParseError:
_exit_with_help(parser, 'Invalid XML file')
return xml_tree
def _get_formatted_timestamp(root_element):
generated_at = root_element.get('generated').split('.')[0]
return datetime.strptime(generated_at, '%Y%m%d %H:%M:%S')
def _get_root_attributes(xml_tree):
root_element = xml_tree.getroot()
return (
_get_formatted_timestamp(root_element),
root_element.get('generator')
)
def _get_validated_options(parser):
if len(sys.argv) < 2:
_exit_with_help(parser)
options, args = parser.parse_args()
if args:
_exit_with_help(parser)
if not exists(options.file_path):
_exit_with_help(parser, 'File not found')
return options
class RobotDatabase(object):
def __init__(self, options):
self.sql_statements = []
self.options = options
self._init_tables()
def _init_tables(self):
self.push(
'''CREATE TABLE if not exists test_run (id INTEGER PRIMARY KEY AUTOINCREMENT,
generated_at TEXT,
generator TEXT)''')
self.commit()
def push(self, *sql_statements):
for statement in sql_statements:
self.sql_statements.append(statement)
def commit(self):
connection = sqlite3.connect(self.options.db_file_path)
cursor = connection.cursor()
for statement in self.sql_statements:
if isinstance(statement, basestring):
cursor.execute(statement)
else:
cursor.execute(*statement)
connection.commit()
self.sql_statements = []
connection.close()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Insert test run results to sqlite3 database<commit_after>#!/usr/bin/env python
import sys
import optparse
import sqlite3
from datetime import datetime
from os.path import abspath, exists, join
from xml.etree import ElementTree
def main():
parser = _get_option_parser()
options = _get_validated_options(parser)
xml_tree = _get_xml_tree(options, parser)
root_attributes = _get_root_attributes(xml_tree)
db = RobotDatabase(options)
db.push(('INSERT INTO test_run (generated_at, generator) VALUES (?, ?)', root_attributes))
db.commit()
def _exit_with_help(parser, message=None):
if message:
sys.stderr.write('Error: %s\n\n' % message)
parser.print_help()
exit(1)
def _get_option_parser():
parser = optparse.OptionParser()
parser.add_option('--file', dest='file_path')
parser.add_option('--db', dest='db_file_path', default='results.db')
return parser
def _get_xml_tree(options, parser):
try:
xml_tree = ElementTree.parse(options.file_path)
except ElementTree.ParseError:
_exit_with_help(parser, 'Invalid XML file')
return xml_tree
def _get_formatted_timestamp(root_element):
generated_at = root_element.get('generated').split('.')[0]
return datetime.strptime(generated_at, '%Y%m%d %H:%M:%S')
def _get_root_attributes(xml_tree):
root_element = xml_tree.getroot()
return (
_get_formatted_timestamp(root_element),
root_element.get('generator')
)
def _get_validated_options(parser):
if len(sys.argv) < 2:
_exit_with_help(parser)
options, args = parser.parse_args()
if args:
_exit_with_help(parser)
if not exists(options.file_path):
_exit_with_help(parser, 'File not found')
return options
class RobotDatabase(object):
def __init__(self, options):
self.sql_statements = []
self.options = options
self._init_tables()
def _init_tables(self):
self.push(
'''CREATE TABLE if not exists test_run (id INTEGER PRIMARY KEY AUTOINCREMENT,
generated_at TEXT,
generator TEXT)''')
self.commit()
def push(self, *sql_statements):
for statement in sql_statements:
self.sql_statements.append(statement)
def commit(self):
connection = sqlite3.connect(self.options.db_file_path)
cursor = connection.cursor()
for statement in self.sql_statements:
if isinstance(statement, basestring):
cursor.execute(statement)
else:
cursor.execute(*statement)
connection.commit()
self.sql_statements = []
connection.close()
if __name__ == '__main__':
main()
|
|
deb816d30c5accaa8496c6e8f6f491e0a25aefc7
|
erpnext/patches/remove_duplicate_table_mapper_detail.py
|
erpnext/patches/remove_duplicate_table_mapper_detail.py
|
"""
Removes duplicate entries created in
"""
import webnotes
def execute():
res = webnotes.conn.sql("""\
SELECT a.name
FROM
`tabTable Mapper Detail` a,
`tabTable Mapper Detail` b
WHERE
a.parent = b.parent AND
a.from_table = b.from_table AND
a.to_table = b.to_table AND
a.from_field = b.from_field AND
a.to_field = b.to_field AND
a.name < b.name""")
if res and len(res)>0:
name_string = ", ".join(["'" + str(r[0]) + "'" for r in res])
res = webnotes.conn.sql("""\
DELETE FROM `tabTable Mapper Detail`
WHERE name IN (%s)""" % name_string)
|
Patch to remove duplicate entries created due to change in validation_logic in Table Mapper Detail for the following doctypes: * Delivery Note-Receivable Voucher * Purchase Order-Purchase Voucher * Sales Order-Receivable Voucher
|
Patch to remove duplicate entries created due to change in
validation_logic in Table Mapper Detail for the following doctypes:
* Delivery Note-Receivable Voucher
* Purchase Order-Purchase Voucher
* Sales Order-Receivable Voucher
|
Python
|
agpl-3.0
|
gangadhar-kadam/verve_erp,anandpdoshi/erpnext,indictranstech/phrerp,MartinEnder/erpnext-de,gangadharkadam/sher,saurabh6790/omni-apps,indictranstech/vestasi-erpnext,hatwar/focal-erpnext,gangadhar-kadam/latestchurcherp,gmarke/erpnext,hatwar/Das_erpnext,hatwar/focal-erpnext,gangadharkadam/johnerp,gangadharkadam/saloon_erp,indictranstech/phrerp,saurabh6790/medsyn-app1,saurabh6790/medsynaptic-app,hatwar/buyback-erpnext,suyashphadtare/test,gangadharkadam/contributionerp,gangadhar-kadam/powapp,meisterkleister/erpnext,saurabh6790/ON-RISAPP,mbauskar/omnitech-demo-erpnext,hanselke/erpnext-1,saurabh6790/omnit-app,Tejal011089/trufil-erpnext,gangadhar-kadam/church-erpnext,Tejal011089/fbd_erpnext,gsnbng/erpnext,gangadhar-kadam/powapp,saurabh6790/omn-app,hatwar/Das_erpnext,gangadharkadam/tailorerp,gangadhar-kadam/laganerp,gangadharkadam/verveerp,gsnbng/erpnext,indictranstech/Das_Erpnext,Tejal011089/paypal_erpnext,ThiagoGarciaAlves/erpnext,indictranstech/osmosis-erpnext,Tejal011089/huntercamp_erpnext,gangadharkadam/vlinkerp,njmube/erpnext,Tejal011089/Medsyn2_app,gangadharkadam/v5_erp,indictranstech/erpnext,Drooids/erpnext,aruizramon/alec_erpnext,mbauskar/omnitech-demo-erpnext,Tejal011089/fbd_erpnext,tmimori/erpnext,suyashphadtare/sajil-erp,saurabh6790/ON-RISAPP,saurabh6790/test-med-app,suyashphadtare/vestasi-erp-final,gangadharkadam/v4_erp,pombredanne/erpnext,gmarke/erpnext,treejames/erpnext,mahabuber/erpnext,saurabh6790/test-erp,gangadhar-kadam/mtn-erpnext,gangadharkadam/v6_erp,Tejal011089/Medsyn2_app,shft117/SteckerApp,hernad/erpnext,indictranstech/buyback-erp,gangadharkadam/saloon_erp,SPKian/Testing,gangadharkadam/v4_erp,Yellowen/Owrang,indictranstech/biggift-erpnext,gangadhar-kadam/mic-erpnext,indictranstech/erpnext,saurabh6790/OFF-RISAPP,gangadharkadam/v5_erp,rohitwaghchaure/digitales_erpnext,gangadhar-kadam/smrterp,indictranstech/fbd_erpnext,gangadhar-kadam/smrterp,meisterkleister/erpnext,gangadhar-kadam/helpdesk-erpnext,suyashphadtare/gd-erp,saurabh6790/med_new_app,ThiagoGarciaAlves/erpnext,Tejal011089/med2-app,gangadharkadam/tailorerp,ShashaQin/erpnext,hatwar/buyback-erpnext,SPKian/Testing,gangadharkadam/contributionerp,gangadhar-kadam/sapphire_app,gangadharkadam/saloon_erp_install,gangadharkadam/saloon_erp_install,gangadhar-kadam/verve_live_erp,Tejal011089/fbd_erpnext,rohitwaghchaure/New_Theme_Erp,pombredanne/erpnext,saurabh6790/test-erp,indictranstech/focal-erpnext,Tejal011089/trufil-erpnext,sagar30051991/ozsmart-erp,gangadhar-kadam/latestchurcherp,indictranstech/trufil-erpnext,sheafferusa/erpnext,hatwar/Das_erpnext,tmimori/erpnext,saurabh6790/OFF-RISAPP,rohitwaghchaure/erpnext-receipher,fuhongliang/erpnext,shitolepriya/test-erp,mbauskar/alec_frappe5_erpnext,saurabh6790/omni-apps,gangadhar-kadam/sapphire_app,indictranstech/tele-erpnext,saurabh6790/aimobilize-app-backup,gangadharkadam/sher,gangadhar-kadam/verve_erp,Tejal011089/paypal_erpnext,gangadharkadam/letzerp,sheafferusa/erpnext,hanselke/erpnext-1,rohitwaghchaure/New_Theme_Erp,saurabh6790/pow-app,susuchina/ERPNEXT,suyashphadtare/vestasi-update-erp,gangadharkadam/v6_erp,4commerce-technologies-AG/erpnext,indictranstech/internal-erpnext,saurabh6790/test_final_med_app,Yellowen/Owrang,gangadharkadam/v5_erp,hatwar/Das_erpnext,saurabh6790/alert-med-app,Tejal011089/trufil-erpnext,indictranstech/Das_Erpnext,gmarke/erpnext,suyashphadtare/gd-erp,indictranstech/vestasi-erpnext,rohitwaghchaure/erpnext_smart,gangadhar-kadam/latestchurcherp,netfirms/erpnext,SPKian/Testing,suyashphadtare/vestasi-erp-jan-end,mbauskar/omnitech-erpnext,mbauskar/phrerp,hernad/er
pnext,indictranstech/fbd_erpnext,gangadhar-kadam/mtn-erpnext,mbauskar/helpdesk-erpnext,gangadhar-kadam/verve_erp,susuchina/ERPNEXT,gangadharkadam/letzerp,gangadharkadam/vlinkerp,indictranstech/vestasi-erpnext,fuhongliang/erpnext,gangadharkadam/johnerp,njmube/erpnext,suyashphadtare/vestasi-erp-jan-end,fuhongliang/erpnext,saurabh6790/test-erp,indictranstech/vestasi-erpnext,mbauskar/sapphire-erpnext,saurabh6790/medsyn-app,gangadharkadam/office_erp,Tejal011089/osmosis_erpnext,suyashphadtare/vestasi-update-erp,Tejal011089/fbd_erpnext,rohitwaghchaure/erpnext-receipher,suyashphadtare/sajil-final-erp,susuchina/ERPNEXT,Tejal011089/paypal_erpnext,ShashaQin/erpnext,gangadharkadam/vlinkerp,gangadhar-kadam/sms-erpnext,saurabh6790/medsyn-app,rohitwaghchaure/GenieManager-erpnext,anandpdoshi/erpnext,indictranstech/biggift-erpnext,indictranstech/trufil-erpnext,gangadhar-kadam/adb-erp,gangadharkadam/saloon_erp,dieface/erpnext,indictranstech/tele-erpnext,saurabh6790/omnisys-app,indictranstech/internal-erpnext,gangadharkadam/letzerp,saurabh6790/medapp,rohitwaghchaure/digitales_erpnext,Tejal011089/huntercamp_erpnext,Tejal011089/digitales_erpnext,sagar30051991/ozsmart-erp,mbauskar/omnitech-erpnext,Suninus/erpnext,gangadhar-kadam/laganerp,mbauskar/phrerp,saurabh6790/medsynaptic1-app,aruizramon/alec_erpnext,gangadhar-kadam/verve_test_erp,mahabuber/erpnext,mbauskar/Das_Erpnext,mbauskar/internal-hr,indictranstech/fbd_erpnext,Drooids/erpnext,Suninus/erpnext,gangadhar-kadam/mic-erpnext,indictranstech/reciphergroup-erpnext,Tejal011089/paypal_erpnext,suyashphadtare/sajil-final-erp,pawaranand/phrerp,indictranstech/Das_Erpnext,netfirms/erpnext,rohitwaghchaure/erpnext_smart,gangadharkadam/office_erp,mbauskar/alec_frappe5_erpnext,gangadhar-kadam/helpdesk-erpnext,gangadhar-kadam/hrerp,gangadhar-kadam/verve_live_erp,fuhongliang/erpnext,aruizramon/alec_erpnext,mbauskar/helpdesk-erpnext,saurabh6790/pow-app,SPKian/Testing2,mbauskar/alec_frappe5_erpnext,geekroot/erpnext,gmarke/erpnext,saurabh6790/aimobilize-app-backup,gangadharkadam/vlinkerp,mbauskar/alec_frappe5_erpnext,mbauskar/omnitech-erpnext,saurabh6790/test-erp,netfirms/erpnext,suyashphadtare/test,saurabh6790/medsyn-app1,gangadhar-kadam/verve_test_erp,indictranstech/buyback-erp,aruizramon/alec_erpnext,indictranstech/phrerp,gangadhar-kadam/verve-erp,shft117/SteckerApp,pombredanne/erpnext,ThiagoGarciaAlves/erpnext,suyashphadtare/vestasi-erp-jan-end,gangadhar-kadam/helpdesk-erpnext,gangadhar-kadam/verve_test_erp,gangadhar-kadam/adb-erp,Drooids/erpnext,mahabuber/erpnext,saurabh6790/omn-app,saurabh6790/trufil_app,gangadhar-kadam/sapphire_app,suyashphadtare/vestasi-erp-1,gangadharkadam/saloon_erp,saurabh6790/aimobilize,gangadharkadam/smrterp,geekroot/erpnext,sheafferusa/erpnext,4commerce-technologies-AG/erpnext,shitolepriya/test-erp,rohitwaghchaure/New_Theme_Erp,Tejal011089/digitales_erpnext,rohitwaghchaure/erpnext-receipher,suyashphadtare/test,indictranstech/reciphergroup-erpnext,anandpdoshi/erpnext,sagar30051991/ozsmart-erp,pawaranand/phrerp,suyashphadtare/vestasi-erp-1,mbauskar/phrerp,indictranstech/Das_Erpnext,saurabh6790/test-med-app,dieface/erpnext,suyashphadtare/vestasi-erp-1,shitolepriya/test-erp,saurabh6790/med_app_rels,indictranstech/osmosis-erpnext,hatwar/buyback-erpnext,Tejal011089/osmosis_erpnext,gangadharkadam/verveerp,gangadharkadam/letzerp,shft117/SteckerApp,Tejal011089/digitales_erpnext,gangadhar-kadam/laganerp,Tejal011089/osmosis_erpnext,indictranstech/focal-erpnext,mbauskar/omnitech-erpnext,SPKian/Testing2,suyashphadtare/gd-erp,susuchina/ERPNEXT,gangadharkadam/
contributionerp,mbauskar/helpdesk-erpnext,ShashaQin/erpnext,Suninus/erpnext,mbauskar/sapphire-erpnext,mbauskar/omnitech-demo-erpnext,gangadharkadam/office_erp,indictranstech/biggift-erpnext,BhupeshGupta/erpnext,indictranstech/erpnext,saurabh6790/medsynaptic-app,MartinEnder/erpnext-de,suyashphadtare/vestasi-erp-jan-end,gangadhar-kadam/verve_live_erp,gangadharkadam/smrterp,gangadhar-kadam/powapp,njmube/erpnext,Tejal011089/med2-app,gangadharkadam/verveerp,indictranstech/reciphergroup-erpnext,dieface/erpnext,indictranstech/fbd_erpnext,netfirms/erpnext,hatwar/buyback-erpnext,saurabh6790/trufil_app,geekroot/erpnext,pawaranand/phrerp,mahabuber/erpnext,gangadhar-kadam/hrerp,shitolepriya/test-erp,gangadhar-kadam/verve_erp,SPKian/Testing2,tmimori/erpnext,mbauskar/phrerp,gangadhar-kadam/prjapp,indictranstech/focal-erpnext,rohitwaghchaure/GenieManager-erpnext,hanselke/erpnext-1,gsnbng/erpnext,saurabh6790/tru_app_back,gangadhar-kadam/sms-erpnext,indictranstech/osmosis-erpnext,ThiagoGarciaAlves/erpnext,saurabh6790/omnitech-apps,ShashaQin/erpnext,rohitwaghchaure/digitales_erpnext,gangadhar-kadam/prjapp,gangadhar-kadam/verve_test_erp,4commerce-technologies-AG/erpnext,gangadhar-kadam/nassimapp,treejames/erpnext,mbauskar/Das_Erpnext,dieface/erpnext,MartinEnder/erpnext-de,sagar30051991/ozsmart-erp,mbauskar/Das_Erpnext,suyashphadtare/sajil-erp,mbauskar/helpdesk-erpnext,indictranstech/tele-erpnext,indictranstech/erpnext,shft117/SteckerApp,saurabh6790/med_app_rels,gangadharkadam/v5_erp,gangadharkadam/contributionerp,saurabh6790/omnit-app,Tejal011089/digitales_erpnext,saurabh6790/test_final_med_app,indictranstech/tele-erpnext,suyashphadtare/vestasi-update-erp,gangadharkadam/v4_erp,gangadhar-kadam/nassimapp,hanselke/erpnext-1,mbauskar/omnitech-demo-erpnext,sheafferusa/erpnext,indictranstech/biggift-erpnext,BhupeshGupta/erpnext,Tejal011089/trufil-erpnext,mbauskar/internal-hr,Tejal011089/osmosis_erpnext,indictranstech/phrerp,mbauskar/sapphire-erpnext,BhupeshGupta/erpnext,suyashphadtare/gd-erp,SPKian/Testing2,gangadharkadam/v6_erp,saurabh6790/medapp,gangadharkadam/saloon_erp_install,SPKian/Testing,suyashphadtare/sajil-final-erp,MartinEnder/erpnext-de,indictranstech/focal-erpnext,indictranstech/buyback-erp,meisterkleister/erpnext,hatwar/focal-erpnext,saurabh6790/aimobilize,gangadharkadam/verveerp,mbauskar/internal-hr,rohitwaghchaure/erpnext-receipher,Drooids/erpnext,gangadharkadam/v4_erp,meisterkleister/erpnext,gangadhar-kadam/church-erpnext,suyashphadtare/vestasi-erp-final,saurabh6790/medsynaptic1-app,mbauskar/Das_Erpnext,gangadhar-kadam/helpdesk-erpnext,rohitwaghchaure/GenieManager-erpnext,suyashphadtare/sajil-erp,treejames/erpnext,saurabh6790/tru_app_back,gangadharkadam/sterp,indictranstech/osmosis-erpnext,gangadhar-kadam/verve_live_erp,Tejal011089/huntercamp_erpnext,rohitwaghchaure/erpnext_smart,Aptitudetech/ERPNext,tmimori/erpnext,saurabh6790/alert-med-app,pombredanne/erpnext,hatwar/focal-erpnext,gangadharkadam/saloon_erp_install,anandpdoshi/erpnext,gangadhar-kadam/latestchurcherp,gangadharkadam/sterp,indictranstech/buyback-erp,saurabh6790/omnitech-apps,suyashphadtare/vestasi-erp-final,hernad/erpnext,gangadhar-kadam/verve-erp,njmube/erpnext,rohitwaghchaure/GenieManager-erpnext,indictranstech/internal-erpnext,saurabh6790/omnisys-app,pawaranand/phrerp,hernad/erpnext,indictranstech/internal-erpnext,indictranstech/trufil-erpnext,gsnbng/erpnext,gangadhar-kadam/verve-erp,BhupeshGupta/erpnext,treejames/erpnext,indictranstech/reciphergroup-erpnext,rohitwaghchaure/digitales_erpnext,Tejal011089/huntercamp_erpnext,rohitwagh
chaure/New_Theme_Erp,gangadharkadam/v6_erp,indictranstech/trufil-erpnext,Suninus/erpnext,mbauskar/sapphire-erpnext,saurabh6790/med_new_app,geekroot/erpnext
|
Patch to remove duplicate entries created due to change in
validation_logic in Table Mapper Detail for the following doctypes:
* Delivery Note-Receivable Voucher
* Purchase Order-Purchase Voucher
* Sales Order-Receivable Voucher
|
"""
Removes duplicate entries created in
"""
import webnotes
def execute():
res = webnotes.conn.sql("""\
SELECT a.name
FROM
`tabTable Mapper Detail` a,
`tabTable Mapper Detail` b
WHERE
a.parent = b.parent AND
a.from_table = b.from_table AND
a.to_table = b.to_table AND
a.from_field = b.from_field AND
a.to_field = b.to_field AND
a.name < b.name""")
if res and len(res)>0:
name_string = ", ".join(["'" + str(r[0]) + "'" for r in res])
res = webnotes.conn.sql("""\
DELETE FROM `tabTable Mapper Detail`
WHERE name IN (%s)""" % name_string)
|
<commit_before><commit_msg>Patch to remove duplicate entries created due to change in
validation_logic in Table Mapper Detail for the following doctypes:
* Delivery Note-Receivable Voucher
* Purchase Order-Purchase Voucher
* Sales Order-Receivable Voucher<commit_after>
|
"""
Removes duplicate entries created in
"""
import webnotes
def execute():
res = webnotes.conn.sql("""\
SELECT a.name
FROM
`tabTable Mapper Detail` a,
`tabTable Mapper Detail` b
WHERE
a.parent = b.parent AND
a.from_table = b.from_table AND
a.to_table = b.to_table AND
a.from_field = b.from_field AND
a.to_field = b.to_field AND
a.name < b.name""")
if res and len(res)>0:
name_string = ", ".join(["'" + str(r[0]) + "'" for r in res])
res = webnotes.conn.sql("""\
DELETE FROM `tabTable Mapper Detail`
WHERE name IN (%s)""" % name_string)
|
Patch to remove duplicate entries created due to change in
validation_logic in Table Mapper Detail for the following doctypes:
* Delivery Note-Receivable Voucher
* Purchase Order-Purchase Voucher
* Sales Order-Receivable Voucher"""
Removes duplicate entries created in
"""
import webnotes
def execute():
res = webnotes.conn.sql("""\
SELECT a.name
FROM
`tabTable Mapper Detail` a,
`tabTable Mapper Detail` b
WHERE
a.parent = b.parent AND
a.from_table = b.from_table AND
a.to_table = b.to_table AND
a.from_field = b.from_field AND
a.to_field = b.to_field AND
a.name < b.name""")
if res and len(res)>0:
name_string = ", ".join(["'" + str(r[0]) + "'" for r in res])
res = webnotes.conn.sql("""\
DELETE FROM `tabTable Mapper Detail`
WHERE name IN (%s)""" % name_string)
|
<commit_before><commit_msg>Patch to remove duplicate entries created due to change in
validation_logic in Table Mapper Detail for the following doctypes:
* Delivery Note-Receivable Voucher
* Purchase Order-Purchase Voucher
* Sales Order-Receivable Voucher<commit_after>"""
Removes duplicate entries created in
"""
import webnotes
def execute():
res = webnotes.conn.sql("""\
SELECT a.name
FROM
`tabTable Mapper Detail` a,
`tabTable Mapper Detail` b
WHERE
a.parent = b.parent AND
a.from_table = b.from_table AND
a.to_table = b.to_table AND
a.from_field = b.from_field AND
a.to_field = b.to_field AND
a.name < b.name""")
if res and len(res)>0:
name_string = ", ".join(["'" + str(r[0]) + "'" for r in res])
res = webnotes.conn.sql("""\
DELETE FROM `tabTable Mapper Detail`
WHERE name IN (%s)""" % name_string)
|
|
8f246f28809025ad18f4910d75f703d37ec31b11
|
examples/write_once.py
|
examples/write_once.py
|
"""An example of writing an API to scrapper hacker news once, and then enabling usage everywhere"""
import hug
import requests
@hug.local()
@hug.cli()
@hug.get()
def top_post(section:hug.types.one_of(('news', 'newest', 'show'))='news'):
"""Returns the top post from the provided section"""
content = requests.get('https://news.ycombinator.com/{0}'.format(section)).content
text = content.decode('utf-8')
return text.split('<tr class=\'athing\'>')[1].split("<a href")[1].split(">")[1].split("<")[0]
|
Add example of a write once API
|
Add example of a write once API
|
Python
|
mit
|
MuhammadAlkarouri/hug,timothycrosley/hug,timothycrosley/hug,MuhammadAlkarouri/hug,MuhammadAlkarouri/hug,timothycrosley/hug
|
Add example of a write once API
|
"""An example of writing an API to scrapper hacker news once, and then enabling usage everywhere"""
import hug
import requests
@hug.local()
@hug.cli()
@hug.get()
def top_post(section:hug.types.one_of(('news', 'newest', 'show'))='news'):
"""Returns the top post from the provided section"""
content = requests.get('https://news.ycombinator.com/{0}'.format(section)).content
text = content.decode('utf-8')
return text.split('<tr class=\'athing\'>')[1].split("<a href")[1].split(">")[1].split("<")[0]
|
<commit_before><commit_msg>Add example of a write once API<commit_after>
|
"""An example of writing an API to scrapper hacker news once, and then enabling usage everywhere"""
import hug
import requests
@hug.local()
@hug.cli()
@hug.get()
def top_post(section:hug.types.one_of(('news', 'newest', 'show'))='news'):
"""Returns the top post from the provided section"""
content = requests.get('https://news.ycombinator.com/{0}'.format(section)).content
text = content.decode('utf-8')
return text.split('<tr class=\'athing\'>')[1].split("<a href")[1].split(">")[1].split("<")[0]
|
Add example of a write once API"""An example of writing an API to scrapper hacker news once, and then enabling usage everywhere"""
import hug
import requests
@hug.local()
@hug.cli()
@hug.get()
def top_post(section:hug.types.one_of(('news', 'newest', 'show'))='news'):
"""Returns the top post from the provided section"""
content = requests.get('https://news.ycombinator.com/{0}'.format(section)).content
text = content.decode('utf-8')
return text.split('<tr class=\'athing\'>')[1].split("<a href")[1].split(">")[1].split("<")[0]
|
<commit_before><commit_msg>Add example of a write once API<commit_after>"""An example of writing an API to scrapper hacker news once, and then enabling usage everywhere"""
import hug
import requests
@hug.local()
@hug.cli()
@hug.get()
def top_post(section:hug.types.one_of(('news', 'newest', 'show'))='news'):
"""Returns the top post from the provided section"""
content = requests.get('https://news.ycombinator.com/{0}'.format(section)).content
text = content.decode('utf-8')
return text.split('<tr class=\'athing\'>')[1].split("<a href")[1].split(">")[1].split("<")[0]
|
|
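A minimal usage sketch for the record above (not part of the committed file): because top_post is stacked with @hug.local(), it can also be called as an ordinary function from Python. The module name write_once.py comes from the record; the wrapper script below is assumed.

import write_once

if __name__ == "__main__":
    # the section argument must be one of ('news', 'newest', 'show') per hug.types.one_of
    print(write_once.top_post(section="show"))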
4a9cb81075a5b8821cbc81ebee635db1ebcc769d
|
experiments/someimp.py
|
experiments/someimp.py
|
#!/usr/bin/env python3
import threading
from time import sleep
def mymap(func, elements):
return [func(ele) for ele in elements]
assert mymap(lambda x: x*x, [1, 2, 3, 5]) == [1, 4, 9, 25]
def dbounce(func, wait):
timeout = None
def exec():
nonlocal timeout
if timeout:
timeout.cancel()
timeout = threading.Timer(wait, func)
timeout.start()
return exec
def log():
print("log called")
f = dbounce(log, 5)
f()
f()
sleep(4)
f()
f()
sleep(8)
|
Implement JS functions in Python
|
Implement JS functions in Python
|
Python
|
unlicense
|
fleith/coding,fleith/coding,fleith/coding
|
Implement JS functions in Python
|
#!/usr/bin/env python3
import threading
from time import sleep
def mymap(func, elements):
return [func(ele) for ele in elements]
assert mymap(lambda x: x*x, [1, 2, 3, 5]) == [1, 4, 9, 25]
def dbounce(func, wait):
timeout = None
def exec():
nonlocal timeout
if timeout:
timeout.cancel()
timeout = threading.Timer(wait, func)
timeout.start()
return exec
def log():
print("log called")
f = dbounce(log, 5)
f()
f()
sleep(4)
f()
f()
sleep(8)
|
<commit_before><commit_msg>Implement JS functions in Python<commit_after>
|
#!/usr/bin/env python3
import threading
from time import sleep
def mymap(func, elements):
return [func(ele) for ele in elements]
assert mymap(lambda x: x*x, [1, 2, 3, 5]) == [1, 4, 9, 25]
def dbounce(func, wait):
timeout = None
def exec():
nonlocal timeout
if timeout:
timeout.cancel()
timeout = threading.Timer(wait, func)
timeout.start()
return exec
def log():
print("log called")
f = dbounce(log, 5)
f()
f()
sleep(4)
f()
f()
sleep(8)
|
Implement JS functions in Python#!/usr/bin/env python3
import threading
from time import sleep
def mymap(func, elements):
return [func(ele) for ele in elements]
assert mymap(lambda x: x*x, [1, 2, 3, 5]) == [1, 4, 9, 25]
def dbounce(func, wait):
timeout = None
def exec():
nonlocal timeout
if timeout:
timeout.cancel()
timeout = threading.Timer(wait, func)
timeout.start()
return exec
def log():
print("log called")
f = dbounce(log, 5)
f()
f()
sleep(4)
f()
f()
sleep(8)
|
<commit_before><commit_msg>Implement JS functions in Python<commit_after>#!/usr/bin/env python3
import threading
from time import sleep
def mymap(func, elements):
return [func(ele) for ele in elements]
assert mymap(lambda x: x*x, [1, 2, 3, 5]) == [1, 4, 9, 25]
def dbounce(func, wait):
timeout = None
def exec():
nonlocal timeout
if timeout:
timeout.cancel()
timeout = threading.Timer(wait, func)
timeout.start()
return exec
def log():
print("log called")
f = dbounce(log, 5)
f()
f()
sleep(4)
f()
f()
sleep(8)
|
|
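A small self-contained sketch (illustrative names only) of the same debounce idea as the record above, with a timestamp so the behaviour is visible: every call cancels the pending timer, so the wrapped function fires once, roughly wait seconds after the last call.

#!/usr/bin/env python3
import threading
import time

def debounce(func, wait):
    timer = None
    def wrapper():
        nonlocal timer
        if timer:
            timer.cancel()          # cancel the previously scheduled call
        timer = threading.Timer(wait, func)
        timer.start()
    return wrapper

def say():
    print("fired at", time.strftime("%H:%M:%S"))

if __name__ == "__main__":
    debounced = debounce(say, 2)
    for _ in range(3):
        debounced()                 # only the last of these calls survives
        time.sleep(0.5)
    time.sleep(3)                   # allow the final timer to fire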
c661d22facbc35b633213b1591bd3aef676ab634
|
tests/test_spinsolve.py
|
tests/test_spinsolve.py
|
""" Tests for the fileio.spinsolve submodule """
import nmrglue as ng
from pathlib import Path
from setup import DATA_DIR
def test_acqu():
""" read nmr_fid.dx """
dic, data = ng.spinsolve.read(Path(DATA_DIR) / "spinsolve" / "ethanol", "nmr_fid.dx")
assert dic["acqu"]["Sample"] == "EtOH"
assert dic["acqu"]["Solvent"] == "None"
def test_jcamp_dx():
""" read nmr_fid.dx """
dic, data = ng.spinsolve.read(Path(DATA_DIR) / "spinsolve" / "ethanol", "nmr_fid.dx")
assert data.size == 32768
assert data.shape == (32768,)
assert "Magritek Spinsolve" in dic["dx"]["_comments"][0]
def test_data1d():
""" read nmr_fid.dx """
dic, data = ng.spinsolve.read(Path(DATA_DIR) / "spinsolve" / "ethanol", "data.1d")
assert dic["spectrum"]["xDim"] == 32768
assert len(dic["spectrum"]["xaxis"]) == 32768
assert data.size == 32768
assert data.shape == (32768,)
def test_guess_acqu():
""" guess_udic based on acqu dictionary """
dic, data = ng.spinsolve.read(Path(DATA_DIR) / "spinsolve" / "ethanol", "nmr_fid.dx")
udic = ng.spinsolve.guess_udic(dic, data)
assert udic[0]["sw"] == 5000
assert 43.49 < udic[0]["obs"] < 43.50
assert 206 < udic[0]["car"] < 207
assert udic[0]["size"] == 32768
assert udic[0]["label"] == "1H"
def test_guess_jcamp_dx():
""" guess_udic based on dx dictionary """
dic, data = ng.spinsolve.read(Path(DATA_DIR) / "spinsolve" / "ethanol", "nmr_fid.dx")
# Drop acqu dict that would be used as default
dic["acqu"] = {}
udic = ng.spinsolve.guess_udic(dic, data)
assert 4999 < udic[0]["sw"] < 5001
assert 43.49 < udic[0]["obs"] < 43.50
assert 206 < udic[0]["car"] < 207
assert udic[0]["size"] == 32768
assert udic[0]["label"] == "1H"
|
Add test file for ng.fileio.spinsolve
|
Add test file for ng.fileio.spinsolve
|
Python
|
bsd-3-clause
|
kaustubhmote/nmrglue,jjhelmus/nmrglue,kaustubhmote/nmrglue,jjhelmus/nmrglue
|
Add test file for ng.fileio.spinsolve
|
""" Tests for the fileio.spinsolve submodule """
import nmrglue as ng
from pathlib import Path
from setup import DATA_DIR
def test_acqu():
""" read nmr_fid.dx """
dic, data = ng.spinsolve.read(Path(DATA_DIR) / "spinsolve" / "ethanol", "nmr_fid.dx")
assert dic["acqu"]["Sample"] == "EtOH"
assert dic["acqu"]["Solvent"] == "None"
def test_jcamp_dx():
""" read nmr_fid.dx """
dic, data = ng.spinsolve.read(Path(DATA_DIR) / "spinsolve" / "ethanol", "nmr_fid.dx")
assert data.size == 32768
assert data.shape == (32768,)
assert "Magritek Spinsolve" in dic["dx"]["_comments"][0]
def test_data1d():
""" read nmr_fid.dx """
dic, data = ng.spinsolve.read(Path(DATA_DIR) / "spinsolve" / "ethanol", "data.1d")
assert dic["spectrum"]["xDim"] == 32768
assert len(dic["spectrum"]["xaxis"]) == 32768
assert data.size == 32768
assert data.shape == (32768,)
def test_guess_acqu():
""" guess_udic based on acqu dictionary """
dic, data = ng.spinsolve.read(Path(DATA_DIR) / "spinsolve" / "ethanol", "nmr_fid.dx")
udic = ng.spinsolve.guess_udic(dic, data)
assert udic[0]["sw"] == 5000
assert 43.49 < udic[0]["obs"] < 43.50
assert 206 < udic[0]["car"] < 207
assert udic[0]["size"] == 32768
assert udic[0]["label"] == "1H"
def test_guess_jcamp_dx():
""" guess_udic based on dx dictionary """
dic, data = ng.spinsolve.read(Path(DATA_DIR) / "spinsolve" / "ethanol", "nmr_fid.dx")
# Drop acqu dict that would be used as default
dic["acqu"] = {}
udic = ng.spinsolve.guess_udic(dic, data)
assert 4999 < udic[0]["sw"] < 5001
assert 43.49 < udic[0]["obs"] < 43.50
assert 206 < udic[0]["car"] < 207
assert udic[0]["size"] == 32768
assert udic[0]["label"] == "1H"
|
<commit_before><commit_msg>Add test file for ng.fileio.spinsolve<commit_after>
|
""" Tests for the fileio.spinsolve submodule """
import nmrglue as ng
from pathlib import Path
from setup import DATA_DIR
def test_acqu():
""" read nmr_fid.dx """
dic, data = ng.spinsolve.read(Path(DATA_DIR) / "spinsolve" / "ethanol", "nmr_fid.dx")
assert dic["acqu"]["Sample"] == "EtOH"
assert dic["acqu"]["Solvent"] == "None"
def test_jcamp_dx():
""" read nmr_fid.dx """
dic, data = ng.spinsolve.read(Path(DATA_DIR) / "spinsolve" / "ethanol", "nmr_fid.dx")
assert data.size == 32768
assert data.shape == (32768,)
assert "Magritek Spinsolve" in dic["dx"]["_comments"][0]
def test_data1d():
""" read nmr_fid.dx """
dic, data = ng.spinsolve.read(Path(DATA_DIR) / "spinsolve" / "ethanol", "data.1d")
assert dic["spectrum"]["xDim"] == 32768
assert len(dic["spectrum"]["xaxis"]) == 32768
assert data.size == 32768
assert data.shape == (32768,)
def test_guess_acqu():
""" guess_udic based on acqu dictionary """
dic, data = ng.spinsolve.read(Path(DATA_DIR) / "spinsolve" / "ethanol", "nmr_fid.dx")
udic = ng.spinsolve.guess_udic(dic, data)
assert udic[0]["sw"] == 5000
assert 43.49 < udic[0]["obs"] < 43.50
assert 206 < udic[0]["car"] < 207
assert udic[0]["size"] == 32768
assert udic[0]["label"] == "1H"
def test_guess_jcamp_dx():
""" guess_udic based on dx dictionary """
dic, data = ng.spinsolve.read(Path(DATA_DIR) / "spinsolve" / "ethanol", "nmr_fid.dx")
# Drop acqu dict that would be used as default
dic["acqu"] = {}
udic = ng.spinsolve.guess_udic(dic, data)
assert 4999 < udic[0]["sw"] < 5001
assert 43.49 < udic[0]["obs"] < 43.50
assert 206 < udic[0]["car"] < 207
assert udic[0]["size"] == 32768
assert udic[0]["label"] == "1H"
|
Add test file for ng.fileio.spinsolve""" Tests for the fileio.spinsolve submodule """
import nmrglue as ng
from pathlib import Path
from setup import DATA_DIR
def test_acqu():
""" read nmr_fid.dx """
dic, data = ng.spinsolve.read(Path(DATA_DIR) / "spinsolve" / "ethanol", "nmr_fid.dx")
assert dic["acqu"]["Sample"] == "EtOH"
assert dic["acqu"]["Solvent"] == "None"
def test_jcamp_dx():
""" read nmr_fid.dx """
dic, data = ng.spinsolve.read(Path(DATA_DIR) / "spinsolve" / "ethanol", "nmr_fid.dx")
assert data.size == 32768
assert data.shape == (32768,)
assert "Magritek Spinsolve" in dic["dx"]["_comments"][0]
def test_data1d():
""" read nmr_fid.dx """
dic, data = ng.spinsolve.read(Path(DATA_DIR) / "spinsolve" / "ethanol", "data.1d")
assert dic["spectrum"]["xDim"] == 32768
assert len(dic["spectrum"]["xaxis"]) == 32768
assert data.size == 32768
assert data.shape == (32768,)
def test_guess_acqu():
""" guess_udic based on acqu dictionary """
dic, data = ng.spinsolve.read(Path(DATA_DIR) / "spinsolve" / "ethanol", "nmr_fid.dx")
udic = ng.spinsolve.guess_udic(dic, data)
assert udic[0]["sw"] == 5000
assert 43.49 < udic[0]["obs"] < 43.50
assert 206 < udic[0]["car"] < 207
assert udic[0]["size"] == 32768
assert udic[0]["label"] == "1H"
def test_guess_jcamp_dx():
""" guess_udic based on dx dictionary """
dic, data = ng.spinsolve.read(Path(DATA_DIR) / "spinsolve" / "ethanol", "nmr_fid.dx")
# Drop acqu dict that would be used as default
dic["acqu"] = {}
udic = ng.spinsolve.guess_udic(dic, data)
assert 4999 < udic[0]["sw"] < 5001
assert 43.49 < udic[0]["obs"] < 43.50
assert 206 < udic[0]["car"] < 207
assert udic[0]["size"] == 32768
assert udic[0]["label"] == "1H"
|
<commit_before><commit_msg>Add test file for ng.fileio.spinsolve<commit_after>""" Tests for the fileio.spinsolve submodule """
import nmrglue as ng
from pathlib import Path
from setup import DATA_DIR
def test_acqu():
""" read nmr_fid.dx """
dic, data = ng.spinsolve.read(Path(DATA_DIR) / "spinsolve" / "ethanol", "nmr_fid.dx")
assert dic["acqu"]["Sample"] == "EtOH"
assert dic["acqu"]["Solvent"] == "None"
def test_jcamp_dx():
""" read nmr_fid.dx """
dic, data = ng.spinsolve.read(Path(DATA_DIR) / "spinsolve" / "ethanol", "nmr_fid.dx")
assert data.size == 32768
assert data.shape == (32768,)
assert "Magritek Spinsolve" in dic["dx"]["_comments"][0]
def test_data1d():
""" read nmr_fid.dx """
dic, data = ng.spinsolve.read(Path(DATA_DIR) / "spinsolve" / "ethanol", "data.1d")
assert dic["spectrum"]["xDim"] == 32768
assert len(dic["spectrum"]["xaxis"]) == 32768
assert data.size == 32768
assert data.shape == (32768,)
def test_guess_acqu():
""" guess_udic based on acqu dictionary """
dic, data = ng.spinsolve.read(Path(DATA_DIR) / "spinsolve" / "ethanol", "nmr_fid.dx")
udic = ng.spinsolve.guess_udic(dic, data)
assert udic[0]["sw"] == 5000
assert 43.49 < udic[0]["obs"] < 43.50
assert 206 < udic[0]["car"] < 207
assert udic[0]["size"] == 32768
assert udic[0]["label"] == "1H"
def test_guess_jcamp_dx():
""" guess_udic based on dx dictionary """
dic, data = ng.spinsolve.read(Path(DATA_DIR) / "spinsolve" / "ethanol", "nmr_fid.dx")
# Drop acqu dict that would be used as default
dic["acqu"] = {}
udic = ng.spinsolve.guess_udic(dic, data)
assert 4999 < udic[0]["sw"] < 5001
assert 43.49 < udic[0]["obs"] < 43.50
assert 206 < udic[0]["car"] < 207
assert udic[0]["size"] == 32768
assert udic[0]["label"] == "1H"
|
|
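A rough usage sketch building on the tests above (not part of the test file): the guessed universal dictionary is typically handed to nmrglue's unit-conversion helper. The call to ng.fileiobase.uc_from_udic is an assumption about the installed nmrglue version, and the same example data directory is reused.

import nmrglue as ng
from pathlib import Path
from setup import DATA_DIR

dic, data = ng.spinsolve.read(Path(DATA_DIR) / "spinsolve" / "ethanol", "nmr_fid.dx")
udic = ng.spinsolve.guess_udic(dic, data)
uc = ng.fileiobase.uc_from_udic(udic)   # point <-> ppm/Hz conversion
print(udic[0]["label"], udic[0]["sw"], uc.ppm(0))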
5356001d4244e67d6b70f0d4715b153b7d851293
|
ci/run_all_spiders.py
|
ci/run_all_spiders.py
|
from scrapy.utils.project import get_project_settings
from scrapy.crawler import CrawlerProcess
if __name__ == '__main__':
settings = get_project_settings()
settings.set('LOG_FILE', 'all_spiders.log')
settings.set('LOG_LEVEL', 'WARN')
settings.set('TELNETCONSOLE_ENABLED', False)
settings.set('FEED_URI', 'output.ndgeojson')
settings.set('FEED_FORMAT', 'ndgeojson')
settings.get('ITEM_PIPELINES')['locations.pipelines.ApplySpiderNamePipeline'] = 100
process = CrawlerProcess(settings)
spider = process.spider_loader.list()[0]
process.crawl(spider)
# for spider_name in process.spiders.list():
# process.crawl(spider_name)
process.start()
|
Add script that runs all the spiders
|
Add script that runs all the spiders
|
Python
|
mit
|
iandees/all-the-places,iandees/all-the-places,iandees/all-the-places
|
Add script that runs all the spiders
|
from scrapy.utils.project import get_project_settings
from scrapy.crawler import CrawlerProcess
if __name__ == '__main__':
settings = get_project_settings()
settings.set('LOG_FILE', 'all_spiders.log')
settings.set('LOG_LEVEL', 'WARN')
settings.set('TELNETCONSOLE_ENABLED', False)
settings.set('FEED_URI', 'output.ndgeojson')
settings.set('FEED_FORMAT', 'ndgeojson')
settings.get('ITEM_PIPELINES')['locations.pipelines.ApplySpiderNamePipeline'] = 100
process = CrawlerProcess(settings)
spider = process.spider_loader.list()[0]
process.crawl(spider)
# for spider_name in process.spiders.list():
# process.crawl(spider_name)
process.start()
|
<commit_before><commit_msg>Add script that runs all the spiders<commit_after>
|
from scrapy.utils.project import get_project_settings
from scrapy.crawler import CrawlerProcess
if __name__ == '__main__':
settings = get_project_settings()
settings.set('LOG_FILE', 'all_spiders.log')
settings.set('LOG_LEVEL', 'WARN')
settings.set('TELNETCONSOLE_ENABLED', False)
settings.set('FEED_URI', 'output.ndgeojson')
settings.set('FEED_FORMAT', 'ndgeojson')
settings.get('ITEM_PIPELINES')['locations.pipelines.ApplySpiderNamePipeline'] = 100
process = CrawlerProcess(settings)
spider = process.spider_loader.list()[0]
process.crawl(spider)
# for spider_name in process.spiders.list():
# process.crawl(spider_name)
process.start()
|
Add script that runs all the spidersfrom scrapy.utils.project import get_project_settings
from scrapy.crawler import CrawlerProcess
if __name__ == '__main__':
settings = get_project_settings()
settings.set('LOG_FILE', 'all_spiders.log')
settings.set('LOG_LEVEL', 'WARN')
settings.set('TELNETCONSOLE_ENABLED', False)
settings.set('FEED_URI', 'output.ndgeojson')
settings.set('FEED_FORMAT', 'ndgeojson')
settings.get('ITEM_PIPELINES')['locations.pipelines.ApplySpiderNamePipeline'] = 100
process = CrawlerProcess(settings)
spider = process.spider_loader.list()[0]
process.crawl(spider)
# for spider_name in process.spiders.list():
# process.crawl(spider_name)
process.start()
|
<commit_before><commit_msg>Add script that runs all the spiders<commit_after>from scrapy.utils.project import get_project_settings
from scrapy.crawler import CrawlerProcess
if __name__ == '__main__':
settings = get_project_settings()
settings.set('LOG_FILE', 'all_spiders.log')
settings.set('LOG_LEVEL', 'WARN')
settings.set('TELNETCONSOLE_ENABLED', False)
settings.set('FEED_URI', 'output.ndgeojson')
settings.set('FEED_FORMAT', 'ndgeojson')
settings.get('ITEM_PIPELINES')['locations.pipelines.ApplySpiderNamePipeline'] = 100
process = CrawlerProcess(settings)
spider = process.spider_loader.list()[0]
process.crawl(spider)
# for spider_name in process.spiders.list():
# process.crawl(spider_name)
process.start()
|
|
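Note that the committed script queues only the first spider returned by the loader, and the commented-out loop still references the older process.spiders attribute. A sketch of what crawling every spider would presumably look like, using the same spider_loader attribute the script already relies on:

from scrapy.utils.project import get_project_settings
from scrapy.crawler import CrawlerProcess

if __name__ == '__main__':
    settings = get_project_settings()
    process = CrawlerProcess(settings)
    # queue every spider known to the project before starting the reactor
    for spider_name in process.spider_loader.list():
        process.crawl(spider_name)
    process.start()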
dbaf49af9553257c63f3374103ccdc1e6c40f20c
|
test/integration/ggrc_basic_permissions/test_undeleteable.py
|
test/integration/ggrc_basic_permissions/test_undeleteable.py
|
# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: andraz@reciprocitylabs.com
# Maintained By: andraz@reciprocitylabs.com
"""
Test that some objects cannot be deleted by anyone.
"""
from integration.ggrc import TestCase
from integration.ggrc.api_helper import Api
from integration.ggrc.generator import ObjectGenerator
class TestReader(TestCase):
"""Test that some objects cannot be deleted by anyone."""
def setUp(self):
TestCase.setUp(self)
self.api = Api()
self.object_generator = ObjectGenerator()
self.init_users()
self.init_objects()
def init_users(self):
""" Init users needed by the test cases """
users = [("creator", "Creator"), ("reader", "Reader"),
("editor", "Editor"), ("admin", "gGRC Admin")]
self.users = {}
for (name, role) in users:
_, user = self.object_generator.generate_person(
data={"name": name}, user_role=role)
self.users[name] = user
def init_objects(self):
"""Creates the objects used by all the tests"""
self.api.set_user(self.users["admin"])
_, person = self.object_generator.generate_person()
self.objects = [person]
def test_undeletable_objects(self):
"""No user shoud be allowed to delete these objects."""
for role, user in self.users.iteritems():
self.api.set_user(user)
for obj in self.objects:
response = self.api.delete(obj)
self.assertEqual(response.status_code, 403,
"{} can delete {}".format(role, obj.type))
|
Add a test for objects that cannot be deleted
|
Add a test for objects that cannot be deleted
|
Python
|
apache-2.0
|
josthkko/ggrc-core,VinnieJohns/ggrc-core,plamut/ggrc-core,NejcZupec/ggrc-core,NejcZupec/ggrc-core,edofic/ggrc-core,VinnieJohns/ggrc-core,VinnieJohns/ggrc-core,plamut/ggrc-core,AleksNeStu/ggrc-core,j0gurt/ggrc-core,AleksNeStu/ggrc-core,selahssea/ggrc-core,AleksNeStu/ggrc-core,andrei-karalionak/ggrc-core,andrei-karalionak/ggrc-core,edofic/ggrc-core,selahssea/ggrc-core,plamut/ggrc-core,selahssea/ggrc-core,kr41/ggrc-core,NejcZupec/ggrc-core,andrei-karalionak/ggrc-core,edofic/ggrc-core,AleksNeStu/ggrc-core,andrei-karalionak/ggrc-core,josthkko/ggrc-core,j0gurt/ggrc-core,selahssea/ggrc-core,kr41/ggrc-core,josthkko/ggrc-core,plamut/ggrc-core,edofic/ggrc-core,kr41/ggrc-core,VinnieJohns/ggrc-core,NejcZupec/ggrc-core,j0gurt/ggrc-core,kr41/ggrc-core,josthkko/ggrc-core,j0gurt/ggrc-core
|
Add a test for objects that cannot be deleted
|
# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: andraz@reciprocitylabs.com
# Maintained By: andraz@reciprocitylabs.com
"""
Test that some objects cannot be deleted by anyone.
"""
from integration.ggrc import TestCase
from integration.ggrc.api_helper import Api
from integration.ggrc.generator import ObjectGenerator
class TestReader(TestCase):
"""Test that some objects cannot be deleted by anyone."""
def setUp(self):
TestCase.setUp(self)
self.api = Api()
self.object_generator = ObjectGenerator()
self.init_users()
self.init_objects()
def init_users(self):
""" Init users needed by the test cases """
users = [("creator", "Creator"), ("reader", "Reader"),
("editor", "Editor"), ("admin", "gGRC Admin")]
self.users = {}
for (name, role) in users:
_, user = self.object_generator.generate_person(
data={"name": name}, user_role=role)
self.users[name] = user
def init_objects(self):
"""Creates the objects used by all the tests"""
self.api.set_user(self.users["admin"])
_, person = self.object_generator.generate_person()
self.objects = [person]
def test_undeletable_objects(self):
"""No user shoud be allowed to delete these objects."""
for role, user in self.users.iteritems():
self.api.set_user(user)
for obj in self.objects:
response = self.api.delete(obj)
self.assertEqual(response.status_code, 403,
"{} can delete {}".format(role, obj.type))
|
<commit_before><commit_msg>Add a test for objects that cannot be deleted<commit_after>
|
# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: andraz@reciprocitylabs.com
# Maintained By: andraz@reciprocitylabs.com
"""
Test that some objects cannot be deleted by anyone.
"""
from integration.ggrc import TestCase
from integration.ggrc.api_helper import Api
from integration.ggrc.generator import ObjectGenerator
class TestReader(TestCase):
"""Test that some objects cannot be deleted by anyone."""
def setUp(self):
TestCase.setUp(self)
self.api = Api()
self.object_generator = ObjectGenerator()
self.init_users()
self.init_objects()
def init_users(self):
""" Init users needed by the test cases """
users = [("creator", "Creator"), ("reader", "Reader"),
("editor", "Editor"), ("admin", "gGRC Admin")]
self.users = {}
for (name, role) in users:
_, user = self.object_generator.generate_person(
data={"name": name}, user_role=role)
self.users[name] = user
def init_objects(self):
"""Creates the objects used by all the tests"""
self.api.set_user(self.users["admin"])
_, person = self.object_generator.generate_person()
self.objects = [person]
def test_undeletable_objects(self):
"""No user shoud be allowed to delete these objects."""
for role, user in self.users.iteritems():
self.api.set_user(user)
for obj in self.objects:
response = self.api.delete(obj)
self.assertEqual(response.status_code, 403,
"{} can delete {}".format(role, obj.type))
|
Add a test for objects that cannot be deleted# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: andraz@reciprocitylabs.com
# Maintained By: andraz@reciprocitylabs.com
"""
Test that some objects cannot be deleted by anyone.
"""
from integration.ggrc import TestCase
from integration.ggrc.api_helper import Api
from integration.ggrc.generator import ObjectGenerator
class TestReader(TestCase):
"""Test that some objects cannot be deleted by anyone."""
def setUp(self):
TestCase.setUp(self)
self.api = Api()
self.object_generator = ObjectGenerator()
self.init_users()
self.init_objects()
def init_users(self):
""" Init users needed by the test cases """
users = [("creator", "Creator"), ("reader", "Reader"),
("editor", "Editor"), ("admin", "gGRC Admin")]
self.users = {}
for (name, role) in users:
_, user = self.object_generator.generate_person(
data={"name": name}, user_role=role)
self.users[name] = user
def init_objects(self):
"""Creates the objects used by all the tests"""
self.api.set_user(self.users["admin"])
_, person = self.object_generator.generate_person()
self.objects = [person]
def test_undeletable_objects(self):
"""No user shoud be allowed to delete these objects."""
for role, user in self.users.iteritems():
self.api.set_user(user)
for obj in self.objects:
response = self.api.delete(obj)
self.assertEqual(response.status_code, 403,
"{} can delete {}".format(role, obj.type))
|
<commit_before><commit_msg>Add a test for objects that cannot be deleted<commit_after># Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: andraz@reciprocitylabs.com
# Maintained By: andraz@reciprocitylabs.com
"""
Test that some objects cannot be deleted by anyone.
"""
from integration.ggrc import TestCase
from integration.ggrc.api_helper import Api
from integration.ggrc.generator import ObjectGenerator
class TestReader(TestCase):
"""Test that some objects cannot be deleted by anyone."""
def setUp(self):
TestCase.setUp(self)
self.api = Api()
self.object_generator = ObjectGenerator()
self.init_users()
self.init_objects()
def init_users(self):
""" Init users needed by the test cases """
users = [("creator", "Creator"), ("reader", "Reader"),
("editor", "Editor"), ("admin", "gGRC Admin")]
self.users = {}
for (name, role) in users:
_, user = self.object_generator.generate_person(
data={"name": name}, user_role=role)
self.users[name] = user
def init_objects(self):
"""Creates the objects used by all the tests"""
self.api.set_user(self.users["admin"])
_, person = self.object_generator.generate_person()
self.objects = [person]
def test_undeletable_objects(self):
"""No user shoud be allowed to delete these objects."""
for role, user in self.users.iteritems():
self.api.set_user(user)
for obj in self.objects:
response = self.api.delete(obj)
self.assertEqual(response.status_code, 403,
"{} can delete {}".format(role, obj.type))
|
|
8a394794c5c663ef11ef9e44df5448d00c859357
|
interface/plugin/farmanager/01autoguid/__init__.py
|
interface/plugin/farmanager/01autoguid/__init__.py
|
"""
Needing to change all GUIDs for every new plugin is daunting, so let's
generate them from strings that are unique to each plugin.
Low-level Far Manager API is here:
* https://api.farmanager.com/en/exported_functions/getglobalinfow.html
"""
# --- utility functions ---
import hashlib
def getuuid(data):
"""Generate UUID from `data` string"""
if type(data) != bytes:
data = data.encode('utf-8')
h = hashlib.sha256(data).hexdigest()[:32].upper()
for i, pos in enumerate([8, 12, 16, 20]):
h = h[:i+pos] + '-' + h[i+pos:]
return h
# --- plugin interface
def GetGlobalInfoW(info):
""" Called by Far Manager, plugin needs to fill the info """
info["Title"] = "____" # should be set and non-empty
info["Author"] = "_" # should be set and non-empty
info["Description"] = "Simple Python plugin" # should be set
info["Guid"] = getuuid(info["Title"])
def GetPluginInfoW(info):
info["MenuString"] = "01autoguid"
info["Guid"] = getuuid(info["MenuString"])
def OpenW(info):
print("[open] " + __file__)
|
Add 01autoguid/ plugin with own GUIDs autogenerated
|
Add 01autoguid/ plugin with own GUIDs autogenerated
|
Python
|
unlicense
|
techtonik/discovery,techtonik/discovery,techtonik/discovery
|
Add 01autoguid/ plugin with own GUIDs autogenerated
|
"""
Needing to change all GUIDs for every new plugin is daunting, so let's
generate them from strings that are unique to each plugin.
Low-level Far Manager API is here:
* https://api.farmanager.com/en/exported_functions/getglobalinfow.html
"""
# --- utility functions ---
import hashlib
def getuuid(data):
"""Generate UUID from `data` string"""
if type(data) != bytes:
data = data.encode('utf-8')
h = hashlib.sha256(data).hexdigest()[:32].upper()
for i, pos in enumerate([8, 12, 16, 20]):
h = h[:i+pos] + '-' + h[i+pos:]
return h
# --- plugin interface
def GetGlobalInfoW(info):
""" Called by Far Manager, plugin needs to fill the info """
info["Title"] = "____" # should be set and non-empty
info["Author"] = "_" # should be set and non-empty
info["Description"] = "Simple Python plugin" # should be set
info["Guid"] = getuuid(info["Title"])
def GetPluginInfoW(info):
info["MenuString"] = "01autoguid"
info["Guid"] = getuuid(info["MenuString"])
def OpenW(info):
print("[open] " + __file__)
|
<commit_before><commit_msg>Add 01autoguid/ plugin with own GUIDs autogenerated<commit_after>
|
"""
Needing to change all GUIDs for every new plugin is daunting, so let's
generate them from strings that are unique to each plugin.
Low-level Far Manager API is here:
* https://api.farmanager.com/en/exported_functions/getglobalinfow.html
"""
# --- utility functions ---
import hashlib
def getuuid(data):
"""Generate UUID from `data` string"""
if type(data) != bytes:
data = data.encode('utf-8')
h = hashlib.sha256(data).hexdigest()[:32].upper()
for i, pos in enumerate([8, 12, 16, 20]):
h = h[:i+pos] + '-' + h[i+pos:]
return h
# --- plugin interface
def GetGlobalInfoW(info):
""" Called by Far Manager, plugin needs to fill the info """
info["Title"] = "____" # should be set and non-empty
info["Author"] = "_" # should be set and non-empty
info["Description"] = "Simple Python plugin" # should be set
info["Guid"] = getuuid(info["Title"])
def GetPluginInfoW(info):
info["MenuString"] = "01autoguid"
info["Guid"] = getuuid(info["MenuString"])
def OpenW(info):
print("[open] " + __file__)
|
Add 01autoguid/ plugin with own GUIDs autogenerated"""
Needing to change all GUIDs for every new plugin is daunting, so let's
generate them from strings that are unique to each plugin.
Low-level Far Manager API is here:
* https://api.farmanager.com/en/exported_functions/getglobalinfow.html
"""
# --- utility functions ---
import hashlib
def getuuid(data):
"""Generate UUID from `data` string"""
if type(data) != bytes:
data = data.encode('utf-8')
h = hashlib.sha256(data).hexdigest()[:32].upper()
for i, pos in enumerate([8, 12, 16, 20]):
h = h[:i+pos] + '-' + h[i+pos:]
return h
# --- plugin interface
def GetGlobalInfoW(info):
""" Called by Far Manager, plugin needs to fill the info """
info["Title"] = "____" # should be set and non-empty
info["Author"] = "_" # should be set and non-empty
info["Description"] = "Simple Python plugin" # should be set
info["Guid"] = getuuid(info["Title"])
def GetPluginInfoW(info):
info["MenuString"] = "01autoguid"
info["Guid"] = getuuid(info["MenuString"])
def OpenW(info):
print("[open] " + __file__)
|
<commit_before><commit_msg>Add 01autoguid/ plugin with own GUIDs autogenerated<commit_after>"""
Needing to change all GUIDs for every new plugin is daunting, so let's
generate them from strings that are unique to each plugin.
Low-level Far Manager API is here:
* https://api.farmanager.com/en/exported_functions/getglobalinfow.html
"""
# --- utility functions ---
import hashlib
def getuuid(data):
"""Generate UUID from `data` string"""
if type(data) != bytes:
data = data.encode('utf-8')
h = hashlib.sha256(data).hexdigest()[:32].upper()
for i, pos in enumerate([8, 12, 16, 20]):
h = h[:i+pos] + '-' + h[i+pos:]
return h
# --- plugin interface
def GetGlobalInfoW(info):
""" Called by Far Manager, plugin needs to fill the info """
info["Title"] = "____" # should be set and non-empty
info["Author"] = "_" # should be set and non-empty
info["Description"] = "Simple Python plugin" # should be set
info["Guid"] = getuuid(info["Title"])
def GetPluginInfoW(info):
info["MenuString"] = "01autoguid"
info["Guid"] = getuuid(info["MenuString"])
def OpenW(info):
print("[open] " + __file__)
|
|
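A standalone check of the getuuid helper from the record above, showing that the derived GUIDs are deterministic and follow the 8-4-4-4-12 layout; the assertions are illustrative only.

import hashlib

def getuuid(data):
    # copied from the plugin above: sha256, truncated to 32 hex digits,
    # then dashed into the 8-4-4-4-12 GUID layout
    if type(data) != bytes:
        data = data.encode('utf-8')
    h = hashlib.sha256(data).hexdigest()[:32].upper()
    for i, pos in enumerate([8, 12, 16, 20]):
        h = h[:i+pos] + '-' + h[i+pos:]
    return h

guid = getuuid("01autoguid")
assert guid == getuuid("01autoguid")                          # deterministic
assert [len(p) for p in guid.split('-')] == [8, 4, 4, 4, 12]  # GUID-shaped
print(guid)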
e0aeb2ebc1bb817ae59bc8b0550ae8b5fecbeba3
|
chipy_org/apps/meetings/feeds.py
|
chipy_org/apps/meetings/feeds.py
|
from django_ical.views import ICalFeed
from .models import Meeting
from datetime import timedelta
class MeetingFeed(ICalFeed):
"""
An iCal feed for meetings
"""
product_id = '-//chipy.org//Meeting//EN'
timezone = 'CST'
def items(self):
return Meeting.objects.order_by('-when').all()
def item_description(self, item):
description = 'RSVP at http://chipy.org\n\n'
for topic in item.topics.all():
description += '{title} by {speaker}\n{description}\n\n'.format(
title=topic.title,
speaker=topic.presentors.all()[0].name,
description=topic.description)
return description
def item_link(self, item):
return ''
def item_location(self, item):
return item.where.address
def item_start_datetime(self, item):
return item.when
def item_end_datetime(self, item):
return item.when + timedelta(hours=1)
def item_title(self, item):
return 'ChiPy Meeting'
|
from django_ical.views import ICalFeed
from .models import Meeting
from datetime import timedelta
class MeetingFeed(ICalFeed):
"""
An iCal feed for meetings
"""
product_id = '-//chipy.org//Meeting//EN'
timezone = 'CST'
def items(self):
return Meeting.objects.order_by('-when').all()
def item_description(self, item):
description = 'RSVP at http://chipy.org\n\n'
for topic in item.topics.all():
description += u'{title} by {speaker}\n{description}\n\n'.format(
title=topic.title,
speaker=topic.presentors.all()[0].name,
description=topic.description)
return description
def item_link(self, item):
return ''
def item_location(self, item):
return item.where.address
def item_start_datetime(self, item):
return item.when
def item_end_datetime(self, item):
return item.when + timedelta(hours=1)
def item_title(self, item):
return 'ChiPy Meeting'
|
Use unicode for ical description
|
Use unicode for ical description
|
Python
|
mit
|
bharathelangovan/chipy.org,tanyaschlusser/chipy.org,bharathelangovan/chipy.org,brianray/chipy.org,agfor/chipy.org,chicagopython/chipy.org,chicagopython/chipy.org,brianray/chipy.org,tanyaschlusser/chipy.org,brianray/chipy.org,agfor/chipy.org,agfor/chipy.org,tanyaschlusser/chipy.org,chicagopython/chipy.org,bharathelangovan/chipy.org,chicagopython/chipy.org
|
from django_ical.views import ICalFeed
from .models import Meeting
from datetime import timedelta
class MeetingFeed(ICalFeed):
"""
An iCal feed for meetings
"""
product_id = '-//chipy.org//Meeting//EN'
timezone = 'CST'
def items(self):
return Meeting.objects.order_by('-when').all()
def item_description(self, item):
description = 'RSVP at http://chipy.org\n\n'
for topic in item.topics.all():
description += '{title} by {speaker}\n{description}\n\n'.format(
title=topic.title,
speaker=topic.presentors.all()[0].name,
description=topic.description)
return description
def item_link(self, item):
return ''
def item_location(self, item):
return item.where.address
def item_start_datetime(self, item):
return item.when
def item_end_datetime(self, item):
return item.when + timedelta(hours=1)
def item_title(self, item):
return 'ChiPy Meeting'
Use unicode for ical description
|
from django_ical.views import ICalFeed
from .models import Meeting
from datetime import timedelta
class MeetingFeed(ICalFeed):
"""
An iCal feed for meetings
"""
product_id = '-//chipy.org//Meeting//EN'
timezone = 'CST'
def items(self):
return Meeting.objects.order_by('-when').all()
def item_description(self, item):
description = 'RSVP at http://chipy.org\n\n'
for topic in item.topics.all():
description += u'{title} by {speaker}\n{description}\n\n'.format(
title=topic.title,
speaker=topic.presentors.all()[0].name,
description=topic.description)
return description
def item_link(self, item):
return ''
def item_location(self, item):
return item.where.address
def item_start_datetime(self, item):
return item.when
def item_end_datetime(self, item):
return item.when + timedelta(hours=1)
def item_title(self, item):
return 'ChiPy Meeting'
|
<commit_before>from django_ical.views import ICalFeed
from .models import Meeting
from datetime import timedelta
class MeetingFeed(ICalFeed):
"""
An iCal feed for meetings
"""
product_id = '-//chipy.org//Meeting//EN'
timezone = 'CST'
def items(self):
return Meeting.objects.order_by('-when').all()
def item_description(self, item):
description = 'RSVP at http://chipy.org\n\n'
for topic in item.topics.all():
description += '{title} by {speaker}\n{description}\n\n'.format(
title=topic.title,
speaker=topic.presentors.all()[0].name,
description=topic.description)
return description
def item_link(self, item):
return ''
def item_location(self, item):
return item.where.address
def item_start_datetime(self, item):
return item.when
def item_end_datetime(self, item):
return item.when + timedelta(hours=1)
def item_title(self, item):
return 'ChiPy Meeting'
<commit_msg>Use unicode for ical description<commit_after>
|
from django_ical.views import ICalFeed
from .models import Meeting
from datetime import timedelta
class MeetingFeed(ICalFeed):
"""
An iCal feed for meetings
"""
product_id = '-//chipy.org//Meeting//EN'
timezone = 'CST'
def items(self):
return Meeting.objects.order_by('-when').all()
def item_description(self, item):
description = 'RSVP at http://chipy.org\n\n'
for topic in item.topics.all():
description += u'{title} by {speaker}\n{description}\n\n'.format(
title=topic.title,
speaker=topic.presentors.all()[0].name,
description=topic.description)
return description
def item_link(self, item):
return ''
def item_location(self, item):
return item.where.address
def item_start_datetime(self, item):
return item.when
def item_end_datetime(self, item):
return item.when + timedelta(hours=1)
def item_title(self, item):
return 'ChiPy Meeting'
|
from django_ical.views import ICalFeed
from .models import Meeting
from datetime import timedelta
class MeetingFeed(ICalFeed):
"""
An iCal feed for meetings
"""
product_id = '-//chipy.org//Meeting//EN'
timezone = 'CST'
def items(self):
return Meeting.objects.order_by('-when').all()
def item_description(self, item):
description = 'RSVP at http://chipy.org\n\n'
for topic in item.topics.all():
description += '{title} by {speaker}\n{description}\n\n'.format(
title=topic.title,
speaker=topic.presentors.all()[0].name,
description=topic.description)
return description
def item_link(self, item):
return ''
def item_location(self, item):
return item.where.address
def item_start_datetime(self, item):
return item.when
def item_end_datetime(self, item):
return item.when + timedelta(hours=1)
def item_title(self, item):
return 'ChiPy Meeting'
Use unicode for ical descriptionfrom django_ical.views import ICalFeed
from .models import Meeting
from datetime import timedelta
class MeetingFeed(ICalFeed):
"""
An iCal feed for meetings
"""
product_id = '-//chipy.org//Meeting//EN'
timezone = 'CST'
def items(self):
return Meeting.objects.order_by('-when').all()
def item_description(self, item):
description = 'RSVP at http://chipy.org\n\n'
for topic in item.topics.all():
description += u'{title} by {speaker}\n{description}\n\n'.format(
title=topic.title,
speaker=topic.presentors.all()[0].name,
description=topic.description)
return description
def item_link(self, item):
return ''
def item_location(self, item):
return item.where.address
def item_start_datetime(self, item):
return item.when
def item_end_datetime(self, item):
return item.when + timedelta(hours=1)
def item_title(self, item):
return 'ChiPy Meeting'
|
<commit_before>from django_ical.views import ICalFeed
from .models import Meeting
from datetime import timedelta
class MeetingFeed(ICalFeed):
"""
An iCal feed for meetings
"""
product_id = '-//chipy.org//Meeting//EN'
timezone = 'CST'
def items(self):
return Meeting.objects.order_by('-when').all()
def item_description(self, item):
description = 'RSVP at http://chipy.org\n\n'
for topic in item.topics.all():
description += '{title} by {speaker}\n{description}\n\n'.format(
title=topic.title,
speaker=topic.presentors.all()[0].name,
description=topic.description)
return description
def item_link(self, item):
return ''
def item_location(self, item):
return item.where.address
def item_start_datetime(self, item):
return item.when
def item_end_datetime(self, item):
return item.when + timedelta(hours=1)
def item_title(self, item):
return 'ChiPy Meeting'
<commit_msg>Use unicode for ical description<commit_after>from django_ical.views import ICalFeed
from .models import Meeting
from datetime import timedelta
class MeetingFeed(ICalFeed):
"""
An iCal feed for meetings
"""
product_id = '-//chipy.org//Meeting//EN'
timezone = 'CST'
def items(self):
return Meeting.objects.order_by('-when').all()
def item_description(self, item):
description = 'RSVP at http://chipy.org\n\n'
for topic in item.topics.all():
description += u'{title} by {speaker}\n{description}\n\n'.format(
title=topic.title,
speaker=topic.presentors.all()[0].name,
description=topic.description)
return description
def item_link(self, item):
return ''
def item_location(self, item):
return item.where.address
def item_start_datetime(self, item):
return item.when
def item_end_datetime(self, item):
return item.when + timedelta(hours=1)
def item_title(self, item):
return 'ChiPy Meeting'
|
bd23a87d28a1d0a1f82b0fd17abfababafba0dc7
|
viaduct/api/page.py
|
viaduct/api/page.py
|
from flask.ext.login import current_user
from viaduct.models.page import Page, PagePermission, PageRevision
from viaduct import db
from flask import request, url_for, render_template
from viaduct.models.group import Group
class PageAPI:
@staticmethod
def remove_page(path):
page = Page.query.filter(Page.path==path).first()
if not page:
return False
for rev in page.revisions.all():
db.session.delete(rev)
for perm in page.permissions.all():
db.session.delete(perm)
db.session.commit()
db.session.delete(page)
db.session.commit()
return True
@staticmethod
def get_footer():
footer = Page.query.filter(Page.path == 'footer').first()
if not footer:
footer = Page('footer')
if footer.revisions.count() > 0:
revision = footer.revisions.order_by(PageRevision.id.desc()).first()
exists = True
else:
revision = PageRevision(footer, current_user,
'', '<b> No footer found </b>'
'', True)
exists = False
print vars(footer)
return render_template('page/get_footer.htm', footer_revision=revision, footer=footer, exists=exists)
|
from flask.ext.login import current_user
from viaduct.models.page import Page, PageRevision
from viaduct import db
from flask import render_template
class PageAPI:
@staticmethod
def remove_page(path):
page = Page.query.filter(Page.path == path).first()
if not page:
return False
for rev in page.revisions.all():
db.session.delete(rev)
for perm in page.permissions.all():
db.session.delete(perm)
db.session.commit()
db.session.delete(page)
db.session.commit()
return True
@staticmethod
def get_footer():
footer = Page.query.filter(Page.path == 'footer').first()
if not footer:
footer = Page('footer')
if footer.revisions.count() > 0:
revision = footer.revisions.order_by(PageRevision.id.desc()).\
first()
exists = True
else:
revision = PageRevision(footer, current_user, '',
'<b> No footer found </b>' '', True)
exists = False
return render_template('page/get_footer.htm', footer_revision=revision,
footer=footer, exists=exists)
|
Remove footer print and make file PEP8 compliant
|
Remove footer print and make file PEP8 compliant
|
Python
|
mit
|
viaict/viaduct,viaict/viaduct,viaict/viaduct,viaict/viaduct,viaict/viaduct
|
from flask.ext.login import current_user
from viaduct.models.page import Page, PagePermission, PageRevision
from viaduct import db
from flask import request, url_for, render_template
from viaduct.models.group import Group
class PageAPI:
@staticmethod
def remove_page(path):
page = Page.query.filter(Page.path==path).first()
if not page:
return False
for rev in page.revisions.all():
db.session.delete(rev)
for perm in page.permissions.all():
db.session.delete(perm)
db.session.commit()
db.session.delete(page)
db.session.commit()
return True
@staticmethod
def get_footer():
footer = Page.query.filter(Page.path == 'footer').first()
if not footer:
footer = Page('footer')
if footer.revisions.count() > 0:
revision = footer.revisions.order_by(PageRevision.id.desc()).first()
exists = True
else:
revision = PageRevision(footer, current_user,
'', '<b> No footer found </b>'
'', True)
exists = False
print vars(footer)
return render_template('page/get_footer.htm', footer_revision=revision, footer=footer, exists=exists)Remove footer print and make file PEP8 compliant
|
from flask.ext.login import current_user
from viaduct.models.page import Page, PageRevision
from viaduct import db
from flask import render_template
class PageAPI:
@staticmethod
def remove_page(path):
page = Page.query.filter(Page.path == path).first()
if not page:
return False
for rev in page.revisions.all():
db.session.delete(rev)
for perm in page.permissions.all():
db.session.delete(perm)
db.session.commit()
db.session.delete(page)
db.session.commit()
return True
@staticmethod
def get_footer():
footer = Page.query.filter(Page.path == 'footer').first()
if not footer:
footer = Page('footer')
if footer.revisions.count() > 0:
revision = footer.revisions.order_by(PageRevision.id.desc()).\
first()
exists = True
else:
revision = PageRevision(footer, current_user, '',
'<b> No footer found </b>' '', True)
exists = False
return render_template('page/get_footer.htm', footer_revision=revision,
footer=footer, exists=exists)
|
<commit_before>from flask.ext.login import current_user
from viaduct.models.page import Page, PagePermission, PageRevision
from viaduct import db
from flask import request, url_for, render_template
from viaduct.models.group import Group
class PageAPI:
@staticmethod
def remove_page(path):
page = Page.query.filter(Page.path==path).first()
if not page:
return False
for rev in page.revisions.all():
db.session.delete(rev)
for perm in page.permissions.all():
db.session.delete(perm)
db.session.commit()
db.session.delete(page)
db.session.commit()
return True
@staticmethod
def get_footer():
footer = Page.query.filter(Page.path == 'footer').first()
if not footer:
footer = Page('footer')
if footer.revisions.count() > 0:
revision = footer.revisions.order_by(PageRevision.id.desc()).first()
exists = True
else:
revision = PageRevision(footer, current_user,
'', '<b> No footer found </b>'
'', True)
exists = False
print vars(footer)
return render_template('page/get_footer.htm', footer_revision=revision, footer=footer, exists=exists)<commit_msg>Remove footer print and make file PEP8 compliant<commit_after>
|
from flask.ext.login import current_user
from viaduct.models.page import Page, PageRevision
from viaduct import db
from flask import render_template
class PageAPI:
@staticmethod
def remove_page(path):
page = Page.query.filter(Page.path == path).first()
if not page:
return False
for rev in page.revisions.all():
db.session.delete(rev)
for perm in page.permissions.all():
db.session.delete(perm)
db.session.commit()
db.session.delete(page)
db.session.commit()
return True
@staticmethod
def get_footer():
footer = Page.query.filter(Page.path == 'footer').first()
if not footer:
footer = Page('footer')
if footer.revisions.count() > 0:
revision = footer.revisions.order_by(PageRevision.id.desc()).\
first()
exists = True
else:
revision = PageRevision(footer, current_user, '',
'<b> No footer found </b>' '', True)
exists = False
return render_template('page/get_footer.htm', footer_revision=revision,
footer=footer, exists=exists)
|
from flask.ext.login import current_user
from viaduct.models.page import Page, PagePermission, PageRevision
from viaduct import db
from flask import request, url_for, render_template
from viaduct.models.group import Group
class PageAPI:
@staticmethod
def remove_page(path):
page = Page.query.filter(Page.path==path).first()
if not page:
return False
for rev in page.revisions.all():
db.session.delete(rev)
for perm in page.permissions.all():
db.session.delete(perm)
db.session.commit()
db.session.delete(page)
db.session.commit()
return True
@staticmethod
def get_footer():
footer = Page.query.filter(Page.path == 'footer').first()
if not footer:
footer = Page('footer')
if footer.revisions.count() > 0:
revision = footer.revisions.order_by(PageRevision.id.desc()).first()
exists = True
else:
revision = PageRevision(footer, current_user,
'', '<b> No footer found </b>'
'', True)
exists = False
print vars(footer)
return render_template('page/get_footer.htm', footer_revision=revision, footer=footer, exists=exists)Remove footer print and make file PEP8 compliantfrom flask.ext.login import current_user
from viaduct.models.page import Page, PageRevision
from viaduct import db
from flask import render_template
class PageAPI:
@staticmethod
def remove_page(path):
page = Page.query.filter(Page.path == path).first()
if not page:
return False
for rev in page.revisions.all():
db.session.delete(rev)
for perm in page.permissions.all():
db.session.delete(perm)
db.session.commit()
db.session.delete(page)
db.session.commit()
return True
@staticmethod
def get_footer():
footer = Page.query.filter(Page.path == 'footer').first()
if not footer:
footer = Page('footer')
if footer.revisions.count() > 0:
revision = footer.revisions.order_by(PageRevision.id.desc()).\
first()
exists = True
else:
revision = PageRevision(footer, current_user, '',
'<b> No footer found </b>' '', True)
exists = False
return render_template('page/get_footer.htm', footer_revision=revision,
footer=footer, exists=exists)
|
<commit_before>from flask.ext.login import current_user
from viaduct.models.page import Page, PagePermission, PageRevision
from viaduct import db
from flask import request, url_for, render_template
from viaduct.models.group import Group
class PageAPI:
@staticmethod
def remove_page(path):
page = Page.query.filter(Page.path==path).first()
if not page:
return False
for rev in page.revisions.all():
db.session.delete(rev)
for perm in page.permissions.all():
db.session.delete(perm)
db.session.commit()
db.session.delete(page)
db.session.commit()
return True
@staticmethod
def get_footer():
footer = Page.query.filter(Page.path == 'footer').first()
if not footer:
footer = Page('footer')
if footer.revisions.count() > 0:
revision = footer.revisions.order_by(PageRevision.id.desc()).first()
exists = True
else:
revision = PageRevision(footer, current_user,
'', '<b> No footer found </b>'
'', True)
exists = False
print vars(footer)
return render_template('page/get_footer.htm', footer_revision=revision, footer=footer, exists=exists)<commit_msg>Remove footer print and make file PEP8 compliant<commit_after>from flask.ext.login import current_user
from viaduct.models.page import Page, PageRevision
from viaduct import db
from flask import render_template
class PageAPI:
@staticmethod
def remove_page(path):
page = Page.query.filter(Page.path == path).first()
if not page:
return False
for rev in page.revisions.all():
db.session.delete(rev)
for perm in page.permissions.all():
db.session.delete(perm)
db.session.commit()
db.session.delete(page)
db.session.commit()
return True
@staticmethod
def get_footer():
footer = Page.query.filter(Page.path == 'footer').first()
if not footer:
footer = Page('footer')
if footer.revisions.count() > 0:
revision = footer.revisions.order_by(PageRevision.id.desc()).\
first()
exists = True
else:
revision = PageRevision(footer, current_user, '',
'<b> No footer found </b>' '', True)
exists = False
return render_template('page/get_footer.htm', footer_revision=revision,
footer=footer, exists=exists)
|
86127511bfc0521969f0d78264f01b91695e1309
|
data_api/migrations/0018_auto_20151114_2159.py
|
data_api/migrations/0018_auto_20151114_2159.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('auth', '0006_require_contenttypes_0002'),
('data_api', '0017_blob_mime_type'),
]
operations = [
migrations.RemoveField(
model_name='localcomputer',
name='group',
),
migrations.AddField(
model_name='localcomputer',
name='group',
field=models.ForeignKey(to='auth.Group', null=True),
),
]
|
Change local_computer group to foreign key from m2m
|
Change local_computer group to foreign key from m2m
|
Python
|
mit
|
bwootton/Dator,bwootton/Dator,bwootton/Dator,bwootton/Dator
|
Change local_computer group to foreign key from m2m
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('auth', '0006_require_contenttypes_0002'),
('data_api', '0017_blob_mime_type'),
]
operations = [
migrations.RemoveField(
model_name='localcomputer',
name='group',
),
migrations.AddField(
model_name='localcomputer',
name='group',
field=models.ForeignKey(to='auth.Group', null=True),
),
]
|
<commit_before><commit_msg>Change local_computer group to foreign key from m2m<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('auth', '0006_require_contenttypes_0002'),
('data_api', '0017_blob_mime_type'),
]
operations = [
migrations.RemoveField(
model_name='localcomputer',
name='group',
),
migrations.AddField(
model_name='localcomputer',
name='group',
field=models.ForeignKey(to='auth.Group', null=True),
),
]
|
Change local_computer group to foreign key from m2m# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('auth', '0006_require_contenttypes_0002'),
('data_api', '0017_blob_mime_type'),
]
operations = [
migrations.RemoveField(
model_name='localcomputer',
name='group',
),
migrations.AddField(
model_name='localcomputer',
name='group',
field=models.ForeignKey(to='auth.Group', null=True),
),
]
|
<commit_before><commit_msg>Change local_computer group to foreign key from m2m<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('auth', '0006_require_contenttypes_0002'),
('data_api', '0017_blob_mime_type'),
]
operations = [
migrations.RemoveField(
model_name='localcomputer',
name='group',
),
migrations.AddField(
model_name='localcomputer',
name='group',
field=models.ForeignKey(to='auth.Group', null=True),
),
]
|
|
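The migration above, per its subject line, turns localcomputer.group from a many-to-many relation into a nullable foreign key; because the storage changes from a join table to a plain column, it is written as RemoveField plus AddField rather than AlterField. A hypothetical sketch of the matching model change (field name taken from the migration, the rest assumed; Django of that era did not yet require on_delete):

from django.contrib.auth.models import Group
from django.db import models

class LocalComputer(models.Model):
    # before: group = models.ManyToManyField(Group)
    group = models.ForeignKey(Group, null=True)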
f22f3953783052734c807639307cf13731323ee5
|
rsk_mind/datasource/datasource_svmlight.py
|
rsk_mind/datasource/datasource_svmlight.py
|
from datasource import Datasource
from ..dataset import Dataset
import os
class SVMLightDatasource(Datasource):
def __init__(self, path):
super(SVMLightDatasource, self).__init__(path)
def read(self):
# NOTE: svmlight format does not include
# names for features, it only uses indexes.
# So headers will be auto-generated from
# indexes.
header = []
rows = []
with open(self.path, "r+b") as infile:
lines = infile.readlines()
# iterate over lines
for line in lines:
# split on " "
tokens = line.split(" ")
# first token is the target(y/n , 1/0)
target = tokens[0]
# get last token (its form is indexN:value)
# and determine the length of features
last_index = tokens[len(tokens)-1].split(":")[0]
# length of features is last_index+1 (zero based)
# create a list of zeroes
row = [0] * (last_index+1)
# set value to row index according to current row
for i in (1, len(tokens)):
parts = tokens[i].split(":")
feat_index = parts[0]
feat_value = parts[1]
row[feat_index] = feat_value
# append target to the end of row
row.append(target)
# append row vector to rows
rows.append(row)
# create header
for i in (0, len(rows[0])-1):
header.append("f_"+str(i))
header.append("target")
return Dataset(header, rows)
def write(dataset):
# NOTE consider the last feature as the Target
# Get the size of features excluding the last one
# because it's the target.
features_with_target = len(dataset.header)
features_without_target = features_with_target-1
# open file to write
with open(self.path, "w+b") as outfile:
# iterate over row
for row in dataset.rows:
# create a string for the row
row_str = ""
# iterate over features
for i in range(0, features_without_target):
if row[i] != 0.0:
row_str += "{}:{} ".format(i, row[i])
# append target value to the beginning and rstrip
target_value = row[features_without_target]
row_str = ("{} ".format(target_value) + row_str).rstrip(" ")
# write row_str to file
outfile.write(row_str)
outfile.write(os.linesep)
|
Add support for svmlight format
|
Add support for svmlight format
This commit adds a Datasource that can read
a file in svmlight format and can write a
Dataset object in an svmlight format file.
|
Python
|
mit
|
rsk-mind/rsk-mind-framework
|
Add support for svmlight format
This commit adds a Datasource that can read
a file in svmlight format and can write a
Dataset object in an svmlight format file.
|
from datasource import Datasource
from ..dataset import Dataset
import os
class SVMLightDatasource(Datasource):
def __init__(self, path):
super(SVMLightDatasource, self).__init__(path)
def read(self):
# NOTE: svmlight format does not include
# names for features, it only uses indexes.
# So headers will be auto-generated from
# indexes.
header = []
rows = []
with open(self.path, "r+b") as infile:
lines = infile.readlines()
# iterate over lines
for line in lines:
# split on " "
tokens = line.split(" ")
# first token is the target(y/n , 1/0)
target = tokens[0]
# get last token (its form is indexN:value)
# and determine the length of features
last_index = tokens[len(tokens)-1].split(":")[0]
# length of features is last_index+1 (zero based)
# create a list of zeroes
row = [0] * (last_index+1)
# set value to row index according to current row
for i in (1, len(tokens)):
parts = tokens[i].split(":")
feat_index = parts[0]
feat_value = parts[1]
row[feat_index] = feat_value
# append target to the end of row
row.append(target)
# append row vector to rows
rows.append(row)
# create header
for i in (0, len(rows[0])-1):
header.append("f_"+str(i))
header.append("target")
return Dataset(header, rows)
def write(dataset):
# NOTE consider the last feature as the Target
# Get the size of features excluding the last one
# because it's the target.
features_with_target = len(dataset.header)
features_without_target = features_with_target-1
# open file to write
with open(self.path, "w+b") as outfile:
# iterate over row
for row in dataset.rows:
# create a string for the row
row_str = ""
# iterate over features
for i in range(0, features_without_target):
if row[i] != 0.0:
row_str += "{}:{} ".format(i, row[i])
# append target value to the beginning and rstrip
target_value = row[features_without_target]
row_str = ("{} ".format(target_value) + row_str).rstrip(" ")
# write row_str to file
outfile.write(row_str)
outfile.write(os.linesep)
|
<commit_before><commit_msg>Add support for svmlight format
This commit adds a Datasource that can read
a file in svmlight format and can write a
Dataset object in an svmlight format file.<commit_after>
|
from datasource import Datasource
from ..dataset import Dataset
import os
class SVMLightDatasource(Datasource):
def __init__(self, path):
super(SVMLightDatasource, self).__init__(path)
def read(self):
# NOTE: svmlight format does not include
# names for features, it only uses indexes.
# So headers will be auto-generated from
# indexes.
header = []
rows = []
with open(self.path, "r+b") as infile:
lines = infile.readlines()
# iterate over lines
for line in lines:
# split on " "
tokens = line.split(" ")
# first token is the target(y/n , 1/0)
target = tokens[0]
# get last token (its form is indexN:value)
# and determine the length of features
last_index = int(tokens[len(tokens)-1].split(":")[0])
# length of features is last_index+1 (zero based)
# create a list of zeroes
row = [0] * (last_index+1)
# set value to row index according to current row
for i in range(1, len(tokens)):
parts = tokens[i].split(":")
feat_index = int(parts[0])
feat_value = float(parts[1])
row[feat_index] = feat_value
# append target to the end of row
row.append(target)
# append row vector to rows
rows.append(row)
# create header
for i in range(0, len(rows[0])-1):
header.append("f_"+str(i))
header.append("target")
return Dataset(header, rows)
def write(self, dataset):
# NOTE consider the last feature as the Target
# Get the size of features excluding the last one
# because it's the target.
features_with_target = len(dataset.header)
features_without_target = features_with_target-1
# open file to write
with open(self.path, "w+b") as outfile:
# iterate over row
for row in dataset.rows:
# create a string for the row
row_str = ""
# iterate over features
for i in range(0, features_without_target):
if row[i] != 0.0:
row_str += "{}:{} ".format(i, row[i])
# append target value to the beginning and rstrip
target_value = row[features_without_target]
row_str = ("{} ".format(target_value) + row_str).rstrip(" ")
# write row_str to file
outfile.write(row_str)
outfile.write(os.linesep)
|
Add support for svmlight format
This commit adds a Datasource that can read
a file in svmlight format and can write a
Dataset object in an svmlight format file.from datasource import Datasource
from ..dataset import Dataset
import os
class SVMLightDatasource(Datasource):
def __init__(self, path):
super(SVMLightDatasource, self).__init__(path)
def read(self):
# NOTE: svmlight format does not include
# names for features, it only uses indexes.
# So headers will be auto-generated from
# indexes.
header = []
rows = []
with open(self.path, "r+b") as infile:
lines = infile.readlines()
# iterate over lines
for line in lines:
# split on " "
tokens = line.split(" ")
# first token is the target(y/n , 1/0)
target = tokens[0]
# get last token (its form is indexN:value)
# and determine the length of features
last_index = int(tokens[len(tokens)-1].split(":")[0])
# length of features is last_index+1 (zero based)
# create a list of zeroes
row = [0] * (last_index+1)
# set value to row index according to current row
for i in range(1, len(tokens)):
parts = tokens[i].split(":")
feat_index = int(parts[0])
feat_value = float(parts[1])
row[feat_index] = feat_value
# append target to the end of row
row.append(target)
# append row vector to rows
rows.append(row)
# create header
for i in range(0, len(rows[0])-1):
header.append("f_"+str(i))
header.append("target")
return Dataset(header, rows)
def write(self, dataset):
# NOTE consider the last feature as the Target
# Get the size of features excluding the last one
# because it's the target.
features_with_target = len(dataset.header)
features_without_target = features_with_target-1
# open file to write
with open(self.path, "w+b") as outfile:
# iterate over row
for row in dataset.rows:
# create a string for the row
row_str = ""
# iterate over features
for i in range(0, features_without_target):
if row[i] != 0.0:
row_str += "{}:{} ".format(i, row[i])
# append target value to the beginning and rstrip
target_value = row[features_without_target]
row_str = ("{} ".format(target_value) + row_str).rstrip(" ")
# write row_str to file
outfile.write(row_str)
outfile.write(os.linesep)
|
<commit_before><commit_msg>Add support for svmlight format
This commit adds a Datasource that can read
a file in svmlight format and can write a
Dataset object in an svmlight format file.<commit_after>from datasource import Datasource
from ..dataset import Dataset
import os
class SVMLightDatasource(Datasource):
def __init__(self, path):
super(SVMLightDatasource, self).__init__(path)
def read(self):
# NOTE: svmlight format does not include
# names for features, it only uses indexes.
# So headers will be auto-generated from
# indexes.
header = []
rows = []
with open(self.path, "r+b") as infile:
lines = infile.readlines()
# iterate over lines
for line in lines:
# split on " "
tokens = line.split(" ")
# first token is the target(y/n , 1/0)
target = tokens[0]
# get last token (its form is indexN:value)
# and determine the length of features
last_index = int(tokens[len(tokens)-1].split(":")[0])
# length of features is last_index+1 (zero based)
# create a list of zeroes
row = [0] * (last_index+1)
# set value to row index according to current row
for i in range(1, len(tokens)):
parts = tokens[i].split(":")
feat_index = int(parts[0])
feat_value = float(parts[1])
row[feat_index] = feat_value
# append target to the end of row
row.append(target)
# append row vector to rows
rows.append(row)
# create header
for i in range(0, len(rows[0])-1):
header.append("f_"+str(i))
header.append("target")
return Dataset(header, rows)
def write(self, dataset):
# NOTE consider the last feature as the Target
# Get the size of features excluding the last one
# because it's the target.
features_with_target = len(dataset.header)
features_without_target = features_with_target-1
# open file to write
with open(self.path, "w+b") as outfile:
# iterate over row
for row in dataset.rows:
# create a string for the row
row_str = ""
# iterate over features
for i in range(0, features_without_target):
if row[i] != 0.0:
row_str += "{}:{} ".format(i, row[i])
# append target value to the beginning and rstrip
target_value = row[features_without_target]
row_str = ("{} ".format(target_value) + row_str).rstrip(" ")
# write row_str to file
outfile.write(row_str)
outfile.write(os.linesep)
|
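For context on the svmlight format handled in the record above: each line is a target value followed by sparse index:value pairs, for example "1 0:0.5 3:2.0". The sketch below is a standalone, plain-Python illustration of how one such line expands into a dense row; parse_svmlight_line and the sample line are invented for illustration and are not part of the rsk-mind framework.

def parse_svmlight_line(line, n_features):
    # "1 0:0.5 3:2.0" -> ('1', [0.5, 0.0, 0.0, 2.0])
    tokens = line.strip().split(" ")
    target = tokens[0]
    row = [0.0] * n_features
    for token in tokens[1:]:
        index, value = token.split(":")
        row[int(index)] = float(value)
    return target, row

print(parse_svmlight_line("1 0:0.5 3:2.0", 4))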
|
f9140bf301c4ac3d159a8ff3383d8c0ab007de8c
|
search/management/commands/remove_index.py
|
search/management/commands/remove_index.py
|
from django.core.management import BaseCommand
from search.search_utils import ElasticAPI
class Command(BaseCommand):
def handle(self, *args, **kwargs):
api = ElasticAPI()
api.delete_index()
|
Add remove index management command
|
Add remove index management command
|
Python
|
mit
|
MasterFacilityList/mfl_api,MasterFacilityList/mfl_api,MasterFacilityList/mfl_api,MasterFacilityList/mfl_api,MasterFacilityList/mfl_api
|
Add remove index management command
|
from django.core.management import BaseCommand
from search.search_utils import ElasticAPI
class Command(BaseCommand):
def handle(self, *args, **kwargs):
api = ElasticAPI()
api.delete_index()
|
<commit_before><commit_msg>Add remove index management command<commit_after>
|
from django.core.management import BaseCommand
from search.search_utils import ElasticAPI
class Command(BaseCommand):
def handle(self, *args, **kwargs):
api = ElasticAPI()
api.delete_index()
|
Add remove index management commandfrom django.core.management import BaseCommand
from search.search_utils import ElasticAPI
class Command(BaseCommand):
def handle(self, *args, **kwargs):
api = ElasticAPI()
api.delete_index()
|
<commit_before><commit_msg>Add remove index management command<commit_after>from django.core.management import BaseCommand
from search.search_utils import ElasticAPI
class Command(BaseCommand):
def handle(self, *args, **kwargs):
api = ElasticAPI()
api.delete_index()
|
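A usage note for the command above: because the file lives under search/management/commands/, Django discovers it by its filename, so it would normally be run as "python manage.py remove_index" or invoked programmatically. A minimal sketch, assuming a configured DJANGO_SETTINGS_MODULE:

from django.core.management import call_command

# Equivalent to running "python manage.py remove_index" from the shell.
call_command("remove_index")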
|
4b6756bd8305190a5d1dc1d2e8e9a0b94d5baa40
|
tests/test_grid.py
|
tests/test_grid.py
|
import pytest
from aimaPy.grid import *
compare = lambda x, y: all([elm_x == y[i] for i, elm_x in enumerate(x)])
def test_distance():
assert distance((1, 2), (5, 5)) == 5.0
def test_distance_squared():
assert distance_squared((1, 2), (5, 5)) == 25.0
def test_clip():
list_ = [clip(x, 0, 1) for x in [-1, 0.5, 10]]
res = [0, 0.5, 1]
assert compare(list_, res)
def test_vector_clip():
assert vector_clip((-1, 10), (0, 0), (9, 9)) == (0, 9)
if __name__ == '__main__':
pytest.main()
|
import pytest
from aimaPy.grid import *
compare_list = lambda x, y: all([elm_x == y[i] for i, elm_x in enumerate(x)])
def test_distance():
assert distance((1, 2), (5, 5)) == 5.0
def test_distance_squared():
assert distance_squared((1, 2), (5, 5)) == 25.0
def test_clip():
list_ = [clip(x, 0, 1) for x in [-1, 0.5, 10]]
res = [0, 0.5, 1]
assert compare_list(list_, res)
def test_vector_clip():
assert vector_clip((-1, 10), (0, 0), (9, 9)) == (0, 9)
if __name__ == '__main__':
pytest.main()
|
Change name of compare function in test grid
|
Change name of compare function in test grid
|
Python
|
mit
|
phaller0513/aima-python,AWPorter/aima-python,grantvk/aima-python,SeanCameronConklin/aima-python,SeanCameronConklin/aima-python,SnShine/aima-python,AWPorter/aima-python,chandlercr/aima-python,NolanBecker/aima-python,jottenlips/aima-python,WmHHooper/aima-python,grantvk/aima-python,AmberJBlue/aima-python,jottenlips/aima-python,Chipe1/aima-python,willhess/aima-python,aimacode/aima-python,abbeymiles/aima-python,JoeLaMartina/aima-python,abbeymiles/aima-python,SimeonFritz/aima-python,armadill-odyssey/aima-python,reachtarunhere/aima-python,reachtarunhere/aima-python,SimeonFritz/aima-python,NolanBecker/aima-python,abbeymiles/aima-python,MircoT/aima-python,JoeLaMartina/aima-python,JamesDickenson/aima-python,Fruit-Snacks/aima-python,WmHHooper/aima-python,SnShine/aima-python,AmberJBlue/aima-python,JoeLaMartina/AlphametricProject,austinban/aima-python,chandlercr/aima-python,WhittKinley/aima-python,WhittKinley/ConnectProject,JamesDickenson/aima-python,chandlercr/aima-python,WmHHooper/aima-python,armadill-odyssey/aima-python,SimeonFritz/aima-python,armadill-odyssey/aima-python,Chipe1/aima-python,zayneanderson/aima-python,aimacode/aima-python,austinban/aima-python,WhittKinley/aima-python,WmHHooper/aima-python,jottenlips/aima-python,sofmonk/aima-python,phaller0513/aima-python,jo-tez/aima-python,zayneanderson/aima-python,Fruit-Snacks/aima-python,JoeLaMartina/aima-python,willhess/aima-python,sofmonk/aima-python,WhittKinley/aima-python,JamesDickenson/aima-python,JoeLaMartina/AlphametricProject,SeanCameronConklin/aima-python,phaller0513/aima-python,zayneanderson/aima-python,austinban/aima-python,WhittKinley/ConnectProject,JoeLaMartina/AlphametricProject,Fruit-Snacks/aima-python,AWPorter/aima-python,grantvk/aima-python,jo-tez/aima-python,willhess/aima-python,NolanBecker/aima-python,AmberJBlue/aima-python,WhittKinley/ConnectProject
|
import pytest
from aimaPy.grid import *
compare = lambda x, y: all([elm_x == y[i] for i, elm_x in enumerate(x)])
def test_distance():
assert distance((1, 2), (5, 5)) == 5.0
def test_distance_squared():
assert distance_squared((1, 2), (5, 5)) == 25.0
def test_clip():
list_ = [clip(x, 0, 1) for x in [-1, 0.5, 10]]
res = [0, 0.5, 1]
assert compare(list_, res)
def test_vector_clip():
assert vector_clip((-1, 10), (0, 0), (9, 9)) == (0, 9)
if __name__ == '__main__':
pytest.main()
Change name of compare function in test grid
|
import pytest
from aimaPy.grid import *
compare_list = lambda x, y: all([elm_x == y[i] for i, elm_x in enumerate(x)])
def test_distance():
assert distance((1, 2), (5, 5)) == 5.0
def test_distance_squared():
assert distance_squared((1, 2), (5, 5)) == 25.0
def test_clip():
list_ = [clip(x, 0, 1) for x in [-1, 0.5, 10]]
res = [0, 0.5, 1]
assert compare_list(list_, res)
def test_vector_clip():
assert vector_clip((-1, 10), (0, 0), (9, 9)) == (0, 9)
if __name__ == '__main__':
pytest.main()
|
<commit_before>import pytest
from aimaPy.grid import *
compare = lambda x, y: all([elm_x == y[i] for i, elm_x in enumerate(x)])
def test_distance():
assert distance((1, 2), (5, 5)) == 5.0
def test_distance_squared():
assert distance_squared((1, 2), (5, 5)) == 25.0
def test_clip():
list_ = [clip(x, 0, 1) for x in [-1, 0.5, 10]]
res = [0, 0.5, 1]
assert compare(list_, res)
def test_vector_clip():
assert vector_clip((-1, 10), (0, 0), (9, 9)) == (0, 9)
if __name__ == '__main__':
pytest.main()
<commit_msg>Change name of compare function in test grid<commit_after>
|
import pytest
from aimaPy.grid import *
compare_list = lambda x, y: all([elm_x == y[i] for i, elm_x in enumerate(x)])
def test_distance():
assert distance((1, 2), (5, 5)) == 5.0
def test_distance_squared():
assert distance_squared((1, 2), (5, 5)) == 25.0
def test_clip():
list_ = [clip(x, 0, 1) for x in [-1, 0.5, 10]]
res = [0, 0.5, 1]
assert compare_list(list_, res)
def test_vector_clip():
assert vector_clip((-1, 10), (0, 0), (9, 9)) == (0, 9)
if __name__ == '__main__':
pytest.main()
|
import pytest
from aimaPy.grid import *
compare = lambda x, y: all([elm_x == y[i] for i, elm_x in enumerate(x)])
def test_distance():
assert distance((1, 2), (5, 5)) == 5.0
def test_distance_squared():
assert distance_squared((1, 2), (5, 5)) == 25.0
def test_clip():
list_ = [clip(x, 0, 1) for x in [-1, 0.5, 10]]
res = [0, 0.5, 1]
assert compare(list_, res)
def test_vector_clip():
assert vector_clip((-1, 10), (0, 0), (9, 9)) == (0, 9)
if __name__ == '__main__':
pytest.main()
Change name of compare function in test gridimport pytest
from aimaPy.grid import *
compare_list = lambda x, y: all([elm_x == y[i] for i, elm_x in enumerate(x)])
def test_distance():
assert distance((1, 2), (5, 5)) == 5.0
def test_distance_squared():
assert distance_squared((1, 2), (5, 5)) == 25.0
def test_clip():
list_ = [clip(x, 0, 1) for x in [-1, 0.5, 10]]
res = [0, 0.5, 1]
assert compare_list(list_, res)
def test_vector_clip():
assert vector_clip((-1, 10), (0, 0), (9, 9)) == (0, 9)
if __name__ == '__main__':
pytest.main()
|
<commit_before>import pytest
from aimaPy.grid import *
compare = lambda x, y: all([elm_x == y[i] for i, elm_x in enumerate(x)])
def test_distance():
assert distance((1, 2), (5, 5)) == 5.0
def test_distance_squared():
assert distance_squared((1, 2), (5, 5)) == 25.0
def test_clip():
list_ = [clip(x, 0, 1) for x in [-1, 0.5, 10]]
res = [0, 0.5, 1]
assert compare(list_, res)
def test_vector_clip():
assert vector_clip((-1, 10), (0, 0), (9, 9)) == (0, 9)
if __name__ == '__main__':
pytest.main()
<commit_msg>Change name of compare function in test grid<commit_after>import pytest
from aimaPy.grid import *
compare_list = lambda x, y: all([elm_x == y[i] for i, elm_x in enumerate(x)])
def test_distance():
assert distance((1, 2), (5, 5)) == 5.0
def test_distance_squared():
assert distance_squared((1, 2), (5, 5)) == 25.0
def test_clip():
list_ = [clip(x, 0, 1) for x in [-1, 0.5, 10]]
res = [0, 0.5, 1]
assert compare_list(list_, res)
def test_vector_clip():
assert vector_clip((-1, 10), (0, 0), (9, 9)) == (0, 9)
if __name__ == '__main__':
pytest.main()
|
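As a side note on the helper renamed above: for equal-length sequences the element-wise lambda agrees with Python's built-in list equality, which is why none of the test bodies need to change. A quick standalone check:

compare_list = lambda x, y: all([elm_x == y[i] for i, elm_x in enumerate(x)])

print(compare_list([0, 0.5, 1], [0, 0.5, 1]))  # True
print(compare_list([0, 0.5, 1], [0, 0.5, 2]))  # False
print([0, 0.5, 1] == [0, 0.5, 1])              # True - built-in equivalent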
13a92d18816de9aea90094422788532da05fc475
|
tests/test_list.py
|
tests/test_list.py
|
""" Test list packing and unpacking. """
import xcffib
import struct
class TestList(object):
def test_struct_pack_uses_List(self):
# suppose we have a list of ints...
ints = struct.pack("=IIII", *range(4))
# Unpacker wants a cffi.cdata
cffi_ints = xcffib.bytes_to_cdata(ints)
l = xcffib.List(xcffib.Unpacker(cffi_ints), "I", count=4)
ints2 = struct.pack("=IIII", *l)
# after packing and unpacking, we should still have those ints
assert ints == ints2
|
Test that list packing is idempotent
|
Test that list packing is idempotent
|
Python
|
apache-2.0
|
tych0/xcffib
|
Test that list packing is idempotent
|
""" Test list packing and unpacking. """
import xcffib
import struct
class TestList(object):
def test_struct_pack_uses_List(self):
# suppose we have a list of ints...
ints = struct.pack("=IIII", *range(4))
# Unpacker wants a cffi.cdata
cffi_ints = xcffib.bytes_to_cdata(ints)
l = xcffib.List(xcffib.Unpacker(cffi_ints), "I", count=4)
ints2 = struct.pack("=IIII", *l)
# after packing and unpacking, we should still have those ints
assert ints == ints2
|
<commit_before><commit_msg>Test that list packing is idempotent<commit_after>
|
""" Test list packing and unpacking. """
import xcffib
import struct
class TestList(object):
def test_struct_pack_uses_List(self):
# suppose we have a list of ints...
ints = struct.pack("=IIII", *range(4))
# Unpacker wants a cffi.cdata
cffi_ints = xcffib.bytes_to_cdata(ints)
l = xcffib.List(xcffib.Unpacker(cffi_ints), "I", count=4)
ints2 = struct.pack("=IIII", *l)
# after packing and unpacking, we should still have those ints
assert ints == ints2
|
Test that list packing is idempotent""" Test list packing and unpacking. """
import xcffib
import struct
class TestList(object):
def test_struct_pack_uses_List(self):
# suppose we have a list of ints...
ints = struct.pack("=IIII", *range(4))
# Unpacker wants a cffi.cdata
cffi_ints = xcffib.bytes_to_cdata(ints)
l = xcffib.List(xcffib.Unpacker(cffi_ints), "I", count=4)
ints2 = struct.pack("=IIII", *l)
# after packing and unpacking, we should still have those ints
assert ints == ints2
|
<commit_before><commit_msg>Test that list packing is idempotent<commit_after>""" Test list packing and unpacking. """
import xcffib
import struct
class TestList(object):
def test_struct_pack_uses_List(self):
# suppose we have a list of ints...
ints = struct.pack("=IIII", *range(4))
# Unpacker wants a cffi.cdata
cffi_ints = xcffib.bytes_to_cdata(ints)
l = xcffib.List(xcffib.Unpacker(cffi_ints), "I", count=4)
ints2 = struct.pack("=IIII", *l)
# after packing and unpacking, we should still have those ints
assert ints == ints2
|
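The idempotence exercised by the test above reduces to a pack/unpack round trip. A self-contained illustration using only the standard library (no xcffib), showing that four packed uint32 values survive the round trip unchanged:

import struct

ints = struct.pack("=IIII", *range(4))    # 16 bytes: native-order uint32s 0..3
values = struct.unpack("=IIII", ints)     # (0, 1, 2, 3)
assert struct.pack("=IIII", *values) == ints
print(values)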
|
642858629b118789ce6cd175bed0b19569cd0152
|
linguist/managers.py
|
linguist/managers.py
|
# -*- coding: utf-8 -*-
import functools
from django.db import models
from django.db.models.query import QuerySet
from .models import Translation
from .mixins import LinguistMixin
from .utils.i18n import get_cache_key
def get_value_as_list(value):
"""
Ensure the given returned value is a list.
"""
if not isinstance(value, (list, tuple)):
value = [value]
return value
def set_instance_cache(instance, translation):
"""
Sets Linguist cache for the given instance with data from the given translation.
"""
cache_key = get_cache_key(**dict(
identifier=instance._linguist.identifier,
object_id=instance.pk,
language=translation.language,
field_name=translation.field_name))
if cache_key not in instance._linguist:
instance._linguist[cache_key] = translation
return instance
def validate_instance(instance):
"""
Validates the given instance.
"""
if not isinstance(instance, LinguistMixin):
raise Exception('%s must be an instance of LinguistMixin' % instance)
return True
def get_translation_lookups(instance, fields=None, languages=None):
"""
Returns a dict to pass to Translation.objects.filter().
"""
lookups = dict(identifier=instance.identifier, object_id=instance.pk)
if fields is not None:
lookups['field_name__in'] = fields
if languages is not None:
lookups['language__in'] = languages
return lookups
class LinguistManager(models.Manager):
"""
Linguist Manager.
"""
def with_translations(self, instances, fields=None, languages=None):
"""
Prefetches translations for the given model instances.
"""
instances = get_value_as_list(instances)
for instance in instances:
validate_instance(instance)
lookups = get_translation_lookups(instance, fields, languages)
translations = Translation.objects.filter(**lookups)
for translation in translations:
set_instance_cache(instance, translation)
|
Add LinguistManager with with_translations() method.
|
Add LinguistManager with with_translations() method.
|
Python
|
mit
|
ulule/django-linguist
|
Add LinguistManager with with_translations() method.
|
# -*- coding: utf-8 -*-
import functools
from django.db import models
from django.db.models.query import QuerySet
from .models import Translation
from .mixins import LinguistMixin
from .utils.i18n import get_cache_key
def get_value_as_list(value):
"""
Ensure the given returned value is a list.
"""
if not isinstance(value, (list, tuple)):
value = [value]
return value
def set_instance_cache(instance, translation):
"""
Sets Linguist cache for the given instance with data from the given translation.
"""
cache_key = get_cache_key(**dict(
identifier=instance._linguist.identifier,
object_id=instance.pk,
language=translation.language,
field_name=translation.field_name))
if cache_key not in instance._linguist:
instance._linguist[cache_key] = translation
return instance
def validate_instance(instance):
"""
Validates the given instance.
"""
if not isinstance(instance, LinguistMixin):
raise Exception('%s must be an instance of LinguistMixin' % instance)
return True
def get_translation_lookups(instance, fields=None, languages=None):
"""
Returns a dict to pass to Translation.objects.filter().
"""
lookups = dict(identifier=instance.identifier, object_id=instance.pk)
if fields is not None:
lookups['field_name__in'] = fields
if languages is not None:
lookups['language__in'] = languages
return lookups
class LinguistManager(models.Manager):
"""
Linguist Manager.
"""
def with_translations(self, instances, fields=None, languages=None):
"""
Prefetches translations for the given model instances.
"""
instances = get_value_as_list(instances)
for instance in instances:
validate_instance(instance)
lookups = get_translation_lookups(instance, fields, languages)
translations = Translation.objects.filter(**lookups)
for translation in translations:
set_instance_cache(instance, translation)
|
<commit_before><commit_msg>Add LinguistManager with with_translations() method.<commit_after>
|
# -*- coding: utf-8 -*-
import functools
from django.db import models
from django.db.models.query import QuerySet
from .models import Translation
from .mixins import LinguistMixin
from .utils.i18n import get_cache_key
def get_value_as_list(value):
"""
Ensure the given returned value is a list.
"""
if not isinstance(value, (list, tuple)):
value = [value]
return value
def set_instance_cache(instance, translation):
"""
Sets Linguist cache for the given instance with data from the given translation.
"""
cache_key = get_cache_key(**dict(
identifier=instance._linguist.identifier,
object_id=instance.pk,
language=translation.language,
field_name=translation.field_name))
if cache_key not in instance._linguist:
instance._linguist[cache_key] = translation
return instance
def validate_instance(instance):
"""
Validates the given instance.
"""
if not isinstance(instance, LinguistMixin):
raise Exception('%s must be an instance of LinguistMixin' % instance)
return True
def get_translation_lookups(instance, fields=None, languages=None):
"""
Returns a dict to pass to Translation.objects.filter().
"""
lookups = dict(identifier=instance.identifier, object_id=instance.pk)
if fields is not None:
lookups['field_name__in'] = fields
if languages is not None:
lookups['language__in'] = languages
return lookups
class LinguistManager(models.Manager):
"""
Linguist Manager.
"""
def with_translations(self, instances, fields=None, languages=None):
"""
Prefetches translations for the given model instances.
"""
instances = get_value_as_list(instances)
for instance in instances:
validate_instance(instance)
lookups = get_translation_lookups(instance, fields, languages)
translations = Translation.objects.filter(**lookups)
for translation in translations:
set_instance_cache(instance, translation)
|
Add LinguistManager with with_translations() method.# -*- coding: utf-8 -*-
import functools
from django.db import models
from django.db.models.query import QuerySet
from .models import Translation
from .mixins import LinguistMixin
from .utils.i18n import get_cache_key
def get_value_as_list(value):
"""
Ensure the given returned value is a list.
"""
if not isinstance(value, (list, tuple)):
value = [value]
return value
def set_instance_cache(instance, translation):
"""
Sets Linguist cache for the given instance with data from the given translation.
"""
cache_key = get_cache_key(**dict(
identifier=instance._linguist.identifier,
object_id=instance.pk,
language=translation.language,
field_name=translation.field_name))
if cache_key not in instance._linguist:
instance._linguist[cache_key] = translation
return instance
def validate_instance(instance):
"""
Validates the given instance.
"""
if not isinstance(instance, LinguistMixin):
raise Exception('%s must be an instance of LinguistMixin' % instance)
return True
def get_translation_lookups(instance, fields=None, languages=None):
"""
Returns a dict to pass to Translation.objects.filter().
"""
lookups = dict(identifier=instance.identifier, object_id=instance.pk)
if fields is not None:
lookups['field_name__in'] = fields
if languages is not None:
lookups['language__in'] = languages
return lookups
class LinguistManager(models.Manager):
"""
Linguist Manager.
"""
def with_translations(self, instances, fields=None, languages=None):
"""
Prefetches translations for the given model instances.
"""
instances = get_value_as_list(instances)
for instance in instances:
validate_instance(instance)
lookups = get_translation_lookups(instance, fields, languages)
translations = Translation.objects.filter(**lookups)
for translation in translations:
set_instance_cache(instance, translation)
|
<commit_before><commit_msg>Add LinguistManager with with_translations() method.<commit_after># -*- coding: utf-8 -*-
import functools
from django.db import models
from django.db.models.query import QuerySet
from .models import Translation
from .mixins import LinguistMixin
from .utils.i18n import get_cache_key
def get_value_as_list(value):
"""
Ensure the given returned value is a list.
"""
if not isinstance(value, (list, tuple)):
value = [value]
return value
def set_instance_cache(instance, translation):
"""
Sets Linguist cache for the given instance with data from the given translation.
"""
cache_key = get_cache_key(**dict(
identifier=instance._linguist.identifier,
object_id=instance.pk,
language=translation.language,
field_name=translation.field_name))
if cache_key not in instance._linguist:
instance._linguist[cache_key] = translation
return instance
def validate_instance(instance):
"""
Validates the given instance.
"""
if not isinstance(instance, LinguistMixin):
raise Exception('%s must be an instance of LinguistMixin' % instance)
return True
def get_translation_lookups(instance, fields=None, languages=None):
"""
Returns a dict to pass to Translation.objects.filter().
"""
lookups = dict(identifier=instance.identifier, object_id=instance.pk)
if fields is not None:
lookups['field_name__in'] = fields
if languages is not None:
lookups['language__in'] = languages
return lookups
class LinguistManager(models.Manager):
"""
Linguist Manager.
"""
def with_translations(self, instances, fields=None, languages=None):
"""
Prefetches translations for the given model instances.
"""
instances = get_value_as_list(instances)
for instance in instances:
validate_instance(instance)
lookups = get_translation_lookups(instance, fields, languages)
translations = Translation.objects.filter(**lookups)
for translation in translations:
set_instance_cache(instance, translation)
|
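To make the manager above a little more concrete, the sketch below shows the lookup dict that with_translations() hands to Translation.objects.filter() for one instance. FakePost is a stand-in object invented for illustration, and the helper is repeated verbatim so the snippet runs on its own:

class FakePost(object):
    identifier = 'post'
    pk = 42

def get_translation_lookups(instance, fields=None, languages=None):
    lookups = dict(identifier=instance.identifier, object_id=instance.pk)
    if fields is not None:
        lookups['field_name__in'] = fields
    if languages is not None:
        lookups['language__in'] = languages
    return lookups

print(get_translation_lookups(FakePost(), fields=['title'], languages=['fr']))
# {'identifier': 'post', 'object_id': 42, 'field_name__in': ['title'], 'language__in': ['fr']}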
|
5f7a694c72821110091d6aff5ee854681137bdcc
|
tests/testuser.py
|
tests/testuser.py
|
import unittest
from steam import user
class ProfileTestCase(unittest.TestCase):
VALID_ID64 = 76561198014028523
INVALID_ID64 = 123
# This is weird but there should be no reason that it's invalid
# So Valve, if you see this, be gewd guys and make 33 bit (condensed)
# IDs work properly. Or at least put a more appropriate error. Currently
# It's impossible to distinguish between this and a bad ID (all are code 8)
WEIRD_ID64 = (VALID_ID64 >> 33 << 33) ^ VALID_ID64
class VanityTestCase(unittest.TestCase):
VALID_VANITY = "stragglerastic"
INVALID_VANITY = "*F*SDF9"
def test_invalid_vanity(self):
vanity = user.vanity_url(self.INVALID_VANITY)
self.assertRaises(user.VanityError, lambda: vanity.id64)
def test_pathed_vanity(self):
vanity = user.vanity_url('/' + self.VALID_VANITY + '/')
self.assertEqual(vanity.id64, ProfileTestCase.VALID_ID64)
def test_valid_vanity(self):
vanity = user.vanity_url(self.VALID_VANITY)
self.assertEqual(vanity.id64, ProfileTestCase.VALID_ID64)
class ProfileIdTestCase(ProfileTestCase):
def test_invalid_id(self):
profile = user.profile(self.INVALID_ID64)
self.assertRaises(user.ProfileNotFoundError, lambda: profile.id64)
def test_pathed_id(self):
profile = user.profile('/' + str(self.VALID_ID64) + '/')
self.assertEqual(profile.id64, self.VALID_ID64)
def test_valid_id(self):
profile = user.profile(self.VALID_ID64)
self.assertEqual(profile.id64, self.VALID_ID64)
def test_weird_id(self):
profile = user.profile(self.WEIRD_ID64)
self.assertRaises(user.ProfileNotFoundError, lambda: profile.id64)
|
Add initial steam.user test fixtures
|
Add initial steam.user test fixtures
|
Python
|
isc
|
miedzinski/steamodd,Lagg/steamodd
|
Add initial steam.user test fixtures
|
import unittest
from steam import user
class ProfileTestCase(unittest.TestCase):
VALID_ID64 = 76561198014028523
INVALID_ID64 = 123
# This is weird but there should be no reason that it's invalid
# So Valve, if you see this, be gewd guys and make 33 bit (condensed)
# IDs work properly. Or at least put a more appropriate error. Currently
# It's impossible to distinguish between this and a bad ID (all are code 8)
WEIRD_ID64 = (VALID_ID64 >> 33 << 33) ^ VALID_ID64
class VanityTestCase(unittest.TestCase):
VALID_VANITY = "stragglerastic"
INVALID_VANITY = "*F*SDF9"
def test_invalid_vanity(self):
vanity = user.vanity_url(self.INVALID_VANITY)
self.assertRaises(user.VanityError, lambda: vanity.id64)
def test_pathed_vanity(self):
vanity = user.vanity_url('/' + self.VALID_VANITY + '/')
self.assertEqual(vanity.id64, ProfileTestCase.VALID_ID64)
def test_valid_vanity(self):
vanity = user.vanity_url(self.VALID_VANITY)
self.assertEqual(vanity.id64, ProfileTestCase.VALID_ID64)
class ProfileIdTestCase(ProfileTestCase):
def test_invalid_id(self):
profile = user.profile(self.INVALID_ID64)
self.assertRaises(user.ProfileNotFoundError, lambda: profile.id64)
def test_pathed_id(self):
profile = user.profile('/' + str(self.VALID_ID64) + '/')
self.assertEqual(profile.id64, self.VALID_ID64)
def test_valid_id(self):
profile = user.profile(self.VALID_ID64)
self.assertEqual(profile.id64, self.VALID_ID64)
def test_weird_id(self):
profile = user.profile(self.WEIRD_ID64)
self.assertRaises(user.ProfileNotFoundError, lambda: profile.id64)
|
<commit_before><commit_msg>Add initial steam.user test fixtures<commit_after>
|
import unittest
from steam import user
class ProfileTestCase(unittest.TestCase):
VALID_ID64 = 76561198014028523
INVALID_ID64 = 123
# This is weird but there should be no reason that it's invalid
# So Valve, if you see this, be gewd guys and make 33 bit (condensed)
# IDs work properly. Or at least put a more appropriate error. Currently
# It's impossible to distinguish between this and a bad ID (all are code 8)
WEIRD_ID64 = (VALID_ID64 >> 33 << 33) ^ VALID_ID64
class VanityTestCase(unittest.TestCase):
VALID_VANITY = "stragglerastic"
INVALID_VANITY = "*F*SDF9"
def test_invalid_vanity(self):
vanity = user.vanity_url(self.INVALID_VANITY)
self.assertRaises(user.VanityError, lambda: vanity.id64)
def test_pathed_vanity(self):
vanity = user.vanity_url('/' + self.VALID_VANITY + '/')
self.assertEqual(vanity.id64, ProfileTestCase.VALID_ID64)
def test_valid_vanity(self):
vanity = user.vanity_url(self.VALID_VANITY)
self.assertEqual(vanity.id64, ProfileTestCase.VALID_ID64)
class ProfileIdTestCase(ProfileTestCase):
def test_invalid_id(self):
profile = user.profile(self.INVALID_ID64)
self.assertRaises(user.ProfileNotFoundError, lambda: profile.id64)
def test_pathed_id(self):
profile = user.profile('/' + str(self.VALID_ID64) + '/')
self.assertEqual(profile.id64, self.VALID_ID64)
def test_valid_id(self):
profile = user.profile(self.VALID_ID64)
self.assertEqual(profile.id64, self.VALID_ID64)
def test_weird_id(self):
profile = user.profile(self.WEIRD_ID64)
self.assertRaises(user.ProfileNotFoundError, lambda: profile.id64)
|
Add initial steam.user test fixturesimport unittest
from steam import user
class ProfileTestCase(unittest.TestCase):
VALID_ID64 = 76561198014028523
INVALID_ID64 = 123
# This is weird but there should be no reason that it's invalid
# So Valve, if you see this, be gewd guys and make 33 bit (condensed)
# IDs work properly. Or at least put a more appropriate error. Currently
# It's impossible to distinguish between this and a bad ID (all are code 8)
WEIRD_ID64 = (VALID_ID64 >> 33 << 33) ^ VALID_ID64
class VanityTestCase(unittest.TestCase):
VALID_VANITY = "stragglerastic"
INVALID_VANITY = "*F*SDF9"
def test_invalid_vanity(self):
vanity = user.vanity_url(self.INVALID_VANITY)
self.assertRaises(user.VanityError, lambda: vanity.id64)
def test_pathed_vanity(self):
vanity = user.vanity_url('/' + self.VALID_VANITY + '/')
self.assertEqual(vanity.id64, ProfileTestCase.VALID_ID64)
def test_valid_vanity(self):
vanity = user.vanity_url(self.VALID_VANITY)
self.assertEqual(vanity.id64, ProfileTestCase.VALID_ID64)
class ProfileIdTestCase(ProfileTestCase):
def test_invalid_id(self):
profile = user.profile(self.INVALID_ID64)
self.assertRaises(user.ProfileNotFoundError, lambda: profile.id64)
def test_pathed_id(self):
profile = user.profile('/' + str(self.VALID_ID64) + '/')
self.assertEqual(profile.id64, self.VALID_ID64)
def test_valid_id(self):
profile = user.profile(self.VALID_ID64)
self.assertEqual(profile.id64, self.VALID_ID64)
def test_weird_id(self):
profile = user.profile(self.WEIRD_ID64)
self.assertRaises(user.ProfileNotFoundError, lambda: profile.id64)
|
<commit_before><commit_msg>Add initial steam.user test fixtures<commit_after>import unittest
from steam import user
class ProfileTestCase(unittest.TestCase):
VALID_ID64 = 76561198014028523
INVALID_ID64 = 123
# This is weird but there should be no reason that it's invalid
# So Valve, if you see this, be gewd guys and make 33 bit (condensed)
# IDs work properly. Or at least put a more appropriate error. Currently
# It's impossible to distinguish between this and a bad ID (all are code 8)
WEIRD_ID64 = (VALID_ID64 >> 33 << 33) ^ VALID_ID64
class VanityTestCase(unittest.TestCase):
VALID_VANITY = "stragglerastic"
INVALID_VANITY = "*F*SDF9"
def test_invalid_vanity(self):
vanity = user.vanity_url(self.INVALID_VANITY)
self.assertRaises(user.VanityError, lambda: vanity.id64)
def test_pathed_vanity(self):
vanity = user.vanity_url('/' + self.VALID_VANITY + '/')
self.assertEqual(vanity.id64, ProfileTestCase.VALID_ID64)
def test_valid_vanity(self):
vanity = user.vanity_url(self.VALID_VANITY)
self.assertEqual(vanity.id64, ProfileTestCase.VALID_ID64)
class ProfileIdTestCase(ProfileTestCase):
def test_invalid_id(self):
profile = user.profile(self.INVALID_ID64)
self.assertRaises(user.ProfileNotFoundError, lambda: profile.id64)
def test_pathed_id(self):
profile = user.profile('/' + str(self.VALID_ID64) + '/')
self.assertEqual(profile.id64, self.VALID_ID64)
def test_valid_id(self):
profile = user.profile(self.VALID_ID64)
self.assertEqual(profile.id64, self.VALID_ID64)
def test_weird_id(self):
profile = user.profile(self.WEIRD_ID64)
self.assertRaises(user.ProfileNotFoundError, lambda: profile.id64)
|
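The WEIRD_ID64 expression above is easier to follow with a worked example: shifting right and then left by 33 clears the low 33 bits, so XOR-ing the result with the original keeps only those low 33 bits (what the original comment calls the condensed ID). A standalone check:

VALID_ID64 = 76561198014028523
high_bits = VALID_ID64 >> 33 << 33       # upper bits only, low 33 bits zeroed
WEIRD_ID64 = high_bits ^ VALID_ID64      # what remains: the low 33 bits
assert WEIRD_ID64 == VALID_ID64 & ((1 << 33) - 1)
print(WEIRD_ID64)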
|
3750cc97ac69c160f908b9e47b52ed831c8d9170
|
ka_find_missing_descs.py
|
ka_find_missing_descs.py
|
#!/usr/bin/env python3
from kapi import *
import utils
import argparse, sys
import time
import json
def read_cmd():
"""Reading command line options."""
desc = "Program for finding KA content without descriptions."
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('-s','--subject', dest='subject', default = 'root', help = 'Link given course.')
parser.add_argument('-c','--content', dest='content', required = True, help = 'Content kind: video|exercise')
return parser.parse_args()
# Currently, article type does not seem to work.
CONTENT_TYPES = ['video', 'exercise']
def print_slugs_without_descriptions(content):
unique_content_ids = set()
for c in content:
if c['id'] in unique_content_ids:
continue
else:
unique_content_ids.add(c['id'])
if c['translated_description'] is None:
#print(c['node_slug'], 'WARNING: Empty description, returning None')
print(c['node_slug'])
elif not c['translated_description']:
#print(c['node_slug'], 'WARNING: Empty description!')
print(c['node_slug'])
if __name__ == '__main__':
opts = read_cmd()
topic = opts.subject
content_type = opts.content.lower()
if content_type not in CONTENT_TYPES:
print("ERROR: content argument: ", opts.content)
print("Possibilities are: ", CONTENT_TYPES)
exit(1)
khan_tree = KhanContentTree('en', content_type)
tree = khan_tree.get()
if topic != 'root':
subtree = find_ka_topic(tree, topic)
else:
subtree = tree
if not subtree:
print("ERROR: Could not find subtree for topic: %s\n" % (topic))
sys.exit(1)
content = []
kapi_tree_get_content_items(subtree, content, content_type)
print_slugs_without_descriptions(content)
|
Add script for finding missing descriptions in KA content
|
Add script for finding missing descriptions in KA content
|
Python
|
mit
|
danielhollas/AmaraUpload,danielhollas/AmaraUpload
|
Add script for finding missing descriptions in KA content
|
#!/usr/bin/env python3
from kapi import *
import utils
import argparse, sys
import time
import json
def read_cmd():
"""Reading command line options."""
desc = "Program for finding KA content without descriptions."
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('-s','--subject', dest='subject', default = 'root', help = 'Link given course.')
parser.add_argument('-c','--content', dest='content', required = True, help = 'Content kind: video|exercise')
return parser.parse_args()
# Currently, article type does not seem to work.
CONTENT_TYPES = ['video', 'exercise']
def print_slugs_without_descriptions(content):
unique_content_ids = set()
for c in content:
if c['id'] in unique_content_ids:
continue
else:
unique_content_ids.add(c['id'])
if c['translated_description'] is None:
#print(c['node_slug'], 'WARNING: Empty description, returning None')
print(c['node_slug'])
elif not c['translated_description']:
#print(c['node_slug'], 'WARNING: Empty description!')
print(c['node_slug'])
if __name__ == '__main__':
opts = read_cmd()
topic = opts.subject
content_type = opts.content.lower()
if content_type not in CONTENT_TYPES:
print("ERROR: content argument: ", opts.content)
print("Possibilities are: ", CONTENT_TYPES)
exit(1)
khan_tree = KhanContentTree('en', content_type)
tree = khan_tree.get()
if topic != 'root':
subtree = find_ka_topic(tree, topic)
else:
subtree = tree
if not subtree:
print("ERROR: Could not find subtree for topic: %s\n" % (topic))
sys.exit(1)
content = []
kapi_tree_get_content_items(subtree, content, content_type)
print_slugs_without_descriptions(content)
|
<commit_before><commit_msg>Add script for finding missing descriptions in KA content<commit_after>
|
#!/usr/bin/env python3
from kapi import *
import utils
import argparse, sys
import time
import json
def read_cmd():
"""Reading command line options."""
desc = "Program for finding KA content without descriptions."
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('-s','--subject', dest='subject', default = 'root', help = 'Link given course.')
parser.add_argument('-c','--content', dest='content', required = True, help = 'Content kind: video|exercise')
return parser.parse_args()
# Currently, article type does not seem to work.
CONTENT_TYPES = ['video', 'exercise']
def print_slugs_without_descriptions(content):
unique_content_ids = set()
for c in content:
if c['id'] in unique_content_ids:
continue
else:
unique_content_ids.add(c['id'])
if c['translated_description'] is None:
#print(c['node_slug'], 'WARNING: Empty description, returning None')
print(c['node_slug'])
elif not c['translated_description']:
#print(c['node_slug'], 'WARNING: Empty description!')
print(c['node_slug'])
if __name__ == '__main__':
opts = read_cmd()
topic = opts.subject
content_type = opts.content.lower()
if content_type not in CONTENT_TYPES:
print("ERROR: content argument: ", opts.content)
print("Possibilities are: ", CONTENT_TYPES)
exit(1)
khan_tree = KhanContentTree('en', content_type)
tree = khan_tree.get()
if topic != 'root':
subtree = find_ka_topic(tree, topic)
else:
subtree = tree
if not subtree:
print("ERROR: Could not find subtree for topic: %s\n" % (topic))
sys.exit(1)
content = []
kapi_tree_get_content_items(subtree, content, content_type)
print_slugs_without_descriptions(content)
|
Add script for finding missing descriptions in KA content#!/usr/bin/env python3
from kapi import *
import utils
import argparse, sys
import time
import json
def read_cmd():
"""Reading command line options."""
desc = "Program for finding KA content without descriptions."
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('-s','--subject', dest='subject', default = 'root', help = 'Link given course.')
parser.add_argument('-c','--content', dest='content', required = True, help = 'Content kind: video|exercise')
return parser.parse_args()
# Currently, article type does not seem to work.
CONTENT_TYPES = ['video', 'exercise']
def print_slugs_without_descriptions(content):
unique_content_ids = set()
for c in content:
if c['id'] in unique_content_ids:
continue
else:
unique_content_ids.add(c['id'])
if c['translated_description'] is None:
#print(c['node_slug'], 'WARNING: Empty description, returning None')
print(c['node_slug'])
elif not c['translated_description']:
#print(c['node_slug'], 'WARNING: Empty description!')
print(c['node_slug'])
if __name__ == '__main__':
opts = read_cmd()
topic = opts.subject
content_type = opts.content.lower()
if content_type not in CONTENT_TYPES:
print("ERROR: content argument: ", opts.content)
print("Possibilities are: ", CONTENT_TYPES)
exit(1)
khan_tree = KhanContentTree('en', content_type)
tree = khan_tree.get()
if topic != 'root':
subtree = find_ka_topic(tree, topic)
else:
subtree = tree
if not subtree:
print("ERROR: Could not find subtree for topic: %s\n" % (topic))
sys.exit(1)
content = []
kapi_tree_get_content_items(subtree, content, content_type)
print_slugs_without_descriptions(content)
|
<commit_before><commit_msg>Add script for finding missing descriptions in KA content<commit_after>#!/usr/bin/env python3
from kapi import *
import utils
import argparse, sys
import time
import json
def read_cmd():
"""Reading command line options."""
desc = "Program for finding KA content without descriptions."
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('-s','--subject', dest='subject', default = 'root', help = 'Link given course.')
parser.add_argument('-c','--content', dest='content', required = True, help = 'Content kind: video|exercise')
return parser.parse_args()
# Currently, article type does not seem to work.
CONTENT_TYPES = ['video', 'exercise']
def print_slugs_without_descriptions(content):
unique_content_ids = set()
for c in content:
if c['id'] in unique_content_ids:
continue
else:
unique_content_ids.add(c['id'])
if c['translated_description'] is None:
#print(c['node_slug'], 'WARNING: Empty description, returning None')
print(c['node_slug'])
elif not c['translated_description']:
#print(c['node_slug'], 'WARNING: Empty description!')
print(c['node_slug'])
if __name__ == '__main__':
opts = read_cmd()
topic = opts.subject
content_type = opts.content.lower()
if content_type not in CONTENT_TYPES:
print("ERROR: content argument: ", opts.content)
print("Possibilities are: ", CONTENT_TYPES)
exit(1)
khan_tree = KhanContentTree('en', content_type)
tree = khan_tree.get()
if topic != 'root':
subtree = find_ka_topic(tree, topic)
else:
subtree = tree
if not subtree:
print("ERROR: Could not find subtree for topic: %s\n" % (topic))
sys.exit(1)
content = []
kapi_tree_get_content_items(subtree, content, content_type)
print_slugs_without_descriptions(content)
|
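The core of print_slugs_without_descriptions() above is a dedupe-by-id pass plus an empty-description check. The standalone sketch below reproduces that pattern on made-up dictionaries (the ids, slugs and descriptions are invented; real items come from the Khan Academy content tree) and shows that a single falsiness test covers both the None branch and the empty-string branch:

content = [
    {'id': 'a', 'node_slug': 'v/intro', 'translated_description': None},
    {'id': 'b', 'node_slug': 'v/fractions', 'translated_description': 'ok'},
    {'id': 'a', 'node_slug': 'v/intro', 'translated_description': None},   # duplicate id, skipped
]

seen = set()
for c in content:
    if c['id'] in seen:
        continue
    seen.add(c['id'])
    if not c['translated_description']:   # True for both None and ""
        print(c['node_slug'])
# prints only: v/intro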
|
8038040d1132de7648a3795a32605da1213bb741
|
main.py
|
main.py
|
import pyb
LEDS = [pyb.LED(i) for i in range(1,5)]
while True:
for led in LEDS:
led.toggle()
pyb.delay(100)
|
Add basic blinking LED script
|
Add basic blinking LED script
|
Python
|
mit
|
Tyler314/led_matrix,Tyler314/led_matrix
|
Add basic blinking LED script
|
import pyb
LEDS = [pyb.LED(i) for i in range(1,5)]
while True:
for led in LEDS:
led.toggle()
pyb.delay(100)
|
<commit_before><commit_msg>Add basic blinking LED script<commit_after>
|
import pyb
LEDS = [pyb.LED(i) for i in range(1,5)]
while True:
for led in LEDS:
led.toggle()
pyb.delay(100)
|
Add basic blinking LED scriptimport pyb
LEDS = [pyb.LED(i) for i in range(1,5)]
while True:
for led in LEDS:
led.toggle()
pyb.delay(100)
|
<commit_before><commit_msg>Add basic blinking LED script<commit_after>import pyb
LEDS = [pyb.LED(i) for i in range(1,5)]
while True:
for led in LEDS:
led.toggle()
pyb.delay(100)
|
|
364014ecc42150f8ad5959ebcdcc94ae07f38c01
|
tests/test_commands.py
|
tests/test_commands.py
|
from pim.commands.init import _defaults, _make_package
from pim.commands.install import install
from pim.commands.uninstall import uninstall
from click.testing import CliRunner
def _create_test_package():
d = _defaults()
d['description'] = 'test package'
_make_package(d, True)
return d
def test_install_and_uninstall():
pkg_to_install = 'nose'
runner = CliRunner()
with runner.isolated_filesystem():
d = _create_test_package()
result = runner.invoke(install, ['-g', pkg_to_install])
# _install([pkg_to_install], globally=True)
with open('requirements.txt', 'r') as f:
lines = f.readlines()
assert pkg_to_install in lines
result = runner.invoke(uninstall, ['-g', pkg_to_install])
with open('requirements.txt', 'r') as f:
lines = f.readlines()
assert pkg_to_install not in lines
|
Create test package and round trip install/uninstall
|
TST: Create test package and round trip install/uninstall
|
Python
|
mit
|
freeman-lab/pim
|
TST: Create test package and round trip install/uninstall
|
from pim.commands.init import _defaults, _make_package
from pim.commands.install import install
from pim.commands.uninstall import uninstall
from click.testing import CliRunner
def _create_test_package():
d = _defaults()
d['description'] = 'test package'
_make_package(d, True)
return d
def test_install_and_uninstall():
pkg_to_install = 'nose'
runner = CliRunner()
with runner.isolated_filesystem():
d = _create_test_package()
result = runner.invoke(install, ['-g', pkg_to_install])
# _install([pkg_to_install], globally=True)
with open('requirements.txt', 'r') as f:
lines = f.readlines()
assert pkg_to_install in lines
result = runner.invoke(uninstall, ['-g', pkg_to_install])
with open('requirements.txt', 'r') as f:
lines = f.readlines()
assert pkg_to_install not in lines
|
<commit_before><commit_msg>TST: Create test package and round trip install/uninstall<commit_after>
|
from pim.commands.init import _defaults, _make_package
from pim.commands.install import install
from pim.commands.uninstall import uninstall
from click.testing import CliRunner
def _create_test_package():
d = _defaults()
d['description'] = 'test package'
_make_package(d, True)
return d
def test_install_and_uninstall():
pkg_to_install = 'nose'
runner = CliRunner()
with runner.isolated_filesystem():
d = _create_test_package()
result = runner.invoke(install, ['-g', pkg_to_install])
# _install([pkg_to_install], globally=True)
with open('requirements.txt', 'r') as f:
lines = f.readlines()
assert pkg_to_install in lines
result = runner.invoke(uninstall, ['-g', pkg_to_install])
with open('requirements.txt', 'r') as f:
lines = f.readlines()
assert pkg_to_install not in lines
|
TST: Create test package and round trip install/uninstallfrom pim.commands.init import _defaults, _make_package
from pim.commands.install import install
from pim.commands.uninstall import uninstall
from click.testing import CliRunner
def _create_test_package():
d = _defaults()
d['description'] = 'test package'
_make_package(d, True)
return d
def test_install_and_uninstall():
pkg_to_install = 'nose'
runner = CliRunner()
with runner.isolated_filesystem():
d = _create_test_package()
result = runner.invoke(install, ['-g', pkg_to_install])
# _install([pkg_to_install], globally=True)
with open('requirements.txt', 'r') as f:
lines = f.readlines()
assert pkg_to_install in lines
result = runner.invoke(uninstall, ['-g', pkg_to_install])
with open('requirements.txt', 'r') as f:
lines = f.readlines()
assert pkg_to_install not in lines
|
<commit_before><commit_msg>TST: Create test package and round trip install/uninstall<commit_after>from pim.commands.init import _defaults, _make_package
from pim.commands.install import install
from pim.commands.uninstall import uninstall
from click.testing import CliRunner
def _create_test_package():
d = _defaults()
d['description'] = 'test package'
_make_package(d, True)
return d
def test_install_and_uninstall():
pkg_to_install = 'nose'
runner = CliRunner()
with runner.isolated_filesystem():
d = _create_test_package()
result = runner.invoke(install, ['-g', pkg_to_install])
# _install([pkg_to_install], globally=True)
with open('requirements.txt', 'r') as f:
lines = f.readlines()
assert pkg_to_install in lines
result = runner.invoke(uninstall, ['-g', pkg_to_install])
with open('requirements.txt', 'r') as f:
lines = f.readlines()
assert pkg_to_install not in lines
|
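One detail worth keeping in mind when reading the assertions above: readlines() keeps the trailing newline on each line, so membership checks against the raw lines only match entries that were written without a line break. A short standalone illustration of the difference:

lines = ["nose\n", "requests\n"]
print("nose" in lines)                        # False - the newline is part of the entry
print("nose" in [l.strip() for l in lines])   # True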
|
57051d3e59a4664a536588c19ae0581cb92f1350
|
timed/redmine/admin.py
|
timed/redmine/admin.py
|
from django.contrib import admin
from timed.projects.admin import ProjectAdmin
from timed.projects.models import Project
from timed_adfinis.redmine.models import RedmineProject
admin.site.unregister(Project)
class RedmineProjectInline(admin.StackedInline):
model = RedmineProject
@admin.register(Project)
class ProjectAdmin(ProjectAdmin):
"""Adfinis specific project including Redmine issue configuration."""
inlines = ProjectAdmin.inlines + [RedmineProjectInline, ]
|
Add RedmineProject as inline of ProjectAdmin
|
Add RedmineProject as inline of ProjectAdmin
|
Python
|
agpl-3.0
|
adfinis-sygroup/timed-backend,adfinis-sygroup/timed-backend,adfinis-sygroup/timed-backend
|
Add RedmineProject as inline of ProjectAdmin
|
from django.contrib import admin
from timed.projects.admin import ProjectAdmin
from timed.projects.models import Project
from timed_adfinis.redmine.models import RedmineProject
admin.site.unregister(Project)
class RedmineProjectInline(admin.StackedInline):
model = RedmineProject
@admin.register(Project)
class ProjectAdmin(ProjectAdmin):
"""Adfinis specific project including Redmine issue configuration."""
inlines = ProjectAdmin.inlines + [RedmineProjectInline, ]
|
<commit_before><commit_msg>Add RedmineProject as inline of ProjectAdmin<commit_after>
|
from django.contrib import admin
from timed.projects.admin import ProjectAdmin
from timed.projects.models import Project
from timed_adfinis.redmine.models import RedmineProject
admin.site.unregister(Project)
class RedmineProjectInline(admin.StackedInline):
model = RedmineProject
@admin.register(Project)
class ProjectAdmin(ProjectAdmin):
"""Adfinis specific project including Redmine issue configuration."""
inlines = ProjectAdmin.inlines + [RedmineProjectInline, ]
|
Add RedmineProject as inline of ProjectAdminfrom django.contrib import admin
from timed.projects.admin import ProjectAdmin
from timed.projects.models import Project
from timed_adfinis.redmine.models import RedmineProject
admin.site.unregister(Project)
class RedmineProjectInline(admin.StackedInline):
model = RedmineProject
@admin.register(Project)
class ProjectAdmin(ProjectAdmin):
"""Adfinis specific project including Redmine issue configuration."""
inlines = ProjectAdmin.inlines + [RedmineProjectInline, ]
|
<commit_before><commit_msg>Add RedmineProject as inline of ProjectAdmin<commit_after>from django.contrib import admin
from timed.projects.admin import ProjectAdmin
from timed.projects.models import Project
from timed_adfinis.redmine.models import RedmineProject
admin.site.unregister(Project)
class RedmineProjectInline(admin.StackedInline):
model = RedmineProject
@admin.register(Project)
class ProjectAdmin(ProjectAdmin):
"""Adfinis specific project including Redmine issue configuration."""
inlines = ProjectAdmin.inlines + [RedmineProjectInline, ]
|
|
520dc3ecf931845beab6a4e0e9343633bbd22c73
|
main.py
|
main.py
|
from numpy import array
from time import sleep
class GameOfLife(object):
def __init__(self, n, starting=[]):
self.game = array([[0]*n]*n)
self.size = n
for i, j in starting:
self.game[i, j] = 1
def get_square_pos(self, i, j):
im = i - 1
iM = i + 2
jm = j - 1
jM = j + 2
if im < 0:
im = 0
if jm < 0:
jm = 0
return ((im, iM), (jm, jM))
def get_rule(self, i, j):
x, y = self.get_square_pos(i, j)
rule_n = self.game[x[0]:x[1], y[0]:y[1]].sum()
alive = self.game[i, j] == 1
# If currently alive
if alive:
rule_n -= 1
# Any live cell with fewer than two live neighbours dies, as if
# caused by under-population.
if rule_n < 2:
return 0
# Any live cell with two or three live neighbours lives on to the
# next generation.
elif rule_n < 4:
return 1
# Any live cell with more than three live neighbours dies, as if by
# overcrowding.
else:
return 0
# If currently dead
else:
# Any dead cell with exactly three live neighbours becomes a live
# cell, as if by reproduction.
if rule_n == 3:
return 1
else:
return 0
def step(self):
self.game_new = self.game.copy()
for j in range(self.size):
for i in range(self.size):
self.game_new[i, j] = self.get_rule(i, j)
self.game = self.game_new
def play(self):
count = 0
while(True):
print("Step", count)
self.draw()
self.step()
count += 1
sleep(1)
def draw(self):
drawing = ""
for j in range(self.size):
for i in range(self.size):
if self.game[i, j]:
drawing += "X"
else:
drawing += " "
drawing += "\n"
drawing += "\n"
print(drawing)
|
Create the Game class which has the rules implemented, can go a step forward and can draw itself.
|
Create the Game class which has the rules implemented, can
go a step forward and can draw itself.
|
Python
|
mit
|
nightmarebadger/conways-game-of-life-python
|
Create the Game class which has the rules implemented, can
go a step forward and can draw itself.
|
from numpy import array
from time import sleep
class GameOfLife(object):
def __init__(self, n, starting=[]):
self.game = array([[0]*n]*n)
self.size = n
for i, j in starting:
self.game[i, j] = 1
def get_square_pos(self, i, j):
im = i - 1
iM = i + 2
jm = j - 1
jM = j + 2
if im < 0:
im = 0
if jm < 0:
jm = 0
return ((im, iM), (jm, jM))
def get_rule(self, i, j):
x, y = self.get_square_pos(i, j)
rule_n = self.game[x[0]:x[1], y[0]:y[1]].sum()
alive = self.game[i, j] == 1
# If currently alive
if alive:
rule_n -= 1
# Any live cell with fewer than two live neighbours dies, as if
# caused by under-population.
if rule_n < 2:
return 0
# Any live cell with two or three live neighbours lives on to the
# next generation.
elif rule_n < 4:
return 1
# Any live cell with more than three live neighbours dies, as if by
# overcrowding.
else:
return 0
# If currently dead
else:
# Any dead cell with exactly three live neighbours becomes a live
# cell, as if by reproduction.
if rule_n == 3:
return 1
else:
return 0
def step(self):
self.game_new = self.game.copy()
for j in range(self.size):
for i in range(self.size):
self.game_new[i, j] = self.get_rule(i, j)
self.game = self.game_new
def play(self):
count = 0
while(True):
print("Step", count)
self.draw()
self.step()
count += 1
sleep(1)
def draw(self):
drawing = ""
for j in range(self.size):
for i in range(self.size):
if self.game[i, j]:
drawing += "X"
else:
drawing += " "
drawing += "\n"
drawing += "\n"
print(drawing)
|
<commit_before><commit_msg>Create the Game class which has the rules implemented, can
go a step forward and can draw itself.<commit_after>
|
from numpy import array
from time import sleep
class GameOfLife(object):
def __init__(self, n, starting=[]):
self.game = array([[0]*n]*n)
self.size = n
for i, j in starting:
self.game[i, j] = 1
def get_square_pos(self, i, j):
im = i - 1
iM = i + 2
jm = j - 1
jM = j + 2
if im < 0:
im = 0
if jm < 0:
jm = 0
return ((im, iM), (jm, jM))
def get_rule(self, i, j):
x, y = self.get_square_pos(i, j)
rule_n = self.game[x[0]:x[1], y[0]:y[1]].sum()
alive = self.game[i, j] == 1
# If currently alive
if alive:
rule_n -= 1
# Any live cell with fewer than two live neighbours dies, as if
# caused by under-population.
if rule_n < 2:
return 0
# Any live cell with two or three live neighbours lives on to the
# next generation.
elif rule_n < 4:
return 1
# Any live cell with more than three live neighbours dies, as if by
# overcrowding.
else:
return 0
# If currently dead
else:
# Any dead cell with exactly three live neighbours becomes a live
# cell, as if by reproduction.
if rule_n == 3:
return 1
else:
return 0
def step(self):
self.game_new = self.game.copy()
for j in range(self.size):
for i in range(self.size):
self.game_new[i, j] = self.get_rule(i, j)
self.game = self.game_new
def play(self):
count = 0
while(True):
print("Step", count)
self.draw()
self.step()
count += 1
sleep(1)
def draw(self):
drawing = ""
for j in range(self.size):
for i in range(self.size):
if self.game[i, j]:
drawing += "X"
else:
drawing += " "
drawing += "\n"
drawing += "\n"
print(drawing)
|
Create the Game class which has the rules implemented, can
go a step forward and can draw itself.from numpy import array
from time import sleep
class GameOfLife(object):
def __init__(self, n, starting=[]):
self.game = array([[0]*n]*n)
self.size = n
for i, j in starting:
self.game[i, j] = 1
def get_square_pos(self, i, j):
im = i - 1
iM = i + 2
jm = j - 1
jM = j + 2
if im < 0:
im = 0
if jm < 0:
jm = 0
return ((im, iM), (jm, jM))
def get_rule(self, i, j):
x, y = self.get_square_pos(i, j)
rule_n = self.game[x[0]:x[1], y[0]:y[1]].sum()
alive = self.game[i, j] == 1
# If currently alive
if alive:
rule_n -= 1
# Any live cell with fewer than two live neighbours dies, as if
# caused by under-population.
if rule_n < 2:
return 0
# Any live cell with two or three live neighbours lives on to the
# next generation.
elif rule_n < 4:
return 1
# Any live cell with more than three live neighbours dies, as if by
# overcrowding.
else:
return 0
# If currently dead
else:
# Any dead cell with exactly three live neighbours becomes a live
# cell, as if by reproduction.
if rule_n == 3:
return 1
else:
return 0
def step(self):
self.game_new = self.game.copy()
for j in range(self.size):
for i in range(self.size):
self.game_new[i, j] = self.get_rule(i, j)
self.game = self.game_new
def play(self):
count = 0
while(True):
print("Step", count)
self.draw()
self.step()
count += 1
sleep(1)
def draw(self):
drawing = ""
for j in range(self.size):
for i in range(self.size):
if self.game[i, j]:
drawing += "X"
else:
drawing += " "
drawing += "\n"
drawing += "\n"
print(drawing)
|
<commit_before><commit_msg>Create the Game class which has the rules implemented, can
go a step forward and can draw itself.<commit_after>from numpy import array
from time import sleep
class GameOfLife(object):
def __init__(self, n, starting=[]):
self.game = array([[0]*n]*n)
self.size = n
for i, j in starting:
self.game[i, j] = 1
def get_square_pos(self, i, j):
im = i - 1
iM = i + 2
jm = j - 1
jM = j + 2
if im < 0:
im = 0
if jm < 0:
jm = 0
return ((im, iM), (jm, jM))
def get_rule(self, i, j):
x, y = self.get_square_pos(i, j)
rule_n = self.game[x[0]:x[1], y[0]:y[1]].sum()
alive = self.game[i, j] == 1
# If currently alive
if alive:
rule_n -= 1
# Any live cell with fewer than two live neighbours dies, as if
# caused by under-population.
if rule_n < 2:
return 0
# Any live cell with two or three live neighbours lives on to the
# next generation.
elif rule_n < 4:
return 1
# Any live cell with more than three live neighbours dies, as if by
# overcrowding.
else:
return 0
# If currently dead
else:
# Any dead cell with exactly three live neighbours becomes a live
# cell, as if by reproduction.
if rule_n == 3:
return 1
else:
return 0
def step(self):
self.game_new = self.game.copy()
for j in range(self.size):
for i in range(self.size):
self.game_new[i, j] = self.get_rule(i, j)
self.game = self.game_new
def play(self):
count = 0
while(True):
print("Step", count)
self.draw()
self.step()
count += 1
sleep(1)
def draw(self):
drawing = ""
for j in range(self.size):
for i in range(self.size):
if self.game[i, j]:
drawing += "X"
else:
drawing += " "
drawing += "\n"
drawing += "\n"
print(drawing)
|
|
b6c1e11682dea0acd6b78f5fc0dfb5220eb5db70
|
tools/xmldir2tree.py
|
tools/xmldir2tree.py
|
#!/usr/bin/env python3
from collections import OrderedDict
try:
from lxml import etree
except ImportError:
from xml.etree import ElementTree as etree
class Node(object):
def __init__(self, name=None, depth=0):
self.name = name
self.depth = depth
self.sources = set()
self.children = OrderedDict()
def process(self, elem, source):
self.sources.add(source)
for attr in elem.attrib:
self.add('@' + attr, source)
if not list(elem) and elem.text and elem.text.strip():
self.add('#text', source)
for child in elem:
name = child.tag if child.tag != etree.Comment else '#comment'
node = self.add(name, source)
node.process(child, source)
def add(self, name, source):
node = self.children.setdefault(name, Node(name, self.depth + 1))
node.sources.add(source)
return node
def visualize(node, indent=2, source_count=0, parent_ratio=None):
source_count = source_count or len(node.sources)
ratio = len(node.sources) / source_count
if node.name:
note = ""
if ratio < 1.0 and ratio != parent_ratio:
note = " # in %.3f%%" % (ratio*100)
print((' ' * indent) * node.depth + node.name + note)
for child in node.children.values():
visualize(child, indent, source_count, ratio)
if __name__ == '__main__':
import glob, os, sys
args = sys.argv[1:]
tree = Node()
for pth in args:
sources = glob.glob(pth) if not os.path.isdir(pth) else (
os.path.join(dirpath, fname)
for dirpath, dirnames, filenames in os.walk(pth)
for fname in filenames if not fname.startswith('.'))
for source in sources:
try:
print("Processing", source, file=sys.stderr)
root = etree.parse(source).getroot()
except etree.XMLSyntaxError:
print("XML syntax error in:", source, file=sys.stderr)
else:
tree.name = root.tag
tree.process(root, source)
visualize(tree)
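For illustration, the Node/visualize pair above can also be exercised in-process on a single parsed document; the XML snippet and the source label below are made up:
from xml.etree import ElementTree as ET
root = ET.fromstring('<record id="1"><title>Example</title></record>')
tree = Node(root.tag)
tree.process(root, 'inline-example')
visualize(tree) # prints the element/attribute tree: record, @id, title, #text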
|
Add script calculating the tree structure of a set of XML files
|
Add script calculating the tree structure of a set of XML files
|
Python
|
cc0-1.0
|
Kungbib/datalab,Kungbib/datalab
|
Add script calculating the tree structure of a set of XML files
|
#!/usr/bin/env python3
from collections import OrderedDict
try:
from lxml import etree
except ImportError:
from xml.etree import ElementTree as etree
class Node(object):
def __init__(self, name=None, depth=0):
self.name = name
self.depth = depth
self.sources = set()
self.children = OrderedDict()
def process(self, elem, source):
self.sources.add(source)
for attr in elem.attrib:
self.add('@' + attr, source)
if not list(elem) and elem.text and elem.text.strip():
self.add('#text', source)
for child in elem:
name = child.tag if child.tag != etree.Comment else '#comment'
node = self.add(name, source)
node.process(child, source)
def add(self, name, source):
node = self.children.setdefault(name, Node(name, self.depth + 1))
node.sources.add(source)
return node
def visualize(node, indent=2, source_count=0, parent_ratio=None):
source_count = source_count or len(node.sources)
ratio = len(node.sources) / source_count
if node.name:
note = ""
if ratio < 1.0 and ratio != parent_ratio:
note = " # in %.3f%%" % (ratio*100)
print((' ' * indent) * node.depth + node.name + note)
for child in node.children.values():
visualize(child, indent, source_count, ratio)
if __name__ == '__main__':
import glob, os, sys
args = sys.argv[1:]
tree = Node()
for pth in args:
sources = glob.glob(pth) if not os.path.isdir(pth) else (
os.path.join(dirpath, fname)
for dirpath, dirnames, filenames in os.walk(pth)
for fname in filenames if not fname.startswith('.'))
for source in sources:
try:
print("Processing", source, file=sys.stderr)
root = etree.parse(source).getroot()
except etree.XMLSyntaxError:
print("XML syntax error in:", source, file=sys.stderr)
else:
tree.name = root.tag
tree.process(root, source)
visualize(tree)
|
<commit_before><commit_msg>Add script calculating the tree structure of a set of XML files<commit_after>
|
#!/usr/bin/env python3
from collections import OrderedDict
try:
from lxml import etree
except ImportError:
from xml.etree import ElementTree as etree
class Node(object):
def __init__(self, name=None, depth=0):
self.name = name
self.depth = depth
self.sources = set()
self.children = OrderedDict()
def process(self, elem, source):
self.sources.add(source)
for attr in elem.attrib:
self.add('@' + attr, source)
if not list(elem) and elem.text and elem.text.strip():
self.add('#text', source)
for child in elem:
name = child.tag if child.tag != etree.Comment else '#comment'
node = self.add(name, source)
node.process(child, source)
def add(self, name, source):
node = self.children.setdefault(name, Node(name, self.depth + 1))
node.sources.add(source)
return node
def visualize(node, indent=2, source_count=0, parent_ratio=None):
source_count = source_count or len(node.sources)
ratio = len(node.sources) / source_count
if node.name:
note = ""
if ratio < 1.0 and ratio != parent_ratio:
note = " # in %.3f%%" % (ratio*100)
print((' ' * indent) * node.depth + node.name + note)
for child in node.children.values():
visualize(child, indent, source_count, ratio)
if __name__ == '__main__':
import glob, os, sys
args = sys.argv[1:]
tree = Node()
for pth in args:
sources = glob.glob(pth) if not os.path.isdir(pth) else (
os.path.join(dirpath, fname)
for dirpath, dirnames, filenames in os.walk(pth)
for fname in filenames if not fname.startswith('.'))
for source in sources:
try:
print("Processing", source, file=sys.stderr)
root = etree.parse(source).getroot()
except etree.XMLSyntaxError:
print("XML syntax error in:", source, file=sys.stderr)
else:
tree.name = root.tag
tree.process(root, source)
visualize(tree)
|
Add script calculating the tree structure of a set of XML files#!/usr/bin/env python3
from collections import OrderedDict
try:
from lxml import etree
except ImportError:
from xml.etree import ElementTree as etree
class Node(object):
def __init__(self, name=None, depth=0):
self.name = name
self.depth = depth
self.sources = set()
self.children = OrderedDict()
def process(self, elem, source):
self.sources.add(source)
for attr in elem.attrib:
self.add('@' + attr, source)
if not list(elem) and elem.text and elem.text.strip():
self.add('#text', source)
for child in elem:
name = child.tag if child.tag != etree.Comment else '#comment'
node = self.add(name, source)
node.process(child, source)
def add(self, name, source):
node = self.children.setdefault(name, Node(name, self.depth + 1))
node.sources.add(source)
return node
def visualize(node, indent=2, source_count=0, parent_ratio=None):
source_count = source_count or len(node.sources)
ratio = len(node.sources) / source_count
if node.name:
note = ""
if ratio < 1.0 and ratio != parent_ratio:
note = " # in %.3f%%" % (ratio*100)
print((' ' * indent) * node.depth + node.name + note)
for child in node.children.values():
visualize(child, indent, source_count, ratio)
if __name__ == '__main__':
import glob, os, sys
args = sys.argv[1:]
tree = Node()
for pth in args:
sources = glob.glob(pth) if not os.path.isdir(pth) else (
os.path.join(dirpath, fname)
for dirpath, dirnames, filenames in os.walk(pth)
for fname in filenames if not fname.startswith('.'))
for source in sources:
try:
print("Processing", source, file=sys.stderr)
root = etree.parse(source).getroot()
except etree.XMLSyntaxError:
print("XML syntax error in:", source, file=sys.stderr)
else:
tree.name = root.tag
tree.process(root, source)
visualize(tree)
|
<commit_before><commit_msg>Add script calculating the tree structure of a set of XML files<commit_after>#!/usr/bin/env python3
from collections import OrderedDict
try:
from lxml import etree
except ImportError:
from xml.etree import ElementTree as etree
class Node(object):
def __init__(self, name=None, depth=0):
self.name = name
self.depth = depth
self.sources = set()
self.children = OrderedDict()
def process(self, elem, source):
self.sources.add(source)
for attr in elem.attrib:
self.add('@' + attr, source)
if not list(elem) and elem.text and elem.text.strip():
self.add('#text', source)
for child in elem:
name = child.tag if child.tag != etree.Comment else '#comment'
node = self.add(name, source)
node.process(child, source)
def add(self, name, source):
node = self.children.setdefault(name, Node(name, self.depth + 1))
node.sources.add(source)
return node
def visualize(node, indent=2, source_count=0, parent_ratio=None):
source_count = source_count or len(node.sources)
ratio = len(node.sources) / source_count
if node.name:
note = ""
if ratio < 1.0 and ratio != parent_ratio:
note = " # in %.3f%%" % (ratio*100)
print((' ' * indent) * node.depth + node.name + note)
for child in node.children.values():
visualize(child, indent, source_count, ratio)
if __name__ == '__main__':
import glob, os, sys
args = sys.argv[1:]
tree = Node()
for pth in args:
sources = glob.glob(pth) if not os.path.isdir(pth) else (
os.path.join(dirpath, fname)
for dirpath, dirnames, filenames in os.walk(pth)
for fname in filenames if not fname.startswith('.'))
for source in sources:
try:
print("Processing", source, file=sys.stderr)
root = etree.parse(source).getroot()
except etree.XMLSyntaxError:
print("XML syntax error in:", source, file=sys.stderr)
else:
tree.name = root.tag
tree.process(root, source)
visualize(tree)
|
|
40dd0738490a8fe27067b19e0539533a55c3b71c
|
physicalproperty/__init__.py
|
physicalproperty/__init__.py
|
# -*- coding: utf-8 -*-
"""
Base Library (:mod:`physicalproperty`)
======================================
.. currentmodule:: physicalproperty
"""
from physicalproperty import PhysicalProperty
__version__ = "0.0.1"
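A quick smoke check of the metadata above (assumes the package and its sibling physicalproperty module are importable):
import physicalproperty
print(physicalproperty.__version__)  # expected to print 0.0.1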
|
Add init for multi-file module
|
Add init for multi-file module
|
Python
|
mit
|
jrsmith3/physicalproperty,jrsmith3/tec,jrsmith3/ibei,jrsmith3/tec,jrsmith3/physicalproperty
|
Add init for multi-file module
|
# -*- coding: utf-8 -*-
"""
Base Library (:mod:`physicalproperty`)
======================================
.. currentmodule:: physicalproperty
"""
from physicalproperty import PhysicalProperty
__version__ = "0.0.1"
|
<commit_before><commit_msg>Add init for multi-file module<commit_after>
|
# -*- coding: utf-8 -*-
"""
Base Library (:mod:`physicalproperty`)
======================================
.. currentmodule:: physicalproperty
"""
from physicalproperty import PhysicalProperty
__version__ = "0.0.1"
|
Add init for multi-file module# -*- coding: utf-8 -*-
"""
Base Library (:mod:`physicalproperty`)
======================================
.. currentmodule:: physicalproperty
"""
from physicalproperty import PhysicalProperty
__version__ = "0.0.1"
|
<commit_before><commit_msg>Add init for multi-file module<commit_after># -*- coding: utf-8 -*-
"""
Base Library (:mod:`physicalproperty`)
======================================
.. currentmodule:: physicalproperty
"""
from physicalproperty import PhysicalProperty
__version__ = "0.0.1"
|
|
3cc9c4a72a863b12a298cc7b3d8927a22d9149d2
|
agent.py
|
agent.py
|
import json
import logbook
from piper.db.core import LazyDatabaseMixin
from piper.utils import oneshot
class Agent(LazyDatabaseMixin):
"""
    Listener endpoint that receives requests and executes them
"""
_properties = None
FIELDS_TO_DB = (
# Main fields
'id',
'fqdn',
'config',
'local',
'active',
# Relationship fields
'building',
# Bulk data
'properties',
# Timestamps
'created',
)
def __init__(self, config):
self.config = config
self.id = None
self.building = None
self.status = None
self.log = logbook.Logger(self.__class__.__name__)
def update(self):
"""
Update state of the agent in the database
"""
...
@property
def properties(self):
"""
        Grab system properties for the agent. Will be cached once it has run.
:returns: Dictionary of system values
"""
if self._properties is None:
facter = oneshot('facter --json')
data = json.loads(facter)
self._properties = data
return self._properties
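A hypothetical way to exercise the skeleton above; the config contents are illustrative, and the call assumes piper's modules plus the facter CLI are available:
agent = Agent(config={'fqdn': 'builder-01.example.com'})
facts = agent.properties  # shells out to `facter --json` once, then returns the cached dict
print(sorted(facts)[:5])  # a few fact names, whatever facter reports on this host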
|
Add basic skeleton for Agent()
|
Add basic skeleton for Agent()
|
Python
|
mit
|
thiderman/piper
|
Add basic skeleton for Agent()
|
import json
import logbook
from piper.db.core import LazyDatabaseMixin
from piper.utils import oneshot
class Agent(LazyDatabaseMixin):
"""
    Listener endpoint that receives requests and executes them
"""
_properties = None
FIELDS_TO_DB = (
# Main fields
'id',
'fqdn',
'config',
'local',
'active',
# Relationship fields
'building',
# Bulk data
'properties',
# Timestamps
'created',
)
def __init__(self, config):
self.config = config
self.id = None
self.building = None
self.status = None
self.log = logbook.Logger(self.__class__.__name__)
def update(self):
"""
Update state of the agent in the database
"""
...
@property
def properties(self):
"""
        Grab system properties for the agent. Will be cached once it has run.
:returns: Dictionary of system values
"""
if self._properties is None:
facter = oneshot('facter --json')
data = json.loads(facter)
self._properties = data
return self._properties
|
<commit_before><commit_msg>Add basic skeleton for Agent()<commit_after>
|
import json
import logbook
from piper.db.core import LazyDatabaseMixin
from piper.utils import oneshot
class Agent(LazyDatabaseMixin):
"""
    Listener endpoint that receives requests and executes them
"""
_properties = None
FIELDS_TO_DB = (
# Main fields
'id',
'fqdn',
'config',
'local',
'active',
# Relationship fields
'building',
# Bulk data
'properties',
# Timestamps
'created',
)
def __init__(self, config):
self.config = config
self.id = None
self.building = None
self.status = None
self.log = logbook.Logger(self.__class__.__name__)
def update(self):
"""
Update state of the agent in the database
"""
...
@property
def properties(self):
"""
        Grab system properties for the agent. Will be cached once it has run.
:returns: Dictionary of system values
"""
if self._properties is None:
facter = oneshot('facter --json')
data = json.loads(facter)
self._properties = data
return self._properties
|
Add basic skeleton for Agent()import json
import logbook
from piper.db.core import LazyDatabaseMixin
from piper.utils import oneshot
class Agent(LazyDatabaseMixin):
"""
    Listener endpoint that receives requests and executes them
"""
_properties = None
FIELDS_TO_DB = (
# Main fields
'id',
'fqdn',
'config',
'local',
'active',
# Relationship fields
'building',
# Bulk data
'properties',
# Timestamps
'created',
)
def __init__(self, config):
self.config = config
self.id = None
self.building = None
self.status = None
self.log = logbook.Logger(self.__class__.__name__)
def update(self):
"""
Update state of the agent in the database
"""
...
@property
def properties(self):
"""
        Grab system properties for the agent. Will be cached once it has run.
:returns: Dictionary of system values
"""
if self._properties is None:
facter = oneshot('facter --json')
data = json.loads(facter)
self._properties = data
return self._properties
|
<commit_before><commit_msg>Add basic skeleton for Agent()<commit_after>import json
import logbook
from piper.db.core import LazyDatabaseMixin
from piper.utils import oneshot
class Agent(LazyDatabaseMixin):
"""
    Listener endpoint that receives requests and executes them
"""
_properties = None
FIELDS_TO_DB = (
# Main fields
'id',
'fqdn',
'config',
'local',
'active',
# Relationship fields
'building',
# Bulk data
'properties',
# Timestamps
'created',
)
def __init__(self, config):
self.config = config
self.id = None
self.building = None
self.status = None
self.log = logbook.Logger(self.__class__.__name__)
def update(self):
"""
Update state of the agent in the database
"""
...
@property
def properties(self):
"""
        Grab system properties for the agent. Will be cached once it has run.
:returns: Dictionary of system values
"""
if self._properties is None:
facter = oneshot('facter --json')
data = json.loads(facter)
self._properties = data
return self._properties
|
|
c3617a33e4829b65cca8f19a55caa4093e737405
|
local.py
|
local.py
|
#!/usr/bin/python
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('archvyrt', 'console_scripts', 'archvyrt')()
)
|
Add simple script to run archvyrt without being installed.
|
Add simple script to run archvyrt without being installed.
|
Python
|
mit
|
andrekeller/archvyrt
|
Add simple script to run archvyrt without being installed.
|
#!/usr/bin/python
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('archvyrt', 'console_scripts', 'archvyrt')()
)
|
<commit_before><commit_msg>Add simple script to run archvyrt without being installed.<commit_after>
|
#!/usr/bin/python
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('archvyrt', 'console_scripts', 'archvyrt')()
)
|
Add simple script to run archvyrt without being installed.#!/usr/bin/python
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('archvyrt', 'console_scripts', 'archvyrt')()
)
|
<commit_before><commit_msg>Add simple script to run archvyrt without being installed.<commit_after>#!/usr/bin/python
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('archvyrt', 'console_scripts', 'archvyrt')()
)
|
|
02903171a6aeec4089e028bd4dccfb2e6acd5fb1
|
plumeria/plugins/rubygems.py
|
plumeria/plugins/rubygems.py
|
from plumeria import config
from plumeria.command import commands, CommandError
from plumeria.util import http
from plumeria.util.ratelimit import rate_limit
api_key = config.create("rubygems", "key",
fallback="",
comment="An API key from RubyGems.org (make an account, edit your profile)")
@commands.register("rubygems", "gems", category="Development")
@rate_limit()
async def gems(message):
"""
Search the RubyGems repository for a package.
Example::
/gems discord
Response::
\u2022 discordrb (2.1.3) - A Ruby implementation of the[...]
\u2022 lita-discord (0.1.1) - A Discord adapter for Lit[...]
\u2022 omniauth-discord (0.1.3) - Discord OAuth2 Strate[...]
\u2022 rediscord (1.0.0) - keep record id sync with dyn[...]
"""
q = message.content.strip()
if not q:
raise CommandError("Search term required!")
r = await http.get("https://rubygems.org/api/v1/search.json", params=[
('query', q),
], headers=[
('Authorization', api_key())
])
data = r.json()
if len(data):
return "\n".join(map(lambda e:
"\u2022 **{name}** ({version}) - {desc} <{url}>".format(
name=e['name'],
version=e['version'],
desc=e['info'],
url=e['project_uri']),
data))
else:
raise CommandError("no results found")
|
Add RubyGems plugin to search packages.
|
Add RubyGems plugin to search packages.
|
Python
|
mit
|
sk89q/Plumeria,sk89q/Plumeria,sk89q/Plumeria
|
Add RubyGems plugin to search packages.
|
from plumeria import config
from plumeria.command import commands, CommandError
from plumeria.util import http
from plumeria.util.ratelimit import rate_limit
api_key = config.create("rubygems", "key",
fallback="",
comment="An API key from RubyGems.org (make an account, edit your profile)")
@commands.register("rubygems", "gems", category="Development")
@rate_limit()
async def gems(message):
"""
Search the RubyGems repository for a package.
Example::
/gems discord
Response::
\u2022 discordrb (2.1.3) - A Ruby implementation of the[...]
\u2022 lita-discord (0.1.1) - A Discord adapter for Lit[...]
\u2022 omniauth-discord (0.1.3) - Discord OAuth2 Strate[...]
\u2022 rediscord (1.0.0) - keep record id sync with dyn[...]
"""
q = message.content.strip()
if not q:
raise CommandError("Search term required!")
r = await http.get("https://rubygems.org/api/v1/search.json", params=[
('query', q),
], headers=[
('Authorization', api_key())
])
data = r.json()
if len(data):
return "\n".join(map(lambda e:
"\u2022 **{name}** ({version}) - {desc} <{url}>".format(
name=e['name'],
version=e['version'],
desc=e['info'],
url=e['project_uri']),
data))
else:
raise CommandError("no results found")
|
<commit_before><commit_msg>Add RubyGems plugin to search packages.<commit_after>
|
from plumeria import config
from plumeria.command import commands, CommandError
from plumeria.util import http
from plumeria.util.ratelimit import rate_limit
api_key = config.create("rubygems", "key",
fallback="",
comment="An API key from RubyGems.org (make an account, edit your profile)")
@commands.register("rubygems", "gems", category="Development")
@rate_limit()
async def gems(message):
"""
Search the RubyGems repository for a package.
Example::
/gems discord
Response::
\u2022 discordrb (2.1.3) - A Ruby implementation of the[...]
\u2022 lita-discord (0.1.1) - A Discord adapter for Lit[...]
\u2022 omniauth-discord (0.1.3) - Discord OAuth2 Strate[...]
\u2022 rediscord (1.0.0) - keep record id sync with dyn[...]
"""
q = message.content.strip()
if not q:
raise CommandError("Search term required!")
r = await http.get("https://rubygems.org/api/v1/search.json", params=[
('query', q),
], headers=[
('Authorization', api_key())
])
data = r.json()
if len(data):
return "\n".join(map(lambda e:
"\u2022 **{name}** ({version}) - {desc} <{url}>".format(
name=e['name'],
version=e['version'],
desc=e['info'],
url=e['project_uri']),
data))
else:
raise CommandError("no results found")
|
Add RubyGems plugin to search packages.from plumeria import config
from plumeria.command import commands, CommandError
from plumeria.util import http
from plumeria.util.ratelimit import rate_limit
api_key = config.create("rubygems", "key",
fallback="",
comment="An API key from RubyGems.org (make an account, edit your profile)")
@commands.register("rubygems", "gems", category="Development")
@rate_limit()
async def gems(message):
"""
Search the RubyGems repository for a package.
Example::
/gems discord
Response::
\u2022 discordrb (2.1.3) - A Ruby implementation of the[...]
\u2022 lita-discord (0.1.1) - A Discord adapter for Lit[...]
\u2022 omniauth-discord (0.1.3) - Discord OAuth2 Strate[...]
\u2022 rediscord (1.0.0) - keep record id sync with dyn[...]
"""
q = message.content.strip()
if not q:
raise CommandError("Search term required!")
r = await http.get("https://rubygems.org/api/v1/search.json", params=[
('query', q),
], headers=[
('Authorization', api_key())
])
data = r.json()
if len(data):
return "\n".join(map(lambda e:
"\u2022 **{name}** ({version}) - {desc} <{url}>".format(
name=e['name'],
version=e['version'],
desc=e['info'],
url=e['project_uri']),
data))
else:
raise CommandError("no results found")
|
<commit_before><commit_msg>Add RubyGems plugin to search packages.<commit_after>from plumeria import config
from plumeria.command import commands, CommandError
from plumeria.util import http
from plumeria.util.ratelimit import rate_limit
api_key = config.create("rubygems", "key",
fallback="",
comment="An API key from RubyGems.org (make an account, edit your profile)")
@commands.register("rubygems", "gems", category="Development")
@rate_limit()
async def gems(message):
"""
Search the RubyGems repository for a package.
Example::
/gems discord
Response::
\u2022 discordrb (2.1.3) - A Ruby implementation of the[...]
\u2022 lita-discord (0.1.1) - A Discord adapter for Lit[...]
\u2022 omniauth-discord (0.1.3) - Discord OAuth2 Strate[...]
\u2022 rediscord (1.0.0) - keep record id sync with dyn[...]
"""
q = message.content.strip()
if not q:
raise CommandError("Search term required!")
r = await http.get("https://rubygems.org/api/v1/search.json", params=[
('query', q),
], headers=[
('Authorization', api_key())
])
data = r.json()
if len(data):
return "\n".join(map(lambda e:
"\u2022 **{name}** ({version}) - {desc} <{url}>".format(
name=e['name'],
version=e['version'],
desc=e['info'],
url=e['project_uri']),
data))
else:
raise CommandError("no results found")
|
|
e0a43a72af49e05156131684cadcba9889bc709f
|
graystruct/tests/test_handler.py
|
graystruct/tests/test_handler.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Simon Jagoe
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the 3-clause BSD license. See the LICENSE.txt file for details.
from __future__ import absolute_import
import json
import logging
import os
import unittest
import zlib
from structlog import wrap_logger
from mock import Mock
from ..encoder import GELFEncoder
from ..handler import GELFHandler
class TestingGELFHandler(GELFHandler):
def __init__(self, mock, *args, **kwargs):
super(TestingGELFHandler, self).__init__(*args, **kwargs)
self._mock = mock
def send(self, s):
self._mock(s)
class TestGELFHandler(unittest.TestCase):
def test(self):
# Given
std_logger = logging.getLogger(__name__)
collector = Mock()
handler = TestingGELFHandler(collector, 'localhost')
std_logger.addHandler(handler)
std_logger.setLevel(logging.DEBUG)
logger = wrap_logger(
std_logger, processors=[GELFEncoder(fqdn=False, localname='host')])
expected = {
'version': '1.1',
'host': 'host',
'level': 4, # syslog WARNING
'short_message': 'event',
'_pid': os.getpid(),
'_level_name': 'WARNING',
'_logger': std_logger.name,
}
# When
logger.warning('event')
# Then
self.assertEqual(collector.call_count, 1)
args, kwargs = collector.call_args
self.assertEqual(len(args), 1)
self.assertEqual(len(kwargs), 0)
event_json = zlib.decompress(args[0])
event_dict = json.loads(event_json.decode('utf-8'))
self.assertEqual(event_dict, expected)
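Outside the test, the same wiring might look roughly like this; the Graylog host is a placeholder, the port argument is assumed to follow the usual GELF default, and the encoder arguments mirror the test above:
import logging
from structlog import wrap_logger
from graystruct.encoder import GELFEncoder
from graystruct.handler import GELFHandler
std_logger = logging.getLogger('app')
std_logger.addHandler(GELFHandler('graylog.example.com', 12201))
logger = wrap_logger(std_logger, processors=[GELFEncoder(fqdn=False, localname='app-host')])
logger.warning('user_login', user_id=42)  # emits a compressed GELF message through the handler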
|
Add tests for standard GELFHandler
|
Add tests for standard GELFHandler
|
Python
|
bsd-3-clause
|
enthought/graystruct
|
Add tests for standard GELFHandler
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Simon Jagoe
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the 3-clause BSD license. See the LICENSE.txt file for details.
from __future__ import absolute_import
import json
import logging
import os
import unittest
import zlib
from structlog import wrap_logger
from mock import Mock
from ..encoder import GELFEncoder
from ..handler import GELFHandler
class TestingGELFHandler(GELFHandler):
def __init__(self, mock, *args, **kwargs):
super(TestingGELFHandler, self).__init__(*args, **kwargs)
self._mock = mock
def send(self, s):
self._mock(s)
class TestGELFHandler(unittest.TestCase):
def test(self):
# Given
std_logger = logging.getLogger(__name__)
collector = Mock()
handler = TestingGELFHandler(collector, 'localhost')
std_logger.addHandler(handler)
std_logger.setLevel(logging.DEBUG)
logger = wrap_logger(
std_logger, processors=[GELFEncoder(fqdn=False, localname='host')])
expected = {
'version': '1.1',
'host': 'host',
'level': 4, # syslog WARNING
'short_message': 'event',
'_pid': os.getpid(),
'_level_name': 'WARNING',
'_logger': std_logger.name,
}
# When
logger.warning('event')
# Then
self.assertEqual(collector.call_count, 1)
args, kwargs = collector.call_args
self.assertEqual(len(args), 1)
self.assertEqual(len(kwargs), 0)
event_json = zlib.decompress(args[0])
event_dict = json.loads(event_json.decode('utf-8'))
self.assertEqual(event_dict, expected)
|
<commit_before><commit_msg>Add tests for standard GELFHandler<commit_after>
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Simon Jagoe
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the 3-clause BSD license. See the LICENSE.txt file for details.
from __future__ import absolute_import
import json
import logging
import os
import unittest
import zlib
from structlog import wrap_logger
from mock import Mock
from ..encoder import GELFEncoder
from ..handler import GELFHandler
class TestingGELFHandler(GELFHandler):
def __init__(self, mock, *args, **kwargs):
super(TestingGELFHandler, self).__init__(*args, **kwargs)
self._mock = mock
def send(self, s):
self._mock(s)
class TestGELFHandler(unittest.TestCase):
def test(self):
# Given
std_logger = logging.getLogger(__name__)
collector = Mock()
handler = TestingGELFHandler(collector, 'localhost')
std_logger.addHandler(handler)
std_logger.setLevel(logging.DEBUG)
logger = wrap_logger(
std_logger, processors=[GELFEncoder(fqdn=False, localname='host')])
expected = {
'version': '1.1',
'host': 'host',
'level': 4, # syslog WARNING
'short_message': 'event',
'_pid': os.getpid(),
'_level_name': 'WARNING',
'_logger': std_logger.name,
}
# When
logger.warning('event')
# Then
self.assertEqual(collector.call_count, 1)
args, kwargs = collector.call_args
self.assertEqual(len(args), 1)
self.assertEqual(len(kwargs), 0)
event_json = zlib.decompress(args[0])
event_dict = json.loads(event_json.decode('utf-8'))
self.assertEqual(event_dict, expected)
|
Add tests for standard GELFHandler# -*- coding: utf-8 -*-
# Copyright (c) 2015 Simon Jagoe
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the 3-clause BSD license. See the LICENSE.txt file for details.
from __future__ import absolute_import
import json
import logging
import os
import unittest
import zlib
from structlog import wrap_logger
from mock import Mock
from ..encoder import GELFEncoder
from ..handler import GELFHandler
class TestingGELFHandler(GELFHandler):
def __init__(self, mock, *args, **kwargs):
super(TestingGELFHandler, self).__init__(*args, **kwargs)
self._mock = mock
def send(self, s):
self._mock(s)
class TestGELFHandler(unittest.TestCase):
def test(self):
# Given
std_logger = logging.getLogger(__name__)
collector = Mock()
handler = TestingGELFHandler(collector, 'localhost')
std_logger.addHandler(handler)
std_logger.setLevel(logging.DEBUG)
logger = wrap_logger(
std_logger, processors=[GELFEncoder(fqdn=False, localname='host')])
expected = {
'version': '1.1',
'host': 'host',
'level': 4, # syslog WARNING
'short_message': 'event',
'_pid': os.getpid(),
'_level_name': 'WARNING',
'_logger': std_logger.name,
}
# When
logger.warning('event')
# Then
self.assertEqual(collector.call_count, 1)
args, kwargs = collector.call_args
self.assertEqual(len(args), 1)
self.assertEqual(len(kwargs), 0)
event_json = zlib.decompress(args[0])
event_dict = json.loads(event_json.decode('utf-8'))
self.assertEqual(event_dict, expected)
|
<commit_before><commit_msg>Add tests for standard GELFHandler<commit_after># -*- coding: utf-8 -*-
# Copyright (c) 2015 Simon Jagoe
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the 3-clause BSD license. See the LICENSE.txt file for details.
from __future__ import absolute_import
import json
import logging
import os
import unittest
import zlib
from structlog import wrap_logger
from mock import Mock
from ..encoder import GELFEncoder
from ..handler import GELFHandler
class TestingGELFHandler(GELFHandler):
def __init__(self, mock, *args, **kwargs):
super(TestingGELFHandler, self).__init__(*args, **kwargs)
self._mock = mock
def send(self, s):
self._mock(s)
class TestGELFHandler(unittest.TestCase):
def test(self):
# Given
std_logger = logging.getLogger(__name__)
collector = Mock()
handler = TestingGELFHandler(collector, 'localhost')
std_logger.addHandler(handler)
std_logger.setLevel(logging.DEBUG)
logger = wrap_logger(
std_logger, processors=[GELFEncoder(fqdn=False, localname='host')])
expected = {
'version': '1.1',
'host': 'host',
'level': 4, # syslog WARNING
'short_message': 'event',
'_pid': os.getpid(),
'_level_name': 'WARNING',
'_logger': std_logger.name,
}
# When
logger.warning('event')
# Then
self.assertEqual(collector.call_count, 1)
args, kwargs = collector.call_args
self.assertEqual(len(args), 1)
self.assertEqual(len(kwargs), 0)
event_json = zlib.decompress(args[0])
event_dict = json.loads(event_json.decode('utf-8'))
self.assertEqual(event_dict, expected)
|
|
bd89051aa27bdd5dfb6667978f8245d0c76fa928
|
s4v3.py
|
s4v3.py
|
from s4v2 import *
import openpyxl
from openpyxl import Workbook
from openpyxl.writer.excel import ExcelWriter
from openpyxl.cell import get_column_letter
def save_spreadsheet(filename, data_sample):
wb = Workbook() # shortcut for typing Workbook function
ws = wb.active # shortcut for typing active workbook function and also, for finding the sheet in the workbook that we're working on, the active one.
row_index = 1 # set the row index to 1, the starting point for excel, i.e. the upper left-hand corner
for rows in data_sample: # iterate through the rows in the spreadsheet
col_index = 1 # set the col index to 1 (starting point for excel, i.e. the upper left-hand corner)
for field in rows:
col_letter = get_column_letter(col_index) # use the imported get column letter function to get the letter of the column that we're working in.
            ws.cell('{}{}'.format(col_letter, row_index)).value = field # write this field of the data sample into the corresponding cell of the new workbook
col_index += 1 # increase column index
row_index += 1 # increase row index
wb.save(filename)
kiton_ties = filter_col_by_string(data_from_csv, "brandName", "Kiton")
save_spreadsheet("_data/s4-kiton.xlsx", kiton_ties)
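The same helpers could be pointed at another filtered sample; the brand value and output path below are illustrative, not part of the original commit:
brioni_ties = filter_col_by_string(data_from_csv, "brandName", "Brioni")
save_spreadsheet("_data/s4-brioni.xlsx", brioni_ties)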
|
Create function to save data to Microsoft Excel file
|
Create function to save data to Microsoft Excel file
|
Python
|
mit
|
alexmilesyounger/ds_basics
|
Create function to save data to Microsoft Excel file
|
from s4v2 import *
import openpyxl
from openpyxl import Workbook
from openpyxl.writer.excel import ExcelWriter
from openpyxl.cell import get_column_letter
def save_spreadsheet(filename, data_sample):
wb = Workbook() # shortcut for typing Workbook function
ws = wb.active # shortcut for typing active workbook function and also, for finding the sheet in the workbook that we're working on, the active one.
row_index = 1 # set the row index to 1, the starting point for excel, i.e. the upper left-hand corner
for rows in data_sample: # iterate through the rows in the spreadsheet
col_index = 1 # set the col index to 1 (starting point for excel, i.e. the upper left-hand corner)
for field in rows:
col_letter = get_column_letter(col_index) # use the imported get column letter function to get the letter of the column that we're working in.
            ws.cell('{}{}'.format(col_letter, row_index)).value = field # write this field of the data sample into the corresponding cell of the new workbook
col_index += 1 # increase column index
row_index += 1 # increase row index
wb.save(filename)
kiton_ties = filter_col_by_string(data_from_csv, "brandName", "Kiton")
save_spreadsheet("_data/s4-kiton.xlsx", kiton_ties)
|
<commit_before><commit_msg>Create function to save data to Microsoft Excel file<commit_after>
|
from s4v2 import *
import openpyxl
from openpyxl import Workbook
from openpyxl.writer.excel import ExcelWriter
from openpyxl.cell import get_column_letter
def save_spreadsheet(filename, data_sample):
wb = Workbook() # shortcut for typing Workbook function
ws = wb.active # shortcut for typing active workbook function and also, for finding the sheet in the workbook that we're working on, the active one.
row_index = 1 # set the row index to 1, the starting point for excel, i.e. the upper left-hand corner
for rows in data_sample: # iterate through the rows in the spreadsheet
col_index = 1 # set the col index to 1 (starting point for excel, i.e. the upper left-hand corner)
for field in rows:
col_letter = get_column_letter(col_index) # use the imported get column letter function to get the letter of the column that we're working in.
            ws.cell('{}{}'.format(col_letter, row_index)).value = field # write this field of the data sample into the corresponding cell of the new workbook
col_index += 1 # increase column index
row_index += 1 # increase row index
wb.save(filename)
kiton_ties = filter_col_by_string(data_from_csv, "brandName", "Kiton")
save_spreadsheet("_data/s4-kiton.xlsx", kiton_ties)
|
Create function to save data to Microsoft Excel filefrom s4v2 import *
import openpyxl
from openpyxl import Workbook
from openpyxl.writer.excel import ExcelWriter
from openpyxl.cell import get_column_letter
def save_spreadsheet(filename, data_sample):
wb = Workbook() # shortcut for typing Workbook function
ws = wb.active # shortcut for typing active workbook function and also, for finding the sheet in the workbook that we're working on, the active one.
row_index = 1 # set the row index to 1, the starting point for excel, i.e. the upper left-hand corner
for rows in data_sample: # iterate through the rows in the spreadsheet
col_index = 1 # set the col index to 1 (starting point for excel, i.e. the upper left-hand corner)
for field in rows:
col_letter = get_column_letter(col_index) # use the imported get column letter function to get the letter of the column that we're working in.
            ws.cell('{}{}'.format(col_letter, row_index)).value = field # write this field of the data sample into the corresponding cell of the new workbook
col_index += 1 # increase column index
row_index += 1 # increase row index
wb.save(filename)
kiton_ties = filter_col_by_string(data_from_csv, "brandName", "Kiton")
save_spreadsheet("_data/s4-kiton.xlsx", kiton_ties)
|
<commit_before><commit_msg>Create function to save data to Microsoft Excel file<commit_after>from s4v2 import *
import openpyxl
from openpyxl import Workbook
from openpyxl.writer.excel import ExcelWriter
from openpyxl.cell import get_column_letter
def save_spreadsheet(filename, data_sample):
wb = Workbook() # shortcut for typing Workbook function
ws = wb.active # shortcut for typing active workbook function and also, for finding the sheet in the workbook that we're working on, the active one.
row_index = 1 # set the row index to 1, the starting point for excel, i.e. the upper left-hand corner
for rows in data_sample: # iterate through the rows in the spreadsheet
col_index = 1 # set the col index to 1 (starting point for excel, i.e. the upper left-hand corner)
for field in rows:
col_letter = get_column_letter(col_index) # use the imported get column letter function to get the letter of the column that we're working in.
            ws.cell('{}{}'.format(col_letter, row_index)).value = field # write this field of the data sample into the corresponding cell of the new workbook
col_index += 1 # increase column index
row_index += 1 # increase row index
wb.save(filename)
kiton_ties = filter_col_by_string(data_from_csv, "brandName", "Kiton")
save_spreadsheet("_data/s4-kiton.xlsx", kiton_ties)
|
|
a929cb4981cde475905b422d70f37e6647875c17
|
is_json.py
|
is_json.py
|
#!/usr/bin/env python
"""A function and script for determining whether a file contains valid JSON."""
import json
def is_json(json_file):
"""Returns True if a file is valid JSON."""
try:
with open(json_file, 'r') as fp:
json.load(fp)
except ValueError:
return False
except IOError as e:
print e
return False
else:
return True
def main():
"""Handle arguments to the `is_json` console script."""
import argparse
parser = argparse.ArgumentParser(
description="Checks whether a file contains valid JSON.")
parser.add_argument("json_file",
help="A file containing JSON")
parser.add_argument('--version', action='version',
version='is_json 0.1')
args = parser.parse_args()
r = is_json(args.json_file)
print '{} : {}'.format(r, args.json_file)
if __name__ == '__main__':
main()
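Besides the console script, the helper can be imported directly; the file name here is illustrative:
from is_json import is_json
print(is_json('config.json'))  # True if the file parses as JSON, False otherwise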
|
Add simple Python JSON validator
|
Add simple Python JSON validator
|
Python
|
mit
|
mdpiper/wunderkammer,mdpiper/wunderkammer,mdpiper/wunderkammer,mdpiper/wunderkammer
|
Add simple Python JSON validator
|
#!/usr/bin/env python
"""A function and script for determining whether a file contains valid JSON."""
import json
def is_json(json_file):
"""Returns True if a file is valid JSON."""
try:
with open(json_file, 'r') as fp:
json.load(fp)
except ValueError:
return False
except IOError as e:
print e
return False
else:
return True
def main():
"""Handle arguments to the `is_json` console script."""
import argparse
parser = argparse.ArgumentParser(
description="Checks whether a file contains valid JSON.")
parser.add_argument("json_file",
help="A file containing JSON")
parser.add_argument('--version', action='version',
version='is_json 0.1')
args = parser.parse_args()
r = is_json(args.json_file)
print '{} : {}'.format(r, args.json_file)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add simple Python JSON validator<commit_after>
|
#!/usr/bin/env python
"""A function and script for determining whether a file contains valid JSON."""
import json
def is_json(json_file):
"""Returns True if a file is valid JSON."""
try:
with open(json_file, 'r') as fp:
json.load(fp)
except ValueError:
return False
except IOError as e:
print e
return False
else:
return True
def main():
"""Handle arguments to the `is_json` console script."""
import argparse
parser = argparse.ArgumentParser(
description="Checks whether a file contains valid JSON.")
parser.add_argument("json_file",
help="A file containing JSON")
parser.add_argument('--version', action='version',
version='is_json 0.1')
args = parser.parse_args()
r = is_json(args.json_file)
print '{} : {}'.format(r, args.json_file)
if __name__ == '__main__':
main()
|
Add simple Python JSON validator#!/usr/bin/env python
"""A function and script for determining whether a file contains valid JSON."""
import json
def is_json(json_file):
"""Returns True if a file is valid JSON."""
try:
with open(json_file, 'r') as fp:
json.load(fp)
except ValueError:
return False
except IOError as e:
print e
return False
else:
return True
def main():
"""Handle arguments to the `is_json` console script."""
import argparse
parser = argparse.ArgumentParser(
description="Checks whether a file contains valid JSON.")
parser.add_argument("json_file",
help="A file containing JSON")
parser.add_argument('--version', action='version',
version='is_json 0.1')
args = parser.parse_args()
r = is_json(args.json_file)
print '{} : {}'.format(r, args.json_file)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add simple Python JSON validator<commit_after>#!/usr/bin/env python
"""A function and script for determining whether a file contains valid JSON."""
import json
def is_json(json_file):
"""Returns True if a file is valid JSON."""
try:
with open(json_file, 'r') as fp:
json.load(fp)
except ValueError:
return False
except IOError as e:
print e
return False
else:
return True
def main():
"""Handle arguments to the `is_json` console script."""
import argparse
parser = argparse.ArgumentParser(
description="Checks whether a file contains valid JSON.")
parser.add_argument("json_file",
help="A file containing JSON")
parser.add_argument('--version', action='version',
version='is_json 0.1')
args = parser.parse_args()
r = is_json(args.json_file)
print '{} : {}'.format(r, args.json_file)
if __name__ == '__main__':
main()
|
|
c11eab6c1b9b707510b32ee54d684720f9f397ad
|
choosealicense/test/test_generate.py
|
choosealicense/test/test_generate.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Tests for the `license generate` function
"""
from click.testing import CliRunner
from choosealicense.main import (generate, LICENSE_WITH_CONTEXT,
get_default_context)
def test_generate_license():
all_the_licenses = ('agpl-3.0, apache-2.0, artistic-2.0, bsd-2-clause, '
'bsd-3-clause, cc0-1.0, epl-1.0, gpl-2.0, gpl-3.0, '
'isc, lgpl-2.1, lgpl-3.0, mit, mpl-2.0, unlicense')
runner = CliRunner()
for license in all_the_licenses.split(', '):
result = runner.invoke(generate, [license])
output, exit_code = result.output, result.exit_code
assert exit_code == 0
if license not in LICENSE_WITH_CONTEXT:
pass
else:
defaults = get_default_context()
if license in ['mit', 'artistic-2.0', 'bsd-2-clause']:
assert defaults['fullname'] in output
assert defaults['year'] in output
if license == 'isc':
assert defaults['fullname'] in output
assert defaults['year'] in output
assert defaults['email'] in output
if license == 'bsd-3-clause':
assert defaults['fullname'] in output
assert defaults['year'] in output
assert defaults['project'] in output
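A smaller companion check in the same style, not part of the original commit, spot-checking a single license for a phrase the MIT text is known to contain:
def test_generate_mit_smoke():
    result = CliRunner().invoke(generate, ['mit'])
    assert result.exit_code == 0
    assert 'Permission is hereby granted' in result.output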
|
Add test for `license generate` function
|
Add test for `license generate` function
|
Python
|
mit
|
lord63/choosealicense-cli
|
Add test for `license generate` function
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Tests for the `license generate` function
"""
from click.testing import CliRunner
from choosealicense.main import (generate, LICENSE_WITH_CONTEXT,
get_default_context)
def test_generate_license():
all_the_licenses = ('agpl-3.0, apache-2.0, artistic-2.0, bsd-2-clause, '
'bsd-3-clause, cc0-1.0, epl-1.0, gpl-2.0, gpl-3.0, '
'isc, lgpl-2.1, lgpl-3.0, mit, mpl-2.0, unlicense')
runner = CliRunner()
for license in all_the_licenses.split(', '):
result = runner.invoke(generate, [license])
output, exit_code = result.output, result.exit_code
assert exit_code == 0
if license not in LICENSE_WITH_CONTEXT:
pass
else:
defaults = get_default_context()
if license in ['mit', 'artistic-2.0', 'bsd-2-clause']:
assert defaults['fullname'] in output
assert defaults['year'] in output
if license == 'isc':
assert defaults['fullname'] in output
assert defaults['year'] in output
assert defaults['email'] in output
if license == 'bsd-3-clause':
assert defaults['fullname'] in output
assert defaults['year'] in output
assert defaults['project'] in output
|
<commit_before><commit_msg>Add test for `license generate` function<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Tests for the `license generate` function
"""
from click.testing import CliRunner
from choosealicense.main import (generate, LICENSE_WITH_CONTEXT,
get_default_context)
def test_generate_license():
all_the_licenses = ('agpl-3.0, apache-2.0, artistic-2.0, bsd-2-clause, '
'bsd-3-clause, cc0-1.0, epl-1.0, gpl-2.0, gpl-3.0, '
'isc, lgpl-2.1, lgpl-3.0, mit, mpl-2.0, unlicense')
runner = CliRunner()
for license in all_the_licenses.split(', '):
result = runner.invoke(generate, [license])
output, exit_code = result.output, result.exit_code
assert exit_code == 0
if license not in LICENSE_WITH_CONTEXT:
pass
else:
defaults = get_default_context()
if license in ['mit', 'artistic-2.0', 'bsd-2-clause']:
assert defaults['fullname'] in output
assert defaults['year'] in output
if license == 'isc':
assert defaults['fullname'] in output
assert defaults['year'] in output
assert defaults['email'] in output
if license == 'bsd-3-clause':
assert defaults['fullname'] in output
assert defaults['year'] in output
assert defaults['project'] in output
|
Add test for `license generate` function#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Tests for the `license generate` function
"""
from click.testing import CliRunner
from choosealicense.main import (generate, LICENSE_WITH_CONTEXT,
get_default_context)
def test_generate_license():
all_the_licenses = ('agpl-3.0, apache-2.0, artistic-2.0, bsd-2-clause, '
'bsd-3-clause, cc0-1.0, epl-1.0, gpl-2.0, gpl-3.0, '
'isc, lgpl-2.1, lgpl-3.0, mit, mpl-2.0, unlicense')
runner = CliRunner()
for license in all_the_licenses.split(', '):
result = runner.invoke(generate, [license])
output, exit_code = result.output, result.exit_code
assert exit_code == 0
if license not in LICENSE_WITH_CONTEXT:
pass
else:
defaults = get_default_context()
if license in ['mit', 'artistic-2.0', 'bsd-2-clause']:
assert defaults['fullname'] in output
assert defaults['year'] in output
if license == 'isc':
assert defaults['fullname'] in output
assert defaults['year'] in output
assert defaults['email'] in output
if license == 'bsd-3-clause':
assert defaults['fullname'] in output
assert defaults['year'] in output
assert defaults['project'] in output
|
<commit_before><commit_msg>Add test for `license generate` function<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Tests for the `license generate` function
"""
from click.testing import CliRunner
from choosealicense.main import (generate, LICENSE_WITH_CONTEXT,
get_default_context)
def test_generate_license():
all_the_licenses = ('agpl-3.0, apache-2.0, artistic-2.0, bsd-2-clause, '
'bsd-3-clause, cc0-1.0, epl-1.0, gpl-2.0, gpl-3.0, '
'isc, lgpl-2.1, lgpl-3.0, mit, mpl-2.0, unlicense')
runner = CliRunner()
for license in all_the_licenses.split(', '):
result = runner.invoke(generate, [license])
output, exit_code = result.output, result.exit_code
assert exit_code == 0
if license not in LICENSE_WITH_CONTEXT:
pass
else:
defaults = get_default_context()
if license in ['mit', 'artistic-2.0', 'bsd-2-clause']:
assert defaults['fullname'] in output
assert defaults['year'] in output
if license == 'isc':
assert defaults['fullname'] in output
assert defaults['year'] in output
assert defaults['email'] in output
if license == 'bsd-3-clause':
assert defaults['fullname'] in output
assert defaults['year'] in output
assert defaults['project'] in output
|
|
8c972ebd2a93a9516230185118941995a921b1c6
|
server/app/handlers.py
|
server/app/handlers.py
|
import os
import aiohttp
import json
from aiohttp import web
from .consts import KUDAGO_API_BASE_URL, CLIENT_DIR
async def serve_api(request):
url = '{}/{}/?{}'.format(
KUDAGO_API_BASE_URL,
request.match_info['path'],
request.query_string,
)
response = await aiohttp.get(url)
body = await response.json()
if isinstance(body, dict):
for field in ('next', 'previous'):
value = body.get(field)
if value:
body[field] = value.replace(KUDAGO_API_BASE_URL, '/api')
return web.Response(body=json.dumps(body).encode('utf-8'))
async def serve_client(request):
filepath = os.path.join(CLIENT_DIR, 'index.html')
stat = os.stat(filepath)
chunk_size = 256 * 1024
response = web.StreamResponse()
response.content_type = 'text/html'
response.last_modified = stat.st_mtime
response.content_length = stat.st_size
response.start(request)
with open(filepath, 'rb') as f:
chunk = f.read(chunk_size)
while chunk:
response.write(chunk)
chunk = f.read(chunk_size)
return response
|
import os
import aiohttp
import json
from aiohttp import web
from .consts import KUDAGO_API_BASE_URL, CLIENT_DIR
async def serve_api(request):
url = '{}/{}/?{}'.format(
KUDAGO_API_BASE_URL,
request.match_info['path'],
request.query_string,
)
response = await aiohttp.get(url)
body = await response.json()
if isinstance(body, dict):
for field in ('next', 'previous'):
value = body.get(field)
if value:
body[field] = value.replace(KUDAGO_API_BASE_URL, '/api')
return web.Response(
text=json.dumps(body),
content_type='application/json'
)
async def serve_client(request):
filepath = os.path.join(CLIENT_DIR, 'index.html')
stat = os.stat(filepath)
chunk_size = 256 * 1024
response = web.StreamResponse()
response.content_type = 'text/html'
response.last_modified = stat.st_mtime
response.content_length = stat.st_size
response.start(request)
with open(filepath, 'rb') as f:
chunk = f.read(chunk_size)
while chunk:
response.write(chunk)
chunk = f.read(chunk_size)
return response
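A rough sketch of mounting the two handlers in an aiohttp application; the route patterns are illustrative and not part of the original commit:
from aiohttp import web
app = web.Application()
app.router.add_route('GET', '/api/{path:.*}', serve_api)
app.router.add_route('GET', '/', serve_client)
# web.run_app(app)  # or hand the app to whatever runner the project already uses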
|
Add content type to API responses
|
Add content type to API responses
It doesn’t make much of a difference, but it’s the nice thing to do
|
Python
|
mit
|
despawnerer/theatrics,despawnerer/theatrics,despawnerer/theatrics
|
import os
import aiohttp
import json
from aiohttp import web
from .consts import KUDAGO_API_BASE_URL, CLIENT_DIR
async def serve_api(request):
url = '{}/{}/?{}'.format(
KUDAGO_API_BASE_URL,
request.match_info['path'],
request.query_string,
)
response = await aiohttp.get(url)
body = await response.json()
if isinstance(body, dict):
for field in ('next', 'previous'):
value = body.get(field)
if value:
body[field] = value.replace(KUDAGO_API_BASE_URL, '/api')
return web.Response(body=json.dumps(body).encode('utf-8'))
async def serve_client(request):
filepath = os.path.join(CLIENT_DIR, 'index.html')
stat = os.stat(filepath)
chunk_size = 256 * 1024
response = web.StreamResponse()
response.content_type = 'text/html'
response.last_modified = stat.st_mtime
response.content_length = stat.st_size
response.start(request)
with open(filepath, 'rb') as f:
chunk = f.read(chunk_size)
while chunk:
response.write(chunk)
chunk = f.read(chunk_size)
return response
Add content type to API responses
It doesn’t make much of a difference, but it’s the nice thing to do
|
import os
import aiohttp
import json
from aiohttp import web
from .consts import KUDAGO_API_BASE_URL, CLIENT_DIR
async def serve_api(request):
url = '{}/{}/?{}'.format(
KUDAGO_API_BASE_URL,
request.match_info['path'],
request.query_string,
)
response = await aiohttp.get(url)
body = await response.json()
if isinstance(body, dict):
for field in ('next', 'previous'):
value = body.get(field)
if value:
body[field] = value.replace(KUDAGO_API_BASE_URL, '/api')
return web.Response(
text=json.dumps(body),
content_type='application/json'
)
async def serve_client(request):
filepath = os.path.join(CLIENT_DIR, 'index.html')
stat = os.stat(filepath)
chunk_size = 256 * 1024
response = web.StreamResponse()
response.content_type = 'text/html'
response.last_modified = stat.st_mtime
response.content_length = stat.st_size
response.start(request)
with open(filepath, 'rb') as f:
chunk = f.read(chunk_size)
while chunk:
response.write(chunk)
chunk = f.read(chunk_size)
return response
|
<commit_before>import os
import aiohttp
import json
from aiohttp import web
from .consts import KUDAGO_API_BASE_URL, CLIENT_DIR
async def serve_api(request):
url = '{}/{}/?{}'.format(
KUDAGO_API_BASE_URL,
request.match_info['path'],
request.query_string,
)
response = await aiohttp.get(url)
body = await response.json()
if isinstance(body, dict):
for field in ('next', 'previous'):
value = body.get(field)
if value:
body[field] = value.replace(KUDAGO_API_BASE_URL, '/api')
return web.Response(body=json.dumps(body).encode('utf-8'))
async def serve_client(request):
filepath = os.path.join(CLIENT_DIR, 'index.html')
stat = os.stat(filepath)
chunk_size = 256 * 1024
response = web.StreamResponse()
response.content_type = 'text/html'
response.last_modified = stat.st_mtime
response.content_length = stat.st_size
response.start(request)
with open(filepath, 'rb') as f:
chunk = f.read(chunk_size)
while chunk:
response.write(chunk)
chunk = f.read(chunk_size)
return response
<commit_msg>Add content type to API responses
It doesn’t make much of a difference, but it’s the nice thing to do<commit_after>
|
import os
import aiohttp
import json
from aiohttp import web
from .consts import KUDAGO_API_BASE_URL, CLIENT_DIR
async def serve_api(request):
url = '{}/{}/?{}'.format(
KUDAGO_API_BASE_URL,
request.match_info['path'],
request.query_string,
)
response = await aiohttp.get(url)
body = await response.json()
if isinstance(body, dict):
for field in ('next', 'previous'):
value = body.get(field)
if value:
body[field] = value.replace(KUDAGO_API_BASE_URL, '/api')
return web.Response(
text=json.dumps(body),
content_type='application/json'
)
async def serve_client(request):
filepath = os.path.join(CLIENT_DIR, 'index.html')
stat = os.stat(filepath)
chunk_size = 256 * 1024
response = web.StreamResponse()
response.content_type = 'text/html'
response.last_modified = stat.st_mtime
response.content_length = stat.st_size
response.start(request)
with open(filepath, 'rb') as f:
chunk = f.read(chunk_size)
while chunk:
response.write(chunk)
chunk = f.read(chunk_size)
return response
|
import os
import aiohttp
import json
from aiohttp import web
from .consts import KUDAGO_API_BASE_URL, CLIENT_DIR
async def serve_api(request):
url = '{}/{}/?{}'.format(
KUDAGO_API_BASE_URL,
request.match_info['path'],
request.query_string,
)
response = await aiohttp.get(url)
body = await response.json()
if isinstance(body, dict):
for field in ('next', 'previous'):
value = body.get(field)
if value:
body[field] = value.replace(KUDAGO_API_BASE_URL, '/api')
return web.Response(body=json.dumps(body).encode('utf-8'))
async def serve_client(request):
filepath = os.path.join(CLIENT_DIR, 'index.html')
stat = os.stat(filepath)
chunk_size = 256 * 1024
response = web.StreamResponse()
response.content_type = 'text/html'
response.last_modified = stat.st_mtime
response.content_length = stat.st_size
response.start(request)
with open(filepath, 'rb') as f:
chunk = f.read(chunk_size)
while chunk:
response.write(chunk)
chunk = f.read(chunk_size)
return response
Add content type to API responses
It doesn’t make much of a difference, but it’s the nice thing to doimport os
import aiohttp
import json
from aiohttp import web
from .consts import KUDAGO_API_BASE_URL, CLIENT_DIR
async def serve_api(request):
url = '{}/{}/?{}'.format(
KUDAGO_API_BASE_URL,
request.match_info['path'],
request.query_string,
)
response = await aiohttp.get(url)
body = await response.json()
if isinstance(body, dict):
for field in ('next', 'previous'):
value = body.get(field)
if value:
body[field] = value.replace(KUDAGO_API_BASE_URL, '/api')
return web.Response(
text=json.dumps(body),
content_type='application/json'
)
async def serve_client(request):
filepath = os.path.join(CLIENT_DIR, 'index.html')
stat = os.stat(filepath)
chunk_size = 256 * 1024
response = web.StreamResponse()
response.content_type = 'text/html'
response.last_modified = stat.st_mtime
response.content_length = stat.st_size
response.start(request)
with open(filepath, 'rb') as f:
chunk = f.read(chunk_size)
while chunk:
response.write(chunk)
chunk = f.read(chunk_size)
return response
|
<commit_before>import os
import aiohttp
import json
from aiohttp import web
from .consts import KUDAGO_API_BASE_URL, CLIENT_DIR
async def serve_api(request):
url = '{}/{}/?{}'.format(
KUDAGO_API_BASE_URL,
request.match_info['path'],
request.query_string,
)
response = await aiohttp.get(url)
body = await response.json()
if isinstance(body, dict):
for field in ('next', 'previous'):
value = body.get(field)
if value:
body[field] = value.replace(KUDAGO_API_BASE_URL, '/api')
return web.Response(body=json.dumps(body).encode('utf-8'))
async def serve_client(request):
filepath = os.path.join(CLIENT_DIR, 'index.html')
stat = os.stat(filepath)
chunk_size = 256 * 1024
response = web.StreamResponse()
response.content_type = 'text/html'
response.last_modified = stat.st_mtime
response.content_length = stat.st_size
response.start(request)
with open(filepath, 'rb') as f:
chunk = f.read(chunk_size)
while chunk:
response.write(chunk)
chunk = f.read(chunk_size)
return response
<commit_msg>Add content type to API responses
It doesn’t make much of a difference, but it’s the nice thing to do<commit_after>import os
import aiohttp
import json
from aiohttp import web
from .consts import KUDAGO_API_BASE_URL, CLIENT_DIR
async def serve_api(request):
url = '{}/{}/?{}'.format(
KUDAGO_API_BASE_URL,
request.match_info['path'],
request.query_string,
)
response = await aiohttp.get(url)
body = await response.json()
if isinstance(body, dict):
for field in ('next', 'previous'):
value = body.get(field)
if value:
body[field] = value.replace(KUDAGO_API_BASE_URL, '/api')
return web.Response(
text=json.dumps(body),
content_type='application/json'
)
async def serve_client(request):
filepath = os.path.join(CLIENT_DIR, 'index.html')
stat = os.stat(filepath)
chunk_size = 256 * 1024
response = web.StreamResponse()
response.content_type = 'text/html'
response.last_modified = stat.st_mtime
response.content_length = stat.st_size
response.start(request)
with open(filepath, 'rb') as f:
chunk = f.read(chunk_size)
while chunk:
response.write(chunk)
chunk = f.read(chunk_size)
return response
|
48c227c263abc046f1b293ebc5864c229154cde4
|
script/lib/config.py
|
script/lib/config.py
|
#!/usr/bin/env python
import platform
import sys
NODE_VERSION = 'v0.11.13'
BASE_URL = 'https://gh-contractor-zcbenz.s3.amazonaws.com/libchromiumcontent'
LIBCHROMIUMCONTENT_COMMIT = '197fe67fee1e4d867c76264065b2eb80b9dbd3a0'
ARCH = {
'cygwin': '32bit',
'darwin': '64bit',
'linux2': platform.architecture()[0],
'win32': '32bit',
}[sys.platform]
DIST_ARCH = {
'32bit': 'ia32',
'64bit': 'x64',
}[ARCH]
TARGET_PLATFORM = {
'cygwin': 'win32',
'darwin': 'darwin',
'linux2': 'linux',
'win32': 'win32',
}[sys.platform]
|
#!/usr/bin/env python
import platform
import sys
NODE_VERSION = 'v0.11.13'
BASE_URL = 'https://gh-contractor-zcbenz.s3.amazonaws.com/libchromiumcontent'
LIBCHROMIUMCONTENT_COMMIT = 'bb664e4665851fe923ce904e620ba43d8d010ba5'
ARCH = {
'cygwin': '32bit',
'darwin': '64bit',
'linux2': platform.architecture()[0],
'win32': '32bit',
}[sys.platform]
DIST_ARCH = {
'32bit': 'ia32',
'64bit': 'x64',
}[ARCH]
TARGET_PLATFORM = {
'cygwin': 'win32',
'darwin': 'darwin',
'linux2': 'linux',
'win32': 'win32',
}[sys.platform]
|
Upgrade libchromium for the accelerator fix.
|
Upgrade libchromium for the accelerator fix.
|
Python
|
mit
|
trigrass2/electron,Rokt33r/electron,sky7sea/electron,Faiz7412/electron,pandoraui/electron,Ivshti/electron,vHanda/electron,bright-sparks/electron,baiwyc119/electron,fireball-x/atom-shell,mattotodd/electron,tincan24/electron,dkfiresky/electron,voidbridge/electron,bright-sparks/electron,brenca/electron,ianscrivener/electron,simongregory/electron,simonfork/electron,kazupon/electron,egoist/electron,gabriel/electron,Neron-X5/electron,gerhardberger/electron,faizalpribadi/electron,wan-qy/electron,saronwei/electron,vaginessa/electron,Zagorakiss/electron,kcrt/electron,eric-seekas/electron,robinvandernoord/electron,tomashanacek/electron,Jonekee/electron,deed02392/electron,anko/electron,soulteary/electron,lrlna/electron,IonicaBizauKitchen/electron,chrisswk/electron,gabrielPeart/electron,soulteary/electron,stevekinney/electron,stevekinney/electron,dahal/electron,aaron-goshine/electron,webmechanicx/electron,fffej/electron,oiledCode/electron,benweissmann/electron,smczk/electron,the-ress/electron,kikong/electron,leethomas/electron,joaomoreno/atom-shell,howmuchcomputer/electron,kostia/electron,sircharleswatson/electron,lzpfmh/electron,destan/electron,aliib/electron,shiftkey/electron,setzer777/electron,lzpfmh/electron,LadyNaggaga/electron,arusakov/electron,fireball-x/atom-shell,gerhardberger/electron,DivyaKMenon/electron,joneit/electron,Jonekee/electron,IonicaBizauKitchen/electron,kcrt/electron,jtburke/electron,nicobot/electron,medixdev/electron,leolujuyi/electron,gbn972/electron,bbondy/electron,trankmichael/electron,cos2004/electron,benweissmann/electron,mirrh/electron,nicobot/electron,aichingm/electron,ianscrivener/electron,baiwyc119/electron,chriskdon/electron,Evercoder/electron,sky7sea/electron,lrlna/electron,jacksondc/electron,RIAEvangelist/electron,renaesop/electron,kokdemo/electron,coderhaoxin/electron,zhakui/electron,JesselJohn/electron,smczk/electron,IonicaBizauKitchen/electron,bpasero/electron,fffej/electron,voidbridge/electron,lzpfmh/electron,edulan/electron,subblue/electron,faizalpribadi/electron,aecca/electron,edulan/electron,soulteary/electron,miniak/electron,fabien-d/electron,mhkeller/electron,tonyganch/electron,rajatsingla28/electron,miniak/electron,lzpfmh/electron,nagyistoce/electron-atom-shell,yalexx/electron,John-Lin/electron,kazupon/electron,IonicaBizauKitchen/electron,gbn972/electron,jlord/electron,deed02392/electron,sshiting/electron,jjz/electron,saronwei/electron,minggo/electron,saronwei/electron,fffej/electron,nagyistoce/electron-atom-shell,bitemyapp/electron,chriskdon/electron,Gerhut/electron,abhishekgahlot/electron,MaxWhere/electron,egoist/electron,xfstudio/electron,tylergibson/electron,simonfork/electron,gstack/infinium-shell,MaxWhere/electron,takashi/electron,twolfson/electron,simonfork/electron,micalan/electron,beni55/electron,mhkeller/electron,mirrh/electron,gerhardberger/electron,maxogden/atom-shell,nagyistoce/electron-atom-shell,jhen0409/electron,anko/electron,Rokt33r/electron,seanchas116/electron,arturts/electron,Jacobichou/electron,wan-qy/electron,setzer777/electron,John-Lin/electron,Rokt33r/electron,gamedevsam/electron,deed02392/electron,jsutcodes/electron,bobwol/electron,beni55/electron,simongregory/electron,tonyganch/electron,GoooIce/electron,dongjoon-hyun/electron,rhencke/electron,eriser/electron,pirafrank/electron,destan/electron,stevekinney/electron,Ivshti/electron,webmechanicx/electron,thingsinjars/electron,pirafrank/electron,dongjoon-hyun/electron,MaxWhere/electron,minggo/electron,Andrey-Pavlov/electron,farmisen/electron,gerhardberger/electron,Jacobichou/electron,ank
o/electron,evgenyzinoviev/electron,simonfork/electron,dongjoon-hyun/electron,vaginessa/electron,kokdemo/electron,synaptek/electron,gamedevsam/electron,John-Lin/electron,greyhwndz/electron,medixdev/electron,natgolov/electron,kcrt/electron,jcblw/electron,tinydew4/electron,howmuchcomputer/electron,robinvandernoord/electron,shockone/electron,howmuchcomputer/electron,mhkeller/electron,MaxGraey/electron,mirrh/electron,thompsonemerson/electron,farmisen/electron,shiftkey/electron,kokdemo/electron,hokein/atom-shell,jhen0409/electron,kenmozi/electron,tomashanacek/electron,minggo/electron,leftstick/electron,digideskio/electron,kostia/electron,medixdev/electron,thingsinjars/electron,davazp/electron,leolujuyi/electron,felixrieseberg/electron,benweissmann/electron,greyhwndz/electron,xiruibing/electron,thomsonreuters/electron,adamjgray/electron,JussMee15/electron,gabriel/electron,fomojola/electron,preco21/electron,jiaz/electron,meowlab/electron,systembugtj/electron,subblue/electron,thingsinjars/electron,Floato/electron,jsutcodes/electron,darwin/electron,thomsonreuters/electron,vHanda/electron,natgolov/electron,electron/electron,Gerhut/electron,evgenyzinoviev/electron,bitemyapp/electron,electron/electron,bitemyapp/electron,LadyNaggaga/electron,simongregory/electron,dahal/electron,Gerhut/electron,nekuz0r/electron,Faiz7412/electron,bpasero/electron,posix4e/electron,Floato/electron,evgenyzinoviev/electron,mattotodd/electron,mjaniszew/electron,fireball-x/atom-shell,twolfson/electron,dahal/electron,jaanus/electron,zhakui/electron,neutrous/electron,bright-sparks/electron,wan-qy/electron,kostia/electron,tomashanacek/electron,Jacobichou/electron,eric-seekas/electron,joneit/electron,zhakui/electron,sshiting/electron,MaxGraey/electron,brenca/electron,Gerhut/electron,vaginessa/electron,icattlecoder/electron,jlord/electron,fffej/electron,wolfflow/electron,carsonmcdonald/electron,cqqccqc/electron,felixrieseberg/electron,gabriel/electron,Zagorakiss/electron,the-ress/electron,sshiting/electron,faizalpribadi/electron,deepak1556/atom-shell,noikiy/electron,deepak1556/atom-shell,sshiting/electron,jlhbaseball15/electron,smczk/electron,bwiggs/electron,jannishuebl/electron,adcentury/electron,anko/electron,kenmozi/electron,felixrieseberg/electron,davazp/electron,xfstudio/electron,simonfork/electron,kcrt/electron,yalexx/electron,ankitaggarwal011/electron,baiwyc119/electron,brave/muon,sky7sea/electron,benweissmann/electron,chriskdon/electron,twolfson/electron,Floato/electron,BionicClick/electron,mubassirhayat/electron,seanchas116/electron,timruffles/electron,carsonmcdonald/electron,joaomoreno/atom-shell,kokdemo/electron,gstack/infinium-shell,anko/electron,dkfiresky/electron,natgolov/electron,rajatsingla28/electron,evgenyzinoviev/electron,tylergibson/electron,astoilkov/electron,michaelchiche/electron,dkfiresky/electron,Ivshti/electron,thomsonreuters/electron,jsutcodes/electron,Zagorakiss/electron,setzer777/electron,howmuchcomputer/electron,astoilkov/electron,Ivshti/electron,pandoraui/electron,vaginessa/electron,leethomas/electron,the-ress/electron,renaesop/electron,Jonekee/electron,miniak/electron,hokein/atom-shell,kostia/electron,JussMee15/electron,edulan/electron,leolujuyi/electron,shaundunne/electron,thompsonemerson/electron,jhen0409/electron,the-ress/electron,webmechanicx/electron,jcblw/electron,stevemao/electron,mirrh/electron,lzpfmh/electron,leolujuyi/electron,synaptek/electron,tomashanacek/electron,preco21/electron,farmisen/electron,greyhwndz/electron,bruce/electron,bobwol/electron,soulteary/electron,adcentury/electron,dongj
oon-hyun/electron,Jonekee/electron,shaundunne/electron,mhkeller/electron,gerhardberger/electron,biblerule/UMCTelnetHub,jlord/electron,medixdev/electron,kenmozi/electron,mubassirhayat/electron,systembugtj/electron,ervinb/electron,shennushi/electron,abhishekgahlot/electron,robinvandernoord/electron,cqqccqc/electron,seanchas116/electron,jtburke/electron,DivyaKMenon/electron,chrisswk/electron,stevekinney/electron,trankmichael/electron,zhakui/electron,leethomas/electron,RobertJGabriel/electron,pirafrank/electron,natgolov/electron,DivyaKMenon/electron,miniak/electron,kikong/electron,gabrielPeart/electron,aichingm/electron,trigrass2/electron,gabriel/electron,timruffles/electron,wan-qy/electron,oiledCode/electron,DivyaKMenon/electron,coderhaoxin/electron,takashi/electron,aichingm/electron,deepak1556/atom-shell,aliib/electron,baiwyc119/electron,medixdev/electron,trigrass2/electron,MaxGraey/electron,greyhwndz/electron,micalan/electron,lrlna/electron,tylergibson/electron,thompsonemerson/electron,edulan/electron,gstack/infinium-shell,biblerule/UMCTelnetHub,gabriel/electron,tylergibson/electron,IonicaBizauKitchen/electron,bright-sparks/electron,egoist/electron,bruce/electron,michaelchiche/electron,Jacobichou/electron,BionicClick/electron,voidbridge/electron,shockone/electron,the-ress/electron,mjaniszew/electron,rajatsingla28/electron,aecca/electron,eriser/electron,JussMee15/electron,Evercoder/electron,ankitaggarwal011/electron,cos2004/electron,yan-foto/electron,bpasero/electron,jonatasfreitasv/electron,xfstudio/electron,gbn972/electron,xiruibing/electron,carsonmcdonald/electron,jlord/electron,digideskio/electron,aichingm/electron,preco21/electron,chrisswk/electron,maxogden/atom-shell,bpasero/electron,darwin/electron,ankitaggarwal011/electron,dongjoon-hyun/electron,vHanda/electron,Faiz7412/electron,kazupon/electron,fireball-x/atom-shell,matiasinsaurralde/electron,oiledCode/electron,thingsinjars/electron,hokein/atom-shell,leolujuyi/electron,beni55/electron,deed02392/electron,jonatasfreitasv/electron,setzer777/electron,simongregory/electron,iftekeriba/electron,neutrous/electron,mhkeller/electron,Gerhut/electron,SufianHassan/electron,fritx/electron,lrlna/electron,RobertJGabriel/electron,jacksondc/electron,fritx/electron,mhkeller/electron,jannishuebl/electron,brave/electron,Rokt33r/electron,mattdesl/electron,icattlecoder/electron,LadyNaggaga/electron,jaanus/electron,ervinb/electron,pandoraui/electron,mattdesl/electron,leftstick/electron,bitemyapp/electron,wolfflow/electron,eriser/electron,mrwizard82d1/electron,subblue/electron,kikong/electron,JesselJohn/electron,jhen0409/electron,brave/electron,matiasinsaurralde/electron,thomsonreuters/electron,systembugtj/electron,kcrt/electron,shaundunne/electron,pirafrank/electron,aichingm/electron,digideskio/electron,vaginessa/electron,sshiting/electron,soulteary/electron,JesselJohn/electron,jlord/electron,yalexx/electron,beni55/electron,faizalpribadi/electron,michaelchiche/electron,John-Lin/electron,bruce/electron,ianscrivener/electron,jjz/electron,kenmozi/electron,BionicClick/electron,rreimann/electron,astoilkov/electron,tomashanacek/electron,MaxWhere/electron,faizalpribadi/electron,d-salas/electron,aecca/electron,trigrass2/electron,smczk/electron,eric-seekas/electron,brenca/electron,fomojola/electron,MaxWhere/electron,renaesop/electron,rsvip/electron,smczk/electron,michaelchiche/electron,gstack/infinium-shell,medixdev/electron,Neron-X5/electron,Andrey-Pavlov/electron,brenca/electron,meowlab/electron,tincan24/electron,adcentury/electron,timruffles/electron,nagyistoce/e
lectron-atom-shell,sky7sea/electron,astoilkov/electron,RIAEvangelist/electron,arturts/electron,biblerule/UMCTelnetHub,gbn972/electron,bitemyapp/electron,arturts/electron,nicholasess/electron,gamedevsam/electron,DivyaKMenon/electron,SufianHassan/electron,fritx/electron,bobwol/electron,adamjgray/electron,JussMee15/electron,fabien-d/electron,tomashanacek/electron,coderhaoxin/electron,Faiz7412/electron,thompsonemerson/electron,RIAEvangelist/electron,Andrey-Pavlov/electron,neutrous/electron,jlhbaseball15/electron,SufianHassan/electron,shennushi/electron,joneit/electron,roadev/electron,mirrh/electron,joneit/electron,takashi/electron,d-salas/electron,cos2004/electron,Andrey-Pavlov/electron,christian-bromann/electron,chriskdon/electron,takashi/electron,mrwizard82d1/electron,cqqccqc/electron,shockone/electron,webmechanicx/electron,John-Lin/electron,nicholasess/electron,wan-qy/electron,christian-bromann/electron,felixrieseberg/electron,jiaz/electron,mjaniszew/electron,JesselJohn/electron,arturts/electron,kcrt/electron,systembugtj/electron,cqqccqc/electron,dahal/electron,aaron-goshine/electron,aecca/electron,noikiy/electron,meowlab/electron,preco21/electron,neutrous/electron,matiasinsaurralde/electron,beni55/electron,nekuz0r/electron,benweissmann/electron,adamjgray/electron,bwiggs/electron,bpasero/electron,RIAEvangelist/electron,mjaniszew/electron,pombredanne/electron,pirafrank/electron,adcentury/electron,baiwyc119/electron,maxogden/atom-shell,posix4e/electron,wolfflow/electron,jcblw/electron,iftekeriba/electron,abhishekgahlot/electron,electron/electron,natgolov/electron,wolfflow/electron,arusakov/electron,biblerule/UMCTelnetHub,Gerhut/electron,renaesop/electron,Zagorakiss/electron,noikiy/electron,stevekinney/electron,kikong/electron,rprichard/electron,gabrielPeart/electron,gabrielPeart/electron,nekuz0r/electron,robinvandernoord/electron,synaptek/electron,gabriel/electron,eriser/electron,greyhwndz/electron,dkfiresky/electron,Floato/electron,jannishuebl/electron,etiktin/electron,jacksondc/electron,abhishekgahlot/electron,rprichard/electron,vipulroxx/electron,leethomas/electron,bpasero/electron,nicholasess/electron,aichingm/electron,destan/electron,matiasinsaurralde/electron,aliib/electron,neutrous/electron,chrisswk/electron,arusakov/electron,Rokt33r/electron,aaron-goshine/electron,Neron-X5/electron,rhencke/electron,trankmichael/electron,lrlna/electron,voidbridge/electron,xiruibing/electron,synaptek/electron,ankitaggarwal011/electron,tincan24/electron,subblue/electron,kokdemo/electron,MaxGraey/electron,rreimann/electron,noikiy/electron,mrwizard82d1/electron,yan-foto/electron,vipulroxx/electron,jiaz/electron,pirafrank/electron,ervinb/electron,jlhbaseball15/electron,pandoraui/electron,yalexx/electron,brave/electron,vHanda/electron,mubassirhayat/electron,dkfiresky/electron,sshiting/electron,mrwizard82d1/electron,robinvandernoord/electron,gerhardberger/electron,rsvip/electron,pandoraui/electron,roadev/electron,bruce/electron,tincan24/electron,jjz/electron,shockone/electron,vHanda/electron,coderhaoxin/electron,mattotodd/electron,IonicaBizauKitchen/electron,jtburke/electron,matiasinsaurralde/electron,beni55/electron,matiasinsaurralde/electron,subblue/electron,gerhardberger/electron,rhencke/electron,kostia/electron,pombredanne/electron,christian-bromann/electron,bbondy/electron,icattlecoder/electron,trankmichael/electron,GoooIce/electron,yan-foto/electron,yan-foto/electron,rajatsingla28/electron,sircharleswatson/electron,etiktin/electron,rprichard/electron,deed02392/electron,voidbridge/electron,sircharleswatso
n/electron,felixrieseberg/electron,tonyganch/electron,JesselJohn/electron,vipulroxx/electron,webmechanicx/electron,thingsinjars/electron,eric-seekas/electron,Evercoder/electron,xiruibing/electron,Jacobichou/electron,bitemyapp/electron,jjz/electron,etiktin/electron,minggo/electron,fabien-d/electron,jsutcodes/electron,stevemao/electron,howmuchcomputer/electron,icattlecoder/electron,sky7sea/electron,deepak1556/atom-shell,shaundunne/electron,simonfork/electron,Andrey-Pavlov/electron,jcblw/electron,davazp/electron,nekuz0r/electron,jaanus/electron,LadyNaggaga/electron,robinvandernoord/electron,rreimann/electron,shaundunne/electron,mubassirhayat/electron,the-ress/electron,yalexx/electron,xiruibing/electron,christian-bromann/electron,deepak1556/atom-shell,fomojola/electron,tinydew4/electron,lrlna/electron,brenca/electron,chrisswk/electron,miniak/electron,leftstick/electron,iftekeriba/electron,joaomoreno/atom-shell,eriser/electron,aecca/electron,fireball-x/atom-shell,trankmichael/electron,ervinb/electron,Neron-X5/electron,roadev/electron,egoist/electron,kenmozi/electron,michaelchiche/electron,DivyaKMenon/electron,ankitaggarwal011/electron,mubassirhayat/electron,twolfson/electron,carsonmcdonald/electron,shennushi/electron,mrwizard82d1/electron,stevemao/electron,wolfflow/electron,fabien-d/electron,micalan/electron,electron/electron,mattdesl/electron,mjaniszew/electron,lzpfmh/electron,jhen0409/electron,GoooIce/electron,thomsonreuters/electron,subblue/electron,xfstudio/electron,jaanus/electron,arturts/electron,brave/muon,JussMee15/electron,micalan/electron,aaron-goshine/electron,RobertJGabriel/electron,adamjgray/electron,mjaniszew/electron,arusakov/electron,synaptek/electron,aliib/electron,RobertJGabriel/electron,sircharleswatson/electron,cqqccqc/electron,jannishuebl/electron,JesselJohn/electron,adamjgray/electron,preco21/electron,fabien-d/electron,darwin/electron,ervinb/electron,digideskio/electron,seanchas116/electron,joneit/electron,bwiggs/electron,shaundunne/electron,cqqccqc/electron,bwiggs/electron,arusakov/electron,chriskdon/electron,jaanus/electron,posix4e/electron,rhencke/electron,nicholasess/electron,maxogden/atom-shell,darwin/electron,shiftkey/electron,Faiz7412/electron,nagyistoce/electron-atom-shell,trigrass2/electron,d-salas/electron,jtburke/electron,nekuz0r/electron,jcblw/electron,dahal/electron,jiaz/electron,systembugtj/electron,rhencke/electron,kostia/electron,John-Lin/electron,gamedevsam/electron,Rokt33r/electron,iftekeriba/electron,BionicClick/electron,roadev/electron,seanchas116/electron,mirrh/electron,bwiggs/electron,edulan/electron,vaginessa/electron,Zagorakiss/electron,eriser/electron,fritx/electron,RIAEvangelist/electron,saronwei/electron,egoist/electron,gbn972/electron,GoooIce/electron,ianscrivener/electron,leethomas/electron,jacksondc/electron,gamedevsam/electron,natgolov/electron,Ivshti/electron,nicobot/electron,jlhbaseball15/electron,brave/muon,tylergibson/electron,tinydew4/electron,brenca/electron,kokdemo/electron,kazupon/electron,iftekeriba/electron,darwin/electron,tincan24/electron,eric-seekas/electron,rsvip/electron,shennushi/electron,nekuz0r/electron,roadev/electron,ervinb/electron,shockone/electron,oiledCode/electron,voidbridge/electron,tonyganch/electron,davazp/electron,christian-bromann/electron,jhen0409/electron,smczk/electron,bwiggs/electron,kenmozi/electron,pombredanne/electron,rajatsingla28/electron,jacksondc/electron,mattdesl/electron,jonatasfreitasv/electron,LadyNaggaga/electron,kazupon/electron,shennushi/electron,vipulroxx/electron,brave/electron,farmisen/electro
n,stevemao/electron,abhishekgahlot/electron,rreimann/electron,jsutcodes/electron,coderhaoxin/electron,saronwei/electron,vipulroxx/electron,GoooIce/electron,dongjoon-hyun/electron,etiktin/electron,posix4e/electron,yan-foto/electron,bobwol/electron,brave/muon,coderhaoxin/electron,jiaz/electron,adcentury/electron,fffej/electron,astoilkov/electron,tinydew4/electron,deed02392/electron,gabrielPeart/electron,Floato/electron,benweissmann/electron,iftekeriba/electron,brave/electron,noikiy/electron,setzer777/electron,d-salas/electron,meowlab/electron,takashi/electron,yan-foto/electron,jcblw/electron,Jacobichou/electron,simongregory/electron,davazp/electron,miniak/electron,electron/electron,electron/electron,xiruibing/electron,jlhbaseball15/electron,Evercoder/electron,bright-sparks/electron,soulteary/electron,joaomoreno/atom-shell,biblerule/UMCTelnetHub,leolujuyi/electron,shiftkey/electron,fomojola/electron,felixrieseberg/electron,wan-qy/electron,pombredanne/electron,pombredanne/electron,leftstick/electron,sircharleswatson/electron,aecca/electron,Andrey-Pavlov/electron,bobwol/electron,gbn972/electron,bruce/electron,thingsinjars/electron,shockone/electron,Neron-X5/electron,bbondy/electron,digideskio/electron,rsvip/electron,digideskio/electron,shiftkey/electron,thompsonemerson/electron,jjz/electron,joneit/electron,SufianHassan/electron,SufianHassan/electron,wolfflow/electron,aaron-goshine/electron,fritx/electron,electron/electron,icattlecoder/electron,rreimann/electron,SufianHassan/electron,fomojola/electron,Evercoder/electron,evgenyzinoviev/electron,BionicClick/electron,aliib/electron,tincan24/electron,maxogden/atom-shell,chriskdon/electron,twolfson/electron,zhakui/electron,mattdesl/electron,Neron-X5/electron,michaelchiche/electron,jonatasfreitasv/electron,pombredanne/electron,baiwyc119/electron,mrwizard82d1/electron,biblerule/UMCTelnetHub,tylergibson/electron,trankmichael/electron,bpasero/electron,jonatasfreitasv/electron,thompsonemerson/electron,preco21/electron,rsvip/electron,davazp/electron,bbondy/electron,greyhwndz/electron,bobwol/electron,vipulroxx/electron,neutrous/electron,mattdesl/electron,bright-sparks/electron,posix4e/electron,vHanda/electron,renaesop/electron,seanchas116/electron,pandoraui/electron,BionicClick/electron,farmisen/electron,zhakui/electron,RIAEvangelist/electron,dahal/electron,kazupon/electron,posix4e/electron,fffej/electron,setzer777/electron,christian-bromann/electron,jaanus/electron,joaomoreno/atom-shell,nicobot/electron,noikiy/electron,GoooIce/electron,hokein/atom-shell,carsonmcdonald/electron,RobertJGabriel/electron,xfstudio/electron,icattlecoder/electron,carsonmcdonald/electron,synaptek/electron,thomsonreuters/electron,leftstick/electron,webmechanicx/electron,jtburke/electron,oiledCode/electron,minggo/electron,d-salas/electron,nicobot/electron,destan/electron,aliib/electron,LadyNaggaga/electron,etiktin/electron,brave/electron,rprichard/electron,cos2004/electron,stevekinney/electron,fritx/electron,bruce/electron,Zagorakiss/electron,nicholasess/electron,oiledCode/electron,rajatsingla28/electron,farmisen/electron,MaxGraey/electron,adamjgray/electron,MaxWhere/electron,Evercoder/electron,systembugtj/electron,gstack/infinium-shell,stevemao/electron,gamedevsam/electron,jjz/electron,bbondy/electron,aaron-goshine/electron,rhencke/electron,mattotodd/electron,ianscrivener/electron,jtburke/electron,nicholasess/electron,minggo/electron,brave/muon,RobertJGabriel/electron,trigrass2/electron,fomojola/electron,mattotodd/electron,faizalpribadi/electron,joaomoreno/atom-shell,howmuchcomputer
/electron,ianscrivener/electron,edulan/electron,shennushi/electron,kikong/electron,the-ress/electron,mattotodd/electron,xfstudio/electron,shiftkey/electron,renaesop/electron,stevemao/electron,nicobot/electron,jsutcodes/electron,sircharleswatson/electron,jlhbaseball15/electron,d-salas/electron,JussMee15/electron,twolfson/electron,jonatasfreitasv/electron,brave/muon,leftstick/electron,astoilkov/electron,cos2004/electron,jiaz/electron,sky7sea/electron,dkfiresky/electron,jacksondc/electron,jannishuebl/electron,eric-seekas/electron,abhishekgahlot/electron,gabrielPeart/electron,timruffles/electron,micalan/electron,Jonekee/electron,egoist/electron,Jonekee/electron,arusakov/electron,arturts/electron,Floato/electron,takashi/electron,cos2004/electron,destan/electron,adcentury/electron,yalexx/electron,hokein/atom-shell,bbondy/electron,tinydew4/electron,meowlab/electron,simongregory/electron,timruffles/electron,tinydew4/electron,saronwei/electron,tonyganch/electron,rreimann/electron,leethomas/electron,destan/electron,micalan/electron,anko/electron,evgenyzinoviev/electron,meowlab/electron,etiktin/electron,tonyganch/electron,jannishuebl/electron,roadev/electron,ankitaggarwal011/electron
|
#!/usr/bin/env python
import platform
import sys
NODE_VERSION = 'v0.11.13'
BASE_URL = 'https://gh-contractor-zcbenz.s3.amazonaws.com/libchromiumcontent'
LIBCHROMIUMCONTENT_COMMIT = '197fe67fee1e4d867c76264065b2eb80b9dbd3a0'
ARCH = {
'cygwin': '32bit',
'darwin': '64bit',
'linux2': platform.architecture()[0],
'win32': '32bit',
}[sys.platform]
DIST_ARCH = {
'32bit': 'ia32',
'64bit': 'x64',
}[ARCH]
TARGET_PLATFORM = {
'cygwin': 'win32',
'darwin': 'darwin',
'linux2': 'linux',
'win32': 'win32',
}[sys.platform]
Upgrade libchromium for the accelerator fix.
|
#!/usr/bin/env python
import platform
import sys
NODE_VERSION = 'v0.11.13'
BASE_URL = 'https://gh-contractor-zcbenz.s3.amazonaws.com/libchromiumcontent'
LIBCHROMIUMCONTENT_COMMIT = 'bb664e4665851fe923ce904e620ba43d8d010ba5'
ARCH = {
'cygwin': '32bit',
'darwin': '64bit',
'linux2': platform.architecture()[0],
'win32': '32bit',
}[sys.platform]
DIST_ARCH = {
'32bit': 'ia32',
'64bit': 'x64',
}[ARCH]
TARGET_PLATFORM = {
'cygwin': 'win32',
'darwin': 'darwin',
'linux2': 'linux',
'win32': 'win32',
}[sys.platform]
|
<commit_before>#!/usr/bin/env python
import platform
import sys
NODE_VERSION = 'v0.11.13'
BASE_URL = 'https://gh-contractor-zcbenz.s3.amazonaws.com/libchromiumcontent'
LIBCHROMIUMCONTENT_COMMIT = '197fe67fee1e4d867c76264065b2eb80b9dbd3a0'
ARCH = {
'cygwin': '32bit',
'darwin': '64bit',
'linux2': platform.architecture()[0],
'win32': '32bit',
}[sys.platform]
DIST_ARCH = {
'32bit': 'ia32',
'64bit': 'x64',
}[ARCH]
TARGET_PLATFORM = {
'cygwin': 'win32',
'darwin': 'darwin',
'linux2': 'linux',
'win32': 'win32',
}[sys.platform]
<commit_msg>Upgrade libchromium for the accelerator fix.<commit_after>
|
#!/usr/bin/env python
import platform
import sys
NODE_VERSION = 'v0.11.13'
BASE_URL = 'https://gh-contractor-zcbenz.s3.amazonaws.com/libchromiumcontent'
LIBCHROMIUMCONTENT_COMMIT = 'bb664e4665851fe923ce904e620ba43d8d010ba5'
ARCH = {
'cygwin': '32bit',
'darwin': '64bit',
'linux2': platform.architecture()[0],
'win32': '32bit',
}[sys.platform]
DIST_ARCH = {
'32bit': 'ia32',
'64bit': 'x64',
}[ARCH]
TARGET_PLATFORM = {
'cygwin': 'win32',
'darwin': 'darwin',
'linux2': 'linux',
'win32': 'win32',
}[sys.platform]
|
#!/usr/bin/env python
import platform
import sys
NODE_VERSION = 'v0.11.13'
BASE_URL = 'https://gh-contractor-zcbenz.s3.amazonaws.com/libchromiumcontent'
LIBCHROMIUMCONTENT_COMMIT = '197fe67fee1e4d867c76264065b2eb80b9dbd3a0'
ARCH = {
'cygwin': '32bit',
'darwin': '64bit',
'linux2': platform.architecture()[0],
'win32': '32bit',
}[sys.platform]
DIST_ARCH = {
'32bit': 'ia32',
'64bit': 'x64',
}[ARCH]
TARGET_PLATFORM = {
'cygwin': 'win32',
'darwin': 'darwin',
'linux2': 'linux',
'win32': 'win32',
}[sys.platform]
Upgrade libchromium for the accelerator fix.#!/usr/bin/env python
import platform
import sys
NODE_VERSION = 'v0.11.13'
BASE_URL = 'https://gh-contractor-zcbenz.s3.amazonaws.com/libchromiumcontent'
LIBCHROMIUMCONTENT_COMMIT = 'bb664e4665851fe923ce904e620ba43d8d010ba5'
ARCH = {
'cygwin': '32bit',
'darwin': '64bit',
'linux2': platform.architecture()[0],
'win32': '32bit',
}[sys.platform]
DIST_ARCH = {
'32bit': 'ia32',
'64bit': 'x64',
}[ARCH]
TARGET_PLATFORM = {
'cygwin': 'win32',
'darwin': 'darwin',
'linux2': 'linux',
'win32': 'win32',
}[sys.platform]
|
<commit_before>#!/usr/bin/env python
import platform
import sys
NODE_VERSION = 'v0.11.13'
BASE_URL = 'https://gh-contractor-zcbenz.s3.amazonaws.com/libchromiumcontent'
LIBCHROMIUMCONTENT_COMMIT = '197fe67fee1e4d867c76264065b2eb80b9dbd3a0'
ARCH = {
'cygwin': '32bit',
'darwin': '64bit',
'linux2': platform.architecture()[0],
'win32': '32bit',
}[sys.platform]
DIST_ARCH = {
'32bit': 'ia32',
'64bit': 'x64',
}[ARCH]
TARGET_PLATFORM = {
'cygwin': 'win32',
'darwin': 'darwin',
'linux2': 'linux',
'win32': 'win32',
}[sys.platform]
<commit_msg>Upgrade libchromium for the accelerator fix.<commit_after>#!/usr/bin/env python
import platform
import sys
NODE_VERSION = 'v0.11.13'
BASE_URL = 'https://gh-contractor-zcbenz.s3.amazonaws.com/libchromiumcontent'
LIBCHROMIUMCONTENT_COMMIT = 'bb664e4665851fe923ce904e620ba43d8d010ba5'
ARCH = {
'cygwin': '32bit',
'darwin': '64bit',
'linux2': platform.architecture()[0],
'win32': '32bit',
}[sys.platform]
DIST_ARCH = {
'32bit': 'ia32',
'64bit': 'x64',
}[ARCH]
TARGET_PLATFORM = {
'cygwin': 'win32',
'darwin': 'darwin',
'linux2': 'linux',
'win32': 'win32',
}[sys.platform]
|
3f4113fa5f58641af25077381e39ba3f4d74355a
|
weasyprint/stacking.py
|
weasyprint/stacking.py
|
# coding: utf8
"""
weasyprint.stacking
-------------------
:copyright: Copyright 2011-2012 Simon Sapin and contributors, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import division, unicode_literals
from .formatting_structure import boxes
def establishes_stacking_context(box):
return (
box.style.position != 'static' and box.style.z_index != 'auto'
) or (
box.style.opacity < 1
) or (
box.style.transform # empty list for 'transform: none'
)
class StackingContext(object):
def __init__(self, box):
self.negative_z_contexts = [] # 3: Child contexts, z-index < 0
self.block_boxes = [] # 4, 7: In flow, non positioned
self.float_boxes = [] # 5: Non positioned
self.zero_z_contexts = [] # 8: Child contexts, z-index = 0
self.positive_z_contexts = [] # 9: Child contexts, z-index > 0
self.box = self._dispatch_children(box)
self.z_index = box.style.z_index
if self.z_index == 'auto':
self.z_index = 0
def _dispatch_children(self, box):
if not isinstance(box, boxes.ParentBox):
return box
children = []
for child in box.children:
if establishes_stacking_context(child):
context = StackingContext(child)
if context.z_index < 0:
self.negative_z_contexts.append(context)
elif context.z_index == 0:
self.zero_z_contexts.append(context)
elif context.z_index > 0:
self.positive_z_contexts.append(context)
# Remove from children
else:
child = self._dispatch_children(child)
if child.style.position != 'static':
assert child.style.z_index == 'auto'
# sub-contexts are already removed
context = StackingContext(child)
self.zero_z_contexts.append(context)
elif child.is_floated():
self.float_boxes.append(child)
elif isinstance(child, boxes.BlockBox):
self.block_boxes.append(child)
children.append(child)
return box.copy_with_children(children)
|
Add a StackingContext in preparation for z-index drawing.
|
Add a StackingContext in preparation for z-index drawing.
|
Python
|
bsd-3-clause
|
andrewleech/WeasyPrint,andrewleech/WeasyPrint,Kozea/WeasyPrint,marclaporte/WeasyPrint,jasco/WeasyPrint,jasco/WeasyPrint,prepare/TestWeasyPrint,prepare/TestWeasyPrint,Kozea/WeasyPrint,marclaporte/WeasyPrint
|
Add a StackingContext in preparation for z-index drawing.
|
# coding: utf8
"""
weasyprint.stacking
-------------------
:copyright: Copyright 2011-2012 Simon Sapin and contributors, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import division, unicode_literals
from .formatting_structure import boxes
def establishes_stacking_context(box):
return (
box.style.position != 'static' and box.style.z_index != 'auto'
) or (
box.style.opacity < 1
) or (
box.style.transform # empty list for 'transform: none'
)
class StackingContext(object):
def __init__(self, box):
self.negative_z_contexts = [] # 3: Child contexts, z-index < 0
self.block_boxes = [] # 4, 7: In flow, non positioned
self.float_boxes = [] # 5: Non positioned
self.zero_z_contexts = [] # 8: Child contexts, z-index = 0
self.positive_z_contexts = [] # 9: Child contexts, z-index > 0
self.box = self._dispatch_children(box)
self.z_index = box.style.z_index
if self.z_index == 'auto':
self.z_index = 0
def _dispatch_children(self, box):
if not isinstance(box, boxes.ParentBox):
return box
children = []
for child in box.children:
if establishes_stacking_context(child):
context = StackingContext(child)
if context.z_index < 0:
self.negative_z_contexts.append(context)
elif context.z_index == 0:
self.zero_z_contexts.append(context)
elif context.z_index > 0:
self.positive_z_contexts.append(context)
# Remove from children
else:
child = self._dispatch_children(child)
if child.style.position != 'static':
assert child.style.z_index == 'auto'
# sub-contexts are already removed
context = StackingContext(child)
self.zero_z_contexts.append(context)
elif child.is_floated():
self.float_boxes.append(child)
elif isinstance(child, boxes.BlockBox):
self.block_boxes.append(child)
children.append(child)
return box.copy_with_children(children)
|
<commit_before><commit_msg>Add a StackingContext in preparation for z-index drawing.<commit_after>
|
# coding: utf8
"""
weasyprint.stacking
-------------------
:copyright: Copyright 2011-2012 Simon Sapin and contributors, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import division, unicode_literals
from .formatting_structure import boxes
def establishes_stacking_context(box):
return (
box.style.position != 'static' and box.style.z_index != 'auto'
) or (
box.style.opacity < 1
) or (
box.style.transform # empty list for 'transform: none'
)
class StackingContext(object):
def __init__(self, box):
self.negative_z_contexts = [] # 3: Child contexts, z-index < 0
self.block_boxes = [] # 4, 7: In flow, non positioned
self.float_boxes = [] # 5: Non positioned
self.zero_z_contexts = [] # 8: Child contexts, z-index = 0
self.positive_z_contexts = [] # 9: Child contexts, z-index > 0
self.box = self._dispatch_children(box)
self.z_index = box.style.z_index
if self.z_index == 'auto':
self.z_index = 0
def _dispatch_children(self, box):
if not isinstance(box, boxes.ParentBox):
return box
children = []
for child in box.children:
if establishes_stacking_context(child):
context = StackingContext(child)
if context.z_index < 0:
self.negative_z_contexts.append(context)
elif context.z_index == 0:
self.zero_z_contexts.append(context)
elif context.z_index > 0:
self.positive_z_contexts.append(context)
# Remove from children
else:
child = self._dispatch_children(child)
if child.style.position != 'static':
assert child.style.z_index == 'auto'
# sub-contexts are already removed
context = StackingContext(child)
self.zero_z_contexts.append(context)
elif child.is_floated():
self.float_boxes.append(child)
elif isinstance(child, boxes.BlockBox):
self.block_boxes.append(child)
children.append(child)
return box.copy_with_children(children)
|
Add a StackingContext in preparation for z-index drawing.# coding: utf8
"""
weasyprint.stacking
-------------------
:copyright: Copyright 2011-2012 Simon Sapin and contributors, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import division, unicode_literals
from .formatting_structure import boxes
def establishes_stacking_context(box):
return (
box.style.position != 'static' and box.style.z_index != 'auto'
) or (
box.style.opacity < 1
) or (
box.style.transform # empty list for 'transform: none'
)
class StackingContext(object):
def __init__(self, box):
self.negative_z_contexts = [] # 3: Child contexts, z-index < 0
self.block_boxes = [] # 4, 7: In flow, non positioned
self.float_boxes = [] # 5: Non positioned
self.zero_z_contexts = [] # 8: Child contexts, z-index = 0
self.positive_z_contexts = [] # 9: Child contexts, z-index > 0
self.box = self._dispatch_children(box)
self.z_index = box.style.z_index
if self.z_index == 'auto':
self.z_index = 0
def _dispatch_children(self, box):
if not isinstance(box, boxes.ParentBox):
return box
children = []
for child in box.children:
if establishes_stacking_context(child):
context = StackingContext(child)
if context.z_index < 0:
self.negative_z_contexts.append(context)
elif context.z_index == 0:
self.zero_z_contexts.append(context)
elif context.z_index > 0:
self.positive_z_contexts.append(context)
# Remove from children
else:
child = self._dispatch_children(child)
if child.style.position != 'static':
assert child.style.z_index == 'auto'
# sub-contexts are already removed
context = StackingContext(child)
self.zero_z_contexts.append(context)
elif child.is_floated():
self.float_boxes.append(child)
elif isinstance(child, boxes.BlockBox):
self.block_boxes.append(child)
children.append(child)
return box.copy_with_children(children)
|
<commit_before><commit_msg>Add a StackingContext in preparation for z-index drawing.<commit_after># coding: utf8
"""
weasyprint.stacking
-------------------
:copyright: Copyright 2011-2012 Simon Sapin and contributors, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import division, unicode_literals
from .formatting_structure import boxes
def establishes_stacking_context(box):
return (
box.style.position != 'static' and box.style.z_index != 'auto'
) or (
box.style.opacity < 1
) or (
box.style.transform # empty list for 'transform: none'
)
class StackingContext(object):
def __init__(self, box):
self.negative_z_contexts = [] # 3: Child contexts, z-index < 0
self.block_boxes = [] # 4, 7: In flow, non positioned
self.float_boxes = [] # 5: Non positioned
self.zero_z_contexts = [] # 8: Child contexts, z-index = 0
self.positive_z_contexts = [] # 9: Child contexts, z-index > 0
self.box = self._dispatch_children(box)
self.z_index = box.style.z_index
if self.z_index == 'auto':
self.z_index = 0
def _dispatch_children(self, box):
if not isinstance(box, boxes.ParentBox):
return box
children = []
for child in box.children:
if establishes_stacking_context(child):
context = StackingContext(child)
if context.z_index < 0:
self.negative_z_contexts.append(context)
elif context.z_index == 0:
self.zero_z_contexts.append(context)
elif context.z_index > 0:
self.positive_z_contexts.append(context)
# Remove from children
else:
child = self._dispatch_children(child)
if child.style.position != 'static':
assert child.style.z_index == 'auto'
# sub-contexts are already removed
context = StackingContext(child)
self.zero_z_contexts.append(context)
elif child.is_floated():
self.float_boxes.append(child)
elif isinstance(child, boxes.BlockBox):
self.block_boxes.append(child)
children.append(child)
return box.copy_with_children(children)
|
|
e092d456e157d1cb0340bdd6c0599ff9a65dacd0
|
poradnia/cases/migrations/0033_auto_20170929_0815.py
|
poradnia/cases/migrations/0033_auto_20170929_0815.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-09-29 06:15
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('cases', '0032_auto_20170923_1238'),
]
operations = [
migrations.AlterModelOptions(
name='case',
options={'ordering': ['last_send'], 'permissions': (('can_view', 'Can view'), ('can_assign', 'Can assign new permissions'), ('can_send_to_client', 'Can send text to client'), ('can_manage_permission', 'Can assign permission'), ('can_add_record', 'Can add record'), ('can_change_own_record', 'Can change own records'), ('can_change_all_record', 'Can change all records'), ('can_close_case', 'Can close case'), ('can_select_client', 'Can select client'))},
),
]
|
Add missing migrations to cases
|
Add missing migrations to cases
|
Python
|
mit
|
watchdogpolska/poradnia,watchdogpolska/poradnia.siecobywatelska.pl,rwakulszowa/poradnia,watchdogpolska/poradnia,rwakulszowa/poradnia,watchdogpolska/poradnia.siecobywatelska.pl,watchdogpolska/poradnia,rwakulszowa/poradnia,watchdogpolska/poradnia.siecobywatelska.pl,rwakulszowa/poradnia,watchdogpolska/poradnia
|
Add missing migrations to cases
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-09-29 06:15
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('cases', '0032_auto_20170923_1238'),
]
operations = [
migrations.AlterModelOptions(
name='case',
options={'ordering': ['last_send'], 'permissions': (('can_view', 'Can view'), ('can_assign', 'Can assign new permissions'), ('can_send_to_client', 'Can send text to client'), ('can_manage_permission', 'Can assign permission'), ('can_add_record', 'Can add record'), ('can_change_own_record', 'Can change own records'), ('can_change_all_record', 'Can change all records'), ('can_close_case', 'Can close case'), ('can_select_client', 'Can select client'))},
),
]
|
<commit_before><commit_msg>Add missing migrations to cases<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-09-29 06:15
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('cases', '0032_auto_20170923_1238'),
]
operations = [
migrations.AlterModelOptions(
name='case',
options={'ordering': ['last_send'], 'permissions': (('can_view', 'Can view'), ('can_assign', 'Can assign new permissions'), ('can_send_to_client', 'Can send text to client'), ('can_manage_permission', 'Can assign permission'), ('can_add_record', 'Can add record'), ('can_change_own_record', 'Can change own records'), ('can_change_all_record', 'Can change all records'), ('can_close_case', 'Can close case'), ('can_select_client', 'Can select client'))},
),
]
|
Add missing migrations to cases# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-09-29 06:15
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('cases', '0032_auto_20170923_1238'),
]
operations = [
migrations.AlterModelOptions(
name='case',
options={'ordering': ['last_send'], 'permissions': (('can_view', 'Can view'), ('can_assign', 'Can assign new permissions'), ('can_send_to_client', 'Can send text to client'), ('can_manage_permission', 'Can assign permission'), ('can_add_record', 'Can add record'), ('can_change_own_record', 'Can change own records'), ('can_change_all_record', 'Can change all records'), ('can_close_case', 'Can close case'), ('can_select_client', 'Can select client'))},
),
]
|
<commit_before><commit_msg>Add missing migrations to cases<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-09-29 06:15
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('cases', '0032_auto_20170923_1238'),
]
operations = [
migrations.AlterModelOptions(
name='case',
options={'ordering': ['last_send'], 'permissions': (('can_view', 'Can view'), ('can_assign', 'Can assign new permissions'), ('can_send_to_client', 'Can send text to client'), ('can_manage_permission', 'Can assign permission'), ('can_add_record', 'Can add record'), ('can_change_own_record', 'Can change own records'), ('can_change_all_record', 'Can change all records'), ('can_close_case', 'Can close case'), ('can_select_client', 'Can select client'))},
),
]
|
|
77f77ba3e4ce035499c2dac15fedf451621b20c1
|
pcols.py
|
pcols.py
|
import numpy as np
from functools import partial
name_func_dict = {
'backgate': {'label': 'Backgate voltage (V)'},
'MC': {'label': 'Mixing chamber temperature (K)'},
}
def parent_f(data, pdata, meta):
return data['MC'] * 10
name = 'MC*10'
func = partial(parent_f)
label = 'MC temperature in unhelpful units'
name_func_dict[name] = {'func': func, 'label': label}
|
Add sample file for pseudocolumns
|
Add sample file for pseudocolumns
|
Python
|
mit
|
mchels/FolderBrowser
|
Add sample file for pseudocolumns
|
import numpy as np
from functools import partial
name_func_dict = {
'backgate': {'label': 'Backgate voltage (V)'},
'MC': {'label': 'Mixing chamber temperature (K)'},
}
def parent_f(data, pdata, meta):
return data['MC'] * 10
name = 'MC*10'
func = partial(parent_f)
label = 'MC temperature in unhelpful units'
name_func_dict[name] = {'func': func, 'label': label}
|
<commit_before><commit_msg>Add sample file for pseudocolumns<commit_after>
|
import numpy as np
from functools import partial
name_func_dict = {
'backgate': {'label': 'Backgate voltage (V)'},
'MC': {'label': 'Mixing chamber temperature (K)'},
}
def parent_f(data, pdata, meta):
return data['MC'] * 10
name = 'MC*10'
func = partial(parent_f)
label = 'MC temperature in unhelpful units'
name_func_dict[name] = {'func': func, 'label': label}
|
Add sample file for pseudocolumnsimport numpy as np
from functools import partial
name_func_dict = {
'backgate': {'label': 'Backgate voltage (V)'},
'MC': {'label': 'Mixing chamber temperature (K)'},
}
def parent_f(data, pdata, meta):
return data['MC'] * 10
name = 'MC*10'
func = partial(parent_f)
label = 'MC temperature in unhelpful units'
name_func_dict[name] = {'func': func, 'label': label}
|
<commit_before><commit_msg>Add sample file for pseudocolumns<commit_after>import numpy as np
from functools import partial
name_func_dict = {
'backgate': {'label': 'Backgate voltage (V)'},
'MC': {'label': 'Mixing chamber temperature (K)'},
}
def parent_f(data, pdata, meta):
return data['MC'] * 10
name = 'MC*10'
func = partial(parent_f)
label = 'MC temperature in unhelpful units'
name_func_dict[name] = {'func': func, 'label': label}
|
|
463deac9f4f452f20c075fc1ff4591dce4191cad
|
csibe.py
|
csibe.py
|
#!/usr/bin/env python
import os
csibe_path = os.path.dirname(os.path.realpath(__file__))
build_directory = "build"
if not os.path.isdir(build_directory):
os.makedirs(build_directory)
os.chdir(build_directory)
os.system("cmake {0}".format(csibe_path))
|
Add CSiBE build script with basic functionality
|
Add CSiBE build script with basic functionality
The build script csibe.py creates a build directory and executes
CMake there for native target.
|
Python
|
bsd-3-clause
|
bgabor666/csibe,loki04/csibe,szeged/csibe,bgabor666/csibe,loki04/csibe,loki04/csibe,szeged/csibe,loki04/csibe,szeged/csibe,bgabor666/csibe,szeged/csibe,szeged/csibe,bgabor666/csibe,bgabor666/csibe,loki04/csibe,loki04/csibe,bgabor666/csibe,szeged/csibe,bgabor666/csibe,szeged/csibe,loki04/csibe
|
Add CSiBE build script with basic functionality
The build script csibe.py creates a build directory and executes
CMake there for native target.
|
#!/usr/bin/env python
import os
csibe_path = os.path.dirname(os.path.realpath(__file__))
build_directory = "build"
if not os.path.isdir(build_directory):
os.makedirs(build_directory)
os.chdir(build_directory)
os.system("cmake {0}".format(csibe_path))
|
<commit_before><commit_msg>Add CSiBE build script with basic functionality
The build script csibe.py creates a build directory and executes
CMake there for native target.<commit_after>
|
#!/usr/bin/env python
import os
csibe_path = os.path.dirname(os.path.realpath(__file__))
build_directory = "build"
if not os.path.isdir(build_directory):
os.makedirs(build_directory)
os.chdir(build_directory)
os.system("cmake {0}".format(csibe_path))
|
Add CSiBE build script with basic functionality
The build script csibe.py creates a build directory and executes
CMake there for native target.#!/usr/bin/env python
import os
csibe_path = os.path.dirname(os.path.realpath(__file__))
build_directory = "build"
if not os.path.isdir(build_directory):
os.makedirs(build_directory)
os.chdir(build_directory)
os.system("cmake {0}".format(csibe_path))
|
<commit_before><commit_msg>Add CSiBE build script with basic functionality
The build script csibe.py creates a build directory and executes
CMake there for native target.<commit_after>#!/usr/bin/env python
import os
csibe_path = os.path.dirname(os.path.realpath(__file__))
build_directory = "build"
if not os.path.isdir(build_directory):
os.makedirs(build_directory)
os.chdir(build_directory)
os.system("cmake {0}".format(csibe_path))
|