| commit (stringlengths 40-40) | old_file (stringlengths 4-118) | new_file (stringlengths 4-118) | old_contents (stringlengths 0-2.94k) | new_contents (stringlengths 1-4.43k) | subject (stringlengths 15-444) | message (stringlengths 16-3.45k) | lang (stringclasses 1 value) | license (stringclasses 13 values) | repos (stringlengths 5-43.2k) | prompt (stringlengths 17-4.58k) | response (stringlengths 1-4.43k) | prompt_tagged (stringlengths 58-4.62k) | response_tagged (stringlengths 1-4.43k) | text (stringlengths 132-7.29k) | text_tagged (stringlengths 173-7.33k) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1af9b33c603a266b00274cb6b2e707a62d956242
|
bin/debug/fix_usercache_processing.py
|
bin/debug/fix_usercache_processing.py
|
# Fixes usercache processing
# If there are any errors in the usercache processing, fix them and reload the data
# Basic flow
# - Copy data back to user cache
# - Attempt to moveToLongTerm
# - Find errors
# - Fix errors
# - Repeat until no errors are found
import sys
import logging
logging.basicConfig(level=logging.DEBUG)
import uuid
import datetime as pydt
import json
import bson.json_util as bju
import emission.core.get_database as edb
import emission.net.usercache.abstract_usercache_handler as euah
import emission.net.usercache.abstract_usercache as enua
def fix_usercache_errors():
    copy_to_usercache()
    move_to_long_term()

def copy_to_usercache():
    # Step 1: Copy data back to user cache
    error_it = edb.get_timeseries_error_db().find()
    logging.info("Found %d errors in this round" % error_it.count())
    for error in error_it:
        logging.debug("Copying entry %s" % error["metadata"])
        save_result = edb.get_usercache_db().save(error)
        remove_result = edb.get_timeseries_error_db().remove(error["_id"])
        logging.debug("save_result = %s, remove_result = %s" % (save_result, remove_result))
    logging.info("step copy_to_usercache DONE")

def move_to_long_term():
    cache_uuid_list = enua.UserCache.get_uuid_list()
    logging.info("cache UUID list = %s" % cache_uuid_list)
    for uuid in cache_uuid_list:
        logging.info("*" * 10 + "UUID %s: moving to long term" % uuid + "*" * 10)
        uh = euah.UserCacheHandler.getUserCacheHandler(uuid)
        uh.moveToLongTerm()

if __name__ == '__main__':
    fix_usercache_errors()
|
Check in a script to debug usercache processing
|
Check in a script to debug usercache processing
Currently, if there are errors in processing the usercache, we store the
erroneous values into a separate usercache error database. But then, we want to
evaluate the errors and fix them if they reflect bugs. This is a script that
allows us to do that.
It basically copies the values back to the usercache and reruns the new move to
long term code on them. The idea is that you can rerun until you get zero
errors, or at least, zero fixable errors.
|
Python
|
bsd-3-clause
|
yw374cornell/e-mission-server,sunil07t/e-mission-server,shankari/e-mission-server,joshzarrabi/e-mission-server,sunil07t/e-mission-server,joshzarrabi/e-mission-server,shankari/e-mission-server,shankari/e-mission-server,shankari/e-mission-server,e-mission/e-mission-server,sunil07t/e-mission-server,e-mission/e-mission-server,yw374cornell/e-mission-server,e-mission/e-mission-server,e-mission/e-mission-server,joshzarrabi/e-mission-server,yw374cornell/e-mission-server,joshzarrabi/e-mission-server,yw374cornell/e-mission-server,sunil07t/e-mission-server
|
Check in a script to debug usercache processing
Currently, if there are errors in processing the usercache, we store the
erroneous values into a separate usercache error database. But then, we want to
evaluate the errors and fix them if they reflect bugs. This is a script that
allows us to do that.
It basically copies the values back to the usercache and reruns the new move to
long term code on them. The idea is that you can rerun until you get zero
errors, or at least, zero fixable errors.
|
# Fixes usercache processing
# If there are any errors in the usercache processing, fix them and reload the data
# Basic flow
# - Copy data back to user cache
# - Attempt to moveToLongTerm
# - Find errors
# - Fix errors
# - Repeat until no errors are found
import sys
import logging
logging.basicConfig(level=logging.DEBUG)
import uuid
import datetime as pydt
import json
import bson.json_util as bju
import emission.core.get_database as edb
import emission.net.usercache.abstract_usercache_handler as euah
import emission.net.usercache.abstract_usercache as enua
def fix_usercache_errors():
    copy_to_usercache()
    move_to_long_term()

def copy_to_usercache():
    # Step 1: Copy data back to user cache
    error_it = edb.get_timeseries_error_db().find()
    logging.info("Found %d errors in this round" % error_it.count())
    for error in error_it:
        logging.debug("Copying entry %s" % error["metadata"])
        save_result = edb.get_usercache_db().save(error)
        remove_result = edb.get_timeseries_error_db().remove(error["_id"])
        logging.debug("save_result = %s, remove_result = %s" % (save_result, remove_result))
    logging.info("step copy_to_usercache DONE")

def move_to_long_term():
    cache_uuid_list = enua.UserCache.get_uuid_list()
    logging.info("cache UUID list = %s" % cache_uuid_list)
    for uuid in cache_uuid_list:
        logging.info("*" * 10 + "UUID %s: moving to long term" % uuid + "*" * 10)
        uh = euah.UserCacheHandler.getUserCacheHandler(uuid)
        uh.moveToLongTerm()

if __name__ == '__main__':
    fix_usercache_errors()
|
<commit_before><commit_msg>Check in a script to debug usercache processing
Currently, if there are errors in processing the usercache, we store the
erroneous values into a separate usercache error database. But then, we want to
evaluate the errors and fix them if they reflect bugs. This is a script that
allows us to do that.
It basically copies the values back to the usercache and reruns the new move to
long term code on them. The idea is that you can rerun until you get zero
errors, or at least, zero fixable errors.<commit_after>
|
# Fixes usercache processing
# If there are any errors in the usercache processing, fix them and reload the data
# Basic flow
# - Copy data back to user cache
# - Attempt to moveToLongTerm
# - Find errors
# - Fix errors
# - Repeat until no errors are found
import sys
import logging
logging.basicConfig(level=logging.DEBUG)
import uuid
import datetime as pydt
import json
import bson.json_util as bju
import emission.core.get_database as edb
import emission.net.usercache.abstract_usercache_handler as euah
import emission.net.usercache.abstract_usercache as enua
def fix_usercache_errors():
    copy_to_usercache()
    move_to_long_term()

def copy_to_usercache():
    # Step 1: Copy data back to user cache
    error_it = edb.get_timeseries_error_db().find()
    logging.info("Found %d errors in this round" % error_it.count())
    for error in error_it:
        logging.debug("Copying entry %s" % error["metadata"])
        save_result = edb.get_usercache_db().save(error)
        remove_result = edb.get_timeseries_error_db().remove(error["_id"])
        logging.debug("save_result = %s, remove_result = %s" % (save_result, remove_result))
    logging.info("step copy_to_usercache DONE")

def move_to_long_term():
    cache_uuid_list = enua.UserCache.get_uuid_list()
    logging.info("cache UUID list = %s" % cache_uuid_list)
    for uuid in cache_uuid_list:
        logging.info("*" * 10 + "UUID %s: moving to long term" % uuid + "*" * 10)
        uh = euah.UserCacheHandler.getUserCacheHandler(uuid)
        uh.moveToLongTerm()

if __name__ == '__main__':
    fix_usercache_errors()
|
Check in a script to debug usercache processing
Currently, if there are errors in processing the usercache, we store the
erroneous values into a separate usercache error database. But then, we want to
evaluate the errors and fix them if they reflect bugs. This is a script that
allows us to do that.
It basically copies the values back to the usercache and reruns the new move to
long term code on them. The idea is that you can rerun until you get zero
errors, or at least, zero fixable errors.# Fixes usercache processing
# If there are any errors in the usercache processing, fix them and reload the data
# Basic flow
# - Copy data back to user cache
# - Attempt to moveToLongTerm
# - Find errors
# - Fix errors
# - Repeat until no errors are found
import sys
import logging
logging.basicConfig(level=logging.DEBUG)
import uuid
import datetime as pydt
import json
import bson.json_util as bju
import emission.core.get_database as edb
import emission.net.usercache.abstract_usercache_handler as euah
import emission.net.usercache.abstract_usercache as enua
def fix_usercache_errors():
    copy_to_usercache()
    move_to_long_term()

def copy_to_usercache():
    # Step 1: Copy data back to user cache
    error_it = edb.get_timeseries_error_db().find()
    logging.info("Found %d errors in this round" % error_it.count())
    for error in error_it:
        logging.debug("Copying entry %s" % error["metadata"])
        save_result = edb.get_usercache_db().save(error)
        remove_result = edb.get_timeseries_error_db().remove(error["_id"])
        logging.debug("save_result = %s, remove_result = %s" % (save_result, remove_result))
    logging.info("step copy_to_usercache DONE")

def move_to_long_term():
    cache_uuid_list = enua.UserCache.get_uuid_list()
    logging.info("cache UUID list = %s" % cache_uuid_list)
    for uuid in cache_uuid_list:
        logging.info("*" * 10 + "UUID %s: moving to long term" % uuid + "*" * 10)
        uh = euah.UserCacheHandler.getUserCacheHandler(uuid)
        uh.moveToLongTerm()

if __name__ == '__main__':
    fix_usercache_errors()
|
<commit_before><commit_msg>Check in a script to debug usercache processing
Currently, if there are errors in processing the usercache, we store the
erroneous values into a separate usercache error database. But then, we want to
evaluate the errors and fix them if they reflect bugs. This is a script that
allows us to do that.
It basically copies the values back to the usercache and reruns the new move to
long term code on them. The idea is that you can rerun until you get zero
errors, or at least, zero fixable errors.<commit_after># Fixes usercache processing
# If there are any errors in the usercache processing, fix them and reload the data
# Basic flow
# - Copy data back to user cache
# - Attempt to moveToLongTerm
# - Find errors
# - Fix errors
# - Repeat until no errors are found
import sys
import logging
logging.basicConfig(level=logging.DEBUG)
import uuid
import datetime as pydt
import json
import bson.json_util as bju
import emission.core.get_database as edb
import emission.net.usercache.abstract_usercache_handler as euah
import emission.net.usercache.abstract_usercache as enua
def fix_usercache_errors():
    copy_to_usercache()
    move_to_long_term()

def copy_to_usercache():
    # Step 1: Copy data back to user cache
    error_it = edb.get_timeseries_error_db().find()
    logging.info("Found %d errors in this round" % error_it.count())
    for error in error_it:
        logging.debug("Copying entry %s" % error["metadata"])
        save_result = edb.get_usercache_db().save(error)
        remove_result = edb.get_timeseries_error_db().remove(error["_id"])
        logging.debug("save_result = %s, remove_result = %s" % (save_result, remove_result))
    logging.info("step copy_to_usercache DONE")

def move_to_long_term():
    cache_uuid_list = enua.UserCache.get_uuid_list()
    logging.info("cache UUID list = %s" % cache_uuid_list)
    for uuid in cache_uuid_list:
        logging.info("*" * 10 + "UUID %s: moving to long term" % uuid + "*" * 10)
        uh = euah.UserCacheHandler.getUserCacheHandler(uuid)
        uh.moveToLongTerm()

if __name__ == '__main__':
    fix_usercache_errors()
|
|
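The copy-back loop in this record relies on PyMongo's `save()`, `remove()`, and `Cursor.count()`, all long deprecated and removed in PyMongo 3/4. A minimal sketch of the same step against the modern CRUD API, assuming the same e-mission collection helpers (this is an illustration, not something the commit itself claims):

```python
import emission.core.get_database as edb  # same e-mission helper module as the script above

def copy_to_usercache_modern():
    # Mirrors copy_to_usercache() on PyMongo >= 3: replace_one(..., upsert=True)
    # stands in for the old save(), delete_one() for the old remove().
    error_db = edb.get_timeseries_error_db()
    usercache_db = edb.get_usercache_db()
    for error in error_db.find():
        usercache_db.replace_one({"_id": error["_id"]}, error, upsert=True)
        error_db.delete_one({"_id": error["_id"]})
```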
9d196fff2c1347361e3650363e2c4d718e84ce62
|
CodeFights/avoidObstacles.py
|
CodeFights/avoidObstacles.py
|
#!/usr/local/bin/python
# Code Fights Avoid Obstacles Problem

def avoidObstacles(inputArray):
    for jump in range(2, 40):
        safe = True
        for item in inputArray:
            if item % jump == 0:
                safe = False
                break
        if safe:
            return jump

def main():
    tests = [
        [[5, 3, 6, 7, 9], 4],
        [[2, 3], 4],
        [[1, 4, 10, 6, 2], 7]
    ]
    for t in tests:
        res = avoidObstacles(t[0])
        if t[1] == res:
            print("PASSED: avoidObstacles({}) returned {}"
                  .format(t[0], res))
        else:
            print("FAILED: avoidObstacles({}) returned {}, should have returned {}"
                  .format(t[0], res, t[1]))

if __name__ == '__main__':
    main()
|
Solve Code Fights avoid obstacles problem
|
Solve Code Fights avoid obstacles problem
|
Python
|
mit
|
HKuz/Test_Code
|
Solve Code Fights avoid obstacles problem
|
#!/usr/local/bin/python
# Code Fights Avoid Obstacles Problem

def avoidObstacles(inputArray):
    for jump in range(2, 40):
        safe = True
        for item in inputArray:
            if item % jump == 0:
                safe = False
                break
        if safe:
            return jump

def main():
    tests = [
        [[5, 3, 6, 7, 9], 4],
        [[2, 3], 4],
        [[1, 4, 10, 6, 2], 7]
    ]
    for t in tests:
        res = avoidObstacles(t[0])
        if t[1] == res:
            print("PASSED: avoidObstacles({}) returned {}"
                  .format(t[0], res))
        else:
            print("FAILED: avoidObstacles({}) returned {}, should have returned {}"
                  .format(t[0], res, t[1]))

if __name__ == '__main__':
    main()
|
<commit_before><commit_msg>Solve Code Fights avoid obstacles problem<commit_after>
|
#!/usr/local/bin/python
# Code Fights Avoid Obstacles Problem

def avoidObstacles(inputArray):
    for jump in range(2, 40):
        safe = True
        for item in inputArray:
            if item % jump == 0:
                safe = False
                break
        if safe:
            return jump

def main():
    tests = [
        [[5, 3, 6, 7, 9], 4],
        [[2, 3], 4],
        [[1, 4, 10, 6, 2], 7]
    ]
    for t in tests:
        res = avoidObstacles(t[0])
        if t[1] == res:
            print("PASSED: avoidObstacles({}) returned {}"
                  .format(t[0], res))
        else:
            print("FAILED: avoidObstacles({}) returned {}, should have returned {}"
                  .format(t[0], res, t[1]))

if __name__ == '__main__':
    main()
|
Solve Code Fights avoid obstacles problem#!/usr/local/bin/python
# Code Fights Avoid Obstacles Problem

def avoidObstacles(inputArray):
    for jump in range(2, 40):
        safe = True
        for item in inputArray:
            if item % jump == 0:
                safe = False
                break
        if safe:
            return jump

def main():
    tests = [
        [[5, 3, 6, 7, 9], 4],
        [[2, 3], 4],
        [[1, 4, 10, 6, 2], 7]
    ]
    for t in tests:
        res = avoidObstacles(t[0])
        if t[1] == res:
            print("PASSED: avoidObstacles({}) returned {}"
                  .format(t[0], res))
        else:
            print("FAILED: avoidObstacles({}) returned {}, should have returned {}"
                  .format(t[0], res, t[1]))

if __name__ == '__main__':
    main()
|
<commit_before><commit_msg>Solve Code Fights avoid obstacles problem<commit_after>#!/usr/local/bin/python
# Code Fights Avoid Obstacles Problem

def avoidObstacles(inputArray):
    for jump in range(2, 40):
        safe = True
        for item in inputArray:
            if item % jump == 0:
                safe = False
                break
        if safe:
            return jump

def main():
    tests = [
        [[5, 3, 6, 7, 9], 4],
        [[2, 3], 4],
        [[1, 4, 10, 6, 2], 7]
    ]
    for t in tests:
        res = avoidObstacles(t[0])
        if t[1] == res:
            print("PASSED: avoidObstacles({}) returned {}"
                  .format(t[0], res))
        else:
            print("FAILED: avoidObstacles({}) returned {}, should have returned {}"
                  .format(t[0], res, t[1]))

if __name__ == '__main__':
    main()
|
|
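The solution in this record returns the smallest jump length that divides none of the obstacle coordinates, scanning a hard-coded range of 2..39. A minimal sketch of the same idea without the fixed upper bound (a variation for illustration, not the committed code):

```python
from itertools import count

def avoid_obstacles_unbounded(obstacles):
    # The answer is the smallest jump >= 2 such that no obstacle coordinate is a
    # multiple of it; max(obstacles) + 1 always qualifies, so next() terminates.
    return next(jump for jump in count(2)
                if all(x % jump != 0 for x in obstacles))

assert avoid_obstacles_unbounded([5, 3, 6, 7, 9]) == 4  # first test case above
```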
ce7a93bea9aeeaa49bec527b69f2b65efc7c450d
|
plotter.py
|
plotter.py
|
import plotly.graph_objs as go
import plotly.plotly as py
from plotly.tools import FigureFactory as FF
import matplotlib.pyplot as plt
def plot(x_data, y_data):
    # x_data = iteration_numbers
    # y_data = makespans
    # colorscale = ['#7A4579', '#D56073', 'rgb(236,158,105)', (1, 1, 0.2), (0.98,0.98,0.98)]
    # fig = FF.create_2D_density(
    #     x_data, y_data, colorscale=colorscale,
    #     hist_color='rgb(255, 237, 222)', point_size=3
    # )
    # py.plot(fig, filename='histogram_subplots')
    # trace = go.Scattergl(
    #     x = x_data,
    #     y = y_data,
    #     mode = 'lines',
    #     marker = dict(
    #         color = 'rgb(152, 0, 0)',
    #         line = dict(
    #             width = 1,
    #             color = 'rgb(0,0,0)')
    #     )
    # )
    # data = [trace]
    # py.plot(data, filename='goodthick')
    plt.plot(x_data, y_data, 'ro')
    plt.title("Initial population: " + str(100) + " Iteration Numbers: " + str(len(x_data)))
    plt.ylabel("Makespan")
    plt.xlabel("Generation")
    plt.show()
|
Move plotting out of main
|
Move plotting out of main
|
Python
|
mit
|
Irvel/JSSP-Genetic-Algorithm
|
Move plotting out of main
|
import plotly.graph_objs as go
import plotly.plotly as py
from plotly.tools import FigureFactory as FF
import matplotlib.pyplot as plt
def plot(x_data, y_data):
    # x_data = iteration_numbers
    # y_data = makespans
    # colorscale = ['#7A4579', '#D56073', 'rgb(236,158,105)', (1, 1, 0.2), (0.98,0.98,0.98)]
    # fig = FF.create_2D_density(
    #     x_data, y_data, colorscale=colorscale,
    #     hist_color='rgb(255, 237, 222)', point_size=3
    # )
    # py.plot(fig, filename='histogram_subplots')
    # trace = go.Scattergl(
    #     x = x_data,
    #     y = y_data,
    #     mode = 'lines',
    #     marker = dict(
    #         color = 'rgb(152, 0, 0)',
    #         line = dict(
    #             width = 1,
    #             color = 'rgb(0,0,0)')
    #     )
    # )
    # data = [trace]
    # py.plot(data, filename='goodthick')
    plt.plot(x_data, y_data, 'ro')
    plt.title("Initial population: " + str(100) + " Iteration Numbers: " + str(len(x_data)))
    plt.ylabel("Makespan")
    plt.xlabel("Generation")
    plt.show()
|
<commit_before><commit_msg>Move plotting out of main<commit_after>
|
import plotly.graph_objs as go
import plotly.plotly as py
from plotly.tools import FigureFactory as FF
import matplotlib.pyplot as plt
def plot(x_data, y_data):
    # x_data = iteration_numbers
    # y_data = makespans
    # colorscale = ['#7A4579', '#D56073', 'rgb(236,158,105)', (1, 1, 0.2), (0.98,0.98,0.98)]
    # fig = FF.create_2D_density(
    #     x_data, y_data, colorscale=colorscale,
    #     hist_color='rgb(255, 237, 222)', point_size=3
    # )
    # py.plot(fig, filename='histogram_subplots')
    # trace = go.Scattergl(
    #     x = x_data,
    #     y = y_data,
    #     mode = 'lines',
    #     marker = dict(
    #         color = 'rgb(152, 0, 0)',
    #         line = dict(
    #             width = 1,
    #             color = 'rgb(0,0,0)')
    #     )
    # )
    # data = [trace]
    # py.plot(data, filename='goodthick')
    plt.plot(x_data, y_data, 'ro')
    plt.title("Initial population: " + str(100) + " Iteration Numbers: " + str(len(x_data)))
    plt.ylabel("Makespan")
    plt.xlabel("Generation")
    plt.show()
|
Move plotting out of mainimport plotly.graph_objs as go
import plotly.plotly as py
from plotly.tools import FigureFactory as FF
import matplotlib.pyplot as plt
def plot(x_data, y_data):
    # x_data = iteration_numbers
    # y_data = makespans
    # colorscale = ['#7A4579', '#D56073', 'rgb(236,158,105)', (1, 1, 0.2), (0.98,0.98,0.98)]
    # fig = FF.create_2D_density(
    #     x_data, y_data, colorscale=colorscale,
    #     hist_color='rgb(255, 237, 222)', point_size=3
    # )
    # py.plot(fig, filename='histogram_subplots')
    # trace = go.Scattergl(
    #     x = x_data,
    #     y = y_data,
    #     mode = 'lines',
    #     marker = dict(
    #         color = 'rgb(152, 0, 0)',
    #         line = dict(
    #             width = 1,
    #             color = 'rgb(0,0,0)')
    #     )
    # )
    # data = [trace]
    # py.plot(data, filename='goodthick')
    plt.plot(x_data, y_data, 'ro')
    plt.title("Initial population: " + str(100) + " Iteration Numbers: " + str(len(x_data)))
    plt.ylabel("Makespan")
    plt.xlabel("Generation")
    plt.show()
|
<commit_before><commit_msg>Move plotting out of main<commit_after>import plotly.graph_objs as go
import plotly.plotly as py
from plotly.tools import FigureFactory as FF
import matplotlib.pyplot as plt
def plot(x_data, y_data):
    # x_data = iteration_numbers
    # y_data = makespans
    # colorscale = ['#7A4579', '#D56073', 'rgb(236,158,105)', (1, 1, 0.2), (0.98,0.98,0.98)]
    # fig = FF.create_2D_density(
    #     x_data, y_data, colorscale=colorscale,
    #     hist_color='rgb(255, 237, 222)', point_size=3
    # )
    # py.plot(fig, filename='histogram_subplots')
    # trace = go.Scattergl(
    #     x = x_data,
    #     y = y_data,
    #     mode = 'lines',
    #     marker = dict(
    #         color = 'rgb(152, 0, 0)',
    #         line = dict(
    #             width = 1,
    #             color = 'rgb(0,0,0)')
    #     )
    # )
    # data = [trace]
    # py.plot(data, filename='goodthick')
    plt.plot(x_data, y_data, 'ro')
    plt.title("Initial population: " + str(100) + " Iteration Numbers: " + str(len(x_data)))
    plt.ylabel("Makespan")
    plt.xlabel("Generation")
    plt.show()
|
|
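With plotting factored out of main, callers pass the iteration and makespan series into `plot()` directly. A hypothetical call site (variable names and values are illustrative, not from the commit):

```python
from plotter import plot

# makespans[i] would be the best makespan found in generation i of the GA run
makespans = [812, 790, 771, 771, 764]
generations = list(range(len(makespans)))
plot(generations, makespans)  # red dots, titled with the fixed population size of 100
```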
75f36085f81c763016517901084d3181f9f1f108
|
pinax/eventlog/migrations/0003_auto_20160111_0208.py
|
pinax/eventlog/migrations/0003_auto_20160111_0208.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-11 02:08
from __future__ import unicode_literals
from django.db import migrations
import jsonfield.fields
class Migration(migrations.Migration):
    dependencies = [
        ('eventlog', '0002_auto_20150113_1450'),
    ]

    operations = [
        migrations.AlterField(
            model_name='log',
            name='extra',
            field=jsonfield.fields.JSONField(),
        ),
    ]
|
Add missing migration from the switch to jsonfield
|
Add missing migration from the switch to jsonfield
|
Python
|
mit
|
pinax/pinax-eventlog
|
Add missing migration from the switch to jsonfield
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-11 02:08
from __future__ import unicode_literals
from django.db import migrations
import jsonfield.fields
class Migration(migrations.Migration):
    dependencies = [
        ('eventlog', '0002_auto_20150113_1450'),
    ]

    operations = [
        migrations.AlterField(
            model_name='log',
            name='extra',
            field=jsonfield.fields.JSONField(),
        ),
    ]
|
<commit_before><commit_msg>Add missing migration from the switch to jsonfield<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-11 02:08
from __future__ import unicode_literals
from django.db import migrations
import jsonfield.fields
class Migration(migrations.Migration):
    dependencies = [
        ('eventlog', '0002_auto_20150113_1450'),
    ]

    operations = [
        migrations.AlterField(
            model_name='log',
            name='extra',
            field=jsonfield.fields.JSONField(),
        ),
    ]
|
Add missing migration from the switch to jsonfield# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-11 02:08
from __future__ import unicode_literals
from django.db import migrations
import jsonfield.fields
class Migration(migrations.Migration):
    dependencies = [
        ('eventlog', '0002_auto_20150113_1450'),
    ]

    operations = [
        migrations.AlterField(
            model_name='log',
            name='extra',
            field=jsonfield.fields.JSONField(),
        ),
    ]
|
<commit_before><commit_msg>Add missing migration from the switch to jsonfield<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-11 02:08
from __future__ import unicode_literals
from django.db import migrations
import jsonfield.fields
class Migration(migrations.Migration):
    dependencies = [
        ('eventlog', '0002_auto_20150113_1450'),
    ]

    operations = [
        migrations.AlterField(
            model_name='log',
            name='extra',
            field=jsonfield.fields.JSONField(),
        ),
    ]
|
|
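Migrations like this one are normally generated by Django's autodetector rather than written by hand; the record above suggests it was simply missing after the model's `extra` field switched to `jsonfield`. As a sketch under that assumption (app label taken from the migration's dependencies), regenerating and applying it programmatically would look like:

```python
# Equivalent to: python manage.py makemigrations eventlog && python manage.py migrate eventlog
import django
from django.core.management import call_command

django.setup()  # assumes DJANGO_SETTINGS_MODULE points at the host project
call_command("makemigrations", "eventlog")
call_command("migrate", "eventlog")
```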
fd850084bd858be9fcc98fa8fbe29bdd481fc066
|
qserver.py
|
qserver.py
|
#!/usr/bin/env python
"""
qserver
This module will use a queuing server to execute a number of tasks
across multiple threads.
Example
-------
tasks = (qserver.os_task("list files","ls -1"), \
qserver.task("my job",my_func,arg1,arg2,arg3))
qserver.start(tasks)
Written by Brian Powell on 1/17/14
Copyright (c)2014 University of Hawaii under the BSD-License.
"""
from __future__ import print_function
import Queue
import threading
import subprocess
class task:
    def __init__(self, name, cmd, *args):
        self.cmd = cmd
        self.name = name
        self.args = args

    def run(self):
        if callable(self.cmd):
            self.cmd(*self.args)

class os_task(task):
    def run(self):
        subprocess.call(self.cmd, shell=True)

class process_thread(threading.Thread):
    def __init__(self, queue):
        threading.Thread.__init__(self)
        self.queue = queue

    def run(self):
        while True:
            item = self.queue.get()
            print(self.getName() + " running " + item.name + "\n")
            item.run()
            self.queue.task_done()

def start(tasks, nthreads=2):
    q = Queue.Queue()
    for i in range(nthreads):
        t = process_thread(q)
        t.daemon = True
        t.start()
    for item in tasks:
        q.put(item)
    q.join()
|
Add queueing server to handle multiple processes
|
Add queueing server to handle multiple processes
|
Python
|
mit
|
dalepartridge/seapy,ocefpaf/seapy,powellb/seapy
|
Add queueing server to handle multiple processes
|
#!/usr/bin/env python
"""
qserver
This module will use a queuing server to execute a number of tasks
across multiple threads.
Example
-------
tasks = (qserver.os_task("list files","ls -1"), \
qserver.task("my job",my_func,arg1,arg2,arg3))
qserver.start(tasks)
Written by Brian Powell on 1/17/14
Copyright (c)2014 University of Hawaii under the BSD-License.
"""
from __future__ import print_function
import Queue
import threading
import subprocess
class task:
    def __init__(self, name, cmd, *args):
        self.cmd = cmd
        self.name = name
        self.args = args

    def run(self):
        if callable(self.cmd):
            self.cmd(*self.args)

class os_task(task):
    def run(self):
        subprocess.call(self.cmd, shell=True)

class process_thread(threading.Thread):
    def __init__(self, queue):
        threading.Thread.__init__(self)
        self.queue = queue

    def run(self):
        while True:
            item = self.queue.get()
            print(self.getName() + " running " + item.name + "\n")
            item.run()
            self.queue.task_done()

def start(tasks, nthreads=2):
    q = Queue.Queue()
    for i in range(nthreads):
        t = process_thread(q)
        t.daemon = True
        t.start()
    for item in tasks:
        q.put(item)
    q.join()
|
<commit_before><commit_msg>Add queueing server to handle multiple processes<commit_after>
|
#!/usr/bin/env python
"""
qserver
This module will use a queuing server to execute a number of tasks
across multiple threads.
Example
-------
tasks = (qserver.os_task("list files","ls -1"), \
qserver.task("my job",my_func,arg1,arg2,arg3))
qserver.start(tasks)
Written by Brian Powell on 1/17/14
Copyright (c)2014 University of Hawaii under the BSD-License.
"""
from __future__ import print_function
import Queue
import threading
import subprocess
class task:
    def __init__(self, name, cmd, *args):
        self.cmd = cmd
        self.name = name
        self.args = args

    def run(self):
        if callable(self.cmd):
            self.cmd(*self.args)

class os_task(task):
    def run(self):
        subprocess.call(self.cmd, shell=True)

class process_thread(threading.Thread):
    def __init__(self, queue):
        threading.Thread.__init__(self)
        self.queue = queue

    def run(self):
        while True:
            item = self.queue.get()
            print(self.getName() + " running " + item.name + "\n")
            item.run()
            self.queue.task_done()

def start(tasks, nthreads=2):
    q = Queue.Queue()
    for i in range(nthreads):
        t = process_thread(q)
        t.daemon = True
        t.start()
    for item in tasks:
        q.put(item)
    q.join()
|
Add queueing server to handle multiple processes#!/usr/bin/env python
"""
qserver
This module will use a queuing server to execute a number of tasks
across multiple threads.
Example
-------
tasks = (qserver.os_task("list files","ls -1"), \
qserver.task("my job",my_func,arg1,arg2,arg3))
qserver.start(tasks)
Written by Brian Powell on 1/17/14
Copyright (c)2014 University of Hawaii under the BSD-License.
"""
from __future__ import print_function
import Queue
import threading
import subprocess
class task:
    def __init__(self, name, cmd, *args):
        self.cmd = cmd
        self.name = name
        self.args = args

    def run(self):
        if callable(self.cmd):
            self.cmd(*self.args)

class os_task(task):
    def run(self):
        subprocess.call(self.cmd, shell=True)

class process_thread(threading.Thread):
    def __init__(self, queue):
        threading.Thread.__init__(self)
        self.queue = queue

    def run(self):
        while True:
            item = self.queue.get()
            print(self.getName() + " running " + item.name + "\n")
            item.run()
            self.queue.task_done()

def start(tasks, nthreads=2):
    q = Queue.Queue()
    for i in range(nthreads):
        t = process_thread(q)
        t.daemon = True
        t.start()
    for item in tasks:
        q.put(item)
    q.join()
|
<commit_before><commit_msg>Add queueing server to handle multiple processes<commit_after>#!/usr/bin/env python
"""
qserver
This module will use a queuing server to execute a number of tasks
across multiple threads.
Example
-------
tasks = (qserver.os_task("list files","ls -1"), \
qserver.task("my job",my_func,arg1,arg2,arg3))
qserver.start(tasks)
Written by Brian Powell on 1/17/14
Copyright (c)2014 University of Hawaii under the BSD-License.
"""
from __future__ import print_function
import Queue
import threading
import subprocess
class task:
    def __init__(self, name, cmd, *args):
        self.cmd = cmd
        self.name = name
        self.args = args

    def run(self):
        if callable(self.cmd):
            self.cmd(*self.args)

class os_task(task):
    def run(self):
        subprocess.call(self.cmd, shell=True)

class process_thread(threading.Thread):
    def __init__(self, queue):
        threading.Thread.__init__(self)
        self.queue = queue

    def run(self):
        while True:
            item = self.queue.get()
            print(self.getName() + " running " + item.name + "\n")
            item.run()
            self.queue.task_done()

def start(tasks, nthreads=2):
    q = Queue.Queue()
    for i in range(nthreads):
        t = process_thread(q)
        t.daemon = True
        t.start()
    for item in tasks:
        q.put(item)
    q.join()
|
|
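The module docstring already shows the intended call pattern: build a tuple of `task`/`os_task` objects and hand them to `start()`, which fans them out over daemon worker threads and blocks on `q.join()` until every task is done. A usage sketch following that docstring (the `my_func` worker is a placeholder; note the module is Python 2, where the queue module is spelled `Queue`):

```python
import qserver

def my_func(a, b, c):
    print("working on %s %s %s" % (a, b, c))

tasks = (qserver.os_task("list files", "ls -1"),    # shell command task
         qserver.task("my job", my_func, 1, 2, 3))  # Python callable task
qserver.start(tasks, nthreads=2)  # returns only after the queue is drained
```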
0c3c3921897816f0c7c1c74ad0b52a25cef5b742
|
tests/compat.py
|
tests/compat.py
|
import sys
if sys.version_info.major < 3:
    import unittest2 as unittest
else:
    import unittest
|
from evelink.thirdparty.six import PY2
if PY2:
    import unittest2 as unittest
else:
    import unittest
|
Use six for easy version info.
|
[PY3] Use six for easy version info.
|
Python
|
mit
|
Morloth1274/EVE-Online-POCO-manager,FashtimeDotCom/evelink,ayust/evelink,zigdon/evelink,bastianh/evelink
|
import sys
if sys.version_info.major < 3:
    import unittest2 as unittest
else:
    import unittest
[PY3] Use six for easy version info.
|
from evelink.thirdparty.six import PY2
if PY2:
    import unittest2 as unittest
else:
    import unittest
|
<commit_before>import sys
if sys.version_info.major < 3:
    import unittest2 as unittest
else:
    import unittest
<commit_msg>[PY3] Use six for easy version info.<commit_after>
|
from evelink.thirdparty.six import PY2
if PY2:
    import unittest2 as unittest
else:
    import unittest
|
import sys
if sys.version_info.major < 3:
    import unittest2 as unittest
else:
    import unittest
[PY3] Use six for easy version info.from evelink.thirdparty.six import PY2
if PY2:
    import unittest2 as unittest
else:
    import unittest
|
<commit_before>import sys
if sys.version_info.major < 3:
    import unittest2 as unittest
else:
    import unittest
<commit_msg>[PY3] Use six for easy version info.<commit_after>from evelink.thirdparty.six import PY2
if PY2:
    import unittest2 as unittest
else:
    import unittest
|
495a9617cd42b5d88f9391b47d355a93c960c988
|
corehq/apps/data_interfaces/migrations/0007_logging_models.py
|
corehq/apps/data_interfaces/migrations/0007_logging_models.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-04 20:05
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    dependencies = [
        ('data_interfaces', '0006_case_rule_refactor'),
    ]

    operations = [
        migrations.CreateModel(
            name='CaseRuleSubmission',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_on', models.DateTimeField(db_index=True)),
                ('form_id', models.CharField(max_length=255)),
                ('archived', models.BooleanField(default=False)),
                ('rule', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='data_interfaces.AutomaticUpdateRule')),
            ],
        ),
        migrations.CreateModel(
            name='DomainCaseRuleRun',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('domain', models.CharField(max_length=126)),
                ('started_on', models.DateTimeField(db_index=True)),
                ('finished_on', models.DateTimeField(null=True)),
                ('status', models.CharField(max_length=1)),
                ('cases_checked', models.IntegerField(null=True)),
                ('num_updates', models.IntegerField(null=True)),
                ('num_closes', models.IntegerField(null=True)),
                ('num_related_updates', models.IntegerField(null=True)),
                ('num_related_closes', models.IntegerField(null=True)),
            ],
        ),
        migrations.AlterIndexTogether(
            name='domaincaserulerun',
            index_together=set([('domain', 'started_on')]),
        ),
        migrations.AlterIndexTogether(
            name='caserulesubmission',
            index_together=set([('rule', 'created_on')]),
        ),
    ]
|
Add migration for logging models
|
Add migration for logging models
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
|
Add migration for logging models
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-04 20:05
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    dependencies = [
        ('data_interfaces', '0006_case_rule_refactor'),
    ]

    operations = [
        migrations.CreateModel(
            name='CaseRuleSubmission',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_on', models.DateTimeField(db_index=True)),
                ('form_id', models.CharField(max_length=255)),
                ('archived', models.BooleanField(default=False)),
                ('rule', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='data_interfaces.AutomaticUpdateRule')),
            ],
        ),
        migrations.CreateModel(
            name='DomainCaseRuleRun',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('domain', models.CharField(max_length=126)),
                ('started_on', models.DateTimeField(db_index=True)),
                ('finished_on', models.DateTimeField(null=True)),
                ('status', models.CharField(max_length=1)),
                ('cases_checked', models.IntegerField(null=True)),
                ('num_updates', models.IntegerField(null=True)),
                ('num_closes', models.IntegerField(null=True)),
                ('num_related_updates', models.IntegerField(null=True)),
                ('num_related_closes', models.IntegerField(null=True)),
            ],
        ),
        migrations.AlterIndexTogether(
            name='domaincaserulerun',
            index_together=set([('domain', 'started_on')]),
        ),
        migrations.AlterIndexTogether(
            name='caserulesubmission',
            index_together=set([('rule', 'created_on')]),
        ),
    ]
|
<commit_before><commit_msg>Add migration for logging models<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-04 20:05
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    dependencies = [
        ('data_interfaces', '0006_case_rule_refactor'),
    ]

    operations = [
        migrations.CreateModel(
            name='CaseRuleSubmission',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_on', models.DateTimeField(db_index=True)),
                ('form_id', models.CharField(max_length=255)),
                ('archived', models.BooleanField(default=False)),
                ('rule', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='data_interfaces.AutomaticUpdateRule')),
            ],
        ),
        migrations.CreateModel(
            name='DomainCaseRuleRun',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('domain', models.CharField(max_length=126)),
                ('started_on', models.DateTimeField(db_index=True)),
                ('finished_on', models.DateTimeField(null=True)),
                ('status', models.CharField(max_length=1)),
                ('cases_checked', models.IntegerField(null=True)),
                ('num_updates', models.IntegerField(null=True)),
                ('num_closes', models.IntegerField(null=True)),
                ('num_related_updates', models.IntegerField(null=True)),
                ('num_related_closes', models.IntegerField(null=True)),
            ],
        ),
        migrations.AlterIndexTogether(
            name='domaincaserulerun',
            index_together=set([('domain', 'started_on')]),
        ),
        migrations.AlterIndexTogether(
            name='caserulesubmission',
            index_together=set([('rule', 'created_on')]),
        ),
    ]
|
Add migration for logging models# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-04 20:05
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    dependencies = [
        ('data_interfaces', '0006_case_rule_refactor'),
    ]

    operations = [
        migrations.CreateModel(
            name='CaseRuleSubmission',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_on', models.DateTimeField(db_index=True)),
                ('form_id', models.CharField(max_length=255)),
                ('archived', models.BooleanField(default=False)),
                ('rule', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='data_interfaces.AutomaticUpdateRule')),
            ],
        ),
        migrations.CreateModel(
            name='DomainCaseRuleRun',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('domain', models.CharField(max_length=126)),
                ('started_on', models.DateTimeField(db_index=True)),
                ('finished_on', models.DateTimeField(null=True)),
                ('status', models.CharField(max_length=1)),
                ('cases_checked', models.IntegerField(null=True)),
                ('num_updates', models.IntegerField(null=True)),
                ('num_closes', models.IntegerField(null=True)),
                ('num_related_updates', models.IntegerField(null=True)),
                ('num_related_closes', models.IntegerField(null=True)),
            ],
        ),
        migrations.AlterIndexTogether(
            name='domaincaserulerun',
            index_together=set([('domain', 'started_on')]),
        ),
        migrations.AlterIndexTogether(
            name='caserulesubmission',
            index_together=set([('rule', 'created_on')]),
        ),
    ]
|
<commit_before><commit_msg>Add migration for logging models<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-04 20:05
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    dependencies = [
        ('data_interfaces', '0006_case_rule_refactor'),
    ]

    operations = [
        migrations.CreateModel(
            name='CaseRuleSubmission',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_on', models.DateTimeField(db_index=True)),
                ('form_id', models.CharField(max_length=255)),
                ('archived', models.BooleanField(default=False)),
                ('rule', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='data_interfaces.AutomaticUpdateRule')),
            ],
        ),
        migrations.CreateModel(
            name='DomainCaseRuleRun',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('domain', models.CharField(max_length=126)),
                ('started_on', models.DateTimeField(db_index=True)),
                ('finished_on', models.DateTimeField(null=True)),
                ('status', models.CharField(max_length=1)),
                ('cases_checked', models.IntegerField(null=True)),
                ('num_updates', models.IntegerField(null=True)),
                ('num_closes', models.IntegerField(null=True)),
                ('num_related_updates', models.IntegerField(null=True)),
                ('num_related_closes', models.IntegerField(null=True)),
            ],
        ),
        migrations.AlterIndexTogether(
            name='domaincaserulerun',
            index_together=set([('domain', 'started_on')]),
        ),
        migrations.AlterIndexTogether(
            name='caserulesubmission',
            index_together=set([('rule', 'created_on')]),
        ),
    ]
|
|
f3bce5f47dc75e392a72846d98de467af6892f28
|
dit/example_dists/tests/test_mdbsi.py
|
dit/example_dists/tests/test_mdbsi.py
|
"""
Tests for the MDBSI distributions.
"""
import pytest
from dit.example_dists.mdbsi import dyadic, triadic
from dit.multivariate import (
    entropy as H,
    total_correlation as T,
    dual_total_correlation as B,
    coinformation as I,
    residual_entropy as R,
    caekl_mutual_information as J,
    tse_complexity as TSE,
    gk_common_information as K,
    wyner_common_information as C,
    exact_common_information as G,
    functional_common_information as F,
    mss_common_information as M,
)
from dit.other import (
    extropy as X,
    perplexity as P,
    disequilibrium as D,
)

@pytest.mark.parametrize(('measure', 'dy_val', 'tri_val'), [
    (H, 3, 3),
    (T, 3, 3),
    (B, 3, 3),
    (I, 0, 0),
    (R, 0, 0),
    (J, 1.5, 1.5),
    (TSE, 2, 2),
    (K, 0, 1),
    (C, 3, 3),
    (G, 3, 3),
    (F, 3, 3),
    (M, 3, 3),
    (X, 1.3485155455967712, 1.3485155455967712),
    (P, 8, 8),
    (D, 0.76124740551164605, 0.76124740551164605),
])
def test_measures(measure, dy_val, tri_val):
    """
    Test that the distributions have the correct properties.
    """
    assert measure(dyadic) == dy_val
    assert measure(triadic) == tri_val
|
Add tests regarding the dyadic and triadic distributions from MDBSI.
|
Add tests regarding the dyadic and triadic distributions from MDBSI.
|
Python
|
bsd-3-clause
|
Autoplectic/dit,Autoplectic/dit,dit/dit,Autoplectic/dit,dit/dit,dit/dit,dit/dit,Autoplectic/dit,dit/dit,Autoplectic/dit
|
Add tests regarding the dyadic and triadic distributions from MDBSI.
|
"""
Tests for the MDBSI distributions.
"""
import pytest
from dit.example_dists.mdbsi import dyadic, triadic
from dit.multivariate import (
    entropy as H,
    total_correlation as T,
    dual_total_correlation as B,
    coinformation as I,
    residual_entropy as R,
    caekl_mutual_information as J,
    tse_complexity as TSE,
    gk_common_information as K,
    wyner_common_information as C,
    exact_common_information as G,
    functional_common_information as F,
    mss_common_information as M,
)
from dit.other import (
    extropy as X,
    perplexity as P,
    disequilibrium as D,
)

@pytest.mark.parametrize(('measure', 'dy_val', 'tri_val'), [
    (H, 3, 3),
    (T, 3, 3),
    (B, 3, 3),
    (I, 0, 0),
    (R, 0, 0),
    (J, 1.5, 1.5),
    (TSE, 2, 2),
    (K, 0, 1),
    (C, 3, 3),
    (G, 3, 3),
    (F, 3, 3),
    (M, 3, 3),
    (X, 1.3485155455967712, 1.3485155455967712),
    (P, 8, 8),
    (D, 0.76124740551164605, 0.76124740551164605),
])
def test_measures(measure, dy_val, tri_val):
    """
    Test that the distributions have the correct properties.
    """
    assert measure(dyadic) == dy_val
    assert measure(triadic) == tri_val
|
<commit_before><commit_msg>Add tests regarding the dyadic and triadic distributions from MDBSI.<commit_after>
|
"""
Tests for the MDBSI distributions.
"""
import pytest
from dit.example_dists.mdbsi import dyadic, triadic
from dit.multivariate import (
    entropy as H,
    total_correlation as T,
    dual_total_correlation as B,
    coinformation as I,
    residual_entropy as R,
    caekl_mutual_information as J,
    tse_complexity as TSE,
    gk_common_information as K,
    wyner_common_information as C,
    exact_common_information as G,
    functional_common_information as F,
    mss_common_information as M,
)
from dit.other import (
    extropy as X,
    perplexity as P,
    disequilibrium as D,
)

@pytest.mark.parametrize(('measure', 'dy_val', 'tri_val'), [
    (H, 3, 3),
    (T, 3, 3),
    (B, 3, 3),
    (I, 0, 0),
    (R, 0, 0),
    (J, 1.5, 1.5),
    (TSE, 2, 2),
    (K, 0, 1),
    (C, 3, 3),
    (G, 3, 3),
    (F, 3, 3),
    (M, 3, 3),
    (X, 1.3485155455967712, 1.3485155455967712),
    (P, 8, 8),
    (D, 0.76124740551164605, 0.76124740551164605),
])
def test_measures(measure, dy_val, tri_val):
    """
    Test that the distributions have the correct properties.
    """
    assert measure(dyadic) == dy_val
    assert measure(triadic) == tri_val
|
Add tests regarding the dyadic and triadic distributions from MDBSI."""
Tests for the MDBSI distributions.
"""
import pytest
from dit.example_dists.mdbsi import dyadic, triadic
from dit.multivariate import (
    entropy as H,
    total_correlation as T,
    dual_total_correlation as B,
    coinformation as I,
    residual_entropy as R,
    caekl_mutual_information as J,
    tse_complexity as TSE,
    gk_common_information as K,
    wyner_common_information as C,
    exact_common_information as G,
    functional_common_information as F,
    mss_common_information as M,
)
from dit.other import (
    extropy as X,
    perplexity as P,
    disequilibrium as D,
)

@pytest.mark.parametrize(('measure', 'dy_val', 'tri_val'), [
    (H, 3, 3),
    (T, 3, 3),
    (B, 3, 3),
    (I, 0, 0),
    (R, 0, 0),
    (J, 1.5, 1.5),
    (TSE, 2, 2),
    (K, 0, 1),
    (C, 3, 3),
    (G, 3, 3),
    (F, 3, 3),
    (M, 3, 3),
    (X, 1.3485155455967712, 1.3485155455967712),
    (P, 8, 8),
    (D, 0.76124740551164605, 0.76124740551164605),
])
def test_measures(measure, dy_val, tri_val):
    """
    Test that the distributions have the correct properties.
    """
    assert measure(dyadic) == dy_val
    assert measure(triadic) == tri_val
|
<commit_before><commit_msg>Add tests regarding the dyadic and triadic distributions from MDBSI.<commit_after>"""
Tests for the MDBSI distributions.
"""
import pytest
from dit.example_dists.mdbsi import dyadic, triadic
from dit.multivariate import (
    entropy as H,
    total_correlation as T,
    dual_total_correlation as B,
    coinformation as I,
    residual_entropy as R,
    caekl_mutual_information as J,
    tse_complexity as TSE,
    gk_common_information as K,
    wyner_common_information as C,
    exact_common_information as G,
    functional_common_information as F,
    mss_common_information as M,
)
from dit.other import (
    extropy as X,
    perplexity as P,
    disequilibrium as D,
)

@pytest.mark.parametrize(('measure', 'dy_val', 'tri_val'), [
    (H, 3, 3),
    (T, 3, 3),
    (B, 3, 3),
    (I, 0, 0),
    (R, 0, 0),
    (J, 1.5, 1.5),
    (TSE, 2, 2),
    (K, 0, 1),
    (C, 3, 3),
    (G, 3, 3),
    (F, 3, 3),
    (M, 3, 3),
    (X, 1.3485155455967712, 1.3485155455967712),
    (P, 8, 8),
    (D, 0.76124740551164605, 0.76124740551164605),
])
def test_measures(measure, dy_val, tri_val):
    """
    Test that the distributions have the correct properties.
    """
    assert measure(dyadic) == dy_val
    assert measure(triadic) == tri_val
|
|
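pytest expands the single `parametrize` decorator in this record into 15 test cases, one per (measure, expected dyadic value, expected triadic value) row, each asserted against both distributions. A standalone sketch of one telling row pair, with values taken from the table above (much of the point of the MDBSI dyadic/triadic pair is that many measures cannot tell them apart, while Gacs-Korner common information can):

```python
from dit.example_dists.mdbsi import dyadic, triadic
from dit.multivariate import entropy, gk_common_information

# Both distributions carry 3 bits of joint entropy...
assert entropy(dyadic) == 3
assert entropy(triadic) == 3
# ...but only the triadic one has nonzero Gacs-Korner common information.
assert gk_common_information(dyadic) == 0
assert gk_common_information(triadic) == 1
```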
ad705bd8993ba522326c64bd9b6232ef5374e65f
|
diffsettings-all.py
|
diffsettings-all.py
|
#!/usr/bin/env python
# Based on Django 1.5 diffsettings
# https://github.com/django/django/blob/1.5/django/core/management/commands/diffsettings.py
import sys
def module_to_dict(module, omittable=lambda k: k.startswith('_')):
    "Converts a module namespace to a Python dictionary. Used by get_settings_diff."
    return dict([(k, repr(v)) for k, v in module.__dict__.items() if not omittable(k)])

def main():
    # Inspired by Postfix's "postconf -n".
    from django.conf import settings, global_settings
    # Because settings are imported lazily, we need to explicitly load them.
    settings._setup()
    user_settings = module_to_dict(settings._wrapped)
    default_settings = module_to_dict(global_settings)
    output = []
    for key in sorted(user_settings.keys()):
        if key not in default_settings:
            output.append("%s = %s ###" % (key, user_settings[key]))
        elif user_settings[key] != default_settings[key]:
            output.append("%s = %s" % (key, user_settings[key]))
        else:
            output.append("### %s = %s" % (key, user_settings[key]))
    return '\n'.join(output)

if __name__ == '__main__':
    sys.stdout.write(main() + '\n')
|
Add diffsettings --all for Django<=1.5
|
scripts: Add diffsettings --all for Django<=1.5
|
Python
|
bsd-3-clause
|
fmierlo/django-default-settings,fmierlo/django-default-settings
|
scripts: Add diffsettings --all for Django<=1.5
|
#!/usr/bin/env python
# Based on Django 1.5 diffsettings
# https://github.com/django/django/blob/1.5/django/core/management/commands/diffsettings.py
import sys
def module_to_dict(module, omittable=lambda k: k.startswith('_')):
    "Converts a module namespace to a Python dictionary. Used by get_settings_diff."
    return dict([(k, repr(v)) for k, v in module.__dict__.items() if not omittable(k)])

def main():
    # Inspired by Postfix's "postconf -n".
    from django.conf import settings, global_settings
    # Because settings are imported lazily, we need to explicitly load them.
    settings._setup()
    user_settings = module_to_dict(settings._wrapped)
    default_settings = module_to_dict(global_settings)
    output = []
    for key in sorted(user_settings.keys()):
        if key not in default_settings:
            output.append("%s = %s ###" % (key, user_settings[key]))
        elif user_settings[key] != default_settings[key]:
            output.append("%s = %s" % (key, user_settings[key]))
        else:
            output.append("### %s = %s" % (key, user_settings[key]))
    return '\n'.join(output)

if __name__ == '__main__':
    sys.stdout.write(main() + '\n')
|
<commit_before><commit_msg>scripts: Add diffsettings --all for Django<=1.5<commit_after>
|
#!/usr/bin/env python
# Based on Django 1.5 diffsettings
# https://github.com/django/django/blob/1.5/django/core/management/commands/diffsettings.py
import sys
def module_to_dict(module, omittable=lambda k: k.startswith('_')):
    "Converts a module namespace to a Python dictionary. Used by get_settings_diff."
    return dict([(k, repr(v)) for k, v in module.__dict__.items() if not omittable(k)])

def main():
    # Inspired by Postfix's "postconf -n".
    from django.conf import settings, global_settings
    # Because settings are imported lazily, we need to explicitly load them.
    settings._setup()
    user_settings = module_to_dict(settings._wrapped)
    default_settings = module_to_dict(global_settings)
    output = []
    for key in sorted(user_settings.keys()):
        if key not in default_settings:
            output.append("%s = %s ###" % (key, user_settings[key]))
        elif user_settings[key] != default_settings[key]:
            output.append("%s = %s" % (key, user_settings[key]))
        else:
            output.append("### %s = %s" % (key, user_settings[key]))
    return '\n'.join(output)

if __name__ == '__main__':
    sys.stdout.write(main() + '\n')
|
scripts: Add diffsettings --all for Django<=1.5#!/usr/bin/env python
# Based on Django 1.5 diffsettings
# https://github.com/django/django/blob/1.5/django/core/management/commands/diffsettings.py
import sys
def module_to_dict(module, omittable=lambda k: k.startswith('_')):
    "Converts a module namespace to a Python dictionary. Used by get_settings_diff."
    return dict([(k, repr(v)) for k, v in module.__dict__.items() if not omittable(k)])

def main():
    # Inspired by Postfix's "postconf -n".
    from django.conf import settings, global_settings
    # Because settings are imported lazily, we need to explicitly load them.
    settings._setup()
    user_settings = module_to_dict(settings._wrapped)
    default_settings = module_to_dict(global_settings)
    output = []
    for key in sorted(user_settings.keys()):
        if key not in default_settings:
            output.append("%s = %s ###" % (key, user_settings[key]))
        elif user_settings[key] != default_settings[key]:
            output.append("%s = %s" % (key, user_settings[key]))
        else:
            output.append("### %s = %s" % (key, user_settings[key]))
    return '\n'.join(output)

if __name__ == '__main__':
    sys.stdout.write(main() + '\n')
|
<commit_before><commit_msg>scripts: Add diffsettings --all for Django<=1.5<commit_after>#!/usr/bin/env python
# Based on Django 1.5 diffsettings
# https://github.com/django/django/blob/1.5/django/core/management/commands/diffsettings.py
import sys
def module_to_dict(module, omittable=lambda k: k.startswith('_')):
    "Converts a module namespace to a Python dictionary. Used by get_settings_diff."
    return dict([(k, repr(v)) for k, v in module.__dict__.items() if not omittable(k)])

def main():
    # Inspired by Postfix's "postconf -n".
    from django.conf import settings, global_settings
    # Because settings are imported lazily, we need to explicitly load them.
    settings._setup()
    user_settings = module_to_dict(settings._wrapped)
    default_settings = module_to_dict(global_settings)
    output = []
    for key in sorted(user_settings.keys()):
        if key not in default_settings:
            output.append("%s = %s ###" % (key, user_settings[key]))
        elif user_settings[key] != default_settings[key]:
            output.append("%s = %s" % (key, user_settings[key]))
        else:
            output.append("### %s = %s" % (key, user_settings[key]))
    return '\n'.join(output)

if __name__ == '__main__':
    sys.stdout.write(main() + '\n')
|
|
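The script replicates `manage.py diffsettings` for older Django but also echoes unchanged defaults, commenting them out with a leading `###` and flagging settings absent from `global_settings` with a trailing `###`. A hedged invocation sketch (the settings module name is a placeholder, and the hyphenated filename requires a string-based import):

```python
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "myproject.settings")  # hypothetical project

import importlib
diffsettings = importlib.import_module("diffsettings-all")  # hyphen rules out a plain import statement
for line in diffsettings.main().splitlines():
    if not line.startswith("### "):  # keep only settings that differ from Django's defaults
        print(line)
```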
9f94ad31c1f94cccf4dbdaef6a7b8faa0199fa46
|
test/integration/ggrc/converters/test_export_snapshots.py
|
test/integration/ggrc/converters/test_export_snapshots.py
|
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Tests for snapshot export."""
from integration.ggrc import TestCase
class TestExportSnapshots(TestCase):
"""Tests basic snapshot export."""
def setUp(self):
super(TestExportSnapshots, self).setUp()
self.client.get("/login")
self.headers = {
'Content-Type': 'application/json',
"X-Requested-By": "GGRC",
"X-export-view": "blocks",
}
def test_simple_export(self):
"""Test simple empty snapshot export."""
search_request = [{
"object_name": "Snapshot",
"filters": {
"expression": {
"left": "child_type",
"op": {"name": "="},
"right": "Control",
},
},
}]
parsed_data = self.export_parsed_csv(search_request)["Snapshot"]
self.assertEqual(parsed_data, [])
|
Add basic snapshot export test
|
Add basic snapshot export test
|
Python
|
apache-2.0
|
AleksNeStu/ggrc-core,AleksNeStu/ggrc-core,AleksNeStu/ggrc-core,AleksNeStu/ggrc-core
|
Add basic snapshot export test
|
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Tests for snapshot export."""
from integration.ggrc import TestCase
class TestExportSnapshots(TestCase):
"""Tests basic snapshot export."""
def setUp(self):
super(TestExportSnapshots, self).setUp()
self.client.get("/login")
self.headers = {
'Content-Type': 'application/json',
"X-Requested-By": "GGRC",
"X-export-view": "blocks",
}
def test_simple_export(self):
"""Test simple empty snapshot export."""
search_request = [{
"object_name": "Snapshot",
"filters": {
"expression": {
"left": "child_type",
"op": {"name": "="},
"right": "Control",
},
},
}]
parsed_data = self.export_parsed_csv(search_request)["Snapshot"]
self.assertEqual(parsed_data, [])
|
<commit_before><commit_msg>Add basic snapshot export test<commit_after>
|
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Tests for snapshot export."""
from integration.ggrc import TestCase
class TestExportSnapshots(TestCase):
"""Tests basic snapshot export."""
def setUp(self):
super(TestExportSnapshots, self).setUp()
self.client.get("/login")
self.headers = {
'Content-Type': 'application/json',
"X-Requested-By": "GGRC",
"X-export-view": "blocks",
}
def test_simple_export(self):
"""Test simple empty snapshot export."""
search_request = [{
"object_name": "Snapshot",
"filters": {
"expression": {
"left": "child_type",
"op": {"name": "="},
"right": "Control",
},
},
}]
parsed_data = self.export_parsed_csv(search_request)["Snapshot"]
self.assertEqual(parsed_data, [])
|
Add basic snapshot export test# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Tests for snapshot export."""
from integration.ggrc import TestCase
class TestExportSnapshots(TestCase):
"""Tests basic snapshot export."""
def setUp(self):
super(TestExportSnapshots, self).setUp()
self.client.get("/login")
self.headers = {
'Content-Type': 'application/json',
"X-Requested-By": "GGRC",
"X-export-view": "blocks",
}
def test_simple_export(self):
"""Test simple empty snapshot export."""
search_request = [{
"object_name": "Snapshot",
"filters": {
"expression": {
"left": "child_type",
"op": {"name": "="},
"right": "Control",
},
},
}]
parsed_data = self.export_parsed_csv(search_request)["Snapshot"]
self.assertEqual(parsed_data, [])
|
<commit_before><commit_msg>Add basic snapshot export test<commit_after># Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Tests for snapshot export."""
from integration.ggrc import TestCase
class TestExportSnapshots(TestCase):
"""Tests basic snapshot export."""
def setUp(self):
super(TestExportSnapshots, self).setUp()
self.client.get("/login")
self.headers = {
'Content-Type': 'application/json',
"X-Requested-By": "GGRC",
"X-export-view": "blocks",
}
def test_simple_export(self):
"""Test simple empty snapshot export."""
search_request = [{
"object_name": "Snapshot",
"filters": {
"expression": {
"left": "child_type",
"op": {"name": "="},
"right": "Control",
},
},
}]
parsed_data = self.export_parsed_csv(search_request)["Snapshot"]
self.assertEqual(parsed_data, [])
|
|
0129f699427300feaf61eb4dda122c98dab32328
|
caffe2/python/operator_test/snapshot_test.py
|
caffe2/python/operator_test/snapshot_test.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, workspace
import os
import shutil
import tempfile
import unittest
class SnapshotTest(unittest.TestCase):
"""A simple test case to make sure that the snapshot behavior is correct.
"""
def testSnapshot(self):
temp_root = tempfile.mkdtemp()
net = core.Net("test_snapshot")
# Note(jiayq): I am being a bit lazy here and am using the old iter
# convention that does not have an input. Optionally change it to the
# new style if needed.
net.Iter([], "iter")
net.ConstantFill([], "value", shape=[1, 2, 3])
net.Snapshot(["iter", "value"], [],
db=os.path.join(temp_root, "test_snapshot_at_%05d"),
db_type="leveldb", every=10, absolute_path=True)
self.assertTrue(workspace.CreateNet(net))
for i in range(100):
self.assertTrue(workspace.RunNet("test_snapshot"))
for i in range(1, 10):
# Print statements are only for debugging purposes.
# print("Asserting %d" % i)
# print(os.path.join(temp_root, "test_snapshot_at_%05d" % (i * 10)))
self.assertTrue(os.path.exists(
os.path.join(temp_root, "test_snapshot_at_%05d" % (i * 10))))
# Finally, clean up.
shutil.rmtree(temp_root)
if __name__ == "__main__":
import unittest
unittest.main()
|
Add a snapshot test for Simon Layton
|
Add a snapshot test for Simon Layton
Summary: This is mainly for the OSS side checking.
Reviewed By: dzhulgakov
Differential Revision: D4238349
fbshipit-source-id: 061da3f721341c4a1249e1cc6c8c842fc505860f
|
Python
|
apache-2.0
|
davinwang/caffe2,bwasti/caffe2,bwasti/caffe2,xzturn/caffe2,pietern/caffe2,davinwang/caffe2,Yangqing/caffe2,davinwang/caffe2,Yangqing/caffe2,xzturn/caffe2,xzturn/caffe2,Yangqing/caffe2,bwasti/caffe2,xzturn/caffe2,caffe2/caffe2,xzturn/caffe2,bwasti/caffe2,Yangqing/caffe2,sf-wind/caffe2,pietern/caffe2,sf-wind/caffe2,pietern/caffe2,sf-wind/caffe2,sf-wind/caffe2,pietern/caffe2,Yangqing/caffe2,davinwang/caffe2,davinwang/caffe2,sf-wind/caffe2,pietern/caffe2,bwasti/caffe2
|
Add a snapshot test for Simon Layton
Summary: This is mainly for the OSS side checking.
Reviewed By: dzhulgakov
Differential Revision: D4238349
fbshipit-source-id: 061da3f721341c4a1249e1cc6c8c842fc505860f
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, workspace
import os
import shutil
import tempfile
import unittest
class SnapshotTest(unittest.TestCase):
"""A simple test case to make sure that the snapshot behavior is correct.
"""
def testSnapshot(self):
temp_root = tempfile.mkdtemp()
net = core.Net("test_snapshot")
# Note(jiayq): I am being a bit lazy here and am using the old iter
# convention that does not have an input. Optionally change it to the
# new style if needed.
net.Iter([], "iter")
net.ConstantFill([], "value", shape=[1, 2, 3])
net.Snapshot(["iter", "value"], [],
db=os.path.join(temp_root, "test_snapshot_at_%05d"),
db_type="leveldb", every=10, absolute_path=True)
self.assertTrue(workspace.CreateNet(net))
for i in range(100):
self.assertTrue(workspace.RunNet("test_snapshot"))
for i in range(1, 10):
# Print statements are only for debugging purposes.
# print("Asserting %d" % i)
# print(os.path.join(temp_root, "test_snapshot_at_%05d" % (i * 10)))
self.assertTrue(os.path.exists(
os.path.join(temp_root, "test_snapshot_at_%05d" % (i * 10))))
# Finally, clean up.
shutil.rmtree(temp_root)
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add a snapshot test for Simon Layton
Summary: This is mainly for the OSS side checking.
Reviewed By: dzhulgakov
Differential Revision: D4238349
fbshipit-source-id: 061da3f721341c4a1249e1cc6c8c842fc505860f<commit_after>
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, workspace
import os
import shutil
import tempfile
import unittest
class SnapshotTest(unittest.TestCase):
"""A simple test case to make sure that the snapshot behavior is correct.
"""
def testSnapshot(self):
temp_root = tempfile.mkdtemp()
net = core.Net("test_snapshot")
# Note(jiayq): I am being a bit lazy here and am using the old iter
# convention that does not have an input. Optionally change it to the
# new style if needed.
net.Iter([], "iter")
net.ConstantFill([], "value", shape=[1, 2, 3])
net.Snapshot(["iter", "value"], [],
db=os.path.join(temp_root, "test_snapshot_at_%05d"),
db_type="leveldb", every=10, absolute_path=True)
self.assertTrue(workspace.CreateNet(net))
for i in range(100):
self.assertTrue(workspace.RunNet("test_snapshot"))
for i in range(1, 10):
# Print statements are only for debugging purposes.
# print("Asserting %d" % i)
# print(os.path.join(temp_root, "test_snapshot_at_%05d" % (i * 10)))
self.assertTrue(os.path.exists(
os.path.join(temp_root, "test_snapshot_at_%05d" % (i * 10))))
# Finally, clean up.
shutil.rmtree(temp_root)
if __name__ == "__main__":
unittest.main()
|
Add a snapshot test for Simon Layton
Summary: This is mainly for the OSS side checking.
Reviewed By: dzhulgakov
Differential Revision: D4238349
fbshipit-source-id: 061da3f721341c4a1249e1cc6c8c842fc505860f
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, workspace
import os
import shutil
import tempfile
import unittest
class SnapshotTest(unittest.TestCase):
"""A simple test case to make sure that the snapshot behavior is correct.
"""
def testSnapshot(self):
temp_root = tempfile.mkdtemp()
net = core.Net("test_snapshot")
# Note(jiayq): I am being a bit lazy here and am using the old iter
# convention that does not have an input. Optionally change it to the
# new style if needed.
net.Iter([], "iter")
net.ConstantFill([], "value", shape=[1, 2, 3])
net.Snapshot(["iter", "value"], [],
db=os.path.join(temp_root, "test_snapshot_at_%05d"),
db_type="leveldb", every=10, absolute_path=True)
self.assertTrue(workspace.CreateNet(net))
for i in range(100):
self.assertTrue(workspace.RunNet("test_snapshot"))
for i in range(1, 10):
# Print statements are only for debugging purposes.
# print("Asserting %d" % i)
# print(os.path.join(temp_root, "test_snapshot_at_%05d" % (i * 10)))
self.assertTrue(os.path.exists(
os.path.join(temp_root, "test_snapshot_at_%05d" % (i * 10))))
# Finally, clean up.
shutil.rmtree(temp_root)
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add a snapshot test for Simon Layton
Summary: This is mainly for the OSS side checking.
Reviewed By: dzhulgakov
Differential Revision: D4238349
fbshipit-source-id: 061da3f721341c4a1249e1cc6c8c842fc505860f<commit_after>from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, workspace
import os
import shutil
import tempfile
import unittest
class SnapshotTest(unittest.TestCase):
"""A simple test case to make sure that the snapshot behavior is correct.
"""
def testSnapshot(self):
temp_root = tempfile.mkdtemp()
net = core.Net("test_snapshot")
# Note(jiayq): I am being a bit lazy here and am using the old iter
# convention that does not have an input. Optionally change it to the
# new style if needed.
net.Iter([], "iter")
net.ConstantFill([], "value", shape=[1, 2, 3])
net.Snapshot(["iter", "value"], [],
db=os.path.join(temp_root, "test_snapshot_at_%05d"),
db_type="leveldb", every=10, absolute_path=True)
self.assertTrue(workspace.CreateNet(net))
for i in range(100):
self.assertTrue(workspace.RunNet("test_snapshot"))
for i in range(1, 10):
# Print statements are only for debugging purposes.
# print("Asserting %d" % i)
# print(os.path.join(temp_root, "test_snapshot_at_%05d" % (i * 10)))
self.assertTrue(os.path.exists(
os.path.join(temp_root, "test_snapshot_at_%05d" % (i * 10))))
# Finally, clean up.
shutil.rmtree(temp_root)
if __name__ == "__main__":
unittest.main()
|
|
e16054a24c7e8d5325ad4b1fa31a65681febc43c
|
test/test_se.py
|
test/test_se.py
|
# vim: foldmethod=marker
from lobster.cmssw import dataset
from lobster import fs, se
import os
import shutil
import subprocess
import tempfile
import unittest
class TestSE(unittest.TestCase):
@classmethod
def setUpClass(cls):
path = os.path.expandvars('/hadoop/store/user/matze/')
cls.workdir = tempfile.mkdtemp(prefix=path)
os.chmod(cls.workdir, 0777)
os.makedirs(os.path.join(cls.workdir, 'spam'))
for i in range(10):
with open(os.path.join(cls.workdir, 'spam', str(i) + '.txt'), 'w') as f:
f.write('eggs')
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.workdir)
def query(self, url):
if not isinstance(url, list):
url = [url]
s = se.StorageConfiguration({'input': url})
s.activate()
with fs.default():
info = dataset.MetaInterface().get_info({'label': 'ham', 'files': 'spam/'})
assert len(info.files) == 10
class TestLocal(TestSE):
def runTest(self):
self.query('file://' + self.workdir)
class TestHadoop(TestSE):
def runTest(self):
self.query('hdfs://' + self.workdir.replace('/hadoop', '', 1))
class TestSRM(TestSE):
def runTest(self):
self.query('srm://T3_US_NotreDame' + self.workdir.replace('/hadoop', '', 1))
class TestChirp(TestSE):
def setUp(self):
fd, self.acl = tempfile.mkstemp()
with os.fdopen(fd, 'w') as f:
f.write('unix:' + os.environ['USER'] + ' rl\n')
args=['chirp_server', '-p', '9666',
'--root=' + self.workdir,
'-a', 'unix', '-A', self.acl]
self.p = subprocess.Popen(args)
def tearDown(self):
os.unlink(self.acl)
self.p.terminate()
def runTest(self):
self.query('chirp://earth.crc.nd.edu:9666')
class TestFailure(TestSE):
def runTest(self):
self.query(['file:///fuckup', 'file://' + self.workdir])
if __name__ == '__main__':
unittest.main()
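A hedged aside: the individual backend cases can also be run in isolation, which helps when only one storage element is reachable from the test host (this sketch assumes the module is importable as test_se):
# run only the local-filesystem backend case, skipping the grid/Chirp ones
import unittest
from test_se import TestLocal
unittest.TextTestRunner().run(unittest.TestLoader().loadTestsFromTestCase(TestLocal))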
|
Add tests for non-DBS data discovery.
|
Add tests for non-DBS data discovery.
|
Python
|
mit
|
matz-e/lobster,matz-e/lobster,matz-e/lobster
|
Add tests for non-DBS data discovery.
|
# vim: foldmethod=marker
from lobster.cmssw import dataset
from lobster import fs, se
import os
import shutil
import subprocess
import tempfile
import unittest
class TestSE(unittest.TestCase):
@classmethod
def setUpClass(cls):
path = os.path.expandvars('/hadoop/store/user/matze/')
cls.workdir = tempfile.mkdtemp(prefix=path)
os.chmod(cls.workdir, 0777)
os.makedirs(os.path.join(cls.workdir, 'spam'))
for i in range(10):
with open(os.path.join(cls.workdir, 'spam', str(i) + '.txt'), 'w') as f:
f.write('eggs')
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.workdir)
def query(self, url):
if not isinstance(url, list):
url = [url]
s = se.StorageConfiguration({'input': url})
s.activate()
with fs.default():
info = dataset.MetaInterface().get_info({'label': 'ham', 'files': 'spam/'})
assert len(info.files) == 10
class TestLocal(TestSE):
def runTest(self):
self.query('file://' + self.workdir)
class TestHadoop(TestSE):
def runTest(self):
self.query('hdfs://' + self.workdir.replace('/hadoop', '', 1))
class TestSRM(TestSE):
def runTest(self):
self.query('srm://T3_US_NotreDame' + self.workdir.replace('/hadoop', '', 1))
class TestChirp(TestSE):
def setUp(self):
fd, self.acl = tempfile.mkstemp()
with os.fdopen(fd, 'w') as f:
f.write('unix:' + os.environ['USER'] + ' rl\n')
args=['chirp_server', '-p', '9666',
'--root=' + self.workdir,
'-a', 'unix', '-A', self.acl]
self.p = subprocess.Popen(args)
def tearDown(self):
os.unlink(self.acl)
self.p.terminate()
def runTest(self):
self.query('chirp://earth.crc.nd.edu:9666')
class TestFailure(TestSE):
def runTest(self):
self.query(['file:///fuckup', 'file://' + self.workdir])
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add tests for non-DBS data discovery.<commit_after>
|
# vim: foldmethod=marker
from lobster.cmssw import dataset
from lobster import fs, se
import os
import shutil
import subprocess
import tempfile
import unittest
class TestSE(unittest.TestCase):
@classmethod
def setUpClass(cls):
path = os.path.expandvars('/hadoop/store/user/matze/')
cls.workdir = tempfile.mkdtemp(prefix=path)
os.chmod(cls.workdir, 0777)
os.makedirs(os.path.join(cls.workdir, 'spam'))
for i in range(10):
with open(os.path.join(cls.workdir, 'spam', str(i) + '.txt'), 'w') as f:
f.write('eggs')
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.workdir)
def query(self, url):
if not isinstance(url, list):
url = [url]
s = se.StorageConfiguration({'input': url})
s.activate()
with fs.default():
info = dataset.MetaInterface().get_info({'label': 'ham', 'files': 'spam/'})
assert len(info.files) == 10
class TestLocal(TestSE):
def runTest(self):
self.query('file://' + self.workdir)
class TestHadoop(TestSE):
def runTest(self):
self.query('hdfs://' + self.workdir.replace('/hadoop', '', 1))
class TestSRM(TestSE):
def runTest(self):
self.query('srm://T3_US_NotreDame' + self.workdir.replace('/hadoop', '', 1))
class TestChirp(TestSE):
def setUp(self):
fd, self.acl = tempfile.mkstemp()
with os.fdopen(fd, 'w') as f:
f.write('unix:' + os.environ['USER'] + ' rl\n')
args=['chirp_server', '-p', '9666',
'--root=' + self.workdir,
'-a', 'unix', '-A', self.acl]
self.p = subprocess.Popen(args)
def tearDown(self):
os.unlink(self.acl)
self.p.terminate()
def runTest(self):
self.query('chirp://earth.crc.nd.edu:9666')
class TestFailure(TestSE):
def runTest(self):
self.query(['file:///fuckup', 'file://' + self.workdir])
if __name__ == '__main__':
unittest.main()
|
Add tests for non-DBS data discovery.
# vim: foldmethod=marker
from lobster.cmssw import dataset
from lobster import fs, se
import os
import shutil
import subprocess
import tempfile
import unittest
class TestSE(unittest.TestCase):
@classmethod
def setUpClass(cls):
path = os.path.expandvars('/hadoop/store/user/matze/')
cls.workdir = tempfile.mkdtemp(prefix=path)
os.chmod(cls.workdir, 0777)
os.makedirs(os.path.join(cls.workdir, 'spam'))
for i in range(10):
with open(os.path.join(cls.workdir, 'spam', str(i) + '.txt'), 'w') as f:
f.write('eggs')
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.workdir)
def query(self, url):
if not isinstance(url, list):
url = [url]
s = se.StorageConfiguration({'input': url})
s.activate()
with fs.default():
info = dataset.MetaInterface().get_info({'label': 'ham', 'files': 'spam/'})
assert len(info.files) == 10
class TestLocal(TestSE):
def runTest(self):
self.query('file://' + self.workdir)
class TestHadoop(TestSE):
def runTest(self):
self.query('hdfs://' + self.workdir.replace('/hadoop', '', 1))
class TestSRM(TestSE):
def runTest(self):
self.query('srm://T3_US_NotreDame' + self.workdir.replace('/hadoop', '', 1))
class TestChirp(TestSE):
def setUp(self):
fd, self.acl = tempfile.mkstemp()
with os.fdopen(fd, 'w') as f:
f.write('unix:' + os.environ['USER'] + ' rl\n')
args=['chirp_server', '-p', '9666',
'--root=' + self.workdir,
'-a', 'unix', '-A', self.acl]
self.p = subprocess.Popen(args)
def tearDown(self):
os.unlink(self.acl)
self.p.terminate()
def runTest(self):
self.query('chirp://earth.crc.nd.edu:9666')
class TestFailure(TestSE):
def runTest(self):
self.query(['file:///fuckup', 'file://' + self.workdir])
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add tests for non-DBS data discovery.<commit_after># vim: foldmethod=marker
from lobster.cmssw import dataset
from lobster import fs, se
import os
import shutil
import subprocess
import tempfile
import unittest
class TestSE(unittest.TestCase):
@classmethod
def setUpClass(cls):
path = os.path.expandvars('/hadoop/store/user/matze/')
cls.workdir = tempfile.mkdtemp(prefix=path)
os.chmod(cls.workdir, 0777)
os.makedirs(os.path.join(cls.workdir, 'spam'))
for i in range(10):
with open(os.path.join(cls.workdir, 'spam', str(i) + '.txt'), 'w') as f:
f.write('eggs')
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.workdir)
def query(self, url):
if not isinstance(url, list):
url = [url]
s = se.StorageConfiguration({'input': url})
s.activate()
with fs.default():
info = dataset.MetaInterface().get_info({'label': 'ham', 'files': 'spam/'})
assert len(info.files) == 10
class TestLocal(TestSE):
def runTest(self):
self.query('file://' + self.workdir)
class TestHadoop(TestSE):
def runTest(self):
self.query('hdfs://' + self.workdir.replace('/hadoop', '', 1))
class TestSRM(TestSE):
def runTest(self):
self.query('srm://T3_US_NotreDame' + self.workdir.replace('/hadoop', '', 1))
class TestChirp(TestSE):
def setUp(self):
fd, self.acl = tempfile.mkstemp()
with os.fdopen(fd, 'w') as f:
f.write('unix:' + os.environ['USER'] + ' rl\n')
args=['chirp_server', '-p', '9666',
'--root=' + self.workdir,
'-a', 'unix', '-A', self.acl]
self.p = subprocess.Popen(args)
def tearDown(self):
os.unlink(self.acl)
self.p.terminate()
def runTest(self):
self.query('chirp://earth.crc.nd.edu:9666')
class TestFailure(TestSE):
def runTest(self):
self.query(['file:///fuckup', 'file://' + self.workdir])
if __name__ == '__main__':
unittest.main()
|
|
cbd90060410108877d068913a4dfc681b81d6956
|
galera_consistency.py
|
galera_consistency.py
|
import optparse
import subprocess
def table_checksum(user, password, host):
args = ['/usr/bin/pt-table-checksum', '-u', user, '-p', password]
if host:
args.extend(['-h', host])
    # capture both streams so the returned (out, err) pair is meaningful
    proc = subprocess.Popen(args, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    (out, err) = proc.communicate()
    return (proc.returncode, out, err)
def main():
usage = "Usage: %prog [-h] [-H] username password"
parser = optparse.OptionParser(usage=usage)
parser.add_option('-H', '--host', action='store', dest='host',
default=None)
(options, args) = parser.parse_args()
if len(args) != 2:
parser.print_help()
raise SystemExit(True)
(status, _, err) = table_checksum(args[0], args[1], options.host)
if status != 0:
print "status err %s" % err
raise SystemExit(True)
print "status ok"
if __name__ == '__main__':
main()
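A minimal invocation sketch; the host and credentials below are placeholders, and the script assumes percona-toolkit provides /usr/bin/pt-table-checksum:
# $ python galera_consistency.py -H 10.0.0.5 monitor s3cret
# or, from Python, bypassing the CLI parsing:
status, out, err = table_checksum('monitor', 's3cret', '10.0.0.5')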
|
Use pt-table-checksum to check for galera consistency
|
Use pt-table-checksum to check for galera consistency
|
Python
|
apache-2.0
|
jpmontez/rpc-openstack,cfarquhar/rpc-maas,miguelgrinberg/rpc-openstack,xeregin/rpc-openstack,mancdaz/rpc-openstack,nrb/rpc-openstack,stevelle/rpc-openstack,claco/rpc-openstack,BjoernT/rpc-openstack,npawelek/rpc-maas,shannonmitchell/rpc-openstack,rcbops/rpc-openstack,robb-romans/rpc-openstack,mattt416/rpc-openstack,andymcc/rpc-openstack,cloudnull/rpc-maas,busterswt/rpc-openstack,stevelle/rpc-openstack,robb-romans/rpc-openstack,stevelle/rpc-openstack,sigmavirus24/rpc-openstack,galstrom21/rpc-openstack,briancurtin/rpc-maas,hughsaunders/rpc-openstack,byronmccollum/rpc-openstack,andymcc/rpc-openstack,miguelgrinberg/rpc-openstack,cfarquhar/rpc-openstack,jacobwagner/rpc-openstack,cloudnull/rpc-openstack,npawelek/rpc-maas,cloudnull/rpc-openstack,mancdaz/rpc-openstack,prometheanfire/rpc-openstack,claco/rpc-openstack,busterswt/rpc-openstack,cloudnull/rpc-maas,claco/rpc-openstack,cfarquhar/rpc-openstack,shannonmitchell/rpc-openstack,nrb/rpc-openstack,darrenchan/rpc-openstack,cfarquhar/rpc-maas,busterswt/rpc-openstack,briancurtin/rpc-maas,sigmavirus24/rpc-openstack,andymcc/rpc-openstack,jpmontez/rpc-openstack,prometheanfire/rpc-openstack,cloudnull/rpc-maas,jpmontez/rpc-openstack,nrb/rpc-openstack,git-harry/rpc-openstack,npawelek/rpc-maas,xeregin/rpc-openstack,xeregin/rpc-openstack,major/rpc-openstack,darrenchan/rpc-openstack,miguelgrinberg/rpc-openstack,sigmavirus24/rpc-openstack,byronmccollum/rpc-openstack,BjoernT/rpc-openstack,darrenchan/rpc-openstack,byronmccollum/rpc-openstack,xeregin/rpc-openstack,mattt416/rpc-openstack,sigmavirus24/rpc-openstack,mattt416/rpc-openstack,major/rpc-openstack,jacobwagner/rpc-openstack,galstrom21/rpc-openstack,briancurtin/rpc-maas,rcbops/rpc-openstack,darrenchan/rpc-openstack,hughsaunders/rpc-openstack,git-harry/rpc-openstack,cfarquhar/rpc-maas
|
Use pt-table-checksum to check for galera consistency
|
import optparse
import subprocess
def table_checksum(user, password, host):
args = ['/usr/bin/pt-table-checksum', '-u', user, '-p', password]
if host:
args.extend(['-h', host])
    # capture both streams so the returned (out, err) pair is meaningful
    proc = subprocess.Popen(args, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    (out, err) = proc.communicate()
    return (proc.returncode, out, err)
def main():
usage = "Usage: %prog [-h] [-H] username password"
parser = optparse.OptionParser(usage=usage)
parser.add_option('-H', '--host', action='store', dest='host',
default=None)
(options, args) = parser.parse_args()
if len(args) != 2:
parser.print_help()
raise SystemExit(True)
(status, _, err) = table_checksum(args[0], args[1], options.host)
if status != 0:
print "status err %s" % err
raise SystemExit(True)
print "status ok"
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Use pt-table-checksum to check for galera consistency<commit_after>
|
import optparse
import subprocess
def table_checksum(user, password, host):
args = ['/usr/bin/pt-table-checksum', '-u', user, '-p', password]
if host:
args.extend(['-h', host])
    # capture both streams so the returned (out, err) pair is meaningful
    proc = subprocess.Popen(args, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    (out, err) = proc.communicate()
    return (proc.returncode, out, err)
def main():
usage = "Usage: %prog [-h] [-H] username password"
parser = optparse.OptionParser(usage=usage)
parser.add_option('-H', '--host', action='store', dest='host',
default=None)
(options, args) = parser.parse_args()
if len(args) != 2:
parser.print_help()
raise SystemExit(True)
(status, _, err) = table_checksum(args[0], args[1], options.host)
if status != 0:
print "status err %s" % err
raise SystemExit(True)
print "status ok"
if __name__ == '__main__':
main()
|
Use pt-table-checksum to check for galera consistency
import optparse
import subprocess
def table_checksum(user, password, host):
args = ['/usr/bin/pt-table-checksum', '-u', user, '-p', password]
if host:
args.extend(['-h', host])
    # capture both streams so the returned (out, err) pair is meaningful
    proc = subprocess.Popen(args, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    (out, err) = proc.communicate()
    return (proc.returncode, out, err)
def main():
usage = "Usage: %prog [-h] [-H] username password"
parser = optparse.OptionParser(usage=usage)
parser.add_option('-H', '--host', action='store', dest='host',
default=None)
(options, args) = parser.parse_args()
if len(args) != 2:
parser.print_help()
raise SystemExit(True)
(status, _, err) = table_checksum(args[0], args[1], options.host)
if status != 0:
print "status err %s" % err
raise SystemExit(True)
print "status ok"
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Use pt-table-checksum to check for galera consistency<commit_after>import optparse
import subprocess
def table_checksum(user, password, host):
args = ['/usr/bin/pt-table-checksum', '-u', user, '-p', password]
if host:
args.extend(['-h', host])
    # capture both streams so the returned (out, err) pair is meaningful
    proc = subprocess.Popen(args, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    (out, err) = proc.communicate()
    return (proc.returncode, out, err)
def main():
usage = "Usage: %prog [-h] [-H] username password"
parser = optparse.OptionParser(usage=usage)
parser.add_option('-H', '--host', action='store', dest='host',
default=None)
(options, args) = parser.parse_args()
if len(args) != 2:
parser.print_help()
raise SystemExit(True)
(status, _, err) = table_checksum(args[0], args[1], options.host)
if status != 0:
print "status err %s" % err
raise SystemExit(True)
print "status ok"
if __name__ == '__main__':
main()
|
|
a948006eb02591d56f76b32b98d5bb8ace5c2600
|
indra/sources/sofia/make_sofia_tsv.py
|
indra/sources/sofia/make_sofia_tsv.py
|
import sys
import json
def make_file(ont_json_file, fname):
with open(ont_json_file, 'r') as fh:
ont_json = json.load(fh)
rows = []
for top_key, entries in ont_json.items():
for entry_key, examples in entries.items():
entry_str = '%s/%s' % (top_key, entry_key)
            examples_str = ','.join(examples)
row = '%s\t%s' % (entry_str, examples_str)
rows.append(row)
with open(fname, 'w') as fh:
fh.write('\n'.join(rows))
if __name__ == '__main__':
ont_json_file = sys.argv[1]
fname = 'sofia_ontology_examples.tsv'
make_file(ont_json_file, fname)
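For reference, make_file assumes a two-level ontology: top-level keys map to dicts of entry names to example lists. A hedged sketch with a hypothetical input file:
# ontology.json: {"event": {"flood": ["flooding", "inundation"]}}
# yields the row: event/flood<TAB>flooding,inundation
make_file('ontology.json', 'sofia_ontology_examples.tsv')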
|
Make SOFIA TSV for ontology mapping
|
Make SOFIA TSV for ontology mapping
|
Python
|
bsd-2-clause
|
pvtodorov/indra,johnbachman/indra,sorgerlab/indra,sorgerlab/belpy,pvtodorov/indra,johnbachman/indra,sorgerlab/belpy,bgyori/indra,bgyori/indra,johnbachman/belpy,johnbachman/indra,bgyori/indra,sorgerlab/indra,sorgerlab/belpy,pvtodorov/indra,sorgerlab/indra,johnbachman/belpy,johnbachman/belpy,pvtodorov/indra
|
Make SOFIA TSV for ontology mapping
|
import sys
import json
def make_file(ont_json_file, fname):
with open(ont_json_file, 'r') as fh:
ont_json = json.load(fh)
rows = []
for top_key, entries in ont_json.items():
for entry_key, examples in entries.items():
entry_str = '%s/%s' % (top_key, entry_key)
            examples_str = ','.join(examples)
row = '%s\t%s' % (entry_str, examples_str)
rows.append(row)
with open(fname, 'w') as fh:
fh.write('\n'.join(rows))
if __name__ == '__main__':
ont_json_file = sys.argv[1]
fname = 'sofia_ontology_examples.tsv'
make_file(ont_json_file, fname)
|
<commit_before><commit_msg>Make SOFIA TSV for ontology mapping<commit_after>
|
import sys
import json
def make_file(ont_json_file, fname):
with open(ont_json_file, 'r') as fh:
ont_json = json.load(fh)
rows = []
for top_key, entries in ont_json.items():
for entry_key, examples in entries.items():
entry_str = '%s/%s' % (top_key, entry_key)
            examples_str = ','.join(examples)
row = '%s\t%s' % (entry_str, examples_str)
rows.append(row)
with open(fname, 'w') as fh:
fh.write('\n'.join(rows))
if __name__ == '__main__':
ont_json_file = sys.argv[1]
fname = 'sofia_ontology_examples.tsv'
make_file(ont_json_file, fname)
|
Make SOFIA TSV for ontology mapping
import sys
import json
def make_file(ont_json_file, fname):
with open(ont_json_file, 'r') as fh:
ont_json = json.load(fh)
rows = []
for top_key, entries in ont_json.items():
for entry_key, examples in entries.items():
entry_str = '%s/%s' % (top_key, entry_key)
            examples_str = ','.join(examples)
row = '%s\t%s' % (entry_str, examples_str)
rows.append(row)
with open(fname, 'w') as fh:
fh.write('\n'.join(rows))
if __name__ == '__main__':
ont_json_file = sys.argv[1]
fname = 'sofia_ontology_examples.tsv'
make_file(ont_json_file, fname)
|
<commit_before><commit_msg>Make SOFIA TSV for ontology mapping<commit_after>import sys
import json
def make_file(ont_json_file, fname):
with open(ont_json_file, 'r') as fh:
ont_json = json.load(fh)
rows = []
for top_key, entries in ont_json.items():
for entry_key, examples in entries.items():
entry_str = '%s/%s' % (top_key, entry_key)
            examples_str = ','.join(examples)
row = '%s\t%s' % (entry_str, examples_str)
rows.append(row)
with open(fname, 'w') as fh:
fh.write('\n'.join(rows))
if __name__ == '__main__':
ont_json_file = sys.argv[1]
fname = 'sofia_ontology_examples.tsv'
make_file(ont_json_file, fname)
|
|
1ef08f6d940f47033df003dd641e61e718b2ca57
|
src/tests/test_beeper.py
|
src/tests/test_beeper.py
|
from unittest import mock
from geometry_msgs.msg import PoseStamped
import rospy
from beeper import Beeper
class TestBeeper(object):
def setup(self):
rospy.init_node("beeper_test", anonymous=True)
self.beeper = Beeper()
self.pose = PoseStamped()
@mock.patch.object(Beeper, "beep", autospec=True)
def test_beep(self, mock_beep):
self.beeper.beep()
mock_beep.assert_called_with(self.beeper)
|
Add test file for beeper logic
|
Add test file for beeper logic
|
Python
|
mit
|
masasin/spirit,masasin/spirit
|
Add test file for beeper logic
|
from unittest import mock
from geometry_msgs.msg import PoseStamped
import rospy
from beeper import Beeper
class TestBeeper(object):
def setup(self):
rospy.init_node("beeper_test", anonymous=True)
self.beeper = Beeper()
self.pose = PoseStamped()
@mock.patch.object(Beeper, "beep", autospec=True)
def test_beep(self, mock_beep):
self.beeper.beep()
mock_beep.assert_called_with(self.beeper)
|
<commit_before><commit_msg>Add test file for beeper logic<commit_after>
|
from unittest import mock
from geometry_msgs.msg import PoseStamped
import rospy
from beeper import Beeper
class TestBeeper(object):
def setup(self):
rospy.init_node("beeper_test", anonymous=True)
self.beeper = Beeper()
self.pose = PoseStamped()
@mock.patch.object(Beeper, "beep", autospec=True)
def test_beep(self, mock_beep):
self.beeper.beep()
mock_beep.assert_called_with(self.beeper)
|
Add test file for beeper logic
from unittest import mock
from geometry_msgs.msg import PoseStamped
import rospy
from beeper import Beeper
class TestBeeper(object):
def setup(self):
rospy.init_node("beeper_test", anonymous=True)
self.beeper = Beeper()
self.pose = PoseStamped()
@mock.patch.object(Beeper, "beep", autospec=True)
def test_beep(self, mock_beep):
self.beeper.beep()
mock_beep.assert_called_with(self.beeper)
|
<commit_before><commit_msg>Add test file for beeper logic<commit_after>from unittest import mock
from geometry_msgs.msg import PoseStamped
import rospy
from beeper import Beeper
class TestBeeper(object):
def setup(self):
rospy.init_node("beeper_test", anonymous=True)
self.beeper = Beeper()
self.pose = PoseStamped()
@mock.patch.object(Beeper, "beep", autospec=True)
def test_beep(self, mock_beep):
self.beeper.beep()
mock_beep.assert_called_with(self.beeper)
|
|
af5840c8f223d334997440878d0a7e7eaeb19be4
|
test/alltests.py
|
test/alltests.py
|
#!/usr/bin/env python
"""Build the documentation, and run the JavaScript tests."""
import sphinx.cmdline
import os
import sys
# Build the documentation for vcwebedit.
source_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..', 'doc')
build_dir = os.path.join(source_dir, '_build')
# Build html, warning treated as errors, output the doctrees to
# build_dir/doctrees.
sphinx_argv = ['sphinx-build',
'-b', 'html',
'-W',
'-d', os.path.join(build_dir, 'doctrees'),
source_dir, build_dir]
print("Building vcwebedit documentation with:\n")
print(' '.join(sphinx_argv))
build_result = sphinx.cmdline.main(sphinx_argv)
if build_result:
sys.stderr.write("sphinx-build failed.\n")
sys.exit(build_result)
else:
print("sphinx-build succeeded!")
# Exit success
sys.exit(0)
|
Add script to run tests.
|
Add script to run tests.
|
Python
|
apache-2.0
|
thewtex/vcwebedit,thewtex/vcwebedit
|
Add script to run tests.
|
#!/usr/bin/env python
"""Build the documentation, and run the JavaScript tests."""
import sphinx.cmdline
import os
import sys
# Build the documentation for vcwebedit.
source_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..', 'doc')
build_dir = os.path.join(source_dir, '_build')
# Build html, warning treated as errors, output the doctrees to
# build_dir/doctrees.
sphinx_argv = ['sphinx-build',
'-b', 'html',
'-W',
'-d', os.path.join(build_dir, 'doctrees'),
source_dir, build_dir]
print("Building vcwebedit documentation with:\n")
print(' '.join(sphinx_argv))
build_result = sphinx.cmdline.main(sphinx_argv)
if build_result:
sys.stderr.write("sphinx-build failed.\n")
sys.exit(build_result)
else:
print("sphinx-build succeeded!")
# Exit success
sys.exit(0)
|
<commit_before><commit_msg>Add script to run tests.<commit_after>
|
#!/usr/bin/env python
"""Build the documentation, and run the JavaScript tests."""
import sphinx.cmdline
import os
import sys
# Build the documentation for vcwebedit.
source_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..', 'doc')
build_dir = os.path.join(source_dir, '_build')
# Build html, warning treated as errors, output the doctrees to
# build_dir/doctrees.
sphinx_argv = ['sphinx-build',
'-b', 'html',
'-W',
'-d', os.path.join(build_dir, 'doctrees'),
source_dir, build_dir]
print("Building vcwebedit documentation with:\n")
print(' '.join(sphinx_argv))
build_result = sphinx.cmdline.main(sphinx_argv)
if build_result:
sys.stderr.write("sphinx-build failed.\n")
sys.exit(build_result)
else:
print("sphinx-build succeeded!")
# Exit success
sys.exit(0)
|
Add script to run tests.
#!/usr/bin/env python
"""Build the documentation, and run the JavaScript tests."""
import sphinx.cmdline
import os
import sys
# Build the documentation for vcwebedit.
source_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..', 'doc')
build_dir = os.path.join(source_dir, '_build')
# Build html, warning treated as errors, output the doctrees to
# build_dir/doctrees.
sphinx_argv = ['sphinx-build',
'-b', 'html',
'-W',
'-d', os.path.join(build_dir, 'doctrees'),
source_dir, build_dir]
print("Building vcwebedit documentation with:\n")
print(' '.join(sphinx_argv))
build_result = sphinx.cmdline.main(sphinx_argv)
if build_result:
sys.stderr.write("sphinx-build failed.\n")
sys.exit(build_result)
else:
print("sphinx-build succeeded!")
# Exit success
sys.exit(0)
|
<commit_before><commit_msg>Add script to run tests.<commit_after>#!/usr/bin/env python
"""Build the documentation, and run the JavaScript tests."""
import sphinx.cmdline
import os
import sys
# Build the documentation for vcwebedit.
source_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..', 'doc')
build_dir = os.path.join(source_dir, '_build')
# Build html, warning treated as errors, output the doctrees to
# build_dir/doctrees.
sphinx_argv = ['sphinx-build',
'-b', 'html',
'-W',
'-d', os.path.join(build_dir, 'doctrees'),
source_dir, build_dir]
print("Building vcwebedit documentation with:\n")
print(' '.join(sphinx_argv))
build_result = sphinx.cmdline.main(sphinx_argv)
if build_result:
sys.stderr.write("sphinx-build failed.\n")
sys.exit(build_result)
else:
print("sphinx-build succeeded!")
# Exit success
sys.exit(0)
|
|
72169b77a1d5cf92aa906b616b93f702b3ce55a1
|
spectra.py
|
spectra.py
|
import sys
import numpy as np
from realtime import *
class Spectra(object):
"""Return a spectra object that can plot the total absorption spectrum or
circular dichroism spectra for a given system. Can accept x,y,and z RT-TDDFT
log files"""
def __init__(self,x=None,y=None,z=None,s='abs',d=150):
self.spectra_type = s
self.damp_const = d
# Load all the RealTime objects
        self.directions = []
        # default the per-axis handles so align_data() can test them safely
        self.x = self.y = self.z = None
if x:
self.x = RealTime(x)
self.directions.append('x')
if y:
self.y = RealTime(y)
self.directions.append('y')
if z:
self.z = RealTime(z)
self.directions.append('z')
# Enforce consistent data lengths
        self.align_data()
# Do the isotropic fourier transform
for q in self.directions:
self.__dict__[q].fourier_tx(q,self.spectra_type,self.damp_const)
def plot(self,xlim=[0,15],ylim=None):
toEV = 27.2114
import matplotlib.pyplot as plt
ax = plt.subplot(111)
S = np.zeros_like(self.__dict__[self.directions[0]].fourier)
for q in self.directions:
frequency = self.__dict__[q].frequency
S += self.__dict__[q].fourier
ax.plot(frequency*toEV,S)
ax.set_xlim(xlim)
if not ylim:
if self.spectra_type == 'abs':
ax.set_ylim([0,5])
elif self.spectra_type == 'ecd':
ax.set_ylim([-5,5])
plt.show()
def align_data(self):
lengths = []
if self.x:
lengths.append(self.x.min_length)
if self.y:
lengths.append(self.y.min_length)
if self.z:
lengths.append(self.z.min_length)
min_length = min(lengths)
if self.x:
self.x.truncate(min_length)
if self.y:
self.y.truncate(min_length)
if self.z:
self.z.truncate(min_length)
if __name__ == '__main__':
spectra = Spectra(x='cd',s='abs',d=1000)
spectra.plot()
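A fuller usage sketch, assuming x-, y-, and z-kick RT-TDDFT logs that RealTime can parse (file names are hypothetical):
# isotropic ECD spectrum built from three perturbation directions
spec = Spectra(x='h2o_x.log', y='h2o_y.log', z='h2o_z.log', s='ecd', d=150)
spec.plot(xlim=[0, 10])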
|
Add Spectra class for plotting data from RealTime objects
|
Add Spectra class for plotting data from RealTime objects
|
Python
|
mit
|
wavefunction91/gaussian_realtime_parse,jjgoings/gaussian_realtime_parse
|
Add Spectra class for plotting data from RealTime objects
|
import sys
import numpy as np
from realtime import *
class Spectra(object):
"""Return a spectra object that can plot the total absorption spectrum or
circular dichroism spectra for a given system. Can accept x,y,and z RT-TDDFT
log files"""
def __init__(self,x=None,y=None,z=None,s='abs',d=150):
self.spectra_type = s
self.damp_const = d
# Load all the RealTime objects
        self.directions = []
        # default the per-axis handles so align_data() can test them safely
        self.x = self.y = self.z = None
if x:
self.x = RealTime(x)
self.directions.append('x')
if y:
self.y = RealTime(y)
self.directions.append('y')
if z:
self.z = RealTime(z)
self.directions.append('z')
# Enforce consistent data lengths
        self.align_data()
# Do the isotropic fourier transform
for q in self.directions:
self.__dict__[q].fourier_tx(q,self.spectra_type,self.damp_const)
def plot(self,xlim=[0,15],ylim=None):
toEV = 27.2114
import matplotlib.pyplot as plt
ax = plt.subplot(111)
S = np.zeros_like(self.__dict__[self.directions[0]].fourier)
for q in self.directions:
frequency = self.__dict__[q].frequency
S += self.__dict__[q].fourier
ax.plot(frequency*toEV,S)
ax.set_xlim(xlim)
if not ylim:
if self.spectra_type == 'abs':
ax.set_ylim([0,5])
elif self.spectra_type == 'ecd':
ax.set_ylim([-5,5])
plt.show()
def align_data(self):
lengths = []
if self.x:
lengths.append(self.x.min_length)
if self.y:
lengths.append(self.y.min_length)
if self.z:
lengths.append(self.z.min_length)
min_length = min(lengths)
if self.x:
self.x.truncate(min_length)
if self.y:
self.y.truncate(min_length)
if self.z:
self.z.truncate(min_length)
if __name__ == '__main__':
spectra = Spectra(x='cd',s='abs',d=1000)
spectra.plot()
|
<commit_before><commit_msg>Add Spectra class for plotting data from RealTime objects<commit_after>
|
import sys
import numpy as np
from realtime import *
class Spectra(object):
"""Return a spectra object that can plot the total absorption spectrum or
circular dichroism spectra for a given system. Can accept x,y,and z RT-TDDFT
log files"""
def __init__(self,x=None,y=None,z=None,s='abs',d=150):
self.spectra_type = s
self.damp_const = d
# Load all the RealTime objects
        self.directions = []
        # default the per-axis handles so align_data() can test them safely
        self.x = self.y = self.z = None
if x:
self.x = RealTime(x)
self.directions.append('x')
if y:
self.y = RealTime(y)
self.directions.append('y')
if z:
self.z = RealTime(z)
self.directions.append('z')
# Enforce consistent data lengths
        self.align_data()
# Do the isotropic fourier transform
for q in self.directions:
self.__dict__[q].fourier_tx(q,self.spectra_type,self.damp_const)
def plot(self,xlim=[0,15],ylim=None):
toEV = 27.2114
import matplotlib.pyplot as plt
ax = plt.subplot(111)
S = np.zeros_like(self.__dict__[self.directions[0]].fourier)
for q in self.directions:
frequency = self.__dict__[q].frequency
S += self.__dict__[q].fourier
ax.plot(frequency*toEV,S)
ax.set_xlim(xlim)
if not ylim:
if self.spectra_type == 'abs':
ax.set_ylim([0,5])
elif self.spectra_type == 'ecd':
ax.set_ylim([-5,5])
plt.show()
def align_data(self):
lengths = []
if self.x:
lengths.append(self.x.min_length)
if self.y:
lengths.append(self.y.min_length)
if self.z:
lengths.append(self.z.min_length)
min_length = min(lengths)
if self.x:
self.x.truncate(min_length)
if self.y:
self.y.truncate(min_length)
if self.z:
self.z.truncate(min_length)
if __name__ == '__main__':
spectra = Spectra(x='cd',s='abs',d=1000)
spectra.plot()
|
Add Spectra class for plotting data from RealTime objects
import sys
import numpy as np
from realtime import *
class Spectra(object):
"""Return a spectra object that can plot the total absorption spectrum or
circular dichroism spectra for a given system. Can accept x,y,and z RT-TDDFT
log files"""
def __init__(self,x=None,y=None,z=None,s='abs',d=150):
self.spectra_type = s
self.damp_const = d
# Load all the RealTime objects
        self.directions = []
        # default the per-axis handles so align_data() can test them safely
        self.x = self.y = self.z = None
if x:
self.x = RealTime(x)
self.directions.append('x')
if y:
self.y = RealTime(y)
self.directions.append('y')
if z:
self.z = RealTime(z)
self.directions.append('z')
# Enforce consistent data lengths
        self.align_data()
# Do the isotropic fourier transform
for q in self.directions:
self.__dict__[q].fourier_tx(q,self.spectra_type,self.damp_const)
def plot(self,xlim=[0,15],ylim=None):
toEV = 27.2114
import matplotlib.pyplot as plt
ax = plt.subplot(111)
S = np.zeros_like(self.__dict__[self.directions[0]].fourier)
for q in self.directions:
frequency = self.__dict__[q].frequency
S += self.__dict__[q].fourier
ax.plot(frequency*toEV,S)
ax.set_xlim(xlim)
if not ylim:
if self.spectra_type == 'abs':
ax.set_ylim([0,5])
elif self.spectra_type == 'ecd':
ax.set_ylim([-5,5])
plt.show()
def align_data(self):
lengths = []
if self.x:
lengths.append(self.x.min_length)
if self.y:
lengths.append(self.y.min_length)
if self.z:
lengths.append(self.z.min_length)
min_length = min(lengths)
if self.x:
self.x.truncate(min_length)
if self.y:
self.y.truncate(min_length)
if self.z:
self.z.truncate(min_length)
if __name__ == '__main__':
spectra = Spectra(x='cd',s='abs',d=1000)
spectra.plot()
|
<commit_before><commit_msg>Add Spectra class for plotting data from RealTime objects<commit_after>import sys
import numpy as np
from realtime import *
class Spectra(object):
"""Return a spectra object that can plot the total absorption spectrum or
circular dichroism spectra for a given system. Can accept x,y,and z RT-TDDFT
log files"""
def __init__(self,x=None,y=None,z=None,s='abs',d=150):
self.spectra_type = s
self.damp_const = d
# Load all the RealTime objects
        self.directions = []
        # default the per-axis handles so align_data() can test them safely
        self.x = self.y = self.z = None
if x:
self.x = RealTime(x)
self.directions.append('x')
if y:
self.y = RealTime(y)
self.directions.append('y')
if z:
self.z = RealTime(z)
self.directions.append('z')
# Enforce consistent data lengths
        self.align_data()
# Do the isotropic fourier transform
for q in self.directions:
self.__dict__[q].fourier_tx(q,self.spectra_type,self.damp_const)
def plot(self,xlim=[0,15],ylim=None):
toEV = 27.2114
import matplotlib.pyplot as plt
ax = plt.subplot(111)
S = np.zeros_like(self.__dict__[self.directions[0]].fourier)
for q in self.directions:
frequency = self.__dict__[q].frequency
S += self.__dict__[q].fourier
ax.plot(frequency*toEV,S)
ax.set_xlim(xlim)
if not ylim:
if self.spectra_type == 'abs':
ax.set_ylim([0,5])
elif self.spectra_type == 'ecd':
ax.set_ylim([-5,5])
plt.show()
def align_data(self):
lengths = []
if self.x:
lengths.append(self.x.min_length)
if self.y:
lengths.append(self.y.min_length)
if self.z:
lengths.append(self.z.min_length)
min_length = min(lengths)
if self.x:
self.x.truncate(min_length)
if self.y:
self.y.truncate(min_length)
if self.z:
self.z.truncate(min_length)
if __name__ == '__main__':
spectra = Spectra(x='cd',s='abs',d=1000)
spectra.plot()
|
|
e08ac069b2bee1afaaf022a884dd098dc7daac07
|
parseSubtitles.py
|
parseSubtitles.py
|
# the words "subtitles" and "captions" are used interchangeably
import re
def getVtt(fname):
with open(fname) as f:
content = f.readlines()
content = [x.strip() for x in content]
    p = re.compile(r'\d{2}:\d{2}:\d{2}\.\d{3} --> \d{2}:\d{2}:\d{2}\.\d{3}')
pureText = ""
pay_attention = False
for line in content:
if p.match(line) != None:
pay_attention = True # gets past the metadata in the first few lines
if pay_attention:
#do whatever you want
#print (line)
if p.match(line) != None:
pass
elif line == '':
pass
else:
pureText += line + " "
return pureText
# references:
# http://stackoverflow.com/questions/3277503/how-do-i-read-a-file-line-by-line-into-a-list
# http://stackoverflow.com/questions/11665582/regex-for-timestamp
# http://stackoverflow.com/questions/27805919/how-to-only-read-lines-in-a-text-file-after-a-certain-string-using-python
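A one-line usage sketch (the .vtt file name is hypothetical; getVtt expects WebVTT-style cue timestamps):
text = getVtt('talk.en.vtt')
print(text[:200])  # first few words of the flattened transcript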
|
Store perfectly working parser for one of the subtitle formats.
|
Store perfectly working parser for one of the subtitle formats.
|
Python
|
mit
|
aktivkohle/youtube-curation,aktivkohle/youtube-curation,aktivkohle/youtube-curation,aktivkohle/youtube-curation
|
Store perfectly working parser for one of the subtitle formats.
|
# the words "subtitles" and "captions" are used interchangeably
import re
def getVtt(fname):
with open(fname) as f:
content = f.readlines()
content = [x.strip() for x in content]
    p = re.compile(r'\d{2}:\d{2}:\d{2}\.\d{3} --> \d{2}:\d{2}:\d{2}\.\d{3}')
pureText = ""
pay_attention = False
for line in content:
if p.match(line) != None:
pay_attention = True # gets past the metadata in the first few lines
if pay_attention:
#do whatever you want
#print (line)
if p.match(line) != None:
pass
elif line == '':
pass
else:
pureText += line + " "
return pureText
# references:
# http://stackoverflow.com/questions/3277503/how-do-i-read-a-file-line-by-line-into-a-list
# http://stackoverflow.com/questions/11665582/regex-for-timestamp
# http://stackoverflow.com/questions/27805919/how-to-only-read-lines-in-a-text-file-after-a-certain-string-using-python
|
<commit_before><commit_msg>Store perfectly working parser for one of the subtitle formats.<commit_after>
|
# the words "subtitles" and "captions" are used interchangeably
import re
def getVtt(fname):
with open(fname) as f:
content = f.readlines()
content = [x.strip() for x in content]
    p = re.compile(r'\d{2}:\d{2}:\d{2}\.\d{3} --> \d{2}:\d{2}:\d{2}\.\d{3}')
pureText = ""
pay_attention = False
for line in content:
if p.match(line) != None:
pay_attention = True # gets past the metadata in the first few lines
if pay_attention:
#do whatever you want
#print (line)
if p.match(line) != None:
pass
elif line == '':
pass
else:
pureText += line + " "
return pureText
# references:
# http://stackoverflow.com/questions/3277503/how-do-i-read-a-file-line-by-line-into-a-list
# http://stackoverflow.com/questions/11665582/regex-for-timestamp
# http://stackoverflow.com/questions/27805919/how-to-only-read-lines-in-a-text-file-after-a-certain-string-using-python
|
Store perfectly working parser for one of the subtitle formats.
# the words "subtitles" and "captions" are used interchangeably
import re
def getVtt(fname):
with open(fname) as f:
content = f.readlines()
content = [x.strip() for x in content]
    p = re.compile(r'\d{2}:\d{2}:\d{2}\.\d{3} --> \d{2}:\d{2}:\d{2}\.\d{3}')
pureText = ""
pay_attention = False
for line in content:
if p.match(line) != None:
pay_attention = True # gets past the metadata in the first few lines
if pay_attention:
#do whatever you want
#print (line)
if p.match(line) != None:
pass
elif line == '':
pass
else:
pureText += line + " "
return pureText
# references:
# http://stackoverflow.com/questions/3277503/how-do-i-read-a-file-line-by-line-into-a-list
# http://stackoverflow.com/questions/11665582/regex-for-timestamp
# http://stackoverflow.com/questions/27805919/how-to-only-read-lines-in-a-text-file-after-a-certain-string-using-python
|
<commit_before><commit_msg>Store perfectly working parser for one of the subtitle formats.<commit_after># the words "subtitles" and "captions" are used interchangeably
import re
def getVtt(fname):
with open(fname) as f:
content = f.readlines()
content = [x.strip() for x in content]
    p = re.compile(r'\d{2}:\d{2}:\d{2}\.\d{3} --> \d{2}:\d{2}:\d{2}\.\d{3}')
pureText = ""
pay_attention = False
for line in content:
if p.match(line) != None:
pay_attention = True # gets past the metadata in the first few lines
if pay_attention:
#do whatever you want
#print (line)
if p.match(line) != None:
pass
elif line == '':
pass
else:
pureText += line + " "
return pureText
# references:
# http://stackoverflow.com/questions/3277503/how-do-i-read-a-file-line-by-line-into-a-list
# http://stackoverflow.com/questions/11665582/regex-for-timestamp
# http://stackoverflow.com/questions/27805919/how-to-only-read-lines-in-a-text-file-after-a-certain-string-using-python
|
|
7b4d626e9366ebe6e31d9835ce05f397056d5817
|
tile_generator/tile_unittest.py
|
tile_generator/tile_unittest.py
|
# tile-generator
#
# Copyright (c) 2015-Present Pivotal Software, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import unittest
from click.testing import CliRunner
import os
import shutil
import tempfile
from . import tile
class TestTileInit(unittest.TestCase):
def test_tile_init_works(self):
tmpdir = tempfile.mkdtemp()
try:
tiledir = os.path.join(tmpdir, 'my-tile')
runner = CliRunner()
result = runner.invoke(tile.init_cmd, [tiledir])
self.assertEqual(result.exit_code, 0)
self.assertTrue(os.path.isfile(os.path.join(tiledir, 'tile.yml')))
finally:
shutil.rmtree(tmpdir)
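To run just this case from a checkout, a sketch (assumes the package and click are importable):
import unittest
unittest.main(module='tile_generator.tile_unittest', argv=['tile'], exit=False)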
|
Add test for the "tile init" command.
|
Add test for the "tile init" command.
|
Python
|
apache-2.0
|
alex-slynko/tile-generator,cf-platform-eng/tile-generator,cf-platform-eng/tile-generator,alex-slynko/tile-generator,cf-platform-eng/tile-generator,alex-slynko/tile-generator,cf-platform-eng/tile-generator,alex-slynko/tile-generator
|
Add test for the "tile init" command.
|
# tile-generator
#
# Copyright (c) 2015-Present Pivotal Software, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import unittest
from click.testing import CliRunner
import os
import shutil
import tempfile
from . import tile
class TestTileInit(unittest.TestCase):
def test_tile_init_works(self):
tmpdir = tempfile.mkdtemp()
try:
tiledir = os.path.join(tmpdir, 'my-tile')
runner = CliRunner()
result = runner.invoke(tile.init_cmd, [tiledir])
self.assertEqual(result.exit_code, 0)
self.assertTrue(os.path.isfile(os.path.join(tiledir, 'tile.yml')))
finally:
shutil.rmtree(tmpdir)
|
<commit_before><commit_msg>Add test for the "tile init" command.<commit_after>
|
# tile-generator
#
# Copyright (c) 2015-Present Pivotal Software, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import unittest
from click.testing import CliRunner
import os
import shutil
import tempfile
from . import tile
class TestTileInit(unittest.TestCase):
def test_tile_init_works(self):
tmpdir = tempfile.mkdtemp()
try:
tiledir = os.path.join(tmpdir, 'my-tile')
runner = CliRunner()
result = runner.invoke(tile.init_cmd, [tiledir])
self.assertEqual(result.exit_code, 0)
self.assertTrue(os.path.isfile(os.path.join(tiledir, 'tile.yml')))
finally:
shutil.rmtree(tmpdir)
|
Add test for the "tile init" command.
# tile-generator
#
# Copyright (c) 2015-Present Pivotal Software, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import unittest
from click.testing import CliRunner
import os
import shutil
import tempfile
from . import tile
class TestTileInit(unittest.TestCase):
def test_tile_init_works(self):
tmpdir = tempfile.mkdtemp()
try:
tiledir = os.path.join(tmpdir, 'my-tile')
runner = CliRunner()
result = runner.invoke(tile.init_cmd, [tiledir])
self.assertEqual(result.exit_code, 0)
self.assertTrue(os.path.isfile(os.path.join(tiledir, 'tile.yml')))
finally:
shutil.rmtree(tmpdir)
|
<commit_before><commit_msg>Add test for the "tile init" command.<commit_after># tile-generator
#
# Copyright (c) 2015-Present Pivotal Software, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import unittest
from click.testing import CliRunner
import os
import shutil
import tempfile
from . import tile
class TestTileInit(unittest.TestCase):
def test_tile_init_works(self):
tmpdir = tempfile.mkdtemp()
try:
tiledir = os.path.join(tmpdir, 'my-tile')
runner = CliRunner()
result = runner.invoke(tile.init_cmd, [tiledir])
self.assertEqual(result.exit_code, 0)
self.assertTrue(os.path.isfile(os.path.join(tiledir, 'tile.yml')))
finally:
shutil.rmtree(tmpdir)
|
|
6e8bade6225d3a7108cc679f1fcbe371f7241278
|
councilmatic_core/migrations/0052_convert_last_action_date_to_datefield.py
|
councilmatic_core/migrations/0052_convert_last_action_date_to_datefield.py
|
# Generated by Django 2.2.16 on 2020-09-15 15:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('councilmatic_core', '0051_bill_last_action_date'),
]
operations = [
migrations.AlterField(
model_name='bill',
name='last_action_date',
field=models.DateField(blank=True, null=True),
),
]
|
Add migration for last_action_date field change
|
Add migration for last_action_date field change
|
Python
|
mit
|
datamade/django-councilmatic,datamade/django-councilmatic,datamade/django-councilmatic,datamade/django-councilmatic
|
Add migration for last_action_date field change
|
# Generated by Django 2.2.16 on 2020-09-15 15:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('councilmatic_core', '0051_bill_last_action_date'),
]
operations = [
migrations.AlterField(
model_name='bill',
name='last_action_date',
field=models.DateField(blank=True, null=True),
),
]
|
<commit_before><commit_msg>Add migration for last_action_date field change<commit_after>
|
# Generated by Django 2.2.16 on 2020-09-15 15:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('councilmatic_core', '0051_bill_last_action_date'),
]
operations = [
migrations.AlterField(
model_name='bill',
name='last_action_date',
field=models.DateField(blank=True, null=True),
),
]
|
Add migration for last_action_date field change
# Generated by Django 2.2.16 on 2020-09-15 15:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('councilmatic_core', '0051_bill_last_action_date'),
]
operations = [
migrations.AlterField(
model_name='bill',
name='last_action_date',
field=models.DateField(blank=True, null=True),
),
]
|
<commit_before><commit_msg>Add migration for last_action_date field change<commit_after># Generated by Django 2.2.16 on 2020-09-15 15:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('councilmatic_core', '0051_bill_last_action_date'),
]
operations = [
migrations.AlterField(
model_name='bill',
name='last_action_date',
field=models.DateField(blank=True, null=True),
),
]
|
|
a486899294faa18cf8d7c51dc12a2fe87b584804
|
salt/_modules/minealiases.py
|
salt/_modules/minealiases.py
|
def psf_internal(cidr):
return __salt__["ip_picker.ip_addrs"](cidr=cidr)
def pypi_internal(cidr):
return __salt__["ip_picker.ip_addrs"](cidr=cidr)
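These aliases only work inside the Salt loader, where __salt__ is injected. A hedged sketch of pointing the mine at one of them via pillar, written here as the equivalent Python dict (the CIDR value is a placeholder):
mine_functions = {
    'minealiases.psf_internal': [{'cidr': '10.0.0.0/8'}],
}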
|
Add some mine alias functions
|
Add some mine alias functions
|
Python
|
mit
|
dstufft/psf-salt,zooba/psf-salt,dstufft/psf-salt,zware/psf-salt,caktus/psf-salt,caktus/psf-salt,python/psf-salt,caktus/psf-salt,zooba/psf-salt,zooba/psf-salt,python/psf-salt,python/psf-salt,zware/psf-salt,zware/psf-salt,zware/psf-salt,dstufft/psf-salt,python/psf-salt
|
Add some mine alias functions
|
def psf_internal(cidr):
return __salt__["ip_picker.ip_addrs"](cidr=cidr)
def pypi_internal(cidr):
return __salt__["ip_picker.ip_addrs"](cidr=cidr)
|
<commit_before><commit_msg>Add some mine alias functions<commit_after>
|
def psf_internal(cidr):
return __salt__["ip_picker.ip_addrs"](cidr=cidr)
def pypi_internal(cidr):
return __salt__["ip_picker.ip_addrs"](cidr=cidr)
|
Add some mine alias functions
def psf_internal(cidr):
return __salt__["ip_picker.ip_addrs"](cidr=cidr)
def pypi_internal(cidr):
return __salt__["ip_picker.ip_addrs"](cidr=cidr)
|
<commit_before><commit_msg>Add some mine alias functions<commit_after>def psf_internal(cidr):
return __salt__["ip_picker.ip_addrs"](cidr=cidr)
def pypi_internal(cidr):
return __salt__["ip_picker.ip_addrs"](cidr=cidr)
|
|
3451b122a7b30c1aa43b56833051c6e1ae9c5874
|
github_follow.py
|
github_follow.py
|
#!/usr/bin/env python
from __future__ import absolute_import, print_function
import getpass
import requests
from requests.auth import HTTPBasicAuth
from hs_oauth import get_access_token, get_batches, get_people_in_a_batch
def follow_user(username, auth):
url = 'https://api.github.com/user/following/%s' % username
response = requests.put(url, auth=HTTPBasicAuth(*auth))
if not response.ok:
print('Failed to follow %s' % username)
else:
print('You are now following %s' % username)
def main():
# Authenticate with HS.
hs_username = raw_input('HS Email: ')
hs_password = getpass.getpass('HS Password: ')
gh_username = raw_input('GH Email: ')
gh_password = getpass.getpass('GH Password: ')
print('Authenticating as %s' % hs_username)
access_token, _ = get_access_token(username=hs_username, password=hs_password)
batches = get_batches(access_token)[::-1]
for batch in batches:
print('%s - %s' % (batch['id'], batch['name']))
selected_id = raw_input('Enter batch id, for the batch you wish to follow: ').strip()
batch = [b for b in batches if str(b['id']) == selected_id]
if len(batch) == 1:
for hacker in get_people_in_a_batch(batch[0]['id'], access_token):
gh_name = hacker['github']
if gh_name is not None:
follow_user(gh_name, (gh_username, gh_password))
else:
print('Invalid batch selected.')
if __name__ == '__main__':
main()
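follow_user can also be exercised directly; GitHub has since retired password basic auth, so a personal access token in the password slot is the safer assumption (both values below are placeholders):
follow_user('octocat', ('my-gh-user', 'ghp_xxx_personal_access_token'))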
|
Add script for batchwise GitHub follow.
|
Add script for batchwise GitHub follow.
|
Python
|
unlicense
|
punchagan/hs-twitter-lists
|
Add script for batchwise GitHub follow.
|
#!/usr/bin/env python
from __future__ import absolute_import, print_function
import getpass
import requests
from requests.auth import HTTPBasicAuth
from hs_oauth import get_access_token, get_batches, get_people_in_a_batch
def follow_user(username, auth):
url = 'https://api.github.com/user/following/%s' % username
response = requests.put(url, auth=HTTPBasicAuth(*auth))
if not response.ok:
print('Failed to follow %s' % username)
else:
print('You are now following %s' % username)
def main():
# Authenticate with HS.
hs_username = raw_input('HS Email: ')
hs_password = getpass.getpass('HS Password: ')
gh_username = raw_input('GH Email: ')
gh_password = getpass.getpass('GH Password: ')
print('Authenticating as %s' % hs_username)
access_token, _ = get_access_token(username=hs_username, password=hs_password)
batches = get_batches(access_token)[::-1]
for batch in batches:
print('%s - %s' % (batch['id'], batch['name']))
selected_id = raw_input('Enter batch id, for the batch you wish to follow: ').strip()
batch = [b for b in batches if str(b['id']) == selected_id]
if len(batch) == 1:
for hacker in get_people_in_a_batch(batch[0]['id'], access_token):
gh_name = hacker['github']
if gh_name is not None:
follow_user(gh_name, (gh_username, gh_password))
else:
print('Invalid batch selected.')
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script for batchwise GitHub follow.<commit_after>
|
#!/usr/bin/env python
from __future__ import absolute_import, print_function
import getpass
import requests
from requests.auth import HTTPBasicAuth
from hs_oauth import get_access_token, get_batches, get_people_in_a_batch
def follow_user(username, auth):
url = 'https://api.github.com/user/following/%s' % username
response = requests.put(url, auth=HTTPBasicAuth(*auth))
if not response.ok:
print('Failed to follow %s' % username)
else:
print('You are now following %s' % username)
def main():
# Authenticate with HS.
hs_username = raw_input('HS Email: ')
hs_password = getpass.getpass('HS Password: ')
gh_username = raw_input('GH Email: ')
gh_password = getpass.getpass('GH Password: ')
print('Authenticating as %s' % hs_username)
access_token, _ = get_access_token(username=hs_username, password=hs_password)
batches = get_batches(access_token)[::-1]
for batch in batches:
print('%s - %s' % (batch['id'], batch['name']))
selected_id = raw_input('Enter batch id, for the batch you wish to follow: ').strip()
batch = [b for b in batches if str(b['id']) == selected_id]
if len(batch) == 1:
for hacker in get_people_in_a_batch(batch[0]['id'], access_token):
gh_name = hacker['github']
if gh_name is not None:
follow_user(gh_name, (gh_username, gh_password))
else:
print('Invalid batch selected.')
if __name__ == '__main__':
main()
|
Add script for batchwise GitHub follow.#!/usr/bin/env python
from __future__ import absolute_import, print_function
import getpass
import requests
from requests.auth import HTTPBasicAuth
from hs_oauth import get_access_token, get_batches, get_people_in_a_batch
def follow_user(username, auth):
url = 'https://api.github.com/user/following/%s' % username
response = requests.put(url, auth=HTTPBasicAuth(*auth))
if not response.ok:
print('Failed to follow %s' % username)
else:
print('You are now following %s' % username)
def main():
# Authenticate with HS.
hs_username = raw_input('HS Email: ')
hs_password = getpass.getpass('HS Password: ')
gh_username = raw_input('GH Email: ')
gh_password = getpass.getpass('GH Password: ')
print('Authenticating as %s' % hs_username)
access_token, _ = get_access_token(username=hs_username, password=hs_password)
batches = get_batches(access_token)[::-1]
for batch in batches:
print('%s - %s' % (batch['id'], batch['name']))
selected_id = raw_input('Enter batch id, for the batch you wish to follow: ').strip()
batch = [b for b in batches if str(b['id']) == selected_id]
if len(batch) == 1:
for hacker in get_people_in_a_batch(batch[0]['id'], access_token):
gh_name = hacker['github']
if gh_name is not None:
follow_user(gh_name, (gh_username, gh_password))
else:
print('Invalid batch selected.')
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script for batchwise GitHub follow.<commit_after>#!/usr/bin/env python
from __future__ import absolute_import, print_function
import getpass
import requests
from requests.auth import HTTPBasicAuth
from hs_oauth import get_access_token, get_batches, get_people_in_a_batch
def follow_user(username, auth):
url = 'https://api.github.com/user/following/%s' % username
response = requests.put(url, auth=HTTPBasicAuth(*auth))
if not response.ok:
print('Failed to follow %s' % username)
else:
print('You are now following %s' % username)
def main():
# Authenticate with HS.
hs_username = raw_input('HS Email: ')
hs_password = getpass.getpass('HS Password: ')
gh_username = raw_input('GH Email: ')
gh_password = getpass.getpass('GH Password: ')
print('Authenticating as %s' % hs_username)
access_token, _ = get_access_token(username=hs_username, password=hs_password)
batches = get_batches(access_token)[::-1]
for batch in batches:
print('%s - %s' % (batch['id'], batch['name']))
selected_id = raw_input('Enter batch id, for the batch you wish to follow: ').strip()
batch = [b for b in batches if str(b['id']) == selected_id]
if len(batch) == 1:
for hacker in get_people_in_a_batch(batch[0]['id'], access_token):
gh_name = hacker['github']
if gh_name is not None:
follow_user(gh_name, (gh_username, gh_password))
else:
print('Invalid batch selected.')
if __name__ == '__main__':
main()
|
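One caveat on the script above: GitHub removed password-based Basic auth for its REST API in 2020, so the password prompt no longer works against api.github.com. A minimal sketch of the same follow call with a personal access token standing in for the password (function and variable names here are illustrative):

import requests
from requests.auth import HTTPBasicAuth

def follow_user_with_token(username, gh_username, token):
    # The token simply takes the password's place in HTTP Basic auth.
    url = 'https://api.github.com/user/following/%s' % username
    return requests.put(url, auth=HTTPBasicAuth(gh_username, token)).ok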
|
3dd24e35cbc79a07af49c86cc8dcdb2b20d91805
|
final/problem3.py
|
final/problem3.py
|
# Problem 3
# 10.0 points possible (graded)
# Numbers in Mandarin follow 3 simple rules:
# There are words for each of the digits from 0 to 10.
# For numbers 11-19, the number is pronounced as "ten digit", so for example, 16 would be pronounced (using Mandarin) as "ten six".
# For numbers between 20 and 99, the number is pronounced as “digit ten digit”, so for example, 37 would be pronounced (using Mandarin) as "three ten seven". If the digit is a zero, it is not included.
# We want to write a procedure that converts an American number (between 0 and 99), written as a string, into the equivalent Mandarin.
def convert_to_mandarin(us_num):
'''
us_num, a string representing a US number 0 to 99
returns the string mandarin representation of us_num
'''
trans = {'0':'ling', '1':'yi', '2':'er', '3':'san', '4': 'si',
'5':'wu', '6':'liu', '7':'qi', '8':'ba', '9':'jiu', '10': 'shi'}
us_integer = int(us_num)
if us_integer <= 10 and us_integer >= 0:
return trans[us_num]
else:
stringNum = ""
listNum = list(us_num)
if us_integer <= 19 and us_integer > 10:
stringNum += 'shi '
stringNum += trans[listNum[1]]
return stringNum
elif us_integer >= 20 and us_integer < 100:
stringNum += trans[listNum[0]]
stringNum += ' shi'
if listNum[1] != '0':
stringNum += " "
stringNum += trans[listNum[1]]
return stringNum
# convert_to_mandarin('36') will return san shi liu
# convert_to_mandarin('20') will return er shi
# convert_to_mandarin('16') will return shi liu
print(convert_to_mandarin('16'))
|
Write a procedure that converts an American number into the equivalent Mandarin.
|
Write a procedure that converts an American number into the equivalent Mandarin.
|
Python
|
mit
|
Kunal57/MIT_6.00.1x
|
Write a procedure that converts an American number into the equivalent Mandarin.
|
# Problem 3
# 10.0 points possible (graded)
# Numbers in Mandarin follow 3 simple rules:
# There are words for each of the digits from 0 to 10.
# For numbers 11-19, the number is pronounced as "ten digit", so for example, 16 would be pronounced (using Mandarin) as "ten six".
# For numbers between 20 and 99, the number is pronounced as “digit ten digit”, so for example, 37 would be pronounced (using Mandarin) as "three ten seven". If the digit is a zero, it is not included.
# We want to write a procedure that converts an American number (between 0 and 99), written as a string, into the equivalent Mandarin.
def convert_to_mandarin(us_num):
'''
us_num, a string representing a US number 0 to 99
returns the string mandarin representation of us_num
'''
trans = {'0':'ling', '1':'yi', '2':'er', '3':'san', '4': 'si',
'5':'wu', '6':'liu', '7':'qi', '8':'ba', '9':'jiu', '10': 'shi'}
us_integer = int(us_num)
if us_integer <= 10 and us_integer >= 0:
return trans[us_num]
else:
stringNum = ""
listNum = list(us_num)
if us_integer <= 19 and us_integer > 10:
stringNum += 'shi '
stringNum += trans[listNum[1]]
return stringNum
elif us_integer >= 20 and us_integer < 100:
stringNum += trans[listNum[0]]
stringNum += ' shi'
if listNum[1] != '0':
stringNum += " "
stringNum += trans[listNum[1]]
return stringNum
# convert_to_mandarin('36') will return san shi liu
# convert_to_mandarin('20') will return er shi
# convert_to_mandarin('16') will return shi liu
print(convert_to_mandarin('16'))
|
<commit_before><commit_msg>Write a procedure that converts an American number into the equivalent Mandarin.<commit_after>
|
# Problem 3
# 10.0 points possible (graded)
# Numbers in Mandarin follow 3 simple rules:
# There are words for each of the digits from 0 to 10.
# For numbers 11-19, the number is pronounced as "ten digit", so for example, 16 would be pronounced (using Mandarin) as "ten six".
# For numbers between 20 and 99, the number is pronounced as “digit ten digit”, so for example, 37 would be pronounced (using Mandarin) as "three ten seven". If the digit is a zero, it is not included.
# We want to write a procedure that converts an American number (between 0 and 99), written as a string, into the equivalent Mandarin.
def convert_to_mandarin(us_num):
'''
us_num, a string representing a US number 0 to 99
returns the string mandarin representation of us_num
'''
trans = {'0':'ling', '1':'yi', '2':'er', '3':'san', '4': 'si',
'5':'wu', '6':'liu', '7':'qi', '8':'ba', '9':'jiu', '10': 'shi'}
us_integer = int(us_num)
if us_integer <= 10 and us_integer >= 0:
return trans[us_num]
else:
stringNum = ""
listNum = list(us_num)
if us_integer <= 19 and us_integer > 10:
stringNum += 'shi '
stringNum += trans[listNum[1]]
return stringNum
elif us_integer >= 20 and us_integer < 100:
stringNum += trans[listNum[0]]
stringNum += ' shi'
if listNum[1] != '0':
stringNum += " "
stringNum += trans[listNum[1]]
return stringNum
# convert_to_mandarin('36') will return san shi liu
# convert_to_mandarin('20') will return er shi
# convert_to_mandarin('16') will return shi liu
print(convert_to_mandarin('16'))
|
Write a procedure that converts an American number into the equivalent Mandarin.# Problem 3
# 10.0 points possible (graded)
# Numbers in Mandarin follow 3 simple rules:
# There are words for each of the digits from 0 to 10.
# For numbers 11-19, the number is pronounced as "ten digit", so for example, 16 would be pronounced (using Mandarin) as "ten six".
# For numbers between 20 and 99, the number is pronounced as “digit ten digit”, so for example, 37 would be pronounced (using Mandarin) as "three ten seven". If the digit is a zero, it is not included.
# We want to write a procedure that converts an American number (between 0 and 99), written as a string, into the equivalent Mandarin.
def convert_to_mandarin(us_num):
'''
us_num, a string representing a US number 0 to 99
returns the string mandarin representation of us_num
'''
trans = {'0':'ling', '1':'yi', '2':'er', '3':'san', '4': 'si',
'5':'wu', '6':'liu', '7':'qi', '8':'ba', '9':'jiu', '10': 'shi'}
us_integer = int(us_num)
if us_integer <= 10 and us_integer >= 0:
return trans[us_num]
else:
stringNum = ""
listNum = list(us_num)
if us_integer <= 19 and us_integer > 10:
stringNum += 'shi '
stringNum += trans[listNum[1]]
return stringNum
elif us_integer >= 20 and us_integer < 100:
stringNum += trans[listNum[0]]
stringNum += ' shi'
if listNum[1] != '0':
stringNum += " "
stringNum += trans[listNum[1]]
return stringNum
# convert_to_mandarin('36') will return san shi liu
# convert_to_mandarin('20') will return er shi
# convert_to_mandarin('16') will return shi liu
print(convert_to_mandarin('16'))
|
<commit_before><commit_msg>Write a procedure that converts an American number into the equivalent Mandarin.<commit_after># Problem 3
# 10.0 points possible (graded)
# Numbers in Mandarin follow 3 simple rules:
# There are words for each of the digits from 0 to 10.
# For numbers 11-19, the number is pronounced as "ten digit", so for example, 16 would be pronounced (using Mandarin) as "ten six".
# For numbers between 20 and 99, the number is pronounced as “digit ten digit”, so for example, 37 would be pronounced (using Mandarin) as "three ten seven". If the digit is a zero, it is not included.
# We want to write a procedure that converts an American number (between 0 and 99), written as a string, into the equivalent Mandarin.
def convert_to_mandarin(us_num):
'''
us_num, a string representing a US number 0 to 99
returns the string mandarin representation of us_num
'''
trans = {'0':'ling', '1':'yi', '2':'er', '3':'san', '4': 'si',
'5':'wu', '6':'liu', '7':'qi', '8':'ba', '9':'jiu', '10': 'shi'}
us_integer = int(us_num)
if us_integer <= 10 and us_integer >= 0:
return trans[us_num]
else:
stringNum = ""
listNum = list(us_num)
if us_integer <= 19 and us_integer > 10:
stringNum += 'shi '
stringNum += trans[listNum[1]]
return stringNum
elif us_integer >= 20 and us_integer < 100:
stringNum += trans[listNum[0]]
stringNum += ' shi'
if listNum[1] != '0':
stringNum += " "
stringNum += trans[listNum[1]]
return stringNum
# convert_to_mandarin('36') will return san shi liu
# convert_to_mandarin('20') will return er shi
# convert_to_mandarin('16') will return shi liu
print(convert_to_mandarin('16'))
|
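A few extra sanity checks against the rules stated in the comments (these asserts are an illustrative addition, not part of the graded problem):

assert convert_to_mandarin('36') == 'san shi liu'
assert convert_to_mandarin('20') == 'er shi'
assert convert_to_mandarin('16') == 'shi liu'
assert convert_to_mandarin('7') == 'qi'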
|
8ac395498ed6efd8557bf0aac8d9fb75469019a6
|
bin/religiousforums_scraper.py
|
bin/religiousforums_scraper.py
|
#!/usr/bin/env python
import argparse
import os
import os.path
from bs4 import BeautifulSoup
import requests
def post_filter(tag):
if tag.name != 'blockquote':
return False
if not tag.has_attr('class'):
return False
return isinstance(tag['class'], list) and 'messageText' in tag['class']
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Scrape Religiousforums '
'forum')
parser.add_argument('id', type=str, help='Thread ID')
parser.add_argument('-p', '--page', type=int, default=1,
help='Page number')
parser.add_argument('-o', '--outdir', type=str, help='Output directory')
parser.add_argument('-n', '--number', type=int, default=0,
help='Start post number')
args = parser.parse_args()
url = 'https://www.religiousforums.com/threads/{}/page-{}'
url = url.format(args.id, args.page)
r = requests.get(url)
if r.status_code == 200:
# Create save directory
if args.outdir is not None:
savedir = os.path.join(args.outdir, args.id)
else:
savedir = args.id
os.makedirs(savedir, exist_ok=True)
soup = BeautifulSoup(r.text)
for i, post in enumerate(soup.find_all(post_filter)):
num = i + args.number
savefile = os.path.join(savedir, 'post-{}.txt'.format(num))
with open(savefile, 'w') as fout:
print('\n'.join(list(post.stripped_strings)), file=fout)
|
Write scraper script for Religiousforums forum
|
Write scraper script for Religiousforums forum
|
Python
|
mit
|
kemskems/otdet
|
Write scraper script for Religiousforums forum
|
#!/usr/bin/env python
import argparse
import os
import os.path
from bs4 import BeautifulSoup
import requests
def post_filter(tag):
if tag.name != 'blockquote':
return False
if not tag.has_attr('class'):
return False
return isinstance(tag['class'], list) and 'messageText' in tag['class']
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Scrape Religiousforums '
'forum')
parser.add_argument('id', type=str, help='Thread ID')
parser.add_argument('-p', '--page', type=int, default=1,
help='Page number')
parser.add_argument('-o', '--outdir', type=str, help='Output directory')
parser.add_argument('-n', '--number', type=int, default=0,
help='Start post number')
args = parser.parse_args()
url = 'https://www.religiousforums.com/threads/{}/page-{}'
url = url.format(args.id, args.page)
r = requests.get(url)
if r.status_code == 200:
# Create save directory
if args.outdir is not None:
savedir = os.path.join(args.outdir, args.id)
else:
savedir = args.id
os.makedirs(savedir, exist_ok=True)
soup = BeautifulSoup(r.text)
for i, post in enumerate(soup.find_all(post_filter)):
num = i + args.number
savefile = os.path.join(savedir, 'post-{}.txt'.format(num))
with open(savefile, 'w') as fout:
print('\n'.join(list(post.stripped_strings)), file=fout)
|
<commit_before><commit_msg>Write scraper script for Religiousforums forum<commit_after>
|
#!/usr/bin/env python
import argparse
import os
import os.path
from bs4 import BeautifulSoup
import requests
def post_filter(tag):
if tag.name != 'blockquote':
return False
if not tag.has_attr('class'):
return False
return isinstance(tag['class'], list) and 'messageText' in tag['class']
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Scrape Religiousforums '
'forum')
parser.add_argument('id', type=str, help='Thread ID')
parser.add_argument('-p', '--page', type=int, default=1,
help='Page number')
parser.add_argument('-o', '--outdir', type=str, help='Output directory')
parser.add_argument('-n', '--number', type=int, default=0,
help='Start post number')
args = parser.parse_args()
url = 'https://www.religiousforums.com/threads/{}/page-{}'
url = url.format(args.id, args.page)
r = requests.get(url)
if r.status_code == 200:
# Create save directory
if args.outdir is not None:
savedir = os.path.join(args.outdir, args.id)
else:
savedir = args.id
os.makedirs(savedir, exist_ok=True)
soup = BeautifulSoup(r.text)
for i, post in enumerate(soup.find_all(post_filter)):
num = i + args.number
savefile = os.path.join(savedir, 'post-{}.txt'.format(num))
with open(savefile, 'w') as fout:
print('\n'.join(list(post.stripped_strings)), file=fout)
|
Write scraper script for Religiousforums forum#!/usr/bin/env python
import argparse
import os
import os.path
from bs4 import BeautifulSoup
import requests
def post_filter(tag):
if tag.name != 'blockquote':
return False
if not tag.has_attr('class'):
return False
return isinstance(tag['class'], list) and 'messageText' in tag['class']
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Scrape Religiousforums '
'forum')
parser.add_argument('id', type=str, help='Thread ID')
parser.add_argument('-p', '--page', type=int, default=1,
help='Page number')
parser.add_argument('-o', '--outdir', type=str, help='Output directory')
parser.add_argument('-n', '--number', type=int, default=0,
help='Start post number')
args = parser.parse_args()
url = 'https://www.religiousforums.com/threads/{}/page-{}'
url = url.format(args.id, args.page)
r = requests.get(url)
if r.status_code == 200:
# Create save directory
if args.outdir is not None:
savedir = os.path.join(args.outdir, args.id)
else:
savedir = args.id
os.makedirs(savedir, exist_ok=True)
soup = BeautifulSoup(r.text)
for i, post in enumerate(soup.find_all(post_filter)):
num = i + args.number
savefile = os.path.join(savedir, 'post-{}.txt'.format(num))
with open(savefile, 'w') as fout:
print('\n'.join(list(post.stripped_strings)), file=fout)
|
<commit_before><commit_msg>Write scraper script for Religiousforums forum<commit_after>#!/usr/bin/env python
import argparse
import os
import os.path
from bs4 import BeautifulSoup
import requests
def post_filter(tag):
if tag.name != 'blockquote':
return False
if not tag.has_attr('class'):
return False
return isinstance(tag['class'], list) and 'messageText' in tag['class']
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Scrape Religiousforums '
'forum')
parser.add_argument('id', type=str, help='Thread ID')
parser.add_argument('-p', '--page', type=int, default=1,
help='Page number')
parser.add_argument('-o', '--outdir', type=str, help='Output directory')
parser.add_argument('-n', '--number', type=int, default=0,
help='Start post number')
args = parser.parse_args()
url = 'https://www.religiousforums.com/threads/{}/page-{}'
url = url.format(args.id, args.page)
r = requests.get(url)
if r.status_code == 200:
# Create save directory
if args.outdir is not None:
savedir = os.path.join(args.outdir, args.id)
else:
savedir = args.id
os.makedirs(savedir, exist_ok=True)
soup = BeautifulSoup(r.text)
for i, post in enumerate(soup.find_all(post_filter)):
num = i + args.number
savefile = os.path.join(savedir, 'post-{}.txt'.format(num))
with open(savefile, 'w') as fout:
print('\n'.join(list(post.stripped_strings)), file=fout)
|
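A small hedged refinement to the scraper: recent BeautifulSoup releases warn when no parser is named, so passing the stdlib parser explicitly keeps the output clean without changing behavior:

# Explicit parser choice; 'html.parser' ships with the standard library.
soup = BeautifulSoup(r.text, 'html.parser')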
|
6388ac01b61266ca1cb9a5d0c3eb063c3dff0a6f
|
usingnamespace/forms/schemaform.py
|
usingnamespace/forms/schemaform.py
|
from deform import Form
class SchemaFormMixin(object):
@classmethod
def create_form(cls, *arg, **kw):
schema_vars = {}
form_vars = {}
for sn in ['validator', 'preparer', 'after_bind']:
if sn in kw:
schema_vars[sn] = kw[sn]
del kw[sn]
for fv in ['action', 'buttons', 'method', 'formid', 'autocomplete',
'use_ajax', 'ajax_options']:
if fv in kw:
form_vars[fv] = kw[fv]
del kw[fv]
if 'validator' not in schema_vars and hasattr(cls, '__validator__'):
schema_vars['validator'] = cls.__validator__
if 'buttons' not in form_vars and hasattr(cls, '__buttons__'):
form_vars['buttons'] = cls.__buttons__
schema = cls(**schema_vars).bind(**kw)
form = Form(schema, **form_vars)
return (schema, form)
|
Add a MixIn that makes it simpler to instantiate a deform
|
Add a MixIn that makes it simpler to instantiate a deform
This adds a very simple MixIn that makes it easier to instantiate a form
along with providing the right values to the various moving pieces, such
as the schema, the bindings and the form itself. Less boilerplate to
write elsewhere.
|
Python
|
isc
|
usingnamespace/usingnamespace
|
Add a MixIn that makes it simpler to instantiate a deform
This adds a very simple MixIn that makes it easier to instantiate a form
along with providing the right values to the various moving pieces, such
as the schema, the bindings and the form itself. Less boilerplate to
write elsewhere.
|
from deform import Form
class SchemaFormMixin(object):
@classmethod
def create_form(cls, *arg, **kw):
schema_vars = {}
form_vars = {}
for sn in ['validator', 'preparer', 'after_bind']:
if sn in kw:
schema_vars[sn] = kw[sn]
del kw[sn]
for fv in ['action', 'buttons', 'method', 'formid', 'autocomplete',
'use_ajax', 'ajax_options']:
if fv in kw:
form_vars[fv] = kw[fv]
del kw[fv]
if 'validator' not in schema_vars and hasattr(cls, '__validator__'):
schema_vars['validator'] = cls.__validator__
if 'buttons' not in form_vars and hasattr(cls, '__buttons__'):
form_vars['buttons'] = cls.__buttons__
schema = cls(**schema_vars).bind(**kw)
form = Form(schema, **form_vars)
return (schema, form)
|
<commit_before><commit_msg>Add a MixIn that makes it simpler to instantiate a deform
This adds a very simple MixIn that makes it easier to instantiate a form
along with providing the right values to the various moving pieces, such
as the schema, the bindings and the form itself. Less boilerplate to
write elsewhere.<commit_after>
|
from deform import Form
class SchemaFormMixin(object):
@classmethod
def create_form(cls, *arg, **kw):
schema_vars = {}
form_vars = {}
for sn in ['validator', 'preparer', 'after_bind']:
if sn in kw:
schema_vars[sn] = kw[sn]
del kw[sn]
for fv in ['action', 'buttons', 'method', 'formid', 'autocomplete',
'use_ajax', 'ajax_options']:
if fv in kw:
form_vars[fv] = kw[fv]
del kw[fv]
if 'validator' not in schema_vars and hasattr(cls, '__validator__'):
schema_vars['validator'] = cls.__validator__
if 'buttons' not in form_vars and hasattr(cls, '__buttons__'):
form_vars['buttons'] = cls.__buttons__
schema = cls(**schema_vars).bind(**kw)
form = Form(schema, **form_vars)
return (schema, form)
|
Add a MixIn that makes it simpler to instantiate a deform
This adds a very simple MixIn that makes it easier to instantiate a form
along with providing the right values to the various moving pieces, such
as the schema, the bindings and the form itself. Less boilerplate to
write elsewhere.from deform import Form
class SchemaFormMixin(object):
@classmethod
def create_form(cls, *arg, **kw):
schema_vars = {}
form_vars = {}
for sn in ['validator', 'preparer', 'after_bind']:
if sn in kw:
schema_vars[sn] = kw[sn]
del kw[sn]
for fv in ['action', 'buttons', 'method', 'formid', 'autocomplete',
'use_ajax', 'ajax_options']:
if fv in kw:
form_vars[fv] = kw[fv]
del kw[fv]
if 'validator' not in schema_vars and hasattr(cls, '__validator__'):
schema_vars['validator'] = cls.__validator__
if 'buttons' not in form_vars and hasattr(cls, '__buttons__'):
form_vars['buttons'] = cls.__buttons__
schema = cls(**schema_vars).bind(**kw)
form = Form(schema, **form_vars)
return (schema, form)
|
<commit_before><commit_msg>Add a MixIn that makes it simpler to instantiate a deform
This adds a very simple MixIn that makes it easier to instantiate a form
along with providing the right values to the various moving pieces, such
as the schema, the bindings and the form itself. Less boilerplate to
write elsewhere.<commit_after>from deform import Form
class SchemaFormMixin(object):
@classmethod
def create_form(cls, *arg, **kw):
schema_vars = {}
form_vars = {}
for sn in ['validator', 'preparer', 'after_bind']:
if sn in kw:
schema_vars[sn] = kw[sn]
del kw[sn]
for fv in ['action', 'buttons', 'method', 'formid', 'autocomplete',
'use_ajax', 'ajax_options']:
if fv in kw:
form_vars[fv] = kw[fv]
del kw[fv]
if 'validator' not in schema_vars and hasattr(cls, '__validator__'):
schema_vars['validator'] = cls.__validator__
if 'buttons' not in form_vars and hasattr(cls, '__buttons__'):
form_vars['buttons'] = cls.__buttons__
schema = cls(**schema_vars).bind(**kw)
form = Form(schema, **form_vars)
return (schema, form)
|
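A hypothetical usage sketch for the mixin (schema and field names are invented for illustration and assume a colander schema class):

import colander
from deform import Button

class LoginSchema(SchemaFormMixin, colander.MappingSchema):
    __buttons__ = (Button('submit', 'Log in'),)
    username = colander.SchemaNode(colander.String())

# Form kwargs are split out automatically; anything left over goes to bind().
schema, form = LoginSchema.create_form(action='/login', formid='login')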
|
feb8db57d5a73593f066d63f10383e77576c226d
|
crawler/management/commands/recommendation_accuracy.py
|
crawler/management/commands/recommendation_accuracy.py
|
from django.core.management import BaseCommand
from crawler.models import *
class Command(BaseCommand):
help = 'Calculate recommendation accuracy per user'
def handle(self, *args, **options):
result_dict = dict()
users = User.objects.all()
for user in users:
count = 0
recommended_apps = user.recommended_apps.all()
recommended_apps_count = len(recommended_apps)
for app in recommended_apps:
if user.userapps_set.filter(package_name=app.package_name).exists():
count = count + 1
percentage = float(count) / recommended_apps_count
if percentage not in result_dict:
result_count = 0
else:
result_count = result_dict[percentage]
result_dict[percentage] = result_count + 1
admin_file = open('recommendation_accuracy', 'w')
admin_file.write('Percentage of Recommended Installed;Instances Count\n')
for key in result_dict:
admin_file.write('{};{}\n'.format(key, result_dict[key]))
admin_file.close()
|
Create command to calculate recommendation's accuracy
|
Create command to calculate recommendation's accuracy
|
Python
|
apache-2.0
|
bkosawa/admin-recommendation
|
Create command to calculate recommendation's accuracy
|
from django.core.management import BaseCommand
from crawler.models import *
class Command(BaseCommand):
help = 'Calculate recommendation accuracy per user'
def handle(self, *args, **options):
result_dict = dict()
users = User.objects.all()
for user in users:
count = 0
recommended_apps = user.recommended_apps.all()
recommended_apps_count = len(recommended_apps)
for app in recommended_apps:
if user.userapps_set.filter(package_name=app.package_name).exists():
count = count + 1
percentage = float(count) / recommended_apps_count
if percentage not in result_dict:
result_count = 0
else:
result_count = result_dict[percentage]
result_dict[percentage] = result_count + 1
admin_file = open('recommendation_accuracy', 'w')
admin_file.write('Percentage of Recommended Installed;Instances Count\n')
for key in result_dict:
admin_file.write('{};{}\n'.format(key, result_dict[key]))
admin_file.close()
|
<commit_before><commit_msg>Create command to calculate recommendation's accuracy<commit_after>
|
from django.core.management import BaseCommand
from crawler.models import *
class Command(BaseCommand):
help = 'Calculate recommendation accuracy per user'
def handle(self, *args, **options):
result_dict = dict()
users = User.objects.all()
for user in users:
count = 0
recommended_apps = user.recommended_apps.all()
recommended_apps_count = len(recommended_apps)
for app in recommended_apps:
if user.userapps_set.filter(package_name=app.package_name).exists():
count = count + 1
percentage = float(count) / recommended_apps_count
if percentage not in result_dict:
result_count = 0
else:
result_count = result_dict[percentage]
result_dict[percentage] = result_count + 1
admin_file = open('recommendation_accuracy', 'w')
admin_file.write('Percentage of Recommended Installed;Instances Count\n')
for key in result_dict:
admin_file.write('{};{}\n'.format(key, result_dict[key]))
admin_file.close()
|
Create command to calculate recommendation's accuracyfrom django.core.management import BaseCommand
from crawler.models import *
class Command(BaseCommand):
help = 'Calculate recommendation accuracy per user'
def handle(self, *args, **options):
result_dict = dict()
users = User.objects.all()
for user in users:
count = 0
recommended_apps = user.recommended_apps.all()
recommended_apps_count = len(recommended_apps)
for app in recommended_apps:
if user.userapps_set.filter(package_name=app.package_name).exists():
count = count + 1
percentage = float(count) / recommended_apps_count
if percentage not in result_dict:
result_count = 0
else:
result_count = result_dict[percentage]
result_dict[percentage] = result_count + 1
admin_file = open('recommendation_accuracy', 'w')
admin_file.write('Percentage of Recommended Installed;Instances Count\n')
for key in result_dict:
admin_file.write('{};{}\n'.format(key, result_dict[key]))
admin_file.close()
|
<commit_before><commit_msg>Create command to calculate recommendation's accuracy<commit_after>from django.core.management import BaseCommand
from crawler.models import *
class Command(BaseCommand):
help = 'Calculate recommendation accuracy per user'
def handle(self, *args, **options):
result_dict = dict()
users = User.objects.all()
for user in users:
count = 0
recommended_apps = user.recommended_apps.all()
recommended_apps_count = len(recommended_apps)
for app in recommended_apps:
if user.userapps_set.filter(package_name=app.package_name).exists():
count = count + 1
percentage = float(count) / recommended_apps_count
if percentage not in result_dict:
result_count = 0
else:
result_count = result_dict[percentage]
result_dict[percentage] = result_count + 1
admin_file = open('recommendation_accuracy', 'w')
admin_file.write('Percentage of Recommended Installed;Instances Count\n')
for key in result_dict:
admin_file.write('{};{}\n'.format(key, result_dict[key]))
admin_file.close()
|
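One hedged hardening note for the command above: a user whose recommended_apps set is empty makes the percentage division raise ZeroDivisionError. A guard at the top of the loop body (an illustrative addition, not in the original commit) sidesteps that:

if recommended_apps_count == 0:
    continue  # no recommendations, so accuracy is undefined for this user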
|
c32a12ed83ec033462126dfacf3fcf144626cebf
|
euler021.py
|
euler021.py
|
#!/usr/bin/python
"""
The first attempt at solving this was really brute force and slow;
this is a better solution, see the problem page for an explanation
"""
from math import floor, sqrt
LIMIT = 10000
result = 0
def d(n):
res = 0
r = floor(sqrt(n))
if r * r == n:
res = r + 1
r -= 1
else:
res = 1
f, step = 0, 0
if n % 2 == 1:
f, step = 3, 2
else:
f, step = 2, 1
while f <= r:
if n % f == 0:
res += f + (n // f)
f += step
return res
for test in range(2, LIMIT):
b = d(test)
if b > test:
if d(b) == test:
result += test + b
print(result)
|
Add solution for problem 21
|
Add solution for problem 21
|
Python
|
mit
|
cifvts/PyEuler
|
Add solution for problem 21
|
#!/usr/bin/python
"""
The first attempt at solving this was really brute force and slow;
this is a better solution, see the problem page for an explanation
"""
from math import floor, sqrt
LIMIT = 10000
result = 0
def d(n):
res = 0
r = floor(sqrt(n))
if r * r == n:
res = r + 1
r -= 1
else:
res = 1
f, step = 0, 0
if n % 2 == 1:
f, step = 3, 2
else:
f, step = 2, 1
while f <= r:
if n % f == 0:
res += f + (n // f)
f += step
return res
for test in range(2, LIMIT):
b = d(test)
if b > test:
if d(b) == test:
result += test + b
print(result)
|
<commit_before><commit_msg>Add solution for problem 21<commit_after>
|
#!/usr/bin/python
"""
The first attempt at solving this was really brute force and slow;
this is a better solution, see the problem page for an explanation
"""
from math import floor, sqrt
LIMIT = 10000
result = 0
def d(n):
res = 0
r = floor(sqrt(n))
if r * r == n:
res = r + 1
r -= 1
else:
res = 1
f, step = 0, 0
if n % 2 == 1:
f, step = 3, 2
else:
f, step = 2, 1
while f <= r:
if n % f == 0:
res += f + (n // f)
f += step
return res
for test in range(2, LIMIT):
b = d(test)
if b > test:
if d(b) == test:
result += test + b
print(result)
|
Add solution for problem 21#!/usr/bin/python
"""
The first attempt at solving this was really brute force and slow;
this is a better solution, see the problem page for an explanation
"""
from math import floor, sqrt
LIMIT = 10000
result = 0
def d(n):
res = 0
r = floor(sqrt(n))
if r * r == n:
res = r + 1
r -= 1
else:
res = 1
f, step = 0, 0
if n % 2 == 1:
f, step = 3, 2
else:
f, step = 2, 1
while f <= r:
if n % f == 0:
res += f + (n // f)
f += step
return res
for test in range(2, LIMIT):
b = d(test)
if b > test:
if d(b) == test:
result += test + b
print(result)
|
<commit_before><commit_msg>Add solution for problem 21<commit_after>#!/usr/bin/python
"""
The first attempt at solving this was really brute force and slow;
this is a better solution, see the problem page for an explanation
"""
from math import floor, sqrt
LIMIT = 10000
result = 0
def d(n):
res = 0
r = floor(sqrt(n))
if r * r == n:
res = r + 1
r -= 1
else:
res = 1
f, step = 0, 0
if n % 2 == 1:
f, step = 3, 2
else:
f, step = 2, 1
while f <= r:
if n % f == 0:
res += f + (n // f)
f += step
return res
for test in range(2, LIMIT):
b = d(test)
if b > test:
if d(b) == test:
result += test + b
print(result)
|
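As a quick sanity check, d() above can be exercised on the classic amicable pair (220, 284), whose proper divisors sum to each other:

assert d(220) == 284
assert d(284) == 220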
|
c3b531f6c1dcd719adfa69f72ae95e4018debbac
|
euler035.py
|
euler035.py
|
#!/usr/bin/python
from math import sqrt, ceil, floor, log10, pow
LIMIT = 1000000
def decimalShift(x):
dec = x % 10;
power = floor(log10(x))
x //= 10;
x += dec * int(pow(10, power))
return x
sievebound = (LIMIT - 1) // 2
sieve = [0] * (sievebound)
crosslimit = (floor(sqrt(LIMIT)) - 1) // 2
for i in range(1, crosslimit):
if not sieve[i]:
for j in range(2 * i * (i + 1), sievebound, 2 * i + 1):
sieve[j] = 1
count = 0
for i in range(sievebound):
if not sieve[i]:
prime = i * 2 + 1
test = prime
for j in range(ceil(log10(prime))):
test = decimalShift(test)
if test % 2 == 0 or sieve[(test - 1) // 2]:
break
if prime == test:
count += 1
print(count)
|
Add solution for problem 35
|
Add solution for problem 35
|
Python
|
mit
|
cifvts/PyEuler
|
Add solution for problem 35
|
#!/usr/bin/python
from math import sqrt, ceil, floor, log10, pow
LIMIT = 1000000
def decimalShift(x):
dec = x % 10;
power = floor(log10(x))
x //= 10;
x += dec * int(pow(10, power))
return x
sievebound = (LIMIT - 1) // 2
sieve = [0] * (sievebound)
crosslimit = (floor(sqrt(LIMIT)) - 1) // 2
for i in range(1, crosslimit):
if not sieve[i]:
for j in range(2 * i * (i + 1), sievebound, 2 * i + 1):
sieve[j] = 1
count = 0
for i in range(sievebound):
if not sieve[i]:
prime = i * 2 + 1
test = prime
for j in range(ceil(log10(prime))):
test = decimalShift(test)
if test % 2 == 0 or sieve[(test - 1) // 2]:
break
if prime == test:
count += 1
print(count)
|
<commit_before><commit_msg>Add solution for problem 35<commit_after>
|
#!/usr/bin/python
from math import sqrt, ceil, floor, log10, pow
LIMIT = 1000000
def decimalShift(x):
dec = x % 10;
power = floor(log10(x))
x //= 10;
x += dec * int(pow(10, power))
return x
sievebound = (LIMIT - 1) // 2
sieve = [0] * (sievebound)
crosslimit = (floor(sqrt(LIMIT)) - 1) // 2
for i in range(1, crosslimit):
if not sieve[i]:
for j in range(2 * i * (i + 1), sievebound, 2 * i + 1):
sieve[j] = 1
count = 0
for i in range(sievebound):
if not sieve[i]:
prime = i * 2 + 1
test = prime
for j in range(ceil(log10(prime))):
test = decimalShift(test)
if test % 2 == 0 or sieve[(test - 1) // 2]:
break
if prime == test:
count += 1
print(count)
|
Add solution for problem 35#!/usr/bin/python
from math import sqrt, ceil, floor, log10, pow
LIMIT = 1000000
def decimalShift(x):
dec = x % 10;
power = floor(log10(x))
x //= 10;
x += dec * int(pow(10, power))
return x
sievebound = (LIMIT - 1) // 2
sieve = [0] * (sievebound)
crosslimit = (floor(sqrt(LIMIT)) - 1) // 2
for i in range(1, crosslimit):
if not sieve[i]:
for j in range(2 * i * (i + 1), sievebound, 2 * i + 1):
sieve[j] = 1
count = 0
for i in range(sievebound):
if not sieve[i]:
prime = i * 2 + 1
test = prime
for j in range(ceil(log10(prime))):
test = decimalShift(test)
if test % 2 == 0 or sieve[(test - 1) // 2]:
break
if prime == test:
count += 1
print(count)
|
<commit_before><commit_msg>Add solution for problem 35<commit_after>#!/usr/bin/python
from math import sqrt, ceil, floor, log10, pow
LIMIT = 1000000
def decimalShift(x):
dec = x % 10;
power = floor(log10(x))
x //= 10;
x += dec * int(pow(10, power))
return x
sievebound = (LIMIT - 1) // 2
sieve = [0] * (sievebound)
crosslimit = (floor(sqrt(LIMIT)) - 1) // 2
for i in range(1, crosslimit):
if not sieve[i]:
for j in range(2 * i * (i + 1), sievebound, 2 * i + 1):
sieve[j] = 1
count = 0
for i in range(sievebound):
if not sieve[i]:
prime = i * 2 + 1
test = prime
for j in range(ceil(log10(prime))):
test = decimalShift(test)
if test % 2 == 0 or sieve[(test - 1) // 2]:
break
if prime == test:
count += 1
print(count)
|
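decimalShift() rotates the last digit to the front, which is exactly the rotation needed for circular primes; the well-known circular prime 197 makes a handy spot check:

assert decimalShift(197) == 719
assert decimalShift(719) == 971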
|
b0c87f3959b747f5ea12c74434e6361296be0db9
|
PVGeo/omf/__test__.py
|
PVGeo/omf/__test__.py
|
import unittest
import shutil
import tempfile
import os
import numpy as np
# VTK imports:
from vtk.util import numpy_support as nps
from .. import _helpers
# Functionality to test:
from .reader import *
RTOL = 0.000001
###############################################################################
class TestOMFReader(unittest.TestCase):
"""
Test the `OMFReader`
"""
    def test_compile(self):
"""Simply makes sure code compiles"""
reader = OMFReader()
return
|
Add compilation test for OMF
|
Add compilation test for OMF
|
Python
|
bsd-3-clause
|
banesullivan/ParaViewGeophysics,banesullivan/ParaViewGeophysics,banesullivan/ParaViewGeophysics
|
Add compilation test for OMF
|
import unittest
import shutil
import tempfile
import os
import numpy as np
# VTK imports:
from vtk.util import numpy_support as nps
from .. import _helpers
# Functionality to test:
from .reader import *
RTOL = 0.000001
###############################################################################
class TestOMFReader(unittest.TestCase):
"""
Test the `OMFReader`
"""
    def test_compile(self):
"""Simply makes sure code compiles"""
reader = OMFReader()
return
|
<commit_before><commit_msg>Add compilation test for OMF<commit_after>
|
import unittest
import shutil
import tempfile
import os
import numpy as np
# VTK imports:
from vtk.util import numpy_support as nps
from .. import _helpers
# Functionality to test:
from .reader import *
RTOL = 0.000001
###############################################################################
class TestOMFReader(unittest.TestCase):
"""
Test the `OMFReader`
"""
    def test_compile(self):
"""Simply makes sure code compiles"""
reader = OMFReader()
return
|
Add compilation test for OMFimport unittest
import shutil
import tempfile
import os
import numpy as np
# VTK imports:
from vtk.util import numpy_support as nps
from .. import _helpers
# Functionality to test:
from .reader import *
RTOL = 0.000001
###############################################################################
class TestOMFReader(unittest.TestCase):
"""
Test the `OMFReader`
"""
    def test_compile(self):
"""Simply makes sure code compiles"""
reader = OMFReader()
return
|
<commit_before><commit_msg>Add compilation test for OMF<commit_after>import unittest
import shutil
import tempfile
import os
import numpy as np
# VTK imports:
from vtk.util import numpy_support as nps
from .. import _helpers
# Functionality to test:
from .reader import *
RTOL = 0.000001
###############################################################################
class TestOMFReader(unittest.TestCase):
"""
Test the `OMFReader`
"""
    def test_compile(self):
"""Simply makes sure code compiles"""
reader = OMFReader()
return
|
|
43687a8c59c9ceb26073ba9517004426820ec45c
|
readthedocs/core/management/commands/whitelist_users.py
|
readthedocs/core/management/commands/whitelist_users.py
|
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from core.models import UserProfile
from projects import tasks
from projects.models import Project
class Command(BaseCommand):
def handle(self, *args, **kwargs):
for user in User.objects.filter(profile__whitelisted=False):
print "Whitelisting %s" % user
UserProfile.objects.create(user=user, whitelisted=True)
|
Add ability to easily whitelist users.
|
Add ability to easily whitelist users.
|
Python
|
mit
|
mrshoki/readthedocs.org,davidfischer/readthedocs.org,fujita-shintaro/readthedocs.org,attakei/readthedocs-oauth,VishvajitP/readthedocs.org,nikolas/readthedocs.org,fujita-shintaro/readthedocs.org,VishvajitP/readthedocs.org,wijerasa/readthedocs.org,kenshinthebattosai/readthedocs.org,agjohnson/readthedocs.org,royalwang/readthedocs.org,KamranMackey/readthedocs.org,attakei/readthedocs-oauth,royalwang/readthedocs.org,Tazer/readthedocs.org,michaelmcandrew/readthedocs.org,sunnyzwh/readthedocs.org,wijerasa/readthedocs.org,johncosta/private-readthedocs.org,d0ugal/readthedocs.org,rtfd/readthedocs.org,sid-kap/readthedocs.org,hach-que/readthedocs.org,davidfischer/readthedocs.org,rtfd/readthedocs.org,attakei/readthedocs-oauth,titiushko/readthedocs.org,jerel/readthedocs.org,clarkperkins/readthedocs.org,safwanrahman/readthedocs.org,agjohnson/readthedocs.org,sils1297/readthedocs.org,asampat3090/readthedocs.org,safwanrahman/readthedocs.org,royalwang/readthedocs.org,kdkeyser/readthedocs.org,laplaceliu/readthedocs.org,kdkeyser/readthedocs.org,nikolas/readthedocs.org,istresearch/readthedocs.org,kenwang76/readthedocs.org,d0ugal/readthedocs.org,takluyver/readthedocs.org,LukasBoersma/readthedocs.org,KamranMackey/readthedocs.org,laplaceliu/readthedocs.org,singingwolfboy/readthedocs.org,ojii/readthedocs.org,singingwolfboy/readthedocs.org,dirn/readthedocs.org,michaelmcandrew/readthedocs.org,wanghaven/readthedocs.org,Carreau/readthedocs.org,singingwolfboy/readthedocs.org,hach-que/readthedocs.org,dirn/readthedocs.org,mrshoki/readthedocs.org,johncosta/private-readthedocs.org,VishvajitP/readthedocs.org,nyergler/pythonslides,alex/readthedocs.org,cgourlay/readthedocs.org,attakei/readthedocs-oauth,kenshinthebattosai/readthedocs.org,gjtorikian/readthedocs.org,CedarLogic/readthedocs.org,SteveViss/readthedocs.org,sils1297/readthedocs.org,gjtorikian/readthedocs.org,gjtorikian/readthedocs.org,jerel/readthedocs.org,kdkeyser/readthedocs.org,Tazer/readthedocs.org,johncosta/private-readthedocs.org,michaelmcandrew/readthedocs.org,mhils/readthedocs.org,pombredanne/readthedocs.org,nikolas/readthedocs.org,alex/readthedocs.org,d0ugal/readthedocs.org,asampat3090/readthedocs.org,wanghaven/readthedocs.org,GovReady/readthedocs.org,jerel/readthedocs.org,fujita-shintaro/readthedocs.org,wanghaven/readthedocs.org,nikolas/readthedocs.org,VishvajitP/readthedocs.org,rtfd/readthedocs.org,nyergler/pythonslides,clarkperkins/readthedocs.org,techtonik/readthedocs.org,clarkperkins/readthedocs.org,hach-que/readthedocs.org,SteveViss/readthedocs.org,laplaceliu/readthedocs.org,istresearch/readthedocs.org,pombredanne/readthedocs.org,soulshake/readthedocs.org,kdkeyser/readthedocs.org,titiushko/readthedocs.org,stevepiercy/readthedocs.org,kenshinthebattosai/readthedocs.org,hach-que/readthedocs.org,LukasBoersma/readthedocs.org,nyergler/pythonslides,atsuyim/readthedocs.org,sils1297/readthedocs.org,davidfischer/readthedocs.org,espdev/readthedocs.org,agjohnson/readthedocs.org,michaelmcandrew/readthedocs.org,Carreau/readthedocs.org,GovReady/readthedocs.org,cgourlay/readthedocs.org,soulshake/readthedocs.org,techtonik/readthedocs.org,Carreau/readthedocs.org,d0ugal/readthedocs.org,Tazer/readthedocs.org,mrshoki/readthedocs.org,atsuyim/readthedocs.org,mhils/readthedocs.org,CedarLogic/readthedocs.org,tddv/readthedocs.org,raven47git/readthedocs.org,istresearch/readthedocs.org,clarkperkins/readthedocs.org,takluyver/readthedocs.org,sid-kap/readthedocs.org,wanghaven/readthedocs.org,espdev/readthedocs.org,tddv/readthedocs.org,kenwang76/readthedocs.org,KamranMackey/readthedocs.org,GovReady/readthedocs.org,LukasBoersma/readthedocs.org,safwanrahman/readthedocs.org,stevepiercy/readthedocs.org,kenwang76/readthedocs.org,istresearch/readthedocs.org,takluyver/readthedocs.org,sunnyzwh/readthedocs.org,espdev/readthedocs.org,ojii/readthedocs.org,atsuyim/readthedocs.org,techtonik/readthedocs.org,emawind84/readthedocs.org,laplaceliu/readthedocs.org,espdev/readthedocs.org,tddv/readthedocs.org,kenshinthebattosai/readthedocs.org,CedarLogic/readthedocs.org,SteveViss/readthedocs.org,stevepiercy/readthedocs.org,emawind84/readthedocs.org,soulshake/readthedocs.org,mhils/readthedocs.org,sunnyzwh/readthedocs.org,dirn/readthedocs.org,alex/readthedocs.org,GovReady/readthedocs.org,jerel/readthedocs.org,singingwolfboy/readthedocs.org,SteveViss/readthedocs.org,agjohnson/readthedocs.org,sid-kap/readthedocs.org,mhils/readthedocs.org,espdev/readthedocs.org,atsuyim/readthedocs.org,raven47git/readthedocs.org,dirn/readthedocs.org,kenwang76/readthedocs.org,sunnyzwh/readthedocs.org,techtonik/readthedocs.org,asampat3090/readthedocs.org,ojii/readthedocs.org,nyergler/pythonslides,wijerasa/readthedocs.org,stevepiercy/readthedocs.org,raven47git/readthedocs.org,rtfd/readthedocs.org,Carreau/readthedocs.org,CedarLogic/readthedocs.org,royalwang/readthedocs.org,soulshake/readthedocs.org,sid-kap/readthedocs.org,titiushko/readthedocs.org,LukasBoersma/readthedocs.org,mrshoki/readthedocs.org,pombredanne/readthedocs.org,gjtorikian/readthedocs.org,KamranMackey/readthedocs.org,cgourlay/readthedocs.org,wijerasa/readthedocs.org,cgourlay/readthedocs.org,Tazer/readthedocs.org
|
Add ability to easily whitelist users.
|
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from core.models import UserProfile
from projects import tasks
from projects.models import Project
class Command(BaseCommand):
def handle(self, *args, **kwargs):
for user in User.objects.filter(profile__whitelisted=False):
print "Whitelisting %s" % user
UserProfile.objects.create(user=user, whitelisted=True)
|
<commit_before><commit_msg>Add ability to easily whitelist users.<commit_after>
|
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from core.models import UserProfile
from projects import tasks
from projects.models import Project
class Command(BaseCommand):
def handle(self, *args, **kwargs):
for user in User.objects.filter(profile__whitelisted=False):
print "Whitelisting %s" % user
UserProfile.objects.create(user=user, whitelisted=True)
|
Add ability to easily whitelist users.from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from core.models import UserProfile
from projects import tasks
from projects.models import Project
class Command(BaseCommand):
def handle(self, *args, **kwargs):
for user in User.objects.filter(profile__whitelisted=False):
print "Whitelisting %s" % user
UserProfile.objects.create(user=user, whitelisted=True)
|
<commit_before><commit_msg>Add ability to easily whitelist users.<commit_after>from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from core.models import UserProfile
from projects import tasks
from projects.models import Project
class Command(BaseCommand):
def handle(self, *args, **kwargs):
for user in User.objects.filter(profile__whitelisted=False):
print "Whitelisting %s" % user
UserProfile.objects.create(user=user, whitelisted=True)
|
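A hedged alternative shape: because the queryset joins on an existing profile row, calling create() for such users adds a second UserProfile, which collides with a one-to-one relation. On Django versions that provide it, update_or_create is the safer sketch (a suggestion, not the original commit's behavior):

UserProfile.objects.update_or_create(user=user, defaults={'whitelisted': True})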
|
0fa2f20e7c79ce2ebe88b01847d875e37ff21e62
|
py/tests/scale.py
|
py/tests/scale.py
|
#!/usr/bin/python3
import pykms
import time
import random
card = pykms.Card()
res = pykms.ResourceManager(card)
conn = res.reserve_connector("hdmi")
crtc = res.reserve_crtc(conn)
plane = res.reserve_overlay_plane(crtc)
mode = conn.get_default_mode()
#mode = conn.get_mode(1920, 1080, 60, False)
# Blank framebuffer for primary plane
fb0 = pykms.DumbFramebuffer(card, mode.hdisplay, mode.vdisplay, "AR24");
crtc.set_mode(conn, fb0, mode)
# Initialize framebuffer for the scaled plane
fbX = 1920
fbY = 1080
fb = pykms.DumbFramebuffer(card, fbX, fbY, "RG16");
pykms.draw_test_pattern(fb);
# Plane's scaled size and size increments
W = 72
H = 54
Winc = 1
Hinc = 1
# Plane's position and position increments
X = 0
Y = 0
Xinc = 1
Yinc = 1
while True:
print("+%d+%d %dx%d" % (X, Y, W, H))
crtc.set_plane(plane, fb, X, Y, W, H, 0, 0, fbX, fbY)
W = W + Winc
H = H + Hinc
if (Winc == 1 and W >= mode.hdisplay - X):
Winc = -1
if (Winc == -1 and W <= fbX/32):
Winc = 1
if (Hinc == 1 and H >= mode.vdisplay - Y):
Hinc = -1
if (Hinc == -1 and H <= fbY/32):
Hinc = 1
X = X + Xinc
Y = Y + Yinc
if (Xinc == 1 and X >= mode.hdisplay - W):
Xinc = -1
if (Xinc == -1 and X <= 0):
Xinc = 1
if (Yinc == 1 and Y >= mode.vdisplay - H):
Yinc = -1
if (Yinc == -1 and Y <= 0):
Yinc = 1
|
Add a simple and hackish plane scaling test.
|
Add a simple and hackish plane scaling test.
Signed-off-by: Tomi Valkeinen <e1ca4dbb8be1acaf20734fecd2da10ed1d46a9bb@ti.com>
|
Python
|
mpl-2.0
|
tomba/kmsxx,tomba/kmsxx,tomba/kmsxx,tomba/kmsxx
|
Add a simple and hackish plane scaling test.
Signed-off-by: Tomi Valkeinen <e1ca4dbb8be1acaf20734fecd2da10ed1d46a9bb@ti.com>
|
#!/usr/bin/python3
import pykms
import time
import random
card = pykms.Card()
res = pykms.ResourceManager(card)
conn = res.reserve_connector("hdmi")
crtc = res.reserve_crtc(conn)
plane = res.reserve_overlay_plane(crtc)
mode = conn.get_default_mode()
#mode = conn.get_mode(1920, 1080, 60, False)
# Blank framebuffer for primary plane
fb0 = pykms.DumbFramebuffer(card, mode.hdisplay, mode.vdisplay, "AR24");
crtc.set_mode(conn, fb0, mode)
# Initialize framebuffer for the scaled plane
fbX = 1920
fbY = 1080
fb = pykms.DumbFramebuffer(card, fbX, fbY, "RG16");
pykms.draw_test_pattern(fb);
# Plane's scaled size and size increments
W = 72
H = 54
Winc = 1
Hinc = 1
# Plane's position and position increments
X = 0
Y = 0
Xinc = 1
Yinc = 1
while True:
print("+%d+%d %dx%d" % (X, Y, W, H))
crtc.set_plane(plane, fb, X, Y, W, H, 0, 0, fbX, fbY)
W = W + Winc
H = H + Hinc
if (Winc == 1 and W >= mode.hdisplay - X):
Winc = -1
if (Winc == -1 and W <= fbX/32):
Winc = 1
if (Hinc == 1 and H >= mode.vdisplay - Y):
Hinc = -1
if (Hinc == -1 and H <= fbY/32):
Hinc = 1
X = X + Xinc
Y = Y + Yinc
if (Xinc == 1 and X >= mode.hdisplay - W):
Xinc = -1
if (Xinc == -1 and X <= 0):
Xinc = 1
if (Yinc == 1 and Y >= mode.vdisplay - H):
Yinc = -1
if (Yinc == -1 and Y <= 0):
Yinc = 1
|
<commit_before><commit_msg>Add a simple and hackish plane scaling test.
Signed-off-by: Tomi Valkeinen <e1ca4dbb8be1acaf20734fecd2da10ed1d46a9bb@ti.com><commit_after>
|
#!/usr/bin/python3
import pykms
import time
import random
card = pykms.Card()
res = pykms.ResourceManager(card)
conn = res.reserve_connector("hdmi")
crtc = res.reserve_crtc(conn)
plane = res.reserve_overlay_plane(crtc)
mode = conn.get_default_mode()
#mode = conn.get_mode(1920, 1080, 60, False)
# Blank framebuffer for primary plane
fb0 = pykms.DumbFramebuffer(card, mode.hdisplay, mode.vdisplay, "AR24");
crtc.set_mode(conn, fb0, mode)
# Initialize framebuffer for the scaled plane
fbX = 1920
fbY = 1080
fb = pykms.DumbFramebuffer(card, fbX, fbY, "RG16");
pykms.draw_test_pattern(fb);
# Plane's scaled size and size increments
W = 72
H = 54
Winc = 1
Hinc = 1
# Plane's position and position increments
X = 0
Y = 0
Xinc = 1
Yinc = 1
while True:
print("+%d+%d %dx%d" % (X, Y, W, H))
crtc.set_plane(plane, fb, X, Y, W, H, 0, 0, fbX, fbY)
W = W + Winc
H = H + Hinc
if (Winc == 1 and W >= mode.hdisplay - X):
Winc = -1
if (Winc == -1 and W <= fbX/32):
Winc = 1
if (Hinc == 1 and H >= mode.vdisplay - Y):
Hinc = -1
if (Hinc == -1 and H <= fbY/32):
Hinc = 1
X = X + Xinc
Y = Y + Yinc
if (Xinc == 1 and X >= mode.hdisplay - W):
Xinc = -1
if (Xinc == -1 and X <= 0):
Xinc = 1
if (Yinc == 1 and Y >= mode.vdisplay - H):
Yinc = -1
if (Yinc == -1 and Y <= 0):
Yinc = 1
|
Add a simple and hackish plane scaling test.
Signed-off-by: Tomi Valkeinen <e1ca4dbb8be1acaf20734fecd2da10ed1d46a9bb@ti.com>#!/usr/bin/python3
import pykms
import time
import random
card = pykms.Card()
res = pykms.ResourceManager(card)
conn = res.reserve_connector("hdmi")
crtc = res.reserve_crtc(conn)
plane = res.reserve_overlay_plane(crtc)
mode = conn.get_default_mode()
#mode = conn.get_mode(1920, 1080, 60, False)
# Blank framebuffer for primary plane
fb0 = pykms.DumbFramebuffer(card, mode.hdisplay, mode.vdisplay, "AR24");
crtc.set_mode(conn, fb0, mode)
# Initialize framebuffer for the scaled plane
fbX = 1920
fbY = 1080
fb = pykms.DumbFramebuffer(card, fbX, fbY, "RG16");
pykms.draw_test_pattern(fb);
# Plane's scaled size and size increments
W = 72
H = 54
Winc = 1
Hinc = 1
# Plane's position and position increments
X = 0
Y = 0
Xinc = 1
Yinc = 1
while True:
print("+%d+%d %dx%d" % (X, Y, W, H))
crtc.set_plane(plane, fb, X, Y, W, H, 0, 0, fbX, fbY)
W = W + Winc
H = H + Hinc
if (Winc == 1 and W >= mode.hdisplay - X):
Winc = -1
if (Winc == -1 and W <= fbX/32):
Winc = 1
if (Hinc == 1 and H >= mode.vdisplay - Y):
Hinc = -1
if (Hinc == -1 and H <= fbY/32):
Hinc = 1
X = X + Xinc
Y = Y + Yinc
if (Xinc == 1 and X >= mode.hdisplay - W):
Xinc = -1
if (Xinc == -1 and X <= 0):
Xinc = 1
if (Yinc == 1 and Y >= mode.vdisplay - H):
Yinc = -1
if (Yinc == -1 and Y <= 0):
Yinc = 1
|
<commit_before><commit_msg>Add a simple and hackish plane scaling test.
Signed-off-by: Tomi Valkeinen <e1ca4dbb8be1acaf20734fecd2da10ed1d46a9bb@ti.com><commit_after>#!/usr/bin/python3
import pykms
import time
import random
card = pykms.Card()
res = pykms.ResourceManager(card)
conn = res.reserve_connector("hdmi")
crtc = res.reserve_crtc(conn)
plane = res.reserve_overlay_plane(crtc)
mode = conn.get_default_mode()
#mode = conn.get_mode(1920, 1080, 60, False)
# Blank framebuffer for primary plane
fb0 = pykms.DumbFramebuffer(card, mode.hdisplay, mode.vdisplay, "AR24");
crtc.set_mode(conn, fb0, mode)
# Initialize framebuffer for the scaled plane
fbX = 1920
fbY = 1080
fb = pykms.DumbFramebuffer(card, fbX, fbY, "RG16");
pykms.draw_test_pattern(fb);
# Plane's scaled size and size increments
W = 72
H = 54
Winc = 1
Hinc = 1
# Plane's position and position increments
X = 0
Y = 0
Xinc = 1
Yinc = 1
while True:
print("+%d+%d %dx%d" % (X, Y, W, H))
crtc.set_plane(plane, fb, X, Y, W, H, 0, 0, fbX, fbY)
W = W + Winc
H = H + Hinc
if (Winc == 1 and W >= mode.hdisplay - X):
Winc = -1
if (Winc == -1 and W <= fbX/32):
Winc = 1
if (Hinc == 1 and H >= mode.vdisplay - Y):
Hinc = -1
if (Hinc == -1 and H <= fbY/32):
Hinc = 1
X = X + Xinc
Y = Y + Yinc
if (Xinc == 1 and X >= mode.hdisplay - W):
Xinc = -1
if (Xinc == -1 and X <= 0):
Xinc = 1
if (Yinc == 1 and Y >= mode.vdisplay - H):
Yinc = -1
if (Yinc == -1 and Y <= 0):
Yinc = 1
|
|
333f22e90994170b67ba1cb833d0020208c00efe
|
pombola/hansard/tests/test_sitting_view.py
|
pombola/hansard/tests/test_sitting_view.py
|
from django.contrib.auth.models import User
from django_webtest import WebTest
from pombola.core import models
from ..models import Entry, Sitting
class TestSittingView(WebTest):
fixtures = ['hansard_test_data']
def setUp(self):
self.staffuser = User.objects.create(
username='editor',
is_staff=True)
self.person = models.Person.objects.create(
legal_name="Alfred Smith",
slug='alfred-smith')
self.sitting = Sitting.objects.get(
venue__slug='national_assembly',
start_date='2010-04-11',
start_time='09:30:00')
Entry.objects.create(
sitting=self.sitting,
type='speech',
page_number=1,
text_counter=1,
speaker_name='John Smith',
speaker=self.person,
content='Good morning, everyone')
def test_normal_view(self):
response = self.app.get('/hansard/sitting/national_assembly/2010-04-11-09-30-00')
self.assertIn('Good morning, everyone', response.content)
self.assertIn(
'<strong><a href="/person/alfred-smith/">Alfred Smith</a></strong>',
response.content)
self.assertNotIn('John Smith', response.content)
def test_with_speaker_names_anonymous_user(self):
response = self.app.get(
'/hansard/sitting/national_assembly/2010-04-11-09-30-00?show_original_name=1')
self.assertIn('Good morning, everyone', response.content)
self.assertIn(
'<strong><a href="/person/alfred-smith/">Alfred Smith</a></strong>',
response.content)
self.assertNotIn('John Smith', response.content)
def test_with_speaker_names_user_is_staff(self):
response = self.app.get(
'/hansard/sitting/national_assembly/2010-04-11-09-30-00?show_original_name=1',
user=self.staffuser)
self.assertIn('Good morning, everyone', response.content)
self.assertIn(
'<strong><a href="/person/alfred-smith/">Alfred Smith</a></strong>',
response.content)
self.assertIn('John Smith', response.content)
|
Add some tests for letting staff users see original speaker names
|
Add some tests for letting staff users see original speaker names
|
Python
|
agpl-3.0
|
mysociety/pombola,mysociety/pombola,mysociety/pombola,mysociety/pombola,mysociety/pombola,mysociety/pombola
|
Add some tests for letting staff users see original speaker names
|
from django.contrib.auth.models import User
from django_webtest import WebTest
from pombola.core import models
from ..models import Entry, Sitting
class TestSittingView(WebTest):
fixtures = ['hansard_test_data']
def setUp(self):
self.staffuser = User.objects.create(
username='editor',
is_staff=True)
self.person = models.Person.objects.create(
legal_name="Alfred Smith",
slug='alfred-smith')
self.sitting = Sitting.objects.get(
venue__slug='national_assembly',
start_date='2010-04-11',
start_time='09:30:00')
Entry.objects.create(
sitting=self.sitting,
type='speech',
page_number=1,
text_counter=1,
speaker_name='John Smith',
speaker=self.person,
content='Good morning, everyone')
def test_normal_view(self):
response = self.app.get('/hansard/sitting/national_assembly/2010-04-11-09-30-00')
self.assertIn('Good morning, everyone', response.content)
self.assertIn(
'<strong><a href="/person/alfred-smith/">Alfred Smith</a></strong>',
response.content)
self.assertNotIn('John Smith', response.content)
def test_with_speaker_names_anonymous_user(self):
response = self.app.get(
'/hansard/sitting/national_assembly/2010-04-11-09-30-00?show_original_name=1')
self.assertIn('Good morning, everyone', response.content)
self.assertIn(
'<strong><a href="/person/alfred-smith/">Alfred Smith</a></strong>',
response.content)
self.assertNotIn('John Smith', response.content)
def test_with_speaker_names_user_is_staff(self):
response = self.app.get(
'/hansard/sitting/national_assembly/2010-04-11-09-30-00?show_original_name=1',
user=self.staffuser)
self.assertIn('Good morning, everyone', response.content)
self.assertIn(
'<strong><a href="/person/alfred-smith/">Alfred Smith</a></strong>',
response.content)
self.assertIn('John Smith', response.content)
|
<commit_before><commit_msg>Add some tests for letting staff users see original speaker names<commit_after>
|
from django.contrib.auth.models import User
from django_webtest import WebTest
from pombola.core import models
from ..models import Entry, Sitting
class TestSittingView(WebTest):
fixtures = ['hansard_test_data']
def setUp(self):
self.staffuser = User.objects.create(
username='editor',
is_staff=True)
self.person = models.Person.objects.create(
legal_name="Alfred Smith",
slug='alfred-smith')
self.sitting = Sitting.objects.get(
venue__slug='national_assembly',
start_date='2010-04-11',
start_time='09:30:00')
Entry.objects.create(
sitting=self.sitting,
type='speech',
page_number=1,
text_counter=1,
speaker_name='John Smith',
speaker=self.person,
content='Good morning, everyone')
def test_normal_view(self):
response = self.app.get('/hansard/sitting/national_assembly/2010-04-11-09-30-00')
self.assertIn('Good morning, everyone', response.content)
self.assertIn(
'<strong><a href="/person/alfred-smith/">Alfred Smith</a></strong>',
response.content)
self.assertNotIn('John Smith', response.content)
def test_with_speaker_names_anonymous_user(self):
response = self.app.get(
'/hansard/sitting/national_assembly/2010-04-11-09-30-00?show_original_name=1')
self.assertIn('Good morning, everyone', response.content)
self.assertIn(
'<strong><a href="/person/alfred-smith/">Alfred Smith</a></strong>',
response.content)
self.assertNotIn('John Smith', response.content)
def test_with_speaker_names_user_is_staff(self):
response = self.app.get(
'/hansard/sitting/national_assembly/2010-04-11-09-30-00?show_original_name=1',
user=self.staffuser)
self.assertIn('Good morning, everyone', response.content)
self.assertIn(
'<strong><a href="/person/alfred-smith/">Alfred Smith</a></strong>',
response.content)
self.assertIn('John Smith', response.content)
|
Add some tests for letting staff users see original speaker namesfrom django.contrib.auth.models import User
from django_webtest import WebTest
from pombola.core import models
from ..models import Entry, Sitting
class TestSittingView(WebTest):
fixtures = ['hansard_test_data']
def setUp(self):
self.staffuser = User.objects.create(
username='editor',
is_staff=True)
self.person = models.Person.objects.create(
legal_name="Alfred Smith",
slug='alfred-smith')
self.sitting = Sitting.objects.get(
venue__slug='national_assembly',
start_date='2010-04-11',
start_time='09:30:00')
Entry.objects.create(
sitting=self.sitting,
type='speech',
page_number=1,
text_counter=1,
speaker_name='John Smith',
speaker=self.person,
content='Good morning, everyone')
def test_normal_view(self):
response = self.app.get('/hansard/sitting/national_assembly/2010-04-11-09-30-00')
self.assertIn('Good morning, everyone', response.content)
self.assertIn(
'<strong><a href="/person/alfred-smith/">Alfred Smith</a></strong>',
response.content)
self.assertNotIn('John Smith', response.content)
def test_with_speaker_names_anonymous_user(self):
response = self.app.get(
'/hansard/sitting/national_assembly/2010-04-11-09-30-00?show_original_name=1')
self.assertIn('Good morning, everyone', response.content)
self.assertIn(
'<strong><a href="/person/alfred-smith/">Alfred Smith</a></strong>',
response.content)
self.assertNotIn('John Smith', response.content)
def test_with_speaker_names_user_is_staff(self):
response = self.app.get(
'/hansard/sitting/national_assembly/2010-04-11-09-30-00?show_original_name=1',
user=self.staffuser)
self.assertIn('Good morning, everyone', response.content)
self.assertIn(
'<strong><a href="/person/alfred-smith/">Alfred Smith</a></strong>',
response.content)
self.assertIn('John Smith', response.content)
|
<commit_before><commit_msg>Add some tests for letting staff users see original speaker names<commit_after>from django.contrib.auth.models import User
from django_webtest import WebTest
from pombola.core import models
from ..models import Entry, Sitting
class TestSittingView(WebTest):
fixtures = ['hansard_test_data']
def setUp(self):
self.staffuser = User.objects.create(
username='editor',
is_staff=True)
self.person = models.Person.objects.create(
legal_name="Alfred Smith",
slug='alfred-smith')
self.sitting = Sitting.objects.get(
venue__slug='national_assembly',
start_date='2010-04-11',
start_time='09:30:00')
Entry.objects.create(
sitting=self.sitting,
type='speech',
page_number=1,
text_counter=1,
speaker_name='John Smith',
speaker=self.person,
content='Good morning, everyone')
def test_normal_view(self):
response = self.app.get('/hansard/sitting/national_assembly/2010-04-11-09-30-00')
self.assertIn('Good morning, everyone', response.content)
self.assertIn(
'<strong><a href="/person/alfred-smith/">Alfred Smith</a></strong>',
response.content)
self.assertNotIn('John Smith', response.content)
def test_with_speaker_names_anonymous_user(self):
response = self.app.get(
'/hansard/sitting/national_assembly/2010-04-11-09-30-00?show_original_name=1')
self.assertIn('Good morning, everyone', response.content)
self.assertIn(
'<strong><a href="/person/alfred-smith/">Alfred Smith</a></strong>',
response.content)
self.assertNotIn('John Smith', response.content)
def test_with_speaker_names_user_is_staff(self):
response = self.app.get(
'/hansard/sitting/national_assembly/2010-04-11-09-30-00?show_original_name=1',
user=self.staffuser)
self.assertIn('Good morning, everyone', response.content)
self.assertIn(
'<strong><a href="/person/alfred-smith/">Alfred Smith</a></strong>',
response.content)
self.assertIn('John Smith', response.content)
|
|
9771450342d65f4a87f502312f53058e91a1438e
|
eventkit/migrations/0003_auto_20150607_2314.py
|
eventkit/migrations/0003_auto_20150607_2314.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import polymorphic_tree.models
class Migration(migrations.Migration):
dependencies = [
('eventkit', '0002_auto_20150605_1418'),
]
operations = [
migrations.AlterField(
model_name='event',
name='is_repeat',
field=models.BooleanField(default=False, editable=False),
preserve_default=True,
),
migrations.AlterField(
model_name='event',
name='parent',
field=polymorphic_tree.models.PolymorphicTreeForeignKey(related_name='children', blank=True, editable=False, to='eventkit.Event', null=True),
preserve_default=True,
),
]
|
Add missing migration for MPTT update.
|
Add missing migration for MPTT update.
|
Python
|
mit
|
ic-labs/django-icekit,ic-labs/django-icekit,ic-labs/icekit-events,ic-labs/icekit-events,ic-labs/icekit-events,ic-labs/django-icekit,ic-labs/django-icekit
|
Add missing migration for MPTT update.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import polymorphic_tree.models
class Migration(migrations.Migration):
dependencies = [
('eventkit', '0002_auto_20150605_1418'),
]
operations = [
migrations.AlterField(
model_name='event',
name='is_repeat',
field=models.BooleanField(default=False, editable=False),
preserve_default=True,
),
migrations.AlterField(
model_name='event',
name='parent',
field=polymorphic_tree.models.PolymorphicTreeForeignKey(related_name='children', blank=True, editable=False, to='eventkit.Event', null=True),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add missing migration for MPTT update.<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import polymorphic_tree.models
class Migration(migrations.Migration):
dependencies = [
('eventkit', '0002_auto_20150605_1418'),
]
operations = [
migrations.AlterField(
model_name='event',
name='is_repeat',
field=models.BooleanField(default=False, editable=False),
preserve_default=True,
),
migrations.AlterField(
model_name='event',
name='parent',
field=polymorphic_tree.models.PolymorphicTreeForeignKey(related_name='children', blank=True, editable=False, to='eventkit.Event', null=True),
preserve_default=True,
),
]
|
Add missing migration for MPTT update.# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import polymorphic_tree.models
class Migration(migrations.Migration):
dependencies = [
('eventkit', '0002_auto_20150605_1418'),
]
operations = [
migrations.AlterField(
model_name='event',
name='is_repeat',
field=models.BooleanField(default=False, editable=False),
preserve_default=True,
),
migrations.AlterField(
model_name='event',
name='parent',
field=polymorphic_tree.models.PolymorphicTreeForeignKey(related_name='children', blank=True, editable=False, to='eventkit.Event', null=True),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add missing migration for MPTT update.<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import polymorphic_tree.models
class Migration(migrations.Migration):
dependencies = [
('eventkit', '0002_auto_20150605_1418'),
]
operations = [
migrations.AlterField(
model_name='event',
name='is_repeat',
field=models.BooleanField(default=False, editable=False),
preserve_default=True,
),
migrations.AlterField(
model_name='event',
name='parent',
field=polymorphic_tree.models.PolymorphicTreeForeignKey(related_name='children', blank=True, editable=False, to='eventkit.Event', null=True),
preserve_default=True,
),
]
|
|
68769e2bc5bc7a633ef2bcbd2efdc6f69952858d
|
scripts/find_duplicate_identifiers.py
|
scripts/find_duplicate_identifiers.py
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
"""
This script looks for duplicate identifiers in the "Identifiers" table.
The caller passes the name of one of the source ID indexes (e.g. MiroID),
and the script looks at every instance of that source ID. If a source ID
has multiple canonical IDs, it prints information about it to stdout
and exits with code 1. Otherwise exits with code 0.
"""
import collections
import sys
import boto3
def get_records(table, index_name):
"""
Retrieve all the records in a DynamoDB table.
"""
kwargs = {'IndexName': index_name}
while True:
resp = table.scan(**kwargs)
yield from resp['Items']
# DynamoDB results are paginated, with the ``LastEvaluatedKey`` in
# the response defining a parameter to be passed into the next page,
# as the start of the next response. When it's no longer present,
# we're at the end of the table. For more details:
# http://boto3.readthedocs.io/en/latest/reference/services/dynamodb.html#DynamoDB.Table.scan
try:
kwargs['ExclusiveStartKey'] = resp['LastEvaluatedKey']
except KeyError:
break
def build_id_cache(records, index_name):
"""
Given a series of records from DynamoDB, produce a mapping from their
source IDs to their canonical IDs.
"""
id_cache = collections.defaultdict(set)
for r in records:
id_cache[r[index_name]].add(r['CanonicalID'])
return dict(id_cache)
def main():
if len(sys.argv) != 2:
sys.exit(f"Usage: {sys.argv[0]} <index_name>")
index_name = sys.argv[1]
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table('Identifiers')
id_cache = build_id_cache(
records=get_records(table, index_name=index_name),
index_name=index_name
)
duplicates = [
(orig_id, canonical_ids)
for orig_id, canonical_ids in sorted(id_cache.items())
if len(canonical_ids) > 1]
if duplicates:
for orig_id, canonical_ids in duplicates:
print(f'{orig_id}\t{canonical_ids}')
return 1
else:
return 0
if __name__ == '__main__':
sys.exit(main())
|
Add a script for spotting doubly-minted identifiers
|
Add a script for spotting doubly-minted identifiers
|
Python
|
mit
|
wellcometrust/platform-api,wellcometrust/platform-api,wellcometrust/platform-api,wellcometrust/platform-api
|
Add a script for spotting doubly-minted identifiers
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
"""
This script looks for duplicate identifiers in the "Identifiers" table.
The caller passes the name of one of the source ID indexes (e.g. MiroID),
and the script looks at every instance of that source ID. If a source ID
has multiple canonical IDs, it prints information about it to stdout
and exits with code 1. Otherwise exits with code 0.
"""
import collections
import sys
import boto3
def get_records(table, index_name):
"""
Retrieve all the records in a DynamoDB table.
"""
kwargs = {'IndexName': index_name}
while True:
resp = table.scan(**kwargs)
yield from resp['Items']
# DynamoDB results are paginated, with the ``LastEvaluatedKey`` in
# the response defining a parameter to be passed into the next page,
# as the start of the next response. When it's no longer present,
# we're at the end of the table. For more details:
# http://boto3.readthedocs.io/en/latest/reference/services/dynamodb.html#DynamoDB.Table.scan
try:
kwargs['ExclusiveStartKey'] = resp['LastEvaluatedKey']
except KeyError:
break
def build_id_cache(records, index_name):
"""
Given a series of records from DynamoDB, produce a mapping from their
source IDs to their canonical IDs.
"""
id_cache = collections.defaultdict(set)
for r in records:
id_cache[r[index_name]].add(r['CanonicalID'])
return dict(id_cache)
def main():
if len(sys.argv) != 2:
sys.exit(f"Usage: {sys.argv[0]} <index_name>")
index_name = sys.argv[1]
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table('Identifiers')
id_cache = build_id_cache(
records=get_records(table, index_name=index_name),
index_name=index_name
)
duplicates = [
(orig_id, canonical_ids)
for orig_id, canonical_ids in sorted(id_cache.items())
if len(canonical_ids) > 1]
if duplicates:
for orig_id, canonical_ids in duplicates:
print(f'{orig_id}\t{canonical_ids}')
return 1
else:
return 0
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>Add a script for spotting doubly-minted identifiers<commit_after>
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
"""
This script looks for duplicate identifiers in the "Identifiers" table.
The caller passes the name of one of the source ID indexes (e.g. MiroID),
and the script looks at every instance of that source ID. If a source ID
has multiple canonical IDs, it prints information about it to stdout
and exits with code 1. Otherwise exits with code 0.
"""
import collections
import sys
import boto3
def get_records(table, index_name):
"""
Retrieve all the records in a DynamoDB table.
"""
kwargs = {'IndexName': index_name}
while True:
resp = table.scan(**kwargs)
yield from resp['Items']
# DynamoDB results are paginated, with the ``LastEvaluatedKey`` in
# the response defining a parameter to be passed into the next page,
# as the start of the next response. When it's no longer present,
# we're at the end of the table. For more details:
# http://boto3.readthedocs.io/en/latest/reference/services/dynamodb.html#DynamoDB.Table.scan
try:
kwargs['ExclusiveStartKey'] = resp['LastEvaluatedKey']
except KeyError:
break
def build_id_cache(records, index_name):
"""
Given a series of records from DynamoDB, produce a mapping from their
source IDs to their canonical IDs.
"""
id_cache = collections.defaultdict(set)
for r in records:
id_cache[r[index_name]].add(r['CanonicalID'])
return dict(id_cache)
def main():
if len(sys.argv) != 2:
sys.exit(f"Usage: {sys.argv[0]} <index_name>")
index_name = sys.argv[1]
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table('Identifiers')
id_cache = build_id_cache(
records=get_records(table, index_name=index_name),
index_name=index_name
)
duplicates = [
(orig_id, canonical_ids)
for orig_id, canonical_ids in sorted(id_cache.items())
if len(canonical_ids) > 1]
if duplicates:
for orig_id, canonical_ids in duplicates:
print(f'{orig_id}\t{canonical_ids}')
return 1
else:
return 0
if __name__ == '__main__':
sys.exit(main())
|
Add a script for spotting doubly-minted identifiers#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
"""
This script looks for duplicate identifiers in the "Identifiers" table.
The caller passes the name of one of the source ID indexes (e.g. MiroID),
and the script looks at every instance of that source ID. If a source ID
has multiple canonical IDs, it prints information about it to stdout
and exits with code 1. Otherwise exits with code 0.
"""
import collections
import sys
import boto3
def get_records(table, index_name):
"""
Retrieve all the records in a DynamoDB table.
"""
kwargs = {'IndexName': index_name}
while True:
resp = table.scan(**kwargs)
yield from resp['Items']
# DynamoDB results are paginated, with the ``LastEvaluatedKey`` in
# the response defining a parameter to be passed into the next page,
# as the start of the next response. When it's no longer present,
# we're at the end of the table. For more details:
# http://boto3.readthedocs.io/en/latest/reference/services/dynamodb.html#DynamoDB.Table.scan
try:
kwargs['ExclusiveStartKey'] = resp['LastEvaluatedKey']
except KeyError:
break
def build_id_cache(records, index_name):
"""
Given a series of records from DynamoDB, produce a mapping from their
source IDs to their canonical IDs.
"""
id_cache = collections.defaultdict(set)
for r in records:
id_cache[r[index_name]].add(r['CanonicalID'])
return dict(id_cache)
def main():
if len(sys.argv) != 2:
sys.exit(f"Usage: {sys.argv[0]} <index_name>")
index_name = sys.argv[1]
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table('Identifiers')
id_cache = build_id_cache(
records=get_records(table, index_name=index_name),
index_name=index_name
)
duplicates = [
(orig_id, canonical_ids)
for orig_id, canonical_ids in sorted(id_cache.items())
if len(canonical_ids) > 1]
if duplicates:
for orig_id, canonical_ids in duplicates:
print(f'{orig_id}\t{canonical_ids}')
return 1
else:
return 0
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>Add a script for spotting doubly-minted identifiers<commit_after>#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
"""
This script looks for duplicate identifiers in the "Identifiers" table.
The caller passes the name of one of the source ID indexes (e.g. MiroID),
and the script looks at every instance of that source ID. If a source ID
has multiple canonical IDs, it prints information about it to stdout
and exits with code 1. Otherwise exits with code 0.
"""
import collections
import sys
import boto3
def get_records(table, index_name):
"""
Retrieve all the records in a DynamoDB table.
"""
kwargs = {'IndexName': index_name}
while True:
resp = table.scan(**kwargs)
yield from resp['Items']
# DynamoDB results are paginated, with the ``LastEvaluatedKey`` in
# the response defining a parameter to be passed into the next page,
# as the start of the next response. When it's no longer present,
# we're at the end of the table. For more details:
# http://boto3.readthedocs.io/en/latest/reference/services/dynamodb.html#DynamoDB.Table.scan
try:
kwargs['ExclusiveStartKey'] = resp['LastEvaluatedKey']
except KeyError:
break
def build_id_cache(records, index_name):
"""
Given a series of records from DynamoDB, produce a mapping from their
source IDs to their canonical IDs.
"""
id_cache = collections.defaultdict(set)
for r in records:
id_cache[r[index_name]].add(r['CanonicalID'])
return dict(id_cache)
def main():
if len(sys.argv) != 2:
sys.exit(f"Usage: {sys.argv[0]} <index_name>")
index_name = sys.argv[1]
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table('Identifiers')
id_cache = build_id_cache(
records=get_records(table, index_name=index_name),
index_name=index_name
)
duplicates = [
(orig_id, canonical_ids)
for orig_id, canonical_ids in sorted(id_cache.items())
if len(canonical_ids) > 1]
if duplicates:
for orig_id, canonical_ids in duplicates:
print(f'{orig_id}\t{canonical_ids}')
return 1
else:
return 0
if __name__ == '__main__':
sys.exit(main())
|
|
d154a2dbdb45d28e12a1b3d67790653bbf4d17dc
|
src/teammails/__init__.py
|
src/teammails/__init__.py
|
from contextlib import contextmanager
import os
import smtplib
import database as db
from email.mime.text import MIMEText
from jinja2 import Template
from database.model import Team
from webapp.cfg import config
@contextmanager
def smtp_session():
session = smtplib.SMTP(config.MAIL_SERVER, config.MAIL_PORT)
if config.MAIL_USERNAME is not None:
session.starttls()
session.login(config.MAIL_USERNAME, config.MAIL_PASSWORD)
try:
yield session
except:
session.quit()
raise
def get_template(name):
filename = "%s.txt" % name
filepath = os.path.join(os.path.dirname(__file__), "templates", filename)
if not os.path.isfile(filepath):
raise Exception("File not found!")
with open(filepath, "r") as fn:
return Template(unicode(fn.read(), "utf8"))
def informal_to_teams(template_name, subject, debug=True):
template = get_template(template_name)
sender = "meet&eat Orga <%s>" % config.MAIL_DEFAULT_SENDER
envelope = config.MAIL_DEFAULT_SENDER
print "connect to SMTP...."
with smtp_session() as session:
print "Send Mails..."
i = 0
for team in db.session.query(Team).filter_by(deleted=False).filter_by(confirmed=True):
content = template.render(name=team.name)
msg = MIMEText(content, "plain", "utf8")
rcpt = team.email
if debug:
rcpt = config.MAIL_DEFAULT_SENDER
msg['Subject'] = subject
msg['From'] = sender
msg['To'] = rcpt
session.sendmail(envelope, [rcpt] + ["redaktion@exmatrikulationsamt.de"], msg.as_string())
i+=1
print "Mails sent: %d" % i
|
Add code to send informal emails to the teams.
|
Add code to send informal emails to the teams.
|
Python
|
bsd-3-clause
|
eXma/meet-and-eat-registration-system,janLo/meet-and-eat-registration-system,janLo/meet-and-eat-registration-system,eXma/meet-and-eat-registration-system,eXma/meet-and-eat-registration-system,janLo/meet-and-eat-registration-system,janLo/meet-and-eat-registration-system,eXma/meet-and-eat-registration-system
|
Add code to send informal emails to the teams.
|
from contextlib import contextmanager
import os
import smtplib
import database as db
from email.mime.text import MIMEText
from jinja2 import Template
from database.model import Team
from webapp.cfg import config
@contextmanager
def smtp_session():
session = smtplib.SMTP(config.MAIL_SERVER, config.MAIL_PORT)
if config.MAIL_USERNAME is not None:
session.starttls()
session.login(config.MAIL_USERNAME, config.MAIL_PASSWORD)
try:
yield session
except:
session.quit()
raise
def get_template(name):
filename = "%s.txt" % name
filepath = os.path.join(os.path.dirname(__file__), "templates", filename)
if not os.path.isfile(filepath):
raise Exception("File not found!")
with open(filepath, "r") as fn:
return Template(unicode(fn.read(), "utf8"))
def informal_to_teams(template_name, subject, debug=True):
template = get_template(template_name)
sender = "meet&eat Orga <%s>" % config.MAIL_DEFAULT_SENDER
envelope = config.MAIL_DEFAULT_SENDER
print "connect to SMTP...."
with smtp_session() as session:
print "Send Mails..."
i = 0
for team in db.session.query(Team).filter_by(deleted=False).filter_by(confirmed=True):
content = template.render(name=team.name)
msg = MIMEText(content, "plain", "utf8")
rcpt = team.email
if debug:
rcpt = config.MAIL_DEFAULT_SENDER
msg['Subject'] = subject
msg['From'] = sender
msg['To'] = rcpt
session.sendmail(envelope, [rcpt] + ["redaktion@exmatrikulationsamt.de"], msg.as_string())
i+=1
print "Mails sent: %d" % i
|
<commit_before><commit_msg>Add code to send informal emails to the teams.<commit_after>
|
from contextlib import contextmanager
import os
import smtplib
import database as db
from email.mime.text import MIMEText
from jinja2 import Template
from database.model import Team
from webapp.cfg import config
@contextmanager
def smtp_session():
session = smtplib.SMTP(config.MAIL_SERVER, config.MAIL_PORT)
if config.MAIL_USERNAME is not None:
session.starttls()
session.login(config.MAIL_USERNAME, config.MAIL_PASSWORD)
try:
yield session
except:
session.quit()
raise
def get_template(name):
filename = "%s.txt" % name
filepath = os.path.join(os.path.dirname(__file__), "templates", filename)
if not os.path.isfile(filepath):
raise Exception("File not found!")
with open(filepath, "r") as fn:
return Template(unicode(fn.read(), "utf8"))
def informal_to_teams(template_name, subject, debug=True):
template = get_template(template_name)
sender = "meet&eat Orga <%s>" % config.MAIL_DEFAULT_SENDER
envelope = config.MAIL_DEFAULT_SENDER
print "connect to SMTP...."
with smtp_session() as session:
print "Send Mails..."
i = 0
for team in db.session.query(Team).filter_by(deleted=False).filter_by(confirmed=True):
content = template.render(name=team.name)
msg = MIMEText(content, "plain", "utf8")
rcpt = team.email
if debug:
rcpt = config.MAIL_DEFAULT_SENDER
msg['Subject'] = subject
msg['From'] = sender
msg['To'] = rcpt
session.sendmail(envelope, [rcpt] + ["redaktion@exmatrikulationsamt.de"], msg.as_string())
i+=1
print "Mails sent: %d" % i
|
Add code to send informal emails to the teams.from contextlib import contextmanager
import os
import smtplib
import database as db
from email.mime.text import MIMEText
from jinja2 import Template
from database.model import Team
from webapp.cfg import config
@contextmanager
def smtp_session():
session = smtplib.SMTP(config.MAIL_SERVER, config.MAIL_PORT)
if config.MAIL_USERNAME is not None:
session.starttls()
session.login(config.MAIL_USERNAME, config.MAIL_PASSWORD)
try:
yield session
except:
session.quit()
raise
def get_template(name):
filename = "%s.txt" % name
filepath = os.path.join(os.path.dirname(__file__), "templates", filename)
if not os.path.isfile(filepath):
raise Exception("File not found!")
with open(filepath, "r") as fn:
return Template(unicode(fn.read(), "utf8"))
def informal_to_teams(template_name, subject, debug=True):
template = get_template(template_name)
sender = "meet&eat Orga <%s>" % config.MAIL_DEFAULT_SENDER
envelope = config.MAIL_DEFAULT_SENDER
print "connect to SMTP...."
with smtp_session() as session:
print "Send Mails..."
i = 0
for team in db.session.query(Team).filter_by(deleted=False).filter_by(confirmed=True):
content = template.render(name=team.name)
msg = MIMEText(content, "plain", "utf8")
rcpt = team.email
if debug:
rcpt = config.MAIL_DEFAULT_SENDER
msg['Subject'] = subject
msg['From'] = sender
msg['To'] = rcpt
session.sendmail(envelope, [rcpt] + ["redaktion@exmatrikulationsamt.de"], msg.as_string())
i+=1
print "Mails sent: %d" % i
|
<commit_before><commit_msg>Add code to send informal emails to the teams.<commit_after>from contextlib import contextmanager
import os
import smtplib
import database as db
from email.mime.text import MIMEText
from jinja2 import Template
from database.model import Team
from webapp.cfg import config
@contextmanager
def smtp_session():
session = smtplib.SMTP(config.MAIL_SERVER, config.MAIL_PORT)
if config.MAIL_USERNAME is not None:
session.starttls()
session.login(config.MAIL_USERNAME, config.MAIL_PASSWORD)
try:
yield session
except:
session.quit()
raise
def get_template(name):
filename = "%s.txt" % name
filepath = os.path.join(os.path.dirname(__file__), "templates", filename)
if not os.path.isfile(filepath):
raise Exception("File not found!")
with open(filepath, "r") as fn:
return Template(unicode(fn.read(), "utf8"))
def informal_to_teams(template_name, subject, debug=True):
template = get_template(template_name)
sender = "meet&eat Orga <%s>" % config.MAIL_DEFAULT_SENDER
envelope = config.MAIL_DEFAULT_SENDER
print "connect to SMTP...."
with smtp_session() as session:
print "Send Mails..."
i = 0
for team in db.session.query(Team).filter_by(deleted=False).filter_by(confirmed=True):
content = template.render(name=team.name)
msg = MIMEText(content, "plain", "utf8")
rcpt = team.email
if debug:
rcpt = config.MAIL_DEFAULT_SENDER
msg['Subject'] = subject
msg['From'] = sender
msg['To'] = rcpt
session.sendmail(envelope, [rcpt] + ["redaktion@exmatrikulationsamt.de"], msg.as_string())
i+=1
print "Mails sent: %d" % i
|
|
f8eaf2bb9046d5d6a9dd6a1ad91e7ae3b4a88688
|
tests/api/views/airports_test.py
|
tests/api/views/airports_test.py
|
# coding=utf-8
from tests.data import airports
def test_read_missing(db_session, client):
res = client.get('/airports/1')
assert res.status_code == 404
assert res.json == {
'message': 'Sorry, there is no such record (1) in our database.'
}
def test_read(db_session, client):
airport = airports.test_airport()
db_session.add(airport)
db_session.commit()
res = client.get('/airports/{id}'.format(id=airport.id))
assert res.status_code == 200
print res.json
assert res.json == {
'id': airport.id,
'name': 'Aachen Merzbruck',
'elevation': 189.0,
'short_name': None,
'countryCode': 'DE',
'icao': 'EDKA',
'location': [6.186389, 50.823056],
}
|
Add tests for "GET /airport/:id"
|
tests/api: Add tests for "GET /airport/:id"
|
Python
|
agpl-3.0
|
shadowoneau/skylines,Turbo87/skylines,shadowoneau/skylines,Turbo87/skylines,RBE-Avionik/skylines,Harry-R/skylines,RBE-Avionik/skylines,Harry-R/skylines,Harry-R/skylines,skylines-project/skylines,RBE-Avionik/skylines,shadowoneau/skylines,skylines-project/skylines,Harry-R/skylines,Turbo87/skylines,RBE-Avionik/skylines,skylines-project/skylines,shadowoneau/skylines,skylines-project/skylines,Turbo87/skylines
|
tests/api: Add tests for "GET /airport/:id"
|
# coding=utf-8
from tests.data import airports
def test_read_missing(db_session, client):
res = client.get('/airports/1')
assert res.status_code == 404
assert res.json == {
'message': 'Sorry, there is no such record (1) in our database.'
}
def test_read(db_session, client):
airport = airports.test_airport()
db_session.add(airport)
db_session.commit()
res = client.get('/airports/{id}'.format(id=airport.id))
assert res.status_code == 200
print res.json
assert res.json == {
'id': airport.id,
'name': 'Aachen Merzbruck',
'elevation': 189.0,
'short_name': None,
'countryCode': 'DE',
'icao': 'EDKA',
'location': [6.186389, 50.823056],
}
|
<commit_before><commit_msg>tests/api: Add tests for "GET /airport/:id"<commit_after>
|
# coding=utf-8
from tests.data import airports
def test_read_missing(db_session, client):
res = client.get('/airports/1')
assert res.status_code == 404
assert res.json == {
'message': 'Sorry, there is no such record (1) in our database.'
}
def test_read(db_session, client):
airport = airports.test_airport()
db_session.add(airport)
db_session.commit()
res = client.get('/airports/{id}'.format(id=airport.id))
assert res.status_code == 200
print res.json
assert res.json == {
'id': airport.id,
'name': 'Aachen Merzbruck',
'elevation': 189.0,
'short_name': None,
'countryCode': 'DE',
'icao': 'EDKA',
'location': [6.186389, 50.823056],
}
|
tests/api: Add tests for "GET /airport/:id"# coding=utf-8
from tests.data import airports
def test_read_missing(db_session, client):
res = client.get('/airports/1')
assert res.status_code == 404
assert res.json == {
'message': 'Sorry, there is no such record (1) in our database.'
}
def test_read(db_session, client):
airport = airports.test_airport()
db_session.add(airport)
db_session.commit()
res = client.get('/airports/{id}'.format(id=airport.id))
assert res.status_code == 200
print res.json
assert res.json == {
'id': airport.id,
'name': 'Aachen Merzbruck',
'elevation': 189.0,
'short_name': None,
'countryCode': 'DE',
'icao': 'EDKA',
'location': [6.186389, 50.823056],
}
|
<commit_before><commit_msg>tests/api: Add tests for "GET /airport/:id"<commit_after># coding=utf-8
from tests.data import airports
def test_read_missing(db_session, client):
res = client.get('/airports/1')
assert res.status_code == 404
assert res.json == {
'message': 'Sorry, there is no such record (1) in our database.'
}
def test_read(db_session, client):
airport = airports.test_airport()
db_session.add(airport)
db_session.commit()
res = client.get('/airports/{id}'.format(id=airport.id))
assert res.status_code == 200
print res.json
assert res.json == {
'id': airport.id,
'name': 'Aachen Merzbruck',
'elevation': 189.0,
'short_name': None,
'countryCode': 'DE',
'icao': 'EDKA',
'location': [6.186389, 50.823056],
}
|
|
84f17a5443e1268c62f94f104b0a7d4d6a631f22
|
symcalc.py
|
symcalc.py
|
from sympy.abc import *
from flask import Flask, request
app = Flask(__name__)
@app.route('/code', methods=['GET', 'POST'])
def code():
return str(eval(request.json['code']))
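# Hypothetical usage against a local instance:
#   curl -X POST http://localhost/code -H 'Content-Type: application/json' \
#        -d '{"code": "((x + 1)**2).expand()"}'
# which should return "x**2 + 2*x + 1". Note that eval() executes arbitrary
# Python, so this endpoint is only safe with trusted input.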
if __name__ == "__main__":
app.run(debug=True, port=80)
|
Add flask server for evaluating expressions
|
Add flask server for evaluating expressions
|
Python
|
mit
|
boppreh/symcalc
|
Add flask server for evaluating expressions
|
from sympy.abc import *
from flask import Flask, request
app = Flask(__name__)
@app.route('/code', methods=['GET', 'POST'])
def code():
return str(eval(request.json['code']))
if __name__ == "__main__":
app.run(debug=True, port=80)
|
<commit_before><commit_msg>Add flask server for evaluating expressions<commit_after>
|
from sympy.abc import *
from flask import Flask, request
app = Flask(__name__)
@app.route('/code', methods=['GET', 'POST'])
def code():
return str(eval(request.json['code']))
if __name__ == "__main__":
app.run(debug=True, port=80)
|
Add flask server for evaluating expressionsfrom sympy.abc import *
from flask import Flask, request
app = Flask(__name__)
@app.route('/code', methods=['GET', 'POST'])
def code():
return str(eval(request.json['code']))
if __name__ == "__main__":
app.run(debug=True, port=80)
|
<commit_before><commit_msg>Add flask server for evaluating expressions<commit_after>from sympy.abc import *
from flask import Flask, request
app = Flask(__name__)
@app.route('/code', methods=['GET', 'POST'])
def code():
return str(eval(request.json['code']))
if __name__ == "__main__":
app.run(debug=True, port=80)
|
|
1b19af261261d16c37885bda634f02f9a3abc027
|
zerver/tests/test_subdomains.py
|
zerver/tests/test_subdomains.py
|
import mock
from typing import Any, List
from django.test import TestCase, override_settings
from zerver.lib.subdomains import get_subdomain
from zerver.models import Realm
class SubdomainsTest(TestCase):
def test_get_subdomain(self):
# type: () -> None
def request_mock(host):
# type: (str) -> Any
request = mock.Mock(spec=['get_host'])
request.attach_mock(mock.Mock(return_value=host), 'get_host')
return request
def test(expected, host, *, root_aliases=[]):
# type: (str, str, List[str]) -> None
with self.settings(EXTERNAL_HOST='example.org',
ROOT_SUBDOMAIN_ALIASES=root_aliases):
self.assertEqual(get_subdomain(request_mock(host)), expected)
self.assertEqual(get_subdomain(request_mock(host + ':443')), expected)
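# The second assertion checks that a port suffix (':443') on the Host
# header does not affect subdomain extraction.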
ROOT = Realm.SUBDOMAIN_FOR_ROOT_DOMAIN
test(ROOT, 'example.org')
test('foo', 'foo.example.org')
test(ROOT, 'www.example.org', root_aliases=['www'])
test(ROOT, 'arbitrary.com')
|
Write some tests for get_subdomain.
|
subdomains: Write some tests for get_subdomain.
This logic is a bit subtle, and we're about to make changes to it.
So let's have some tests.
|
Python
|
apache-2.0
|
mahim97/zulip,kou/zulip,shubhamdhama/zulip,showell/zulip,punchagan/zulip,dhcrzf/zulip,brainwane/zulip,rht/zulip,tommyip/zulip,brockwhittaker/zulip,kou/zulip,eeshangarg/zulip,rishig/zulip,rishig/zulip,dhcrzf/zulip,eeshangarg/zulip,eeshangarg/zulip,timabbott/zulip,rishig/zulip,rishig/zulip,brockwhittaker/zulip,zulip/zulip,rht/zulip,brockwhittaker/zulip,mahim97/zulip,rishig/zulip,Galexrt/zulip,timabbott/zulip,punchagan/zulip,brockwhittaker/zulip,brainwane/zulip,dhcrzf/zulip,timabbott/zulip,dhcrzf/zulip,kou/zulip,showell/zulip,showell/zulip,dhcrzf/zulip,tommyip/zulip,dhcrzf/zulip,brainwane/zulip,andersk/zulip,tommyip/zulip,brockwhittaker/zulip,andersk/zulip,rht/zulip,andersk/zulip,showell/zulip,Galexrt/zulip,rishig/zulip,synicalsyntax/zulip,Galexrt/zulip,shubhamdhama/zulip,kou/zulip,jackrzhang/zulip,shubhamdhama/zulip,showell/zulip,punchagan/zulip,zulip/zulip,mahim97/zulip,rht/zulip,hackerkid/zulip,mahim97/zulip,tommyip/zulip,brainwane/zulip,brainwane/zulip,hackerkid/zulip,synicalsyntax/zulip,jackrzhang/zulip,punchagan/zulip,kou/zulip,timabbott/zulip,punchagan/zulip,eeshangarg/zulip,eeshangarg/zulip,synicalsyntax/zulip,zulip/zulip,kou/zulip,synicalsyntax/zulip,hackerkid/zulip,zulip/zulip,shubhamdhama/zulip,jackrzhang/zulip,kou/zulip,timabbott/zulip,showell/zulip,eeshangarg/zulip,shubhamdhama/zulip,mahim97/zulip,Galexrt/zulip,punchagan/zulip,Galexrt/zulip,dhcrzf/zulip,hackerkid/zulip,shubhamdhama/zulip,tommyip/zulip,Galexrt/zulip,jackrzhang/zulip,zulip/zulip,showell/zulip,zulip/zulip,hackerkid/zulip,jackrzhang/zulip,jackrzhang/zulip,synicalsyntax/zulip,rishig/zulip,timabbott/zulip,synicalsyntax/zulip,zulip/zulip,tommyip/zulip,timabbott/zulip,punchagan/zulip,rht/zulip,synicalsyntax/zulip,brainwane/zulip,hackerkid/zulip,mahim97/zulip,hackerkid/zulip,tommyip/zulip,andersk/zulip,rht/zulip,rht/zulip,brockwhittaker/zulip,andersk/zulip,Galexrt/zulip,eeshangarg/zulip,andersk/zulip,brainwane/zulip,shubhamdhama/zulip,jackrzhang/zulip,andersk/zulip
|
subdomains: Write some tests for get_subdomain.
This logic is a bit subtle, and we're about to make changes to it.
So let's have some tests.
|
import mock
from typing import Any, List
from django.test import TestCase, override_settings
from zerver.lib.subdomains import get_subdomain
from zerver.models import Realm
class SubdomainsTest(TestCase):
def test_get_subdomain(self):
# type: () -> None
def request_mock(host):
# type: (str) -> Any
request = mock.Mock(spec=['get_host'])
request.attach_mock(mock.Mock(return_value=host), 'get_host')
return request
def test(expected, host, *, root_aliases=[]):
# type: (str, str, List[str]) -> None
with self.settings(EXTERNAL_HOST='example.org',
ROOT_SUBDOMAIN_ALIASES=root_aliases):
self.assertEqual(get_subdomain(request_mock(host)), expected)
self.assertEqual(get_subdomain(request_mock(host + ':443')), expected)
ROOT = Realm.SUBDOMAIN_FOR_ROOT_DOMAIN
test(ROOT, 'example.org')
test('foo', 'foo.example.org')
test(ROOT, 'www.example.org', root_aliases=['www'])
test(ROOT, 'arbitrary.com')
|
<commit_before><commit_msg>subdomains: Write some tests for get_subdomain.
This logic is a bit subtle, and we're about to make changes to it.
So let's have some tests.<commit_after>
|
import mock
from typing import Any, List
from django.test import TestCase, override_settings
from zerver.lib.subdomains import get_subdomain
from zerver.models import Realm
class SubdomainsTest(TestCase):
def test_get_subdomain(self):
# type: () -> None
def request_mock(host):
# type: (str) -> Any
request = mock.Mock(spec=['get_host'])
request.attach_mock(mock.Mock(return_value=host), 'get_host')
return request
def test(expected, host, *, root_aliases=[]):
# type: (str, str, List[str]) -> None
with self.settings(EXTERNAL_HOST='example.org',
ROOT_SUBDOMAIN_ALIASES=root_aliases):
self.assertEqual(get_subdomain(request_mock(host)), expected)
self.assertEqual(get_subdomain(request_mock(host + ':443')), expected)
ROOT = Realm.SUBDOMAIN_FOR_ROOT_DOMAIN
test(ROOT, 'example.org')
test('foo', 'foo.example.org')
test(ROOT, 'www.example.org', root_aliases=['www'])
test(ROOT, 'arbitrary.com')
|
subdomains: Write some tests for get_subdomain.
This logic is a bit subtle, and we're about to make changes to it.
So let's have some tests.
import mock
from typing import Any, List
from django.test import TestCase, override_settings
from zerver.lib.subdomains import get_subdomain
from zerver.models import Realm
class SubdomainsTest(TestCase):
def test_get_subdomain(self):
# type: () -> None
def request_mock(host):
# type: (str) -> Any
request = mock.Mock(spec=['get_host'])
request.attach_mock(mock.Mock(return_value=host), 'get_host')
return request
def test(expected, host, *, root_aliases=[]):
# type: (str, str, List[str]) -> None
with self.settings(EXTERNAL_HOST='example.org',
ROOT_SUBDOMAIN_ALIASES=root_aliases):
self.assertEqual(get_subdomain(request_mock(host)), expected)
self.assertEqual(get_subdomain(request_mock(host + ':443')), expected)
ROOT = Realm.SUBDOMAIN_FOR_ROOT_DOMAIN
test(ROOT, 'example.org')
test('foo', 'foo.example.org')
test(ROOT, 'www.example.org', root_aliases=['www'])
test(ROOT, 'arbitrary.com')
|
<commit_before><commit_msg>subdomains: Write some tests for get_subdomain.
This logic is a bit subtle, and we're about to make changes to it.
So let's have some tests.<commit_after>
import mock
from typing import Any, List
from django.test import TestCase, override_settings
from zerver.lib.subdomains import get_subdomain
from zerver.models import Realm
class SubdomainsTest(TestCase):
def test_get_subdomain(self):
# type: () -> None
def request_mock(host):
# type: (str) -> Any
request = mock.Mock(spec=['get_host'])
request.attach_mock(mock.Mock(return_value=host), 'get_host')
return request
def test(expected, host, *, root_aliases=[]):
# type: (str, str, List[str]) -> None
with self.settings(EXTERNAL_HOST='example.org',
ROOT_SUBDOMAIN_ALIASES=root_aliases):
self.assertEqual(get_subdomain(request_mock(host)), expected)
self.assertEqual(get_subdomain(request_mock(host + ':443')), expected)
ROOT = Realm.SUBDOMAIN_FOR_ROOT_DOMAIN
test(ROOT, 'example.org')
test('foo', 'foo.example.org')
test(ROOT, 'www.example.org', root_aliases=['www'])
test(ROOT, 'arbitrary.com')
|
|
4fb5d4895ba955e30cb7908921ef7036c904d646
|
wrapper.py
|
wrapper.py
|
from subprocess import check_output
import sys
def list_accounts():
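# Parses the whitespace-separated rows printed by 'aqhbci-tool4
# listaccounts'; the hard-coded token indices (1, 3, 6) below assume that
# output layout stays stable across aqbanking versions.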
stdout = check_output(['aqhbci-tool4', 'listaccounts'])
account_list = []
for account_row in stdout.decode(sys.stdout.encoding).split('\n'):
account_row = account_row.split()
if len(account_row):
account_list.append({'account_name': account_row[1][:-1],
'bank': account_row[3],
'account_number': account_row[6]})
return account_list
|
Add first attempt to pull information out of aqhbci
|
Add first attempt to pull information out of aqhbci
|
Python
|
apache-2.0
|
hedderich/aqbanking-cli-wrapper
|
Add first attempt to pull information out of aqhbci
|
from subprocess import check_output
import sys
def list_accounts():
stdout = check_output(['aqhbci-tool4', 'listaccounts'])
account_list = []
for account_row in stdout.decode(sys.stdout.encoding).split('\n'):
account_row = account_row.split()
if len(account_row):
account_list.append({'account_name': account_row[1][:-1],
'bank': account_row[3],
'account_number': account_row[6]})
return account_list
|
<commit_before><commit_msg>Add first attempt to pull information out of aqhbci<commit_after>
|
from subprocess import check_output
import sys
def list_accounts():
stdout = check_output(['aqhbci-tool4', 'listaccounts'])
account_list = []
for account_row in stdout.decode(sys.stdout.encoding).split('\n'):
account_row = account_row.split()
if len(account_row):
account_list.append({'account_name': account_row[1][:-1],
'bank': account_row[3],
'account_number': account_row[6]})
return account_list
|
Add first attempt to pull information out of aqhbcifrom subprocess import check_output
import sys
def list_accounts():
stdout = check_output(['aqhbci-tool4', 'listaccounts'])
account_list = []
for account_row in stdout.decode(sys.stdout.encoding).split('\n'):
account_row = account_row.split()
if len(account_row):
account_list.append({'account_name': account_row[1][:-1],
'bank': account_row[3],
'account_number': account_row[6]})
return account_list
|
<commit_before><commit_msg>Add first attempt to pull information out of aqhbci<commit_after>from subprocess import check_output
import sys
def list_accounts():
stdout = check_output(['aqhbci-tool4', 'listaccounts'])
account_list = []
for account_row in stdout.decode(sys.stdout.encoding).split('\n'):
account_row = account_row.split()
if len(account_row):
account_list.append({'account_name': account_row[1][:-1],
'bank': account_row[3],
'account_number': account_row[6]})
return account_list
|
|
ff85bc7013eeae9c092a855787d454cf39bdcf98
|
mysite/fix_duplicate_citations.py
|
mysite/fix_duplicate_citations.py
|
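# Assumes a Django shell context (e.g. manage.py shell) where Citation is
# already in scope: reset the duplicate flag, then re-run the dedup check.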
for citation in Citation.objects.all().order_by('pk'):
citation.ignored_due_to_duplicate = False
citation.save()
citation.save_and_check_for_duplicates()
|
Add script for fixing duplicate citations.
|
Add script for fixing duplicate citations.
|
Python
|
agpl-3.0
|
waseem18/oh-mainline,mzdaniel/oh-mainline,nirmeshk/oh-mainline,sudheesh001/oh-mainline,willingc/oh-mainline,onceuponatimeforever/oh-mainline,heeraj123/oh-mainline,mzdaniel/oh-mainline,heeraj123/oh-mainline,heeraj123/oh-mainline,ehashman/oh-mainline,mzdaniel/oh-mainline,eeshangarg/oh-mainline,campbe13/openhatch,SnappleCap/oh-mainline,jledbetter/openhatch,moijes12/oh-mainline,ojengwa/oh-mainline,campbe13/openhatch,eeshangarg/oh-mainline,ehashman/oh-mainline,mzdaniel/oh-mainline,ojengwa/oh-mainline,mzdaniel/oh-mainline,Changaco/oh-mainline,eeshangarg/oh-mainline,SnappleCap/oh-mainline,vipul-sharma20/oh-mainline,onceuponatimeforever/oh-mainline,sudheesh001/oh-mainline,nirmeshk/oh-mainline,vipul-sharma20/oh-mainline,openhatch/oh-mainline,ojengwa/oh-mainline,heeraj123/oh-mainline,ehashman/oh-mainline,ojengwa/oh-mainline,SnappleCap/oh-mainline,sudheesh001/oh-mainline,sudheesh001/oh-mainline,moijes12/oh-mainline,nirmeshk/oh-mainline,vipul-sharma20/oh-mainline,waseem18/oh-mainline,moijes12/oh-mainline,onceuponatimeforever/oh-mainline,waseem18/oh-mainline,Changaco/oh-mainline,openhatch/oh-mainline,openhatch/oh-mainline,campbe13/openhatch,nirmeshk/oh-mainline,heeraj123/oh-mainline,nirmeshk/oh-mainline,Changaco/oh-mainline,willingc/oh-mainline,jledbetter/openhatch,moijes12/oh-mainline,campbe13/openhatch,SnappleCap/oh-mainline,ehashman/oh-mainline,willingc/oh-mainline,waseem18/oh-mainline,jledbetter/openhatch,vipul-sharma20/oh-mainline,onceuponatimeforever/oh-mainline,sudheesh001/oh-mainline,ojengwa/oh-mainline,onceuponatimeforever/oh-mainline,Changaco/oh-mainline,ehashman/oh-mainline,willingc/oh-mainline,mzdaniel/oh-mainline,campbe13/openhatch,jledbetter/openhatch,mzdaniel/oh-mainline,openhatch/oh-mainline,eeshangarg/oh-mainline,vipul-sharma20/oh-mainline,willingc/oh-mainline,SnappleCap/oh-mainline,openhatch/oh-mainline,eeshangarg/oh-mainline,moijes12/oh-mainline,waseem18/oh-mainline,Changaco/oh-mainline,jledbetter/openhatch
|
Add script for fixing duplicate citations.
|
for citation in Citation.objects.all().order_by('pk'):
citation.ignored_due_to_duplicate = False
citation.save()
citation.save_and_check_for_duplicates()
|
<commit_before><commit_msg>Add script for fixing duplicate citations.<commit_after>
|
for citation in Citation.objects.all().order_by('pk'):
citation.ignored_due_to_duplicate = False
citation.save()
citation.save_and_check_for_duplicates()
|
Add script for fixing duplicate citations.for citation in Citation.objects.all().order_by('pk'):
citation.ignored_due_to_duplicate = False
citation.save()
citation.save_and_check_for_duplicates()
|
<commit_before><commit_msg>Add script for fixing duplicate citations.<commit_after>for citation in Citation.objects.all().order_by('pk'):
citation.ignored_due_to_duplicate = False
citation.save()
citation.save_and_check_for_duplicates()
|
|
e6a90e5926d589e405b3a575de442e2f399e81cf
|
03_task/sample_test.py
|
03_task/sample_test.py
|
import datetime
import unittest
import solution
class TestSocialGraph(unittest.TestCase):
def setUp(self):
self.terry = solution.User("Terry Gilliam")
self.eric = solution.User("Eric Idle")
self.graham = solution.User("Graham Chapman")
self.john = solution.User("John Cleese")
self.michael = solution.User("Michael Palin")
self.graph = solution.SocialGraph()
self.graph.add_user(self.terry)
self.graph.add_user(self.eric)
self.graph.add_user(self.graham)
self.graph.add_user(self.john)
self.graph.add_user(self.michael)
def test_add_get_and_delete_user(self):
with self.assertRaises(solution.UserAlreadyExistsError):
self.graph.add_user(self.terry)
self.graph.delete_user(self.terry.uuid)
self.graph.add_user(self.terry)
self.assertEqual(self.graph.get_user(self.terry.uuid), self.terry)
def test_following(self):
self.graph.follow(self.terry.uuid, self.eric.uuid)
self.assertTrue(
self.graph.is_following(self.terry.uuid, self.eric.uuid))
self.assertFalse(
self.graph.is_following(self.eric.uuid, self.terry.uuid))
def test_friends(self):
self.graph.follow(self.terry.uuid, self.eric.uuid)
self.assertNotIn(self.eric.uuid, self.graph.friends(self.terry.uuid))
self.assertNotIn(self.terry.uuid, self.graph.friends(self.eric.uuid))
self.graph.follow(self.eric.uuid, self.terry.uuid)
self.assertIn(self.eric.uuid, self.graph.friends(self.terry.uuid))
self.assertIn(self.terry.uuid, self.graph.friends(self.eric.uuid))
class TestUser(unittest.TestCase):
def setUp(self):
self.michael = solution.User("Michael Palin")
def test_has_uuid(self):
self.assertIsNotNone(getattr(self.michael, 'uuid'))
def test_add_post(self):
self.michael.add_post("larodi")
post = next(self.michael.get_post())
self.assertEqual(post.author, self.michael.uuid)
self.assertEqual(post.content, "larodi")
self.assertTrue(isinstance(post.published_at, datetime.datetime))
if __name__ == '__main__':
unittest.main()
|
Add 03-task sample test file.
|
Add 03-task sample test file.
|
Python
|
mit
|
pepincho/Python-Course-FMI
|
Add 03-task sample test file.
|
import datetime
import unittest
import solution
class TestSocialGraph(unittest.TestCase):
def setUp(self):
self.terry = solution.User("Terry Gilliam")
self.eric = solution.User("Eric Idle")
self.graham = solution.User("Graham Chapman")
self.john = solution.User("John Cleese")
self.michael = solution.User("Michael Palin")
self.graph = solution.SocialGraph()
self.graph.add_user(self.terry)
self.graph.add_user(self.eric)
self.graph.add_user(self.graham)
self.graph.add_user(self.john)
self.graph.add_user(self.michael)
def test_add_get_and_delete_user(self):
with self.assertRaises(solution.UserAlreadyExistsError):
self.graph.add_user(self.terry)
self.graph.delete_user(self.terry.uuid)
self.graph.add_user(self.terry)
self.assertEqual(self.graph.get_user(self.terry.uuid), self.terry)
def test_following(self):
self.graph.follow(self.terry.uuid, self.eric.uuid)
self.assertTrue(
self.graph.is_following(self.terry.uuid, self.eric.uuid))
self.assertFalse(
self.graph.is_following(self.eric.uuid, self.terry.uuid))
def test_friends(self):
self.graph.follow(self.terry.uuid, self.eric.uuid)
self.assertNotIn(self.eric.uuid, self.graph.friends(self.terry.uuid))
self.assertNotIn(self.terry.uuid, self.graph.friends(self.eric.uuid))
self.graph.follow(self.eric.uuid, self.terry.uuid)
self.assertIn(self.eric.uuid, self.graph.friends(self.terry.uuid))
self.assertIn(self.terry.uuid, self.graph.friends(self.eric.uuid))
class TestUser(unittest.TestCase):
def setUp(self):
self.michael = solution.User("Michael Palin")
def test_has_uuid(self):
self.assertIsNotNone(getattr(self.michael, 'uuid'))
def test_add_post(self):
self.michael.add_post("larodi")
post = next(self.michael.get_post())
self.assertEqual(post.author, self.michael.uuid)
self.assertEqual(post.content, "larodi")
self.assertTrue(isinstance(post.published_at, datetime.datetime))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add 03-task sample test file.<commit_after>
|
import datetime
import unittest
import solution
class TestSocialGraph(unittest.TestCase):
def setUp(self):
self.terry = solution.User("Terry Gilliam")
self.eric = solution.User("Eric Idle")
self.graham = solution.User("Graham Chapman")
self.john = solution.User("John Cleese")
self.michael = solution.User("Michael Palin")
self.graph = solution.SocialGraph()
self.graph.add_user(self.terry)
self.graph.add_user(self.eric)
self.graph.add_user(self.graham)
self.graph.add_user(self.john)
self.graph.add_user(self.michael)
def test_add_get_and_delete_user(self):
with self.assertRaises(solution.UserAlreadyExistsError):
self.graph.add_user(self.terry)
self.graph.delete_user(self.terry.uuid)
self.graph.add_user(self.terry)
self.assertEqual(self.graph.get_user(self.terry.uuid), self.terry)
def test_following(self):
self.graph.follow(self.terry.uuid, self.eric.uuid)
self.assertTrue(
self.graph.is_following(self.terry.uuid, self.eric.uuid))
self.assertFalse(
self.graph.is_following(self.eric.uuid, self.terry.uuid))
def test_friends(self):
self.graph.follow(self.terry.uuid, self.eric.uuid)
self.assertNotIn(self.eric.uuid, self.graph.friends(self.terry.uuid))
self.assertNotIn(self.terry.uuid, self.graph.friends(self.eric.uuid))
self.graph.follow(self.eric.uuid, self.terry.uuid)
self.assertIn(self.eric.uuid, self.graph.friends(self.terry.uuid))
self.assertIn(self.terry.uuid, self.graph.friends(self.eric.uuid))
class TestUser(unittest.TestCase):
def setUp(self):
self.michael = solution.User("Michael Palin")
def test_has_uuid(self):
self.assertIsNotNone(getattr(self.michael, 'uuid'))
def test_add_post(self):
self.michael.add_post("larodi")
post = next(self.michael.get_post())
self.assertEqual(post.author, self.michael.uuid)
self.assertEqual(post.content, "larodi")
self.assertTrue(isinstance(post.published_at, datetime.datetime))
if __name__ == '__main__':
unittest.main()
|
Add 03-task sample test file.import datetime
import unittest
import solution
class TestSocialGraph(unittest.TestCase):
def setUp(self):
self.terry = solution.User("Terry Gilliam")
self.eric = solution.User("Eric Idle")
self.graham = solution.User("Graham Chapman")
self.john = solution.User("John Cleese")
self.michael = solution.User("Michael Palin")
self.graph = solution.SocialGraph()
self.graph.add_user(self.terry)
self.graph.add_user(self.eric)
self.graph.add_user(self.graham)
self.graph.add_user(self.john)
self.graph.add_user(self.michael)
def test_add_get_and_delete_user(self):
with self.assertRaises(solution.UserAlreadyExistsError):
self.graph.add_user(self.terry)
self.graph.delete_user(self.terry.uuid)
self.graph.add_user(self.terry)
self.assertEqual(self.graph.get_user(self.terry.uuid), self.terry)
def test_following(self):
self.graph.follow(self.terry.uuid, self.eric.uuid)
self.assertTrue(
self.graph.is_following(self.terry.uuid, self.eric.uuid))
self.assertFalse(
self.graph.is_following(self.eric.uuid, self.terry.uuid))
def test_friends(self):
self.graph.follow(self.terry.uuid, self.eric.uuid)
self.assertNotIn(self.eric.uuid, self.graph.friends(self.terry.uuid))
self.assertNotIn(self.terry.uuid, self.graph.friends(self.eric.uuid))
self.graph.follow(self.eric.uuid, self.terry.uuid)
self.assertIn(self.eric.uuid, self.graph.friends(self.terry.uuid))
self.assertIn(self.terry.uuid, self.graph.friends(self.eric.uuid))
class TestUser(unittest.TestCase):
def setUp(self):
self.michael = solution.User("Michael Palin")
def test_has_uuid(self):
self.assertIsNotNone(getattr(self.michael, 'uuid'))
def test_add_post(self):
self.michael.add_post("larodi")
post = next(self.michael.get_post())
self.assertEqual(post.author, self.michael.uuid)
self.assertEqual(post.content, "larodi")
self.assertTrue(isinstance(post.published_at, datetime.datetime))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add 03-task sample test file.<commit_after>import datetime
import unittest
import solution
class TestSocialGraph(unittest.TestCase):
def setUp(self):
self.terry = solution.User("Terry Gilliam")
self.eric = solution.User("Eric Idle")
self.graham = solution.User("Graham Chapman")
self.john = solution.User("John Cleese")
self.michael = solution.User("Michael Palin")
self.graph = solution.SocialGraph()
self.graph.add_user(self.terry)
self.graph.add_user(self.eric)
self.graph.add_user(self.graham)
self.graph.add_user(self.john)
self.graph.add_user(self.michael)
def test_add_get_and_delete_user(self):
with self.assertRaises(solution.UserAlreadyExistsError):
self.graph.add_user(self.terry)
self.graph.delete_user(self.terry.uuid)
self.graph.add_user(self.terry)
self.assertEqual(self.graph.get_user(self.terry.uuid), self.terry)
def test_following(self):
self.graph.follow(self.terry.uuid, self.eric.uuid)
self.assertTrue(
self.graph.is_following(self.terry.uuid, self.eric.uuid))
self.assertFalse(
self.graph.is_following(self.eric.uuid, self.terry.uuid))
def test_friends(self):
self.graph.follow(self.terry.uuid, self.eric.uuid)
self.assertNotIn(self.eric.uuid, self.graph.friends(self.terry.uuid))
self.assertNotIn(self.terry.uuid, self.graph.friends(self.eric.uuid))
self.graph.follow(self.eric.uuid, self.terry.uuid)
self.assertIn(self.eric.uuid, self.graph.friends(self.terry.uuid))
self.assertIn(self.terry.uuid, self.graph.friends(self.eric.uuid))
class TestUser(unittest.TestCase):
def setUp(self):
self.michael = solution.User("Michael Palin")
def test_has_uuid(self):
self.assertIsNotNone(getattr(self.michael, 'uuid'))
def test_add_post(self):
self.michael.add_post("larodi")
post = next(self.michael.get_post())
self.assertEqual(post.author, self.michael.uuid)
self.assertEqual(post.content, "larodi")
self.assertTrue(isinstance(post.published_at, datetime.datetime))
if __name__ == '__main__':
unittest.main()
|
|
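The two test records above exercise a solution module that is not included. For orientation, here is a minimal sketch of the interface the assertions appear to assume; the class, method, and attribute names are read off the tests, while every implementation detail below is a guess, not the graded solution.
import datetime
import uuid

class UserAlreadyExistsError(Exception):
    pass

class Post:
    def __init__(self, author, content):
        self.author = author            # uuid of the posting user
        self.content = content
        self.published_at = datetime.datetime.now()

class User:
    def __init__(self, name):
        self.name = name
        self.uuid = uuid.uuid4()        # tests only check the attribute exists
        self._posts = []

    def add_post(self, content):
        self._posts.append(Post(self.uuid, content))

    def get_post(self):
        # generator, so the tests can call next() on it
        yield from self._posts

class SocialGraph:
    def __init__(self):
        self._users = {}
        self._following = {}

    def add_user(self, user):
        if user.uuid in self._users:
            raise UserAlreadyExistsError(user.uuid)
        self._users[user.uuid] = user
        self._following[user.uuid] = set()

    def get_user(self, user_uuid):
        return self._users[user_uuid]

    def delete_user(self, user_uuid):
        del self._users[user_uuid]
        del self._following[user_uuid]

    def follow(self, follower, followee):
        self._following[follower].add(followee)

    def is_following(self, follower, followee):
        return followee in self._following[follower]

    def friends(self, user_uuid):
        # mutual follows only, matching the assertIn/assertNotIn pairs above
        return {u for u in self._following[user_uuid]
                if self.is_following(u, user_uuid)}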
47961252b369d1fd946020816f46a54b6ee00f84
|
antxetamedia/news/migrations/0011_auto_20150915_0327.py
|
antxetamedia/news/migrations/0011_auto_20150915_0327.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('news', '0010_auto_20150807_0733'),
]
operations = [
migrations.AlterModelOptions(
name='newspodcast',
options={'verbose_name_plural': 'Berriak', 'verbose_name': 'Berria', 'ordering': ['-pub_date']},
),
migrations.AlterModelOptions(
name='newsshow',
options={'verbose_name_plural': 'Berrien ekoizleak', 'verbose_name': 'Berrien ekoizlea'},
),
]
|
Change verbose name for some news models fields
|
Change verbose name for some news models fields
|
Python
|
agpl-3.0
|
GISAElkartea/amv2,GISAElkartea/amv2,GISAElkartea/amv2
|
Change verbose name for some news models fields
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('news', '0010_auto_20150807_0733'),
]
operations = [
migrations.AlterModelOptions(
name='newspodcast',
options={'verbose_name_plural': 'Berriak', 'verbose_name': 'Berria', 'ordering': ['-pub_date']},
),
migrations.AlterModelOptions(
name='newsshow',
options={'verbose_name_plural': 'Berrien ekoizleak', 'verbose_name': 'Berrien ekoizlea'},
),
]
|
<commit_before><commit_msg>Change verbose name for some news models fields<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('news', '0010_auto_20150807_0733'),
]
operations = [
migrations.AlterModelOptions(
name='newspodcast',
options={'verbose_name_plural': 'Berriak', 'verbose_name': 'Berria', 'ordering': ['-pub_date']},
),
migrations.AlterModelOptions(
name='newsshow',
options={'verbose_name_plural': 'Berrien ekoizleak', 'verbose_name': 'Berrien ekoizlea'},
),
]
|
Change verbose name for some news models fields# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('news', '0010_auto_20150807_0733'),
]
operations = [
migrations.AlterModelOptions(
name='newspodcast',
options={'verbose_name_plural': 'Berriak', 'verbose_name': 'Berria', 'ordering': ['-pub_date']},
),
migrations.AlterModelOptions(
name='newsshow',
options={'verbose_name_plural': 'Berrien ekoizleak', 'verbose_name': 'Berrien ekoizlea'},
),
]
|
<commit_before><commit_msg>Change verbose name for some news models fields<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('news', '0010_auto_20150807_0733'),
]
operations = [
migrations.AlterModelOptions(
name='newspodcast',
options={'verbose_name_plural': 'Berriak', 'verbose_name': 'Berria', 'ordering': ['-pub_date']},
),
migrations.AlterModelOptions(
name='newsshow',
options={'verbose_name_plural': 'Berrien ekoizleak', 'verbose_name': 'Berrien ekoizlea'},
),
]
|
|
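AlterModelOptions only rewrites model Meta options, so the state this migration moves to corresponds to roughly the following on the model side. Only the options named in the migration are shown; the model fields are elided and the sketch is illustrative, not the project's actual models.py.
from django.db import models

class NewsPodcast(models.Model):
    # fields elided
    class Meta:
        ordering = ['-pub_date']
        verbose_name = 'Berria'
        verbose_name_plural = 'Berriak'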
02832aee17b4cae8dc49a035bc8c6d11a69dd7ac
|
ws-tests/test_valid_study_put_override_author.py
|
ws-tests/test_valid_study_put_override_author.py
|
#!/usr/bin/env python
from opentreetesting import test_http_json_method, config
import datetime
import codecs
import json
import sys
import os
DOMAIN = config('host', 'apihost')
SUBMIT_URI = DOMAIN + '/v1/study/1003'
inpf = codecs.open('../nexson-validator/tests/single/input/1003.json', 'rU', encoding='utf-8')
n = json.load(inpf)
# refresh a timestamp so that the test generates a commit
m = n['nexml']['meta']
short_list = [i for i in m if i.get('@property') == 'bogus_timestamp']
if short_list:
el = short_list[0]
else:
el = {'@property': 'bogus_timestamp', '@xsi:type': 'nex:LiteralMeta'}
m.append(el)
el['$'] = datetime.datetime.utcnow().isoformat()
data = { 'nexson' : n,
'auth_token': os.environ.get('GITHUB_OAUTH_TOKEN', 'bogus_token'),
'author_name': "Some Dude",
'author_email': "dude@dude.com",
}
if test_http_json_method(SUBMIT_URI,
'PUT',
data=data,
expected_status=200):
sys.exit(0)
sys.exit(1)
|
Add a test for over-riding author info
|
Add a test for over-riding author info
|
Python
|
bsd-2-clause
|
leto/new_opentree_api,leto/new_opentree_api
|
Add a test for over-riding author info
|
#!/usr/bin/env python
from opentreetesting import test_http_json_method, config
import datetime
import codecs
import json
import sys
import os
DOMAIN = config('host', 'apihost')
SUBMIT_URI = DOMAIN + '/v1/study/1003'
inpf = codecs.open('../nexson-validator/tests/single/input/1003.json', 'rU', encoding='utf-8')
n = json.load(inpf)
# refresh a timestamp so that the test generates a commit
m = n['nexml']['meta']
short_list = [i for i in m if i.get('@property') == 'bogus_timestamp']
if short_list:
el = short_list[0]
else:
el = {'@property': 'bogus_timestamp', '@xsi:type': 'nex:LiteralMeta'}
m.append(el)
el['$'] = datetime.datetime.utcnow().isoformat()
data = { 'nexson' : n,
'auth_token': os.environ.get('GITHUB_OAUTH_TOKEN', 'bogus_token'),
'author_name': "Some Dude",
'author_email': "dude@dude.com",
}
if test_http_json_method(SUBMIT_URI,
'PUT',
data=data,
expected_status=200):
sys.exit(0)
sys.exit(1)
|
<commit_before><commit_msg>Add a test for over-riding author info<commit_after>
|
#!/usr/bin/env python
from opentreetesting import test_http_json_method, config
import datetime
import codecs
import json
import sys
import os
DOMAIN = config('host', 'apihost')
SUBMIT_URI = DOMAIN + '/v1/study/1003'
inpf = codecs.open('../nexson-validator/tests/single/input/1003.json', 'rU', encoding='utf-8')
n = json.load(inpf)
# refresh a timestamp so that the test generates a commit
m = n['nexml']['meta']
short_list = [i for i in m if i.get('@property') == 'bogus_timestamp']
if short_list:
el = short_list[0]
else:
el = {'@property': 'bogus_timestamp', '@xsi:type': 'nex:LiteralMeta'}
m.append(el)
el['$'] = datetime.datetime.utcnow().isoformat()
data = { 'nexson' : n,
'auth_token': os.environ.get('GITHUB_OAUTH_TOKEN', 'bogus_token'),
'author_name': "Some Dude",
'author_email': "dude@dude.com",
}
if test_http_json_method(SUBMIT_URI,
'PUT',
data=data,
expected_status=200):
sys.exit(0)
sys.exit(1)
|
Add a test for over-riding author info#!/usr/bin/env python
from opentreetesting import test_http_json_method, config
import datetime
import codecs
import json
import sys
import os
DOMAIN = config('host', 'apihost')
SUBMIT_URI = DOMAIN + '/v1/study/1003'
inpf = codecs.open('../nexson-validator/tests/single/input/1003.json', 'rU', encoding='utf-8')
n = json.load(inpf)
# refresh a timestamp so that the test generates a commit
m = n['nexml']['meta']
short_list = [i for i in m if i.get('@property') == 'bogus_timestamp']
if short_list:
el = short_list[0]
else:
el = {'@property': 'bogus_timestamp', '@xsi:type': 'nex:LiteralMeta'}
m.append(el)
el['$'] = datetime.datetime.utcnow().isoformat()
data = { 'nexson' : n,
'auth_token': os.environ.get('GITHUB_OAUTH_TOKEN', 'bogus_token'),
'author_name': "Some Dude",
'author_email': "dude@dude.com",
}
if test_http_json_method(SUBMIT_URI,
'PUT',
data=data,
expected_status=200):
sys.exit(0)
sys.exit(1)
|
<commit_before><commit_msg>Add a test for over-riding author info<commit_after>#!/usr/bin/env python
from opentreetesting import test_http_json_method, config
import datetime
import codecs
import json
import sys
import os
DOMAIN = config('host', 'apihost')
SUBMIT_URI = DOMAIN + '/v1/study/1003'
inpf = codecs.open('../nexson-validator/tests/single/input/1003.json', 'rU', encoding='utf-8')
n = json.load(inpf)
# refresh a timestamp so that the test generates a commit
m = n['nexml']['meta']
short_list = [i for i in m if i.get('@property') == 'bogus_timestamp']
if short_list:
el = short_list[0]
else:
el = {'@property': 'bogus_timestamp', '@xsi:type': 'nex:LiteralMeta'}
m.append(el)
el['$'] = datetime.datetime.utcnow().isoformat()
data = { 'nexson' : n,
'auth_token': os.environ.get('GITHUB_OAUTH_TOKEN', 'bogus_token'),
'author_name': "Some Dude",
'author_email': "dude@dude.com",
}
if test_http_json_method(SUBMIT_URI,
'PUT',
data=data,
expected_status=200):
sys.exit(0)
sys.exit(1)
|
|
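The test depends on test_http_json_method from the project's opentreetesting helper, which is not shown in the record. A rough sketch of what a helper with this call shape might do, using requests; this is an assumption for illustration and the real helper may differ, for example in how it encodes the payload.
import requests

def test_http_json_method(url, verb, data=None, expected_status=200):
    # Send the payload as JSON and report whether the response status matched.
    resp = requests.request(verb, url, json=data)
    if resp.status_code != expected_status:
        print('expected {0}, got {1}'.format(expected_status, resp.status_code))
        return False
    return True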
0faf570db96db22ddbbd8dc81053bcbd36a1aa74
|
tests/unit/config_test.py
|
tests/unit/config_test.py
|
# -*- coding: utf-8 -*-
'''
unit.config_test
~~~~~~~~~~~~~~~~
Configuration related unit testing
:codeauthor: :email:`Pedro Algarvio (pedro@algarvio.me)`
:copyright: © 2013 by the SaltStack Team, see AUTHORS for more details.
:license: Apache 2.0, see LICENSE for more details.
'''
# Import python libs
import os
import shutil
import tempfile
# Import salt libs
import salt.utils
import salt.version
# Import salt testing libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../')
# Import salt cloud libs
from saltcloud import config as cloudconfig
class CloudConfigTestCase(TestCase):
def test_load_cloud_config_from_environ_var(self):
if salt.version.__version_info__ < (0, 16, 0):
self.skipTest(
                'This test will always fail if salt >= 0.16.0 is not available'
)
original_environ = os.environ.copy()
tempdir = tempfile.mkdtemp()
try:
env_root_dir = os.path.join(tempdir, 'foo', 'env')
os.makedirs(env_root_dir)
env_fpath = os.path.join(env_root_dir, 'config-env')
salt.utils.fopen(env_fpath, 'w').write(
'root_dir: {0}\n'
'log_file: {1}\n'.format(env_root_dir, env_fpath)
)
os.environ['SALT_CLOUD_CONFIG'] = env_fpath
# Should load from env variable, not the default configuration file
config = cloudconfig.cloud_config('/etc/salt/cloud')
self.assertEqual(config['log_file'], env_fpath)
os.environ.clear()
os.environ.update(original_environ)
root_dir = os.path.join(tempdir, 'foo', 'bar')
os.makedirs(root_dir)
fpath = os.path.join(root_dir, 'config')
salt.utils.fopen(fpath, 'w').write(
'root_dir: {0}\n'
'log_file: {1}\n'.format(root_dir, fpath)
)
# Let's set the environment variable, yet, since the configuration
            # file path is not the default one, i.e., the user has passed an
            # alternative configuration file from the CLI parser, the
# environment variable will be ignored.
os.environ['SALT_CLOUD_CONFIG'] = env_fpath
config = cloudconfig.cloud_config(fpath)
self.assertEqual(config['log_file'], fpath)
os.environ.clear()
os.environ.update(original_environ)
finally:
if os.path.isdir(tempdir):
shutil.rmtree(tempdir)
if __name__ == '__main__':
from salttesting.parser import run_testcase
run_testcase(CloudConfigTestCase)
|
Add a test case to test loading config from an environment variable.
|
Add a test case to test loading config from an environment variable.
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
Add a test case to test loading config from an environment variable.
|
# -*- coding: utf-8 -*-
'''
unit.config_test
~~~~~~~~~~~~~~~~
Configuration related unit testing
:codeauthor: :email:`Pedro Algarvio (pedro@algarvio.me)`
:copyright: © 2013 by the SaltStack Team, see AUTHORS for more details.
:license: Apache 2.0, see LICENSE for more details.
'''
# Import python libs
import os
import shutil
import tempfile
# Import salt libs
import salt.utils
import salt.version
# Import salt testing libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../')
# Import salt cloud libs
from saltcloud import config as cloudconfig
class CloudConfigTestCase(TestCase):
def test_load_cloud_config_from_environ_var(self):
if salt.version.__version_info__ < (0, 16, 0):
self.skipTest(
                'This test will always fail if salt >= 0.16.0 is not available'
)
original_environ = os.environ.copy()
tempdir = tempfile.mkdtemp()
try:
env_root_dir = os.path.join(tempdir, 'foo', 'env')
os.makedirs(env_root_dir)
env_fpath = os.path.join(env_root_dir, 'config-env')
salt.utils.fopen(env_fpath, 'w').write(
'root_dir: {0}\n'
'log_file: {1}\n'.format(env_root_dir, env_fpath)
)
os.environ['SALT_CLOUD_CONFIG'] = env_fpath
# Should load from env variable, not the default configuration file
config = cloudconfig.cloud_config('/etc/salt/cloud')
self.assertEqual(config['log_file'], env_fpath)
os.environ.clear()
os.environ.update(original_environ)
root_dir = os.path.join(tempdir, 'foo', 'bar')
os.makedirs(root_dir)
fpath = os.path.join(root_dir, 'config')
salt.utils.fopen(fpath, 'w').write(
'root_dir: {0}\n'
'log_file: {1}\n'.format(root_dir, fpath)
)
# Let's set the environment variable, yet, since the configuration
            # file path is not the default one, i.e., the user has passed an
            # alternative configuration file from the CLI parser, the
# environment variable will be ignored.
os.environ['SALT_CLOUD_CONFIG'] = env_fpath
config = cloudconfig.cloud_config(fpath)
self.assertEqual(config['log_file'], fpath)
os.environ.clear()
os.environ.update(original_environ)
finally:
if os.path.isdir(tempdir):
shutil.rmtree(tempdir)
if __name__ == '__main__':
from salttesting.parser import run_testcase
run_testcase(CloudConfigTestCase)
|
<commit_before><commit_msg>Add a test case to test loading config from an environment variable.<commit_after>
|
# -*- coding: utf-8 -*-
'''
unit.config_test
~~~~~~~~~~~~~~~~
Configuration related unit testing
:codeauthor: :email:`Pedro Algarvio (pedro@algarvio.me)`
:copyright: © 2013 by the SaltStack Team, see AUTHORS for more details.
:license: Apache 2.0, see LICENSE for more details.
'''
# Import python libs
import os
import shutil
import tempfile
# Import salt libs
import salt.utils
import salt.version
# Import salt testing libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../')
# Import salt cloud libs
from saltcloud import config as cloudconfig
class CloudConfigTestCase(TestCase):
def test_load_cloud_config_from_environ_var(self):
if salt.version.__version_info__ < (0, 16, 0):
self.skipTest(
                'This test will always fail if salt >= 0.16.0 is not available'
)
original_environ = os.environ.copy()
tempdir = tempfile.mkdtemp()
try:
env_root_dir = os.path.join(tempdir, 'foo', 'env')
os.makedirs(env_root_dir)
env_fpath = os.path.join(env_root_dir, 'config-env')
salt.utils.fopen(env_fpath, 'w').write(
'root_dir: {0}\n'
'log_file: {1}\n'.format(env_root_dir, env_fpath)
)
os.environ['SALT_CLOUD_CONFIG'] = env_fpath
# Should load from env variable, not the default configuration file
config = cloudconfig.cloud_config('/etc/salt/cloud')
self.assertEqual(config['log_file'], env_fpath)
os.environ.clear()
os.environ.update(original_environ)
root_dir = os.path.join(tempdir, 'foo', 'bar')
os.makedirs(root_dir)
fpath = os.path.join(root_dir, 'config')
salt.utils.fopen(fpath, 'w').write(
'root_dir: {0}\n'
'log_file: {1}\n'.format(root_dir, fpath)
)
# Let's set the environment variable, yet, since the configuration
            # file path is not the default one, i.e., the user has passed an
            # alternative configuration file from the CLI parser, the
# environment variable will be ignored.
os.environ['SALT_CLOUD_CONFIG'] = env_fpath
config = cloudconfig.cloud_config(fpath)
self.assertEqual(config['log_file'], fpath)
os.environ.clear()
os.environ.update(original_environ)
finally:
if os.path.isdir(tempdir):
shutil.rmtree(tempdir)
if __name__ == '__main__':
from salttesting.parser import run_testcase
run_testcase(CloudConfigTestCase)
|
Add a test case to test loading config from an environment variable.# -*- coding: utf-8 -*-
'''
unit.config_test
~~~~~~~~~~~~~~~~
Configuration related unit testing
:codeauthor: :email:`Pedro Algarvio (pedro@algarvio.me)`
:copyright: © 2013 by the SaltStack Team, see AUTHORS for more details.
:license: Apache 2.0, see LICENSE for more details.
'''
# Import python libs
import os
import shutil
import tempfile
# Import salt libs
import salt.utils
import salt.version
# Import salt testing libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../')
# Import salt cloud libs
from saltcloud import config as cloudconfig
class CloudConfigTestCase(TestCase):
def test_load_cloud_config_from_environ_var(self):
if salt.version.__version_info__ < (0, 16, 0):
self.skipTest(
                'This test will always fail if salt >= 0.16.0 is not available'
)
original_environ = os.environ.copy()
tempdir = tempfile.mkdtemp()
try:
env_root_dir = os.path.join(tempdir, 'foo', 'env')
os.makedirs(env_root_dir)
env_fpath = os.path.join(env_root_dir, 'config-env')
salt.utils.fopen(env_fpath, 'w').write(
'root_dir: {0}\n'
'log_file: {1}\n'.format(env_root_dir, env_fpath)
)
os.environ['SALT_CLOUD_CONFIG'] = env_fpath
# Should load from env variable, not the default configuration file
config = cloudconfig.cloud_config('/etc/salt/cloud')
self.assertEqual(config['log_file'], env_fpath)
os.environ.clear()
os.environ.update(original_environ)
root_dir = os.path.join(tempdir, 'foo', 'bar')
os.makedirs(root_dir)
fpath = os.path.join(root_dir, 'config')
salt.utils.fopen(fpath, 'w').write(
'root_dir: {0}\n'
'log_file: {1}\n'.format(root_dir, fpath)
)
# Let's set the environment variable, yet, since the configuration
            # file path is not the default one, i.e., the user has passed an
            # alternative configuration file from the CLI parser, the
# environment variable will be ignored.
os.environ['SALT_CLOUD_CONFIG'] = env_fpath
config = cloudconfig.cloud_config(fpath)
self.assertEqual(config['log_file'], fpath)
os.environ.clear()
os.environ.update(original_environ)
finally:
if os.path.isdir(tempdir):
shutil.rmtree(tempdir)
if __name__ == '__main__':
from salttesting.parser import run_testcase
run_testcase(CloudConfigTestCase)
|
<commit_before><commit_msg>Add a test case to test loading config from an environment variable.<commit_after># -*- coding: utf-8 -*-
'''
unit.config_test
~~~~~~~~~~~~~~~~
Configuration related unit testing
:codeauthor: :email:`Pedro Algarvio (pedro@algarvio.me)`
:copyright: © 2013 by the SaltStack Team, see AUTHORS for more details.
:license: Apache 2.0, see LICENSE for more details.
'''
# Import python libs
import os
import shutil
import tempfile
# Import salt libs
import salt.utils
import salt.version
# Import salt testing libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../')
# Import salt cloud libs
from saltcloud import config as cloudconfig
class CloudConfigTestCase(TestCase):
def test_load_cloud_config_from_environ_var(self):
if salt.version.__version_info__ < (0, 16, 0):
self.skipTest(
                'This test will always fail if salt >= 0.16.0 is not available'
)
original_environ = os.environ.copy()
tempdir = tempfile.mkdtemp()
try:
env_root_dir = os.path.join(tempdir, 'foo', 'env')
os.makedirs(env_root_dir)
env_fpath = os.path.join(env_root_dir, 'config-env')
salt.utils.fopen(env_fpath, 'w').write(
'root_dir: {0}\n'
'log_file: {1}\n'.format(env_root_dir, env_fpath)
)
os.environ['SALT_CLOUD_CONFIG'] = env_fpath
# Should load from env variable, not the default configuration file
config = cloudconfig.cloud_config('/etc/salt/cloud')
self.assertEqual(config['log_file'], env_fpath)
os.environ.clear()
os.environ.update(original_environ)
root_dir = os.path.join(tempdir, 'foo', 'bar')
os.makedirs(root_dir)
fpath = os.path.join(root_dir, 'config')
salt.utils.fopen(fpath, 'w').write(
'root_dir: {0}\n'
'log_file: {1}\n'.format(root_dir, fpath)
)
# Let's set the environment variable, yet, since the configuration
            # file path is not the default one, i.e., the user has passed an
            # alternative configuration file from the CLI parser, the
# environment variable will be ignored.
os.environ['SALT_CLOUD_CONFIG'] = env_fpath
config = cloudconfig.cloud_config(fpath)
self.assertEqual(config['log_file'], fpath)
os.environ.clear()
os.environ.update(original_environ)
finally:
if os.path.isdir(tempdir):
shutil.rmtree(tempdir)
if __name__ == '__main__':
from salttesting.parser import run_testcase
run_testcase(CloudConfigTestCase)
|
|
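The precedence rule the test encodes, namely that the environment variable wins only when the caller asked for the default path, can be sketched in a few lines. This is an illustration of the tested behavior, not saltcloud's actual resolution code.
import os

DEFAULT_PATH = '/etc/salt/cloud'

def resolve_config_path(requested_path):
    # An explicit, non-default path from the CLI always wins.
    if requested_path != DEFAULT_PATH:
        return requested_path
    # Otherwise fall back to the environment override, then the default.
    return os.environ.get('SALT_CLOUD_CONFIG', DEFAULT_PATH)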
25fba421673c1e86d71b42af208fc47996f1c326
|
site_analytics.py
|
site_analytics.py
|
#!/usr/local/bin/python3.6
# read nginx access log
# parse and get the ip addresses and times
# match ip addresses to geoip
# possibly ignore bots
import re
def get_log_lines(path):
"""Return a list of regex matched log lines from the passed nginx access log path"""
lines = []
with open(path) as f:
r = re.compile("""(?P<remote>[^ ]*) (?P<host>[^ ]*) (?P<user>[^ ]*) \[(?P<time>[^\]]*)\] "(?P<method>\S+)(?: +(?P<path>[^\"]*) +\S*)?" (?P<code>[^ ]*) (?P<size>[^ ]*)(?: "(?P<referer>[^\"]*)" "(?P<agent>[^\"]*)")""")
for line in f:
m = r.match(line)
if m is not None:
md = m.groupdict()
lines.append(md)
return lines
def get_ip_address_city(ip_address):
pass
def is_bot(useragent):
pass
def summarize(data):
pass
if __name__ == "__main__":
access_file_path = "/var/log/nginx/imadm.ca.access.log"
access_log = get_log_lines(access_file_path)
|
Read and regex match the log lines
|
Read and regex match the log lines
|
Python
|
mit
|
mouhtasi/basic_site_analytics
|
Read and regex match the log lines
|
#!/usr/local/bin/python3.6
# read nginx access log
# parse and get the ip addresses and times
# match ip addresses to geoip
# possibly ignore bots
import re
def get_log_lines(path):
"""Return a list of regex matched log lines from the passed nginx access log path"""
lines = []
with open(path) as f:
r = re.compile("""(?P<remote>[^ ]*) (?P<host>[^ ]*) (?P<user>[^ ]*) \[(?P<time>[^\]]*)\] "(?P<method>\S+)(?: +(?P<path>[^\"]*) +\S*)?" (?P<code>[^ ]*) (?P<size>[^ ]*)(?: "(?P<referer>[^\"]*)" "(?P<agent>[^\"]*)")""")
for line in f:
m = r.match(line)
if m is not None:
md = m.groupdict()
lines.append(md)
return lines
def get_ip_address_city(ip_address):
pass
def is_bot(useragent):
pass
def summarize(data):
pass
if __name__ == "__main__":
access_file_path = "/var/log/nginx/imadm.ca.access.log"
access_log = get_log_lines(access_file_path)
|
<commit_before><commit_msg>Read and regex match the log lines<commit_after>
|
#!/usr/local/bin/python3.6
# read nginx access log
# parse and get the ip addresses and times
# match ip addresses to geoip
# possibly ignore bots
import re
def get_log_lines(path):
"""Return a list of regex matched log lines from the passed nginx access log path"""
lines = []
with open(path) as f:
r = re.compile("""(?P<remote>[^ ]*) (?P<host>[^ ]*) (?P<user>[^ ]*) \[(?P<time>[^\]]*)\] "(?P<method>\S+)(?: +(?P<path>[^\"]*) +\S*)?" (?P<code>[^ ]*) (?P<size>[^ ]*)(?: "(?P<referer>[^\"]*)" "(?P<agent>[^\"]*)")""")
for line in f:
m = r.match(line)
if m is not None:
md = m.groupdict()
lines.append(md)
return lines
def get_ip_address_city(ip_address):
pass
def is_bot(useragent):
pass
def summarize(data):
pass
if __name__ == "__main__":
access_file_path = "/var/log/nginx/imadm.ca.access.log"
access_log = get_log_lines(access_file_path)
|
Read and regex match the log lines#!/usr/local/bin/python3.6
# read nginx access log
# parse and get the ip addresses and times
# match ip addresses to geoip
# possibly ignore bots
import re
def get_log_lines(path):
"""Return a list of regex matched log lines from the passed nginx access log path"""
lines = []
with open(path) as f:
r = re.compile("""(?P<remote>[^ ]*) (?P<host>[^ ]*) (?P<user>[^ ]*) \[(?P<time>[^\]]*)\] "(?P<method>\S+)(?: +(?P<path>[^\"]*) +\S*)?" (?P<code>[^ ]*) (?P<size>[^ ]*)(?: "(?P<referer>[^\"]*)" "(?P<agent>[^\"]*)")""")
for line in f:
m = r.match(line)
if m is not None:
md = m.groupdict()
lines.append(md)
return lines
def get_ip_address_city(ip_address):
pass
def is_bot(useragent):
pass
def summarize(data):
pass
if __name__ == "__main__":
access_file_path = "/var/log/nginx/imadm.ca.access.log"
access_log = get_log_lines(access_file_path)
|
<commit_before><commit_msg>Read and regex match the log lines<commit_after>#!/usr/local/bin/python3.6
# read nginx access log
# parse and get the ip addresses and times
# match ip addresses to geoip
# possibly ignore bots
import re
def get_log_lines(path):
"""Return a list of regex matched log lines from the passed nginx access log path"""
lines = []
with open(path) as f:
r = re.compile("""(?P<remote>[^ ]*) (?P<host>[^ ]*) (?P<user>[^ ]*) \[(?P<time>[^\]]*)\] "(?P<method>\S+)(?: +(?P<path>[^\"]*) +\S*)?" (?P<code>[^ ]*) (?P<size>[^ ]*)(?: "(?P<referer>[^\"]*)" "(?P<agent>[^\"]*)")""")
for line in f:
m = r.match(line)
if m is not None:
md = m.groupdict()
lines.append(md)
return lines
def get_ip_address_city(ip_address):
pass
def is_bot(useragent):
pass
def summarize(data):
pass
if __name__ == "__main__":
access_file_path = "/var/log/nginx/imadm.ca.access.log"
access_log = get_log_lines(access_file_path)
|
|
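get_log_lines returns one dict per matched line, keyed by the regex group names. The summarize stub above could start out like this; the choice of fields and the top-10 cut are illustrative, not part of the original script.
from collections import Counter

def summarize(data):
    # Count requests per remote address and per request path.
    by_ip = Counter(line['remote'] for line in data)
    by_path = Counter(line['path'] for line in data if line['path'])
    return {'top_ips': by_ip.most_common(10),
            'top_paths': by_path.most_common(10)}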
18f84f266d172e0390eef61250909dc9c0401f4b
|
utils/swift_build_support/swift_build_support/compiler_stage.py
|
utils/swift_build_support/swift_build_support/compiler_stage.py
|
# ===--- compiler_stage.py -----------------------------------------------===#
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2021 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ===---------------------------------------------------------------------===#
class StageArgs(object):
def __init__(self, stage, args):
self.stage = stage
self.args = args
def __getattr__(self, key):
real_key = '{}{}'.format(key, self.stage.postfix)
if not hasattr(self.args, real_key):
return None
return getattr(self.args, real_key)
class Stage(object):
def __init__(self, identifier, postfix=""):
self.identifier = identifier
self.postfix = postfix
STAGE_1 = Stage(1, "")
STAGE_2 = Stage(2, "_stage2")
|
Introduce infrastructure for auto-prefixing stage2 options.
|
[build-script] Introduce infrastructure for auto-prefixing stage2 options.
This will let me introduce a stage generic swift implementation that adds a
postfix '_stage2' to certain stage specific options. This ensures we can have a
single swift build-script product implementation for both stage1 and stage2
compilers.
|
Python
|
apache-2.0
|
benlangmuir/swift,xwu/swift,xwu/swift,JGiola/swift,gregomni/swift,atrick/swift,apple/swift,roambotics/swift,apple/swift,atrick/swift,gregomni/swift,xwu/swift,xwu/swift,hooman/swift,hooman/swift,benlangmuir/swift,ahoppen/swift,glessard/swift,benlangmuir/swift,roambotics/swift,apple/swift,rudkx/swift,roambotics/swift,atrick/swift,hooman/swift,glessard/swift,atrick/swift,rudkx/swift,hooman/swift,hooman/swift,rudkx/swift,JGiola/swift,ahoppen/swift,benlangmuir/swift,rudkx/swift,ahoppen/swift,atrick/swift,glessard/swift,glessard/swift,xwu/swift,hooman/swift,JGiola/swift,rudkx/swift,xwu/swift,gregomni/swift,apple/swift,ahoppen/swift,glessard/swift,gregomni/swift,hooman/swift,roambotics/swift,roambotics/swift,benlangmuir/swift,glessard/swift,JGiola/swift,apple/swift,ahoppen/swift,xwu/swift,JGiola/swift,JGiola/swift,apple/swift,benlangmuir/swift,rudkx/swift,atrick/swift,ahoppen/swift,gregomni/swift,gregomni/swift,roambotics/swift
|
[build-script] Introduce infrastructure for auto-prefixing stage2 options.
This will let me introduce a stage generic swift implementation that adds a
postfix '_stage2' to certain stage specific options. This ensures we can have a
single swift build-script product implementation for both stage1 and stage2
compilers.
|
# ===--- compiler_stage.py -----------------------------------------------===#
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2021 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ===---------------------------------------------------------------------===#
class StageArgs(object):
def __init__(self, stage, args):
self.stage = stage
self.args = args
def __getattr__(self, key):
real_key = '{}{}'.format(key, self.stage.postfix)
if not hasattr(self.args, real_key):
return None
return getattr(self.args, real_key)
class Stage(object):
def __init__(self, identifier, postfix=""):
self.identifier = identifier
self.postfix = postfix
STAGE_1 = Stage(1, "")
STAGE_2 = Stage(2, "_stage2")
|
<commit_before><commit_msg>[build-script] Introduce infrastructure for auto-prefixing stage2 options.
This will let me introduce a stage generic swift implementation that adds a
postfix '_stage2' to certain stage specific options. This ensures we can have a
single swift build-script product implementation for both stage1 and stage2
compilers.<commit_after>
|
# ===--- compiler_stage.py -----------------------------------------------===#
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2021 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ===---------------------------------------------------------------------===#
class StageArgs(object):
def __init__(self, stage, args):
self.stage = stage
self.args = args
def __getattr__(self, key):
real_key = '{}{}'.format(key, self.stage.postfix)
if not hasattr(self.args, real_key):
return None
return getattr(self.args, real_key)
class Stage(object):
def __init__(self, identifier, postfix=""):
self.identifier = identifier
self.postfix = postfix
STAGE_1 = Stage(1, "")
STAGE_2 = Stage(2, "_stage2")
|
[build-script] Introduce infrastructure for auto-prefixing stage2 options.
This will let me introduce a stage generic swift implementation that adds a
postfix '_stage2' to certain stage specific options. This ensures we can have a
single swift build-script product implementation for both stage1 and stage2
compilers.# ===--- compiler_stage.py -----------------------------------------------===#
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2021 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ===---------------------------------------------------------------------===#
class StageArgs(object):
def __init__(self, stage, args):
self.stage = stage
self.args = args
def __getattr__(self, key):
real_key = '{}{}'.format(key, self.stage.postfix)
if not hasattr(self.args, real_key):
return None
return getattr(self.args, real_key)
class Stage(object):
def __init__(self, identifier, postfix=""):
self.identifier = identifier
self.postfix = postfix
STAGE_1 = Stage(1, "")
STAGE_2 = Stage(2, "_stage2")
|
<commit_before><commit_msg>[build-script] Introduce infrastructure for auto-prefixing stage2 options.
This will let me introduce a stage generic swift implementation that adds a
postfix '_stage2' to certain stage specific options. This ensures we can have a
single swift build-script product implementation for both stage1 and stage2
compilers.<commit_after># ===--- compiler_stage.py -----------------------------------------------===#
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2021 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ===---------------------------------------------------------------------===#
class StageArgs(object):
def __init__(self, stage, args):
self.stage = stage
self.args = args
def __getattr__(self, key):
real_key = '{}{}'.format(key, self.stage.postfix)
if not hasattr(self.args, real_key):
return None
return getattr(self.args, real_key)
class Stage(object):
def __init__(self, identifier, postfix=""):
self.identifier = identifier
self.postfix = postfix
STAGE_1 = Stage(1, "")
STAGE_2 = Stage(2, "_stage2")
|
|
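The commit message describes the mechanism but not its use. A short sketch of how StageArgs resolves a stage-suffixed option follows; the option names build_swift and build_swift_stage2 are invented for illustration, and StageArgs, STAGE_1, and STAGE_2 are the definitions from compiler_stage.py above.
from argparse import Namespace

args = Namespace(build_swift=True, build_swift_stage2=False)

stage1 = StageArgs(STAGE_1, args)  # postfix "" -> resolves args.build_swift
stage2 = StageArgs(STAGE_2, args)  # postfix "_stage2" -> resolves args.build_swift_stage2

print(stage1.build_swift)   # True
print(stage2.build_swift)   # False
print(stage2.missing_opt)   # None: unknown keys fall back to None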
539b680a6c9bc416d28ba02087db2e46b4037f06
|
salt/modules/openbsdservice.py
|
salt/modules/openbsdservice.py
|
'''
The service module for OpenBSD
'''
import os
# XXX enable/disable support would be nice
def __virtual__():
'''
Only work on OpenBSD
'''
if __grains__['os'] == 'OpenBSD' and os.path.exists('/etc/rc.d/rc.subr'):
return 'service'
return False
def start(name):
'''
Start the specified service
CLI Example::
salt '*' service.start <service name>
'''
cmd = '/etc/rc.d/{0} -f start'.format(name)
return not __salt__['cmd.retcode'](cmd)
def stop(name):
'''
Stop the specified service
CLI Example::
salt '*' service.stop <service name>
'''
cmd = '/etc/rc.d/{0} -f stop'.format(name)
return not __salt__['cmd.retcode'](cmd)
def restart(name):
'''
Restart the named service
CLI Example::
salt '*' service.restart <service name>
'''
cmd = '/etc/rc.d/{0} -f restart'.format(name)
return not __salt__['cmd.retcode'](cmd)
def status(name):
'''
Return the status for a service, returns a bool whether the service is
running.
CLI Example::
salt '*' service.status <service name>
'''
cmd = '/etc/rc.d/{0} -f check'.format(name)
return not __salt__['cmd.retcode'](cmd)
|
Add an openbsd service module.
|
Add an openbsd service module.
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
Add an openbsd service module.
|
'''
The service module for OpenBSD
'''
import os
# XXX enable/disable support would be nice
def __virtual__():
'''
Only work on OpenBSD
'''
if __grains__['os'] == 'OpenBSD' and os.path.exists('/etc/rc.d/rc.subr'):
return 'service'
return False
def start(name):
'''
Start the specified service
CLI Example::
salt '*' service.start <service name>
'''
cmd = '/etc/rc.d/{0} -f start'.format(name)
return not __salt__['cmd.retcode'](cmd)
def stop(name):
'''
Stop the specified service
CLI Example::
salt '*' service.stop <service name>
'''
cmd = '/etc/rc.d/{0} -f stop'.format(name)
return not __salt__['cmd.retcode'](cmd)
def restart(name):
'''
Restart the named service
CLI Example::
salt '*' service.restart <service name>
'''
cmd = '/etc/rc.d/{0} -f restart'.format(name)
return not __salt__['cmd.retcode'](cmd)
def status(name):
'''
Return the status for a service, returns a bool whether the service is
running.
CLI Example::
salt '*' service.status <service name>
'''
cmd = '/etc/rc.d/{0} -f check'.format(name)
return not __salt__['cmd.retcode'](cmd)
|
<commit_before><commit_msg>Add an openbsd service module.<commit_after>
|
'''
The service module for OpenBSD
'''
import os
# XXX enable/disable support would be nice
def __virtual__():
'''
Only work on OpenBSD
'''
if __grains__['os'] == 'OpenBSD' and os.path.exists('/etc/rc.d/rc.subr'):
return 'service'
return False
def start(name):
'''
Start the specified service
CLI Example::
salt '*' service.start <service name>
'''
cmd = '/etc/rc.d/{0} -f start'.format(name)
return not __salt__['cmd.retcode'](cmd)
def stop(name):
'''
Stop the specified service
CLI Example::
salt '*' service.stop <service name>
'''
cmd = '/etc/rc.d/{0} -f stop'.format(name)
return not __salt__['cmd.retcode'](cmd)
def restart(name):
'''
Restart the named service
CLI Example::
salt '*' service.restart <service name>
'''
cmd = '/etc/rc.d/{0} -f restart'.format(name)
return not __salt__['cmd.retcode'](cmd)
def status(name):
'''
Return the status for a service, returns a bool whether the service is
running.
CLI Example::
salt '*' service.status <service name>
'''
cmd = '/etc/rc.d/{0} -f check'.format(name)
return not __salt__['cmd.retcode'](cmd)
|
Add an openbsd service module.'''
The service module for OpenBSD
'''
import os
# XXX enable/disable support would be nice
def __virtual__():
'''
Only work on OpenBSD
'''
if __grains__['os'] == 'OpenBSD' and os.path.exists('/etc/rc.d/rc.subr'):
return 'service'
return False
def start(name):
'''
Start the specified service
CLI Example::
salt '*' service.start <service name>
'''
cmd = '/etc/rc.d/{0} -f start'.format(name)
return not __salt__['cmd.retcode'](cmd)
def stop(name):
'''
Stop the specified service
CLI Example::
salt '*' service.stop <service name>
'''
cmd = '/etc/rc.d/{0} -f stop'.format(name)
return not __salt__['cmd.retcode'](cmd)
def restart(name):
'''
Restart the named service
CLI Example::
salt '*' service.restart <service name>
'''
cmd = '/etc/rc.d/{0} -f restart'.format(name)
return not __salt__['cmd.retcode'](cmd)
def status(name):
'''
Return the status for a service, returns a bool whether the service is
running.
CLI Example::
salt '*' service.status <service name>
'''
cmd = '/etc/rc.d/{0} -f check'.format(name)
return not __salt__['cmd.retcode'](cmd)
|
<commit_before><commit_msg>Add an openbsd service module.<commit_after>'''
The service module for OpenBSD
'''
import os
# XXX enable/disable support would be nice
def __virtual__():
'''
Only work on OpenBSD
'''
if __grains__['os'] == 'OpenBSD' and os.path.exists('/etc/rc.d/rc.subr'):
return 'service'
return False
def start(name):
'''
Start the specified service
CLI Example::
salt '*' service.start <service name>
'''
cmd = '/etc/rc.d/{0} -f start'.format(name)
return not __salt__['cmd.retcode'](cmd)
def stop(name):
'''
Stop the specified service
CLI Example::
salt '*' service.stop <service name>
'''
cmd = '/etc/rc.d/{0} -f stop'.format(name)
return not __salt__['cmd.retcode'](cmd)
def restart(name):
'''
Restart the named service
CLI Example::
salt '*' service.restart <service name>
'''
cmd = '/etc/rc.d/{0} -f restart'.format(name)
return not __salt__['cmd.retcode'](cmd)
def status(name):
'''
Return the status for a service, returns a bool whether the service is
running.
CLI Example::
salt '*' service.status <service name>
'''
cmd = '/etc/rc.d/{0} -f check'.format(name)
return not __salt__['cmd.retcode'](cmd)
|
|
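Every function in the module reduces to the same idiom: run the rc.d control script and treat exit status 0 as success, which the `not cmd.retcode` pattern converts into a bool. Here is the same idiom without the salt loader, as a standalone sketch; the service name in the usage comment is illustrative.
import subprocess

def rcd(name, action):
    # /etc/rc.d/<name> -f <action> exits 0 on success, which maps to True here.
    return subprocess.call(['/etc/rc.d/{0}'.format(name), '-f', action]) == 0

# e.g. rcd('ntpd', 'check') -> True while ntpd is running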
b9f2a5816c55334013a8447da705066f783dfda2
|
paperwork_parser/base.py
|
paperwork_parser/base.py
|
import inspect
from enum import IntEnum
from pdfquery import PDFQuery
class DocFieldType(IntEnum):
NUMBER = 1
TEXT = 2
CUSTOM = 3 # TODO: Forget this and have 'type' take a callable instead?
class DocField(object):
def __init__(self, bbox, type=DocFieldType.TEXT, required=False,
description=None):
self.bbox = bbox
self.type = type
self.required = required
self.description = description
class DocSchema(object):
@classmethod
def as_pdf_selectors(cls, field_name=None):
"""Return pdfminer selector for specified field. If no field is
specified, then selectors for all fields are returned.
"""
if field_name is not None:
field = getattr(cls, field_name, None)
if (field is None) or (not isinstance(field, DocField)):
raise ValueError(
'{field} is not a DocField attribute on {klass}'.format(
field=field_name, klass=cls.__name__
)
)
            pdf_fields = [(field_name, field)]  # key by the requested field name
else:
pdf_fields = inspect.getmembers(
cls, lambda f: isinstance(f, DocField)
)
selectors = [('with_formatter', 'text')]
selectors.extend(
(key, 'LTTextLineHorizontal:in_bbox("{bbox}")'.format(
bbox=', '.join(str(coord) for coord in field.bbox)
))
for key, field in pdf_fields
)
return selectors
class Document(object):
variants = []
def __init__(self, file):
# TODO: Check for str or actual file inst?
self._file = PDFQuery(file)
self._data = {}
self._check_configuration()
@property
def data(self):
"""Read only property that is loaded with document data once
`extract()` is called.
"""
return self._data
def detect_variant(self):
raise NotImplementedError('Subclass Document and override this method')
def extract(self):
self._file.load()
variant = self.detect_variant()
selectors = variant.as_pdf_selectors()
extracted = self._file.extract(selectors)
self._data = extracted
def _check_configuration(self):
if not self.variants:
raise ValueError(
"The class '{name}' hasn't been configured with any variants."
" Set {name}.variants to a list of DocSchema types.".format(
name=self.__class__.__name__
)
)
|
Add primitives for pdf parsing
|
Add primitives for pdf parsing
|
Python
|
mit
|
loanzen/zen_document_parser
|
Add primitives for pdf parsing
|
import inspect
from enum import IntEnum
from pdfquery import PDFQuery
class DocFieldType(IntEnum):
NUMBER = 1
TEXT = 2
CUSTOM = 3 # TODO: Forget this and have 'type' take a callable instead?
class DocField(object):
def __init__(self, bbox, type=DocFieldType.TEXT, required=False,
description=None):
self.bbox = bbox
self.type = type
self.required = required
self.description = description
class DocSchema(object):
@classmethod
def as_pdf_selectors(cls, field_name=None):
"""Return pdfminer selector for specified field. If no field is
specified, then selectors for all fields are returned.
"""
if field_name is not None:
field = getattr(cls, field_name, None)
if (field is None) or (not isinstance(field, DocField)):
raise ValueError(
'{field} is not a DocField attribute on {klass}'.format(
field=field_name, klass=cls.__name__
)
)
            pdf_fields = [(field_name, field)]  # key by the requested field name
else:
pdf_fields = inspect.getmembers(
cls, lambda f: isinstance(f, DocField)
)
selectors = [('with_formatter', 'text')]
selectors.extend(
(key, 'LTTextLineHorizontal:in_bbox("{bbox}")'.format(
bbox=', '.join(str(coord) for coord in field.bbox)
))
for key, field in pdf_fields
)
return selectors
class Document(object):
variants = []
def __init__(self, file):
# TODO: Check for str or actual file inst?
self._file = PDFQuery(file)
self._data = {}
self._check_configuration()
@property
def data(self):
"""Read only property that is loaded with document data once
`extract()` is called.
"""
return self._data
def detect_variant(self):
raise NotImplementedError('Subclass Document and override this method')
def extract(self):
self._file.load()
variant = self.detect_variant()
selectors = variant.as_pdf_selectors()
extracted = self._file.extract(selectors)
self._data = extracted
def _check_configuration(self):
if not self.variants:
raise ValueError(
"The class '{name}' hasn't been configured with any variants."
" Set {name}.variants to a list of DocSchema types.".format(
name=self.__class__.__name__
)
)
|
<commit_before><commit_msg>Add primitives for pdf parsing<commit_after>
|
import inspect
from enum import IntEnum
from pdfquery import PDFQuery
class DocFieldType(IntEnum):
NUMBER = 1
TEXT = 2
CUSTOM = 3 # TODO: Forget this and have 'type' take a callable instead?
class DocField(object):
def __init__(self, bbox, type=DocFieldType.TEXT, required=False,
description=None):
self.bbox = bbox
self.type = type
self.required = required
self.description = description
class DocSchema(object):
@classmethod
def as_pdf_selectors(cls, field_name=None):
"""Return pdfminer selector for specified field. If no field is
specified, then selectors for all fields are returned.
"""
if field_name is not None:
field = getattr(cls, field_name, None)
if (field is None) or (not isinstance(field, DocField)):
raise ValueError(
'{field} is not a DocField attribute on {klass}'.format(
field=field_name, klass=cls.__name__
)
)
            pdf_fields = [(field_name, field)]  # key by the requested field name
else:
pdf_fields = inspect.getmembers(
cls, lambda f: isinstance(f, DocField)
)
selectors = [('with_formatter', 'text')]
selectors.extend(
(key, 'LTTextLineHorizontal:in_bbox("{bbox}")'.format(
bbox=', '.join(str(coord) for coord in field.bbox)
))
for key, field in pdf_fields
)
return selectors
class Document(object):
variants = []
def __init__(self, file):
# TODO: Check for str or actual file inst?
self._file = PDFQuery(file)
self._data = {}
self._check_configuration()
@property
def data(self):
"""Read only property that is loaded with document data once
`extract()` is called.
"""
return self._data
def detect_variant(self):
raise NotImplementedError('Subclass Document and override this method')
def extract(self):
self._file.load()
variant = self.detect_variant()
selectors = variant.as_pdf_selectors()
extracted = self._file.extract(selectors)
self._data = extracted
def _check_configuration(self):
if not self.variants:
raise ValueError(
"The class '{name}' hasn't been configured with any variants."
" Set {name}.variants to a list of DocSchema types.".format(
name=self.__class__.__name__
)
)
|
Add primitives for pdf parsingimport inspect
from enum import IntEnum
from pdfquery import PDFQuery
class DocFieldType(IntEnum):
NUMBER = 1
TEXT = 2
CUSTOM = 3 # TODO: Forget this and have 'type' take a callable instead?
class DocField(object):
def __init__(self, bbox, type=DocFieldType.TEXT, required=False,
description=None):
self.bbox = bbox
self.type = type
self.required = required
self.description = description
class DocSchema(object):
@classmethod
def as_pdf_selectors(cls, field_name=None):
"""Return pdfminer selector for specified field. If no field is
specified, then selectors for all fields are returned.
"""
if field_name is not None:
field = getattr(cls, field_name, None)
if (field is None) or (not isinstance(field, DocField)):
raise ValueError(
'{field} is not a DocField attribute on {klass}'.format(
field=field_name, klass=cls.__name__
)
)
            pdf_fields = [(field_name, field)]  # key by the requested field name
else:
pdf_fields = inspect.getmembers(
cls, lambda f: isinstance(f, DocField)
)
selectors = [('with_formatter', 'text')]
selectors.extend(
(key, 'LTTextLineHorizontal:in_bbox("{bbox}")'.format(
bbox=', '.join(str(coord) for coord in field.bbox)
))
for key, field in pdf_fields
)
return selectors
class Document(object):
variants = []
def __init__(self, file):
# TODO: Check for str or actual file inst?
self._file = PDFQuery(file)
self._data = {}
self._check_configuration()
@property
def data(self):
"""Read only property that is loaded with document data once
`extract()` is called.
"""
return self._data
def detect_variant(self):
raise NotImplementedError('Subclass Document and override this method')
def extract(self):
self._file.load()
variant = self.detect_variant()
selectors = variant.as_pdf_selectors()
extracted = self._file.extract(selectors)
self._data = extracted
def _check_configuration(self):
if not self.variants:
raise ValueError(
"The class '{name}' hasn't been configured with any variants."
" Set {name}.variants to a list of DocSchema types.".format(
name=self.__class__.__name__
)
)
|
<commit_before><commit_msg>Add primitives for pdf parsing<commit_after>import inspect
from enum import IntEnum
from pdfquery import PDFQuery
class DocFieldType(IntEnum):
NUMBER = 1
TEXT = 2
CUSTOM = 3 # TODO: Forget this and have 'type' take a callable instead?
class DocField(object):
def __init__(self, bbox, type=DocFieldType.TEXT, required=False,
description=None):
self.bbox = bbox
self.type = type
self.required = required
self.description = description
class DocSchema(object):
@classmethod
def as_pdf_selectors(cls, field_name=None):
"""Return pdfminer selector for specified field. If no field is
specified, then selectors for all fields are returned.
"""
if field_name is not None:
field = getattr(cls, field_name, None)
if (field is None) or (not isinstance(field, DocField)):
raise ValueError(
'{field} is not a DocField attribute on {klass}'.format(
field=field_name, klass=cls.__name__
)
)
            pdf_fields = [(field_name, field)]  # key by the requested field name
else:
pdf_fields = inspect.getmembers(
cls, lambda f: isinstance(f, DocField)
)
selectors = [('with_formatter', 'text')]
selectors.extend(
(key, 'LTTextLineHorizontal:in_bbox("{bbox}")'.format(
bbox=', '.join(str(coord) for coord in field.bbox)
))
for key, field in pdf_fields
)
return selectors
class Document(object):
variants = []
def __init__(self, file):
# TODO: Check for str or actual file inst?
self._file = PDFQuery(file)
self._data = {}
self._check_configuration()
@property
def data(self):
"""Read only property that is loaded with document data once
`extract()` is called.
"""
return self._data
def detect_variant(self):
raise NotImplementedError('Subclass Document and override this method')
def extract(self):
self._file.load()
variant = self.detect_variant()
selectors = variant.as_pdf_selectors()
extracted = self._file.extract(selectors)
self._data = extracted
def _check_configuration(self):
if not self.variants:
raise ValueError(
"The class '{name}' hasn't been configured with any variants."
" Set {name}.variants to a list of DocSchema types.".format(
name=self.__class__.__name__
)
)
|
|
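To use these primitives you subclass DocSchema, declaring DocField bounding boxes, and Document, choosing a variant. A hedged sketch follows: the class names, field names, bbox coordinates, and input file are all invented for illustration, while DocSchema, DocField, and Document are the classes defined in the record above.
class ITRVSchema(DocSchema):
    # bbox coordinates are placeholders; real values come from the PDF layout
    assessment_year = DocField(bbox=(400, 750, 550, 770))
    pan = DocField(bbox=(60, 700, 200, 720), required=True)

class ITRVDocument(Document):
    variants = [ITRVSchema]

    def detect_variant(self):
        # A real implementation would inspect self._file to pick a schema.
        return self.variants[0]

doc = ITRVDocument('itrv.pdf')  # hypothetical input file
doc.extract()
print(doc.data)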
a4ffbfa5204a2eeaba7291881064d0c6ba5ebc3e
|
test/filter_test.py
|
test/filter_test.py
|
import unittest
import vapoursynth as vs
class FilterTestSequence(unittest.TestCase):
def setUp(self):
self.core = vs.get_core()
self.Transpose = self.core.std.Transpose
self.BlankClip = self.core.std.BlankClip
def test_transpose8_test(self):
clip = self.BlankClip(format=vs.YUV420P8, color=[0, 0, 0], width=1156, height=752)
self.Transpose(clip).get_frame(0)
def test_transpose16(self):
clip = self.BlankClip(format=vs.YUV420P16, color=[0, 0, 0], width=1156, height=752)
self.Transpose(clip).get_frame(0)
def test_transpose32(self):
clip = self.BlankClip(format=vs.YUV420P32, color=[0, 0, 0], width=1156, height=752)
self.Transpose(clip).get_frame(0)
def test_transposeS(self):
clip = self.BlankClip(format=vs.YUV444PS, color=[0, 0, 0], width=1156, height=752)
self.Transpose(clip).get_frame(0)
if __name__ == '__main__':
unittest.main()
|
Add general filter test file
|
Add general filter test file
|
Python
|
lgpl-2.1
|
vapoursynth/vapoursynth,Kamekameha/vapoursynth,Kamekameha/vapoursynth,Kamekameha/vapoursynth,vapoursynth/vapoursynth,vapoursynth/vapoursynth,vapoursynth/vapoursynth,Kamekameha/vapoursynth
|
Add general filter test file
|
import unittest
import vapoursynth as vs
class FilterTestSequence(unittest.TestCase):
def setUp(self):
self.core = vs.get_core()
self.Transpose = self.core.std.Transpose
self.BlankClip = self.core.std.BlankClip
def test_transpose8_test(self):
clip = self.BlankClip(format=vs.YUV420P8, color=[0, 0, 0], width=1156, height=752)
self.Transpose(clip).get_frame(0)
def test_transpose16(self):
clip = self.BlankClip(format=vs.YUV420P16, color=[0, 0, 0], width=1156, height=752)
self.Transpose(clip).get_frame(0)
def test_transpose32(self):
clip = self.BlankClip(format=vs.YUV420P32, color=[0, 0, 0], width=1156, height=752)
self.Transpose(clip).get_frame(0)
def test_transposeS(self):
clip = self.BlankClip(format=vs.YUV444PS, color=[0, 0, 0], width=1156, height=752)
self.Transpose(clip).get_frame(0)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add general filter test file<commit_after>
|
import unittest
import vapoursynth as vs
class FilterTestSequence(unittest.TestCase):
def setUp(self):
self.core = vs.get_core()
self.Transpose = self.core.std.Transpose
self.BlankClip = self.core.std.BlankClip
def test_transpose8_test(self):
clip = self.BlankClip(format=vs.YUV420P8, color=[0, 0, 0], width=1156, height=752)
self.Transpose(clip).get_frame(0)
def test_transpose16(self):
clip = self.BlankClip(format=vs.YUV420P16, color=[0, 0, 0], width=1156, height=752)
self.Transpose(clip).get_frame(0)
def test_transpose32(self):
clip = self.BlankClip(format=vs.YUV420P32, color=[0, 0, 0], width=1156, height=752)
self.Transpose(clip).get_frame(0)
def test_transposeS(self):
clip = self.BlankClip(format=vs.YUV444PS, color=[0, 0, 0], width=1156, height=752)
self.Transpose(clip).get_frame(0)
if __name__ == '__main__':
unittest.main()
|
Add general filter test fileimport unittest
import vapoursynth as vs
class FilterTestSequence(unittest.TestCase):
def setUp(self):
self.core = vs.get_core()
self.Transpose = self.core.std.Transpose
self.BlankClip = self.core.std.BlankClip
def test_transpose8_test(self):
clip = self.BlankClip(format=vs.YUV420P8, color=[0, 0, 0], width=1156, height=752)
self.Transpose(clip).get_frame(0)
def test_transpose16(self):
clip = self.BlankClip(format=vs.YUV420P16, color=[0, 0, 0], width=1156, height=752)
self.Transpose(clip).get_frame(0)
def test_transpose32(self):
clip = self.BlankClip(format=vs.YUV420P32, color=[0, 0, 0], width=1156, height=752)
self.Transpose(clip).get_frame(0)
def test_transposeS(self):
clip = self.BlankClip(format=vs.YUV444PS, color=[0, 0, 0], width=1156, height=752)
self.Transpose(clip).get_frame(0)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add general filter test file<commit_after>import unittest
import vapoursynth as vs
class FilterTestSequence(unittest.TestCase):
def setUp(self):
self.core = vs.get_core()
self.Transpose = self.core.std.Transpose
self.BlankClip = self.core.std.BlankClip
def test_transpose8_test(self):
clip = self.BlankClip(format=vs.YUV420P8, color=[0, 0, 0], width=1156, height=752)
self.Transpose(clip).get_frame(0)
def test_transpose16(self):
clip = self.BlankClip(format=vs.YUV420P16, color=[0, 0, 0], width=1156, height=752)
self.Transpose(clip).get_frame(0)
def test_transpose32(self):
clip = self.BlankClip(format=vs.YUV420P32, color=[0, 0, 0], width=1156, height=752)
self.Transpose(clip).get_frame(0)
def test_transposeS(self):
clip = self.BlankClip(format=vs.YUV444PS, color=[0, 0, 0], width=1156, height=752)
self.Transpose(clip).get_frame(0)
if __name__ == '__main__':
unittest.main()
|
|
bfbf598cb80ad5df0f6032197f173da794311a33
|
test/test_logger.py
|
test/test_logger.py
|
# encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
from __future__ import print_function
from __future__ import unicode_literals
import logbook
import pytest
from simplesqlite import (
set_logger,
set_log_level,
)
class Test_set_logger(object):
@pytest.mark.parametrize(["value"], [
[True],
[False],
])
def test_smoke(self, value):
set_logger(value)
class Test_set_log_level(object):
@pytest.mark.parametrize(["value"], [
[logbook.CRITICAL],
[logbook.ERROR],
[logbook.WARNING],
[logbook.NOTICE],
[logbook.INFO],
[logbook.DEBUG],
[logbook.TRACE],
[logbook.NOTSET],
])
def test_smoke(self, value):
set_log_level(value)
@pytest.mark.parametrize(["value", "expected"], [
[None, LookupError],
["unexpected", LookupError],
])
def test_exception(self, value, expected):
with pytest.raises(expected):
set_log_level(value)
|
Add test cases for the logger
|
Add test cases for the logger
|
Python
|
mit
|
thombashi/SimpleSQLite,thombashi/SimpleSQLite
|
Add test cases for the logger
|
# encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
from __future__ import print_function
from __future__ import unicode_literals
import logbook
import pytest
from simplesqlite import (
set_logger,
set_log_level,
)
class Test_set_logger(object):
@pytest.mark.parametrize(["value"], [
[True],
[False],
])
def test_smoke(self, value):
set_logger(value)
class Test_set_log_level(object):
@pytest.mark.parametrize(["value"], [
[logbook.CRITICAL],
[logbook.ERROR],
[logbook.WARNING],
[logbook.NOTICE],
[logbook.INFO],
[logbook.DEBUG],
[logbook.TRACE],
[logbook.NOTSET],
])
def test_smoke(self, value):
set_log_level(value)
@pytest.mark.parametrize(["value", "expected"], [
[None, LookupError],
["unexpected", LookupError],
])
def test_exception(self, value, expected):
with pytest.raises(expected):
set_log_level(value)
|
<commit_before><commit_msg>Add test cases for the logger<commit_after>
|
# encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
from __future__ import print_function
from __future__ import unicode_literals
import logbook
import pytest
from simplesqlite import (
set_logger,
set_log_level,
)
class Test_set_logger(object):
@pytest.mark.parametrize(["value"], [
[True],
[False],
])
def test_smoke(self, value):
set_logger(value)
class Test_set_log_level(object):
@pytest.mark.parametrize(["value"], [
[logbook.CRITICAL],
[logbook.ERROR],
[logbook.WARNING],
[logbook.NOTICE],
[logbook.INFO],
[logbook.DEBUG],
[logbook.TRACE],
[logbook.NOTSET],
])
def test_smoke(self, value):
set_log_level(value)
@pytest.mark.parametrize(["value", "expected"], [
[None, LookupError],
["unexpected", LookupError],
])
def test_exception(self, value, expected):
with pytest.raises(expected):
set_log_level(value)
|
Add test cases for the logger# encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
from __future__ import print_function
from __future__ import unicode_literals
import logbook
import pytest
from simplesqlite import (
set_logger,
set_log_level,
)
class Test_set_logger(object):
@pytest.mark.parametrize(["value"], [
[True],
[False],
])
def test_smoke(self, value):
set_logger(value)
class Test_set_log_level(object):
@pytest.mark.parametrize(["value"], [
[logbook.CRITICAL],
[logbook.ERROR],
[logbook.WARNING],
[logbook.NOTICE],
[logbook.INFO],
[logbook.DEBUG],
[logbook.TRACE],
[logbook.NOTSET],
])
def test_smoke(self, value):
set_log_level(value)
@pytest.mark.parametrize(["value", "expected"], [
[None, LookupError],
["unexpected", LookupError],
])
def test_exception(self, value, expected):
with pytest.raises(expected):
set_log_level(value)
|
<commit_before><commit_msg>Add test cases for the logger<commit_after># encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
from __future__ import print_function
from __future__ import unicode_literals
import logbook
import pytest
from simplesqlite import (
set_logger,
set_log_level,
)
class Test_set_logger(object):
@pytest.mark.parametrize(["value"], [
[True],
[False],
])
def test_smoke(self, value):
set_logger(value)
class Test_set_log_level(object):
@pytest.mark.parametrize(["value"], [
[logbook.CRITICAL],
[logbook.ERROR],
[logbook.WARNING],
[logbook.NOTICE],
[logbook.INFO],
[logbook.DEBUG],
[logbook.TRACE],
[logbook.NOTSET],
])
def test_smoke(self, value):
set_log_level(value)
@pytest.mark.parametrize(["value", "expected"], [
[None, LookupError],
["unexpected", LookupError],
])
def test_exception(self, value, expected):
with pytest.raises(expected):
set_log_level(value)
|
|
ee1e6587044aa5dc9b47776c648c7da55220306a
|
scratchpad/push-compass-values.py
|
scratchpad/push-compass-values.py
|
#!/usr/bin/env python3
from sense_hat import SenseHat
from pymongo import MongoClient
sense = SenseHat()
client = MongoClient("mongodb://192.168.0.128:27017")
db = client.g2x
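# Benchmark loop: 1000 sequential compass reads, each written straight to MongoDB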
for _ in range(0, 1000):
reading = sense.get_compass()
db.compass.insert_one({"angle": reading})
# db.compass.insert_one({"angle": 359.0})
|
Create script to time compass reads and db writes
|
Create script to time compass reads and db writes
|
Python
|
bsd-3-clause
|
gizmo-cda/g2x-submarine-v2,gizmo-cda/g2x-submarine-v2,gizmo-cda/g2x-submarine-v2,gizmo-cda/g2x-submarine-v2
|
Create script to time compass reads and db writes
|
#!/usr/bin/env python3
from sense_hat import SenseHat
from pymongo import MongoClient
sense = SenseHat()
client = MongoClient("mongodb://192.168.0.128:27017")
db = client.g2x
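# Benchmark loop: 1000 sequential compass reads, each written straight to MongoDB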
for _ in range(0, 1000):
reading = sense.get_compass()
db.compass.insert_one({"angle": reading})
# db.compass.insert_one({"angle": 359.0})
|
<commit_before><commit_msg>Create script to time compass reads and db writes<commit_after>
|
#!/usr/bin/env python3
from sense_hat import SenseHat
from pymongo import MongoClient
sense = SenseHat()
client = MongoClient("mongodb://192.168.0.128:27017")
db = client.g2x
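# Benchmark loop: 1000 sequential compass reads, each written straight to MongoDB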
for _ in range(0, 1000):
reading = sense.get_compass()
db.compass.insert_one({"angle": reading})
# db.compass.insert_one({"angle": 359.0})
|
Create script to time compass reads and db writes#!/usr/bin/env python3
from sense_hat import SenseHat
from pymongo import MongoClient
sense = SenseHat()
client = MongoClient("mongodb://192.168.0.128:27017")
db = client.g2x
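# Benchmark loop: 1000 sequential compass reads, each written straight to MongoDB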
for _ in range(0, 1000):
reading = sense.get_compass()
db.compass.insert_one({"angle": reading})
# db.compass.insert_one({"angle": 359.0})
|
<commit_before><commit_msg>Create script to time compass reads and db writes<commit_after>#!/usr/bin/env python3
from sense_hat import SenseHat
from pymongo import MongoClient
sense = SenseHat()
client = MongoClient("mongodb://192.168.0.128:27017")
db = client.g2x
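# Benchmark loop: 1000 sequential compass reads, each written straight to MongoDB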
for _ in range(0, 1000):
reading = sense.get_compass()
db.compass.insert_one({"angle": reading})
# db.compass.insert_one({"angle": 359.0})
|
|
efafb94f6512a00baa13ecc2ab18e80441f41922
|
tests/decorators.py
|
tests/decorators.py
|
from selenium.common.exceptions import TimeoutException, StaleElementReferenceException
def retry_on_stale_element_exception(func):
def wrapped(*args, **kwargs):
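        # Retry the call once more if the element reference went stale between lookup and use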
try:
result = func(*args, **kwargs)
except StaleElementReferenceException:
result = func(*args, **kwargs)
return result
return wrapped
|
Add decorator that will catch and retry on StaleElementReferenceException
|
Add decorator that will catch and retry on StaleElementReferenceException
|
Python
|
mit
|
alphagov/notifications-functional-tests,alphagov/notifications-functional-tests
|
Add decorator that will catch and retry on StaleElementReferenceException
|
from selenium.common.exceptions import TimeoutException, StaleElementReferenceException
def retry_on_stale_element_exception(func):
def wrapped(*args, **kwargs):
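        # Retry the call once more if the element reference went stale between lookup and use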
try:
result = func(*args, **kwargs)
except StaleElementReferenceException:
result = func(*args, **kwargs)
return result
return wrapped
|
<commit_before><commit_msg>Add decorator that will catch and retry on StaleElementReferenceException<commit_after>
|
from selenium.common.exceptions import TimeoutException, StaleElementReferenceException
def retry_on_stale_element_exception(func):
def wrapped(*args, **kwargs):
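        # Retry the call once more if the element reference went stale between lookup and use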
try:
result = func(*args, **kwargs)
except StaleElementReferenceException:
result = func(*args, **kwargs)
return result
return wrapped
|
Add decorator that will catch and retry on StaleElementReferenceExceptionfrom selenium.common.exceptions import TimeoutException, StaleElementReferenceException
def retry_on_stale_element_exception(func):
def wrapped(*args, **kwargs):
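        # Retry the call once more if the element reference went stale between lookup and use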
try:
result = func(*args, **kwargs)
except StaleElementReferenceException:
result = func(*args, **kwargs)
return result
return wrapped
|
<commit_before><commit_msg>Add decorator that will catch and retry on StaleElementReferenceException<commit_after>from selenium.common.exceptions import TimeoutException, StaleElementReferenceException
def retry_on_stale_element_exception(func):
def wrapped(*args, **kwargs):
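        # Retry the call once more if the element reference went stale between lookup and use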
try:
result = func(*args, **kwargs)
except StaleElementReferenceException:
result = func(*args, **kwargs)
return result
return wrapped
|
|
88a74b351c29c4c6e337c76b105f9bc25591f755
|
tests/test_resources.py
|
tests/test_resources.py
|
from conference_scheduler.resources import Event
def test_can_construct_event():
e = Event(
name='example',
duration=60,
demand=100,
tags=['beginner', 'python'],
unavailability=[]
)
assert isinstance(e, Event)
assert e.name == 'example'
assert e.tags == ['beginner', 'python']
assert e.unavailability == []
def test_optional_args_to_event_are_defaulted():
e = Event(name='example', duration=60, demand=100)
assert e.tags == []
assert e.unavailability == []
def test_optional_args_are_safely_mutable():
    # Construct an instance of `Event` with the optional arguments
    # omitted, then assign it a tag
e = Event(name='example', duration=60, demand=100)
assert e.tags == []
e.tags.append('intermediate')
assert e.tags == ['intermediate']
# Now create a second instance of `Event`, and check we haven't
# polluted the default arguments.
f = Event(name='another example', duration=30, demand=50)
assert f.tags == []
|
Add tests for optional arguments on Event
|
Add tests for optional arguments on Event
|
Python
|
mit
|
PyconUK/ConferenceScheduler
|
Add tests for optional arguments on Event
|
from conference_scheduler.resources import Event
def test_can_construct_event():
e = Event(
name='example',
duration=60,
demand=100,
tags=['beginner', 'python'],
unavailability=[]
)
assert isinstance(e, Event)
assert e.name == 'example'
assert e.tags == ['beginner', 'python']
assert e.unavailability == []
def test_optional_args_to_event_are_defaulted():
e = Event(name='example', duration=60, demand=100)
assert e.tags == []
assert e.unavailability == []
def test_optional_args_are_safely_mutable():
    # Construct an instance of `Event` with the optional arguments
    # omitted, then assign it a tag
e = Event(name='example', duration=60, demand=100)
assert e.tags == []
e.tags.append('intermediate')
assert e.tags == ['intermediate']
# Now create a second instance of `Event`, and check we haven't
# polluted the default arguments.
f = Event(name='another example', duration=30, demand=50)
assert f.tags == []
|
<commit_before><commit_msg>Add tests for optional arguments on Event<commit_after>
|
from conference_scheduler.resources import Event
def test_can_construct_event():
e = Event(
name='example',
duration=60,
demand=100,
tags=['beginner', 'python'],
unavailability=[]
)
assert isinstance(e, Event)
assert e.name == 'example'
assert e.tags == ['beginner', 'python']
assert e.unavailability == []
def test_optional_args_to_event_are_defaulted():
e = Event(name='example', duration=60, demand=100)
assert e.tags == []
assert e.unavailability == []
def test_optional_args_are_safely_mutable():
    # Construct an instance of `Event` with the optional arguments
    # omitted, then assign it a tag
e = Event(name='example', duration=60, demand=100)
assert e.tags == []
e.tags.append('intermediate')
assert e.tags == ['intermediate']
# Now create a second instance of `Event`, and check we haven't
# polluted the default arguments.
f = Event(name='another example', duration=30, demand=50)
assert f.tags == []
|
Add tests for optional arguments on Eventfrom conference_scheduler.resources import Event
def test_can_construct_event():
e = Event(
name='example',
duration=60,
demand=100,
tags=['beginner', 'python'],
unavailability=[]
)
assert isinstance(e, Event)
assert e.name == 'example'
assert e.tags == ['beginner', 'python']
assert e.unavailability == []
def test_optional_args_to_event_are_defaulted():
e = Event(name='example', duration=60, demand=100)
assert e.tags == []
assert e.unavailability == []
def test_optional_args_are_safely_mutable():
    # Construct an instance of `Event` with the optional arguments
    # omitted, then assign it a tag
e = Event(name='example', duration=60, demand=100)
assert e.tags == []
e.tags.append('intermediate')
assert e.tags == ['intermediate']
# Now create a second instance of `Event`, and check we haven't
# polluted the default arguments.
f = Event(name='another example', duration=30, demand=50)
assert f.tags == []
|
<commit_before><commit_msg>Add tests for optional arguments on Event<commit_after>from conference_scheduler.resources import Event
def test_can_construct_event():
e = Event(
name='example',
duration=60,
demand=100,
tags=['beginner', 'python'],
unavailability=[]
)
assert isinstance(e, Event)
assert e.name == 'example'
assert e.tags == ['beginner', 'python']
assert e.unavailability == []
def test_optional_args_to_event_are_defaulted():
e = Event(name='example', duration=60, demand=100)
assert e.tags == []
assert e.unavailability == []
def test_optional_args_are_safely_mutable():
    # Construct an instance of `Event` with the optional arguments
    # omitted, then assign it a tag
e = Event(name='example', duration=60, demand=100)
assert e.tags == []
e.tags.append('intermediate')
assert e.tags == ['intermediate']
# Now create a second instance of `Event`, and check we haven't
# polluted the default arguments.
f = Event(name='another example', duration=30, demand=50)
assert f.tags == []
|
|
763775b1295b920a2440dec0275349af4b9e7cb4
|
candidates/management/commands/candidates_record_new_versions.py
|
candidates/management/commands/candidates_record_new_versions.py
|
from datetime import datetime
from random import randint
import sys
from django.core.management.base import BaseCommand
from django.db import transaction
from candidates.models import PersonExtra
class Command(BaseCommand):
help = "Record the current version for all people"
def add_arguments(self, parser):
parser.add_argument(
'--person-id',
help='Only record the current version for the person with this ID'
)
parser.add_argument(
            '--source', help='The source of information for this version'
)
def handle(self, *args, **options):
kwargs = {}
if options['person_id']:
kwargs['base__id'] = options['person_id']
if options['source']:
source = options['source']
else:
source = 'New version recorded from the command-line'
with transaction.atomic():
for person_extra in PersonExtra.objects.filter(**kwargs):
print u"Recording the current version of {name} ({id})".format(
name=person_extra.base.name, id=person_extra.base.id
).encode('utf-8')
person_extra.record_version(
{
'information_source': source,
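                    # Random value up to sys.maxint, rendered as 16 zero-padded hex digits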
'version_id': "{0:016x}".format(randint(0, sys.maxint)),
'timestamp': datetime.utcnow().isoformat(),
}
)
person_extra.save()
|
Add a command to record the current version of every person
|
Add a command to record the current version of every person
|
Python
|
agpl-3.0
|
datamade/yournextmp-popit,mysociety/yournextmp-popit,neavouli/yournextrepresentative,neavouli/yournextrepresentative,datamade/yournextmp-popit,DemocracyClub/yournextrepresentative,mysociety/yournextmp-popit,datamade/yournextmp-popit,datamade/yournextmp-popit,neavouli/yournextrepresentative,mysociety/yournextrepresentative,mysociety/yournextrepresentative,neavouli/yournextrepresentative,mysociety/yournextrepresentative,mysociety/yournextmp-popit,neavouli/yournextrepresentative,DemocracyClub/yournextrepresentative,mysociety/yournextmp-popit,mysociety/yournextrepresentative,mysociety/yournextrepresentative,datamade/yournextmp-popit,mysociety/yournextmp-popit,DemocracyClub/yournextrepresentative
|
Add a command to record the current version of every person
|
from datetime import datetime
from random import randint
import sys
from django.core.management.base import BaseCommand
from django.db import transaction
from candidates.models import PersonExtra
class Command(BaseCommand):
help = "Record the current version for all people"
def add_arguments(self, parser):
parser.add_argument(
'--person-id',
help='Only record the current version for the person with this ID'
)
parser.add_argument(
            '--source', help='The source of information for this version'
)
def handle(self, *args, **options):
kwargs = {}
if options['person_id']:
kwargs['base__id'] = options['person_id']
if options['source']:
source = options['source']
else:
source = 'New version recorded from the command-line'
with transaction.atomic():
for person_extra in PersonExtra.objects.filter(**kwargs):
print u"Recording the current version of {name} ({id})".format(
name=person_extra.base.name, id=person_extra.base.id
).encode('utf-8')
person_extra.record_version(
{
'information_source': source,
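                    # Random value up to sys.maxint, rendered as 16 zero-padded hex digits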
'version_id': "{0:016x}".format(randint(0, sys.maxint)),
'timestamp': datetime.utcnow().isoformat(),
}
)
person_extra.save()
|
<commit_before><commit_msg>Add a command to record the current version of every person<commit_after>
|
from datetime import datetime
from random import randint
import sys
from django.core.management.base import BaseCommand
from django.db import transaction
from candidates.models import PersonExtra
class Command(BaseCommand):
help = "Record the current version for all people"
def add_arguments(self, parser):
parser.add_argument(
'--person-id',
help='Only record the current version for the person with this ID'
)
parser.add_argument(
            '--source', help='The source of information for this version'
)
def handle(self, *args, **options):
kwargs = {}
if options['person_id']:
kwargs['base__id'] = options['person_id']
if options['source']:
source = options['source']
else:
source = 'New version recorded from the command-line'
with transaction.atomic():
for person_extra in PersonExtra.objects.filter(**kwargs):
print u"Recording the current version of {name} ({id})".format(
name=person_extra.base.name, id=person_extra.base.id
).encode('utf-8')
person_extra.record_version(
{
'information_source': source,
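                    # Random value up to sys.maxint, rendered as 16 zero-padded hex digits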
'version_id': "{0:016x}".format(randint(0, sys.maxint)),
'timestamp': datetime.utcnow().isoformat(),
}
)
person_extra.save()
|
Add a command to record the current version of every personfrom datetime import datetime
from random import randint
import sys
from django.core.management.base import BaseCommand
from django.db import transaction
from candidates.models import PersonExtra
class Command(BaseCommand):
help = "Record the current version for all people"
def add_arguments(self, parser):
parser.add_argument(
'--person-id',
help='Only record the current version for the person with this ID'
)
parser.add_argument(
            '--source', help='The source of information for this version'
)
def handle(self, *args, **options):
kwargs = {}
if options['person_id']:
kwargs['base__id'] = options['person_id']
if options['source']:
source = options['source']
else:
source = 'New version recorded from the command-line'
with transaction.atomic():
for person_extra in PersonExtra.objects.filter(**kwargs):
print u"Recording the current version of {name} ({id})".format(
name=person_extra.base.name, id=person_extra.base.id
).encode('utf-8')
person_extra.record_version(
{
'information_source': source,
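                    # Random value up to sys.maxint, rendered as 16 zero-padded hex digits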
'version_id': "{0:016x}".format(randint(0, sys.maxint)),
'timestamp': datetime.utcnow().isoformat(),
}
)
person_extra.save()
|
<commit_before><commit_msg>Add a command to record the current version of every person<commit_after>from datetime import datetime
from random import randint
import sys
from django.core.management.base import BaseCommand
from django.db import transaction
from candidates.models import PersonExtra
class Command(BaseCommand):
help = "Record the current version for all people"
def add_arguments(self, parser):
parser.add_argument(
'--person-id',
help='Only record the current version for the person with this ID'
)
parser.add_argument(
            '--source', help='The source of information for this version'
)
def handle(self, *args, **options):
kwargs = {}
if options['person_id']:
kwargs['base__id'] = options['person_id']
if options['source']:
source = options['source']
else:
source = 'New version recorded from the command-line'
with transaction.atomic():
for person_extra in PersonExtra.objects.filter(**kwargs):
print u"Recording the current version of {name} ({id})".format(
name=person_extra.base.name, id=person_extra.base.id
).encode('utf-8')
person_extra.record_version(
{
'information_source': source,
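                    # Random value up to sys.maxint, rendered as 16 zero-padded hex digits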
'version_id': "{0:016x}".format(randint(0, sys.maxint)),
'timestamp': datetime.utcnow().isoformat(),
}
)
person_extra.save()
|
|
f0d3cf2bfcaa569f47eb888d5553659c5c57f07d
|
ajaximage/urls.py
|
ajaximage/urls.py
|
from django.conf.urls.defaults import url, patterns
from ajaximage.views import ajaximage
from ajaximage.forms import FileForm
urlpatterns = patterns('',
url('^upload/(?P<upload_to>.*)/(?P<max_width>\d+)/(?P<max_height>\d+)/(?P<crop>\d+)', ajaximage, {
'form_class': FileForm,
'response': lambda name, url: url,
}, name='ajaximage'),
)
|
try:  # pre 1.6
from django.conf.urls.defaults import url, patterns
except ImportError:
from django.conf.urls import url, patterns
from ajaximage.views import ajaximage
from ajaximage.forms import FileForm
urlpatterns = patterns('',
url('^upload/(?P<upload_to>.*)/(?P<max_width>\d+)/(?P<max_height>\d+)/(?P<crop>\d+)', ajaximage, {
'form_class': FileForm,
'response': lambda name, url: url,
}, name='ajaximage'),
)
|
Fix import for 1.6 version.
|
Fix import for 1.6 version.
|
Python
|
mit
|
bradleyg/django-ajaximage,bradleyg/django-ajaximage,subhaoi/kioskuser,subhaoi/kioskuser,subhaoi/kioskuser,bradleyg/django-ajaximage
|
from django.conf.urls.defaults import url, patterns
from ajaximage.views import ajaximage
from ajaximage.forms import FileForm
urlpatterns = patterns('',
url('^upload/(?P<upload_to>.*)/(?P<max_width>\d+)/(?P<max_height>\d+)/(?P<crop>\d+)', ajaximage, {
'form_class': FileForm,
'response': lambda name, url: url,
}, name='ajaximage'),
)
Fix import for 1.6 version.
|
try:  # pre 1.6
from django.conf.urls.defaults import url, patterns
except ImportError:
from django.conf.urls import url, patterns
from ajaximage.views import ajaximage
from ajaximage.forms import FileForm
urlpatterns = patterns('',
url('^upload/(?P<upload_to>.*)/(?P<max_width>\d+)/(?P<max_height>\d+)/(?P<crop>\d+)', ajaximage, {
'form_class': FileForm,
'response': lambda name, url: url,
}, name='ajaximage'),
)
|
<commit_before>from django.conf.urls.defaults import url, patterns
from ajaximage.views import ajaximage
from ajaximage.forms import FileForm
urlpatterns = patterns('',
url('^upload/(?P<upload_to>.*)/(?P<max_width>\d+)/(?P<max_height>\d+)/(?P<crop>\d+)', ajaximage, {
'form_class': FileForm,
'response': lambda name, url: url,
}, name='ajaximage'),
)
<commit_msg>Fix import for 1.6 version.<commit_after>
|
try:  # pre 1.6
from django.conf.urls.defaults import url, patterns
except ImportError:
from django.conf.urls import url, patterns
from ajaximage.views import ajaximage
from ajaximage.forms import FileForm
urlpatterns = patterns('',
url('^upload/(?P<upload_to>.*)/(?P<max_width>\d+)/(?P<max_height>\d+)/(?P<crop>\d+)', ajaximage, {
'form_class': FileForm,
'response': lambda name, url: url,
}, name='ajaximage'),
)
|
from django.conf.urls.defaults import url, patterns
from ajaximage.views import ajaximage
from ajaximage.forms import FileForm
urlpatterns = patterns('',
url('^upload/(?P<upload_to>.*)/(?P<max_width>\d+)/(?P<max_height>\d+)/(?P<crop>\d+)', ajaximage, {
'form_class': FileForm,
'response': lambda name, url: url,
}, name='ajaximage'),
)
Fix import for 1.6 version.try:  # pre 1.6
from django.conf.urls.defaults import url, patterns
except ImportError:
from django.conf.urls import url, patterns
from ajaximage.views import ajaximage
from ajaximage.forms import FileForm
urlpatterns = patterns('',
url('^upload/(?P<upload_to>.*)/(?P<max_width>\d+)/(?P<max_height>\d+)/(?P<crop>\d+)', ajaximage, {
'form_class': FileForm,
'response': lambda name, url: url,
}, name='ajaximage'),
)
|
<commit_before>from django.conf.urls.defaults import url, patterns
from ajaximage.views import ajaximage
from ajaximage.forms import FileForm
urlpatterns = patterns('',
url('^upload/(?P<upload_to>.*)/(?P<max_width>\d+)/(?P<max_height>\d+)/(?P<crop>\d+)', ajaximage, {
'form_class': FileForm,
'response': lambda name, url: url,
}, name='ajaximage'),
)
<commit_msg>Fix import for 1.6 version.<commit_after>try:  # pre 1.6
from django.conf.urls.defaults import url, patterns
except ImportError:
from django.conf.urls import url, patterns
from ajaximage.views import ajaximage
from ajaximage.forms import FileForm
urlpatterns = patterns('',
url('^upload/(?P<upload_to>.*)/(?P<max_width>\d+)/(?P<max_height>\d+)/(?P<crop>\d+)', ajaximage, {
'form_class': FileForm,
'response': lambda name, url: url,
}, name='ajaximage'),
)
|
c3a6347357e2ee604db0271cde829e6795794cf9
|
cheat/printer.py
|
cheat/printer.py
|
#!/usr/bin/env python3
class Printer:
"""
    Base class for the cheatsheet printers. Takes care of the actual printing.
    Args:
        Takes a configparser object to print.
"""
def __init__(self, configparser):
self.configparser = configparser
def printsheet(self, template):
for description in self.configparser['cheats']:
value = self.configparser['cheats'][description]
output = template.format(description, value)
print(output)
class InlinePrinter(Printer):
"""
    Prints the cheatsheet line-by-line, so that it's grep-able.
"""
@property
def width(self):
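        # Length of the longest cheat name, as a string for use in the format spec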
width = len(max(self.configparser['cheats'], key=len))
return str(width)
def printsheet(self):
print_format = "{0:<" + self.width + "} {1}"
super().printsheet(print_format)
class BreaklinePrinter(Printer):
"""
Prints the cheatsheet and breaks the line after the description.
"""
# TODO Maybe use ljust rjust
def printsheet(self):
print_format = "{0} \n {1}"
super().printsheet(print_format)
class PrinterFactory:
"""
Creates a Printer object from the String given by the argparse option.
"""
printer_classes = {
"InlinePrinter": InlinePrinter,
"BreaklinePrinter": BreaklinePrinter
}
@staticmethod
def create_printer(name):
return PrinterFactory.printer_classes[name]
|
Refactor - Moved Printer classes to separate file
|
Refactor - Moved Printer classes to separate file
|
Python
|
mit
|
martialblog/cheatsheet
|
Refactor - Moved Printer classes to separate file
|
#!/usr/bin/env python3
class Printer:
"""
    Base class for the cheatsheet printers. Takes care of the actual printing.
    Args:
        Takes a configparser object to print.
"""
def __init__(self, configparser):
self.configparser = configparser
def printsheet(self, template):
for description in self.configparser['cheats']:
value = self.configparser['cheats'][description]
output = template.format(description, value)
print(output)
class InlinePrinter(Printer):
"""
    Prints the cheatsheet line-by-line, so that it's grep-able.
"""
@property
def width(self):
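        # Length of the longest cheat name, as a string for use in the format spec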
width = len(max(self.configparser['cheats'], key=len))
return str(width)
def printsheet(self):
print_format = "{0:<" + self.width + "} {1}"
super().printsheet(print_format)
class BreaklinePrinter(Printer):
"""
Prints the cheatsheet and breaks the line after the description.
"""
# TODO Maybe use ljust rjust
def printsheet(self):
print_format = "{0} \n {1}"
super().printsheet(print_format)
class PrinterFactory:
"""
Creates a Printer object from the String given by the argparse option.
"""
printer_classes = {
"InlinePrinter": InlinePrinter,
"BreaklinePrinter": BreaklinePrinter
}
@staticmethod
def create_printer(name):
return PrinterFactory.printer_classes[name]
|
<commit_before><commit_msg>Refactor - Moved Printer classes to separate file<commit_after>
|
#!/usr/bin/env python3
class Printer:
"""
    Base class for the cheatsheet printers. Takes care of the actual printing.
    Args:
        Takes a configparser object to print.
"""
def __init__(self, configparser):
self.configparser = configparser
def printsheet(self, template):
for description in self.configparser['cheats']:
value = self.configparser['cheats'][description]
output = template.format(description, value)
print(output)
class InlinePrinter(Printer):
"""
    Prints the cheatsheet line-by-line, so that it's grep-able.
"""
@property
def width(self):
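        # Length of the longest cheat name, as a string for use in the format spec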
width = len(max(self.configparser['cheats'], key=len))
return str(width)
def printsheet(self):
print_format = "{0:<" + self.width + "} {1}"
super().printsheet(print_format)
class BreaklinePrinter(Printer):
"""
Prints the cheatsheet and breaks the line after the description.
"""
# TODO Maybe use ljust rjust
def printsheet(self):
print_format = "{0} \n {1}"
super().printsheet(print_format)
class PrinterFactory:
"""
Creates a Printer object from the String given by the argparse option.
"""
printer_classes = {
"InlinePrinter": InlinePrinter,
"BreaklinePrinter": BreaklinePrinter
}
@staticmethod
def create_printer(name):
return PrinterFactory.printer_classes[name]
|
Refactor - Moved Printer classes to separate file#!/usr/bin/env python3
class Printer:
"""
    Base class for the cheatsheet printers. Takes care of the actual printing.
    Args:
        Takes a configparser object to print.
"""
def __init__(self, configparser):
self.configparser = configparser
def printsheet(self, template):
for description in self.configparser['cheats']:
value = self.configparser['cheats'][description]
output = template.format(description, value)
print(output)
class InlinePrinter(Printer):
"""
    Prints the cheatsheet line-by-line, so that it's grep-able.
"""
@property
def width(self):
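        # Length of the longest cheat name, as a string for use in the format spec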
width = len(max(self.configparser['cheats'], key=len))
return str(width)
def printsheet(self):
print_format = "{0:<" + self.width + "} {1}"
super().printsheet(print_format)
class BreaklinePrinter(Printer):
"""
Prints the cheatsheet and breaks the line after the description.
"""
# TODO Maybe use ljust rjust
def printsheet(self):
print_format = "{0} \n {1}"
super().printsheet(print_format)
class PrinterFactory:
"""
Creates a Printer object from the String given by the argparse option.
"""
printer_classes = {
"InlinePrinter": InlinePrinter,
"BreaklinePrinter": BreaklinePrinter
}
@staticmethod
def create_printer(name):
return PrinterFactory.printer_classes[name]
|
<commit_before><commit_msg>Refactor - Moved Printer classes to separate file<commit_after>#!/usr/bin/env python3
class Printer:
"""
    Base class for the cheatsheet printers. Takes care of the actual printing.
    Args:
        Takes a configparser object to print.
"""
def __init__(self, configparser):
self.configparser = configparser
def printsheet(self, template):
for description in self.configparser['cheats']:
value = self.configparser['cheats'][description]
output = template.format(description, value)
print(output)
class InlinePrinter(Printer):
"""
    Prints the cheatsheet line-by-line, so that it's grep-able.
"""
@property
def width(self):
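        # Length of the longest cheat name, as a string for use in the format spec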
width = len(max(self.configparser['cheats'], key=len))
return str(width)
def printsheet(self):
print_format = "{0:<" + self.width + "} {1}"
super().printsheet(print_format)
class BreaklinePrinter(Printer):
"""
Prints the cheatsheet and breaks the line after the description.
"""
# TODO Maybe use ljust rjust
def printsheet(self):
print_format = "{0} \n {1}"
super().printsheet(print_format)
class PrinterFactory:
"""
Creates a Printer object from the String given by the argparse option.
"""
printer_classes = {
"InlinePrinter": InlinePrinter,
"BreaklinePrinter": BreaklinePrinter
}
@staticmethod
def create_printer(name):
return PrinterFactory.printer_classes[name]
|
|
ac7b572b8bf7d690ac70b479814deed2c0a772e4
|
webapp/tests/test_finders.py
|
webapp/tests/test_finders.py
|
import random
import time
from django.test import TestCase
from graphite.intervals import Interval, IntervalSet
from graphite.node import LeafNode, BranchNode
from graphite.storage import Store, get_finder
class FinderTest(TestCase):
def test_custom_finder(self):
store = Store(finders=[get_finder('tests.test_finders.DummyFinder')])
nodes = list(store.find("foo"))
self.assertEqual(len(nodes), 1)
self.assertEqual(nodes[0].path, 'foo')
nodes = list(store.find('bar.*'))
self.assertEqual(len(nodes), 10)
node = nodes[0]
self.assertEqual(node.path.split('.')[0], 'bar')
time_info, series = node.fetch(100, 200)
self.assertEqual(time_info, (100, 200, 10))
self.assertEqual(len(series), 10)
class DummyReader(object):
__slots__ = ('path',)
def __init__(self, path):
self.path = path
def fetch(self, start_time, end_time):
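        # Fixed 10-second step: one randomly chosen sample per step in the window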
npoints = (end_time - start_time) / 10
return (start_time, end_time, 10), [
random.choice([None, 1, 2, 3]) for i in range(npoints)
]
def get_intervals(self):
return IntervalSet([Interval(time.time() - 3600, time.time())])
class DummyFinder(object):
def find_nodes(self, query):
if query.pattern == 'foo':
yield BranchNode('foo')
elif query.pattern == 'bar.*':
for i in xrange(10):
path = 'bar.{0}'.format(i)
yield LeafNode(path, DummyReader(path))
|
Add simple test for a custom finder
|
Add simple test for a custom finder
|
Python
|
apache-2.0
|
Squarespace/graphite-web,disqus/graphite-web,dbn/graphite-web,brutasse/graphite-web,AICIDNN/graphite-web,brutasse/graphite-web,esnet/graphite-web,disqus/graphite-web,pu239ppy/graphite-web,nkhuyu/graphite-web,graphite-server/graphite-web,phreakocious/graphite-web,goir/graphite-web,JeanFred/graphite-web,zBMNForks/graphite-web,Invoca/graphite-web,lfckop/graphite-web,cybem/graphite-web-iow,bbc/graphite-web,cosm0s/graphite-web,nkhuyu/graphite-web,cbowman0/graphite-web,markolson/graphite-web,cgvarela/graphite-web,g76r/graphite-web,Squarespace/graphite-web,dhtech/graphite-web,Invoca/graphite-web,lyft/graphite-web,graphite-server/graphite-web,synedge/graphite-web,ZelunZhang/graphite-web,EinsamHauer/graphite-web-iow,obfuscurity/graphite-web,brutasse/graphite-web,section-io/graphite-web,gwaldo/graphite-web,dbn/graphite-web,kkdk5535/graphite-web,bruce-lyft/graphite-web,jssjr/graphite-web,drax68/graphite-web,redice/graphite-web,kkdk5535/graphite-web,section-io/graphite-web,edwardmlyte/graphite-web,cosm0s/graphite-web,g76r/graphite-web,esnet/graphite-web,penpen/graphite-web,axibase/graphite-web,g76r/graphite-web,krux/graphite-web,JeanFred/graphite-web,DanCech/graphite-web,g76r/graphite-web,krux/graphite-web,atnak/graphite-web,DanCech/graphite-web,pu239ppy/graphite-web,JeanFred/graphite-web,drax68/graphite-web,obfuscurity/graphite-web,markolson/graphite-web,ZelunZhang/graphite-web,ZelunZhang/graphite-web,mcoolive/graphite-web,axibase/graphite-web,atnak/graphite-web,johnseekins/graphite-web,bpaquet/graphite-web,jssjr/graphite-web,ZelunZhang/graphite-web,markolson/graphite-web,cbowman0/graphite-web,cosm0s/graphite-web,AICIDNN/graphite-web,Squarespace/graphite-web,Skyscanner/graphite-web,bmhatfield/graphite-web,phreakocious/graphite-web,AICIDNN/graphite-web,section-io/graphite-web,krux/graphite-web,Aloomaio/graphite-web,deniszh/graphite-web,jssjr/graphite-web,bmhatfield/graphite-web,piotr1212/graphite-web,bpaquet/graphite-web,mcoolive/graphite-web,DanCech/graphite-web,graphite-project/graphite-web,krux/graphite-web,bmhatfield/graphite-web,blacked/graphite-web,EinsamHauer/graphite-web-iow,cgvarela/graphite-web,esnet/graphite-web,mcoolive/graphite-web,lfckop/graphite-web,penpen/graphite-web,piotr1212/graphite-web,axibase/graphite-web,krux/graphite-web,axibase/graphite-web,Invoca/graphite-web,dhtech/graphite-web,zBMNForks/graphite-web,Skyscanner/graphite-web,bmhatfield/graphite-web,bruce-lyft/graphite-web,Invoca/graphite-web,phreakocious/graphite-web,obfuscurity/graphite-web,disqus/graphite-web,graphite-project/graphite-web,graphite-project/graphite-web,redice/graphite-web,EinsamHauer/graphite-web-iow,bbc/graphite-web,brutasse/graphite-web,blacked/graphite-web,obfuscurity/graphite-web,brutasse/graphite-web,phreakocious/graphite-web,dbn/graphite-web,penpen/graphite-web,redice/graphite-web,gwaldo/graphite-web,Skyscanner/graphite-web,bbc/graphite-web,piotr1212/graphite-web,redice/graphite-web,dbn/graphite-web,jssjr/graphite-web,synedge/graphite-web,lyft/graphite-web,zBMNForks/graphite-web,pu239ppy/graphite-web,graphite-project/graphite-web,synedge/graphite-web,jssjr/graphite-web,phreakocious/graphite-web,synedge/graphite-web,cybem/graphite-web-iow,deniszh/graphite-web,blacked/graphite-web,johnseekins/graphite-web,cbowman0/graphite-web,SEJeff/graphite-web,AICIDNN/graphite-web,synedge/graphite-web,AICIDNN/graphite-web,lfckop/graphite-web,johnseekins/graphite-web,criteo-forks/graphite-web,cbowman0/graphite-web,SEJeff/graphite-web,ZelunZhang/graphite-web,edwardmlyte/graphite-web,Squarespace/graphite-web,graphite-proj
ect/graphite-web,bruce-lyft/graphite-web,Skyscanner/graphite-web,cosm0s/graphite-web,pu239ppy/graphite-web,section-io/graphite-web,goir/graphite-web,esnet/graphite-web,goir/graphite-web,jssjr/graphite-web,markolson/graphite-web,Invoca/graphite-web,lfckop/graphite-web,disqus/graphite-web,blacked/graphite-web,EinsamHauer/graphite-web-iow,criteo-forks/graphite-web,phreakocious/graphite-web,gwaldo/graphite-web,drax68/graphite-web,disqus/graphite-web,deniszh/graphite-web,obfuscurity/graphite-web,edwardmlyte/graphite-web,Squarespace/graphite-web,edwardmlyte/graphite-web,cosm0s/graphite-web,cgvarela/graphite-web,kkdk5535/graphite-web,AICIDNN/graphite-web,johnseekins/graphite-web,Skyscanner/graphite-web,Squarespace/graphite-web,penpen/graphite-web,DanCech/graphite-web,zBMNForks/graphite-web,cgvarela/graphite-web,EinsamHauer/graphite-web-iow,johnseekins/graphite-web,atnak/graphite-web,Aloomaio/graphite-web,cbowman0/graphite-web,atnak/graphite-web,graphite-server/graphite-web,esnet/graphite-web,nkhuyu/graphite-web,edwardmlyte/graphite-web,axibase/graphite-web,g76r/graphite-web,bpaquet/graphite-web,mcoolive/graphite-web,krux/graphite-web,section-io/graphite-web,blacked/graphite-web,bruce-lyft/graphite-web,mcoolive/graphite-web,axibase/graphite-web,Invoca/graphite-web,nkhuyu/graphite-web,Aloomaio/graphite-web,synedge/graphite-web,lfckop/graphite-web,nkhuyu/graphite-web,JeanFred/graphite-web,cbowman0/graphite-web,criteo-forks/graphite-web,kkdk5535/graphite-web,bpaquet/graphite-web,pu239ppy/graphite-web,cgvarela/graphite-web,Aloomaio/graphite-web,bbc/graphite-web,dhtech/graphite-web,JeanFred/graphite-web,gwaldo/graphite-web,blacked/graphite-web,section-io/graphite-web,lyft/graphite-web,piotr1212/graphite-web,bbc/graphite-web,drax68/graphite-web,penpen/graphite-web,Aloomaio/graphite-web,zBMNForks/graphite-web,criteo-forks/graphite-web,bruce-lyft/graphite-web,SEJeff/graphite-web,Skyscanner/graphite-web,cosm0s/graphite-web,piotr1212/graphite-web,ZelunZhang/graphite-web,dbn/graphite-web,edwardmlyte/graphite-web,redice/graphite-web,DanCech/graphite-web,graphite-server/graphite-web,graphite-server/graphite-web,cgvarela/graphite-web,deniszh/graphite-web,cybem/graphite-web-iow,gwaldo/graphite-web,criteo-forks/graphite-web,nkhuyu/graphite-web,goir/graphite-web,cybem/graphite-web-iow,lfckop/graphite-web,bmhatfield/graphite-web,atnak/graphite-web,obfuscurity/graphite-web,graphite-server/graphite-web,johnseekins/graphite-web,dhtech/graphite-web,Aloomaio/graphite-web,DanCech/graphite-web,bpaquet/graphite-web,atnak/graphite-web,gwaldo/graphite-web,penpen/graphite-web,deniszh/graphite-web,lyft/graphite-web,lyft/graphite-web,markolson/graphite-web,dhtech/graphite-web,g76r/graphite-web,drax68/graphite-web,kkdk5535/graphite-web,JeanFred/graphite-web,zBMNForks/graphite-web,bmhatfield/graphite-web,EinsamHauer/graphite-web-iow,cybem/graphite-web-iow,goir/graphite-web,deniszh/graphite-web,SEJeff/graphite-web,bruce-lyft/graphite-web,graphite-project/graphite-web,cybem/graphite-web-iow,SEJeff/graphite-web,disqus/graphite-web,kkdk5535/graphite-web,goir/graphite-web,drax68/graphite-web,criteo-forks/graphite-web,brutasse/graphite-web,dbn/graphite-web,mcoolive/graphite-web,bpaquet/graphite-web,pu239ppy/graphite-web,piotr1212/graphite-web,lyft/graphite-web,redice/graphite-web
|
Add simple test for a custom finder
|
import random
import time
from django.test import TestCase
from graphite.intervals import Interval, IntervalSet
from graphite.node import LeafNode, BranchNode
from graphite.storage import Store, get_finder
class FinderTest(TestCase):
def test_custom_finder(self):
store = Store(finders=[get_finder('tests.test_finders.DummyFinder')])
nodes = list(store.find("foo"))
self.assertEqual(len(nodes), 1)
self.assertEqual(nodes[0].path, 'foo')
nodes = list(store.find('bar.*'))
self.assertEqual(len(nodes), 10)
node = nodes[0]
self.assertEqual(node.path.split('.')[0], 'bar')
time_info, series = node.fetch(100, 200)
self.assertEqual(time_info, (100, 200, 10))
self.assertEqual(len(series), 10)
class DummyReader(object):
__slots__ = ('path',)
def __init__(self, path):
self.path = path
def fetch(self, start_time, end_time):
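        # Fixed 10-second step: one randomly chosen sample per step in the window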
npoints = (end_time - start_time) / 10
return (start_time, end_time, 10), [
random.choice([None, 1, 2, 3]) for i in range(npoints)
]
def get_intervals(self):
return IntervalSet([Interval(time.time() - 3600, time.time())])
class DummyFinder(object):
def find_nodes(self, query):
if query.pattern == 'foo':
yield BranchNode('foo')
elif query.pattern == 'bar.*':
for i in xrange(10):
path = 'bar.{0}'.format(i)
yield LeafNode(path, DummyReader(path))
|
<commit_before><commit_msg>Add simple test for a custom finder<commit_after>
|
import random
import time
from django.test import TestCase
from graphite.intervals import Interval, IntervalSet
from graphite.node import LeafNode, BranchNode
from graphite.storage import Store, get_finder
class FinderTest(TestCase):
def test_custom_finder(self):
store = Store(finders=[get_finder('tests.test_finders.DummyFinder')])
nodes = list(store.find("foo"))
self.assertEqual(len(nodes), 1)
self.assertEqual(nodes[0].path, 'foo')
nodes = list(store.find('bar.*'))
self.assertEqual(len(nodes), 10)
node = nodes[0]
self.assertEqual(node.path.split('.')[0], 'bar')
time_info, series = node.fetch(100, 200)
self.assertEqual(time_info, (100, 200, 10))
self.assertEqual(len(series), 10)
class DummyReader(object):
__slots__ = ('path',)
def __init__(self, path):
self.path = path
def fetch(self, start_time, end_time):
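        # Fixed 10-second step: one randomly chosen sample per step in the window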
npoints = (end_time - start_time) / 10
return (start_time, end_time, 10), [
random.choice([None, 1, 2, 3]) for i in range(npoints)
]
def get_intervals(self):
return IntervalSet([Interval(time.time() - 3600, time.time())])
class DummyFinder(object):
def find_nodes(self, query):
if query.pattern == 'foo':
yield BranchNode('foo')
elif query.pattern == 'bar.*':
for i in xrange(10):
path = 'bar.{0}'.format(i)
yield LeafNode(path, DummyReader(path))
|
Add simple test for a custom finderimport random
import time
from django.test import TestCase
from graphite.intervals import Interval, IntervalSet
from graphite.node import LeafNode, BranchNode
from graphite.storage import Store, get_finder
class FinderTest(TestCase):
def test_custom_finder(self):
store = Store(finders=[get_finder('tests.test_finders.DummyFinder')])
nodes = list(store.find("foo"))
self.assertEqual(len(nodes), 1)
self.assertEqual(nodes[0].path, 'foo')
nodes = list(store.find('bar.*'))
self.assertEqual(len(nodes), 10)
node = nodes[0]
self.assertEqual(node.path.split('.')[0], 'bar')
time_info, series = node.fetch(100, 200)
self.assertEqual(time_info, (100, 200, 10))
self.assertEqual(len(series), 10)
class DummyReader(object):
__slots__ = ('path',)
def __init__(self, path):
self.path = path
def fetch(self, start_time, end_time):
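        # Fixed 10-second step: one randomly chosen sample per step in the window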
npoints = (end_time - start_time) / 10
return (start_time, end_time, 10), [
random.choice([None, 1, 2, 3]) for i in range(npoints)
]
def get_intervals(self):
return IntervalSet([Interval(time.time() - 3600, time.time())])
class DummyFinder(object):
def find_nodes(self, query):
if query.pattern == 'foo':
yield BranchNode('foo')
elif query.pattern == 'bar.*':
for i in xrange(10):
path = 'bar.{0}'.format(i)
yield LeafNode(path, DummyReader(path))
|
<commit_before><commit_msg>Add simple test for a custom finder<commit_after>import random
import time
from django.test import TestCase
from graphite.intervals import Interval, IntervalSet
from graphite.node import LeafNode, BranchNode
from graphite.storage import Store, get_finder
class FinderTest(TestCase):
def test_custom_finder(self):
store = Store(finders=[get_finder('tests.test_finders.DummyFinder')])
nodes = list(store.find("foo"))
self.assertEqual(len(nodes), 1)
self.assertEqual(nodes[0].path, 'foo')
nodes = list(store.find('bar.*'))
self.assertEqual(len(nodes), 10)
node = nodes[0]
self.assertEqual(node.path.split('.')[0], 'bar')
time_info, series = node.fetch(100, 200)
self.assertEqual(time_info, (100, 200, 10))
self.assertEqual(len(series), 10)
class DummyReader(object):
__slots__ = ('path',)
def __init__(self, path):
self.path = path
def fetch(self, start_time, end_time):
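        # Fixed 10-second step: one randomly chosen sample per step in the window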
npoints = (end_time - start_time) / 10
return (start_time, end_time, 10), [
random.choice([None, 1, 2, 3]) for i in range(npoints)
]
def get_intervals(self):
return IntervalSet([Interval(time.time() - 3600, time.time())])
class DummyFinder(object):
def find_nodes(self, query):
if query.pattern == 'foo':
yield BranchNode('foo')
elif query.pattern == 'bar.*':
for i in xrange(10):
path = 'bar.{0}'.format(i)
yield LeafNode(path, DummyReader(path))
|
|
21f02e452546cdbe4209bfbb8ec6addecf083ebf
|
spam/common/collections.py
|
spam/common/collections.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from collections import namedtuple
Data = namedtuple('Data', ['X', 'Y', 'y', ])
Dataset = namedtuple('Dataset', ['unlabel', 'train', 'test', ])
|
Add data and dataset collection.
|
Add data and dataset collection.
|
Python
|
mit
|
benigls/spam,benigls/spam
|
Add data and dataset collection.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from collections import namedtuple
Data = namedtuple('Data', ['X', 'Y', 'y', ])
Dataset = namedtuple('Dataset', ['unlabel', 'train', 'test', ])
|
<commit_before><commit_msg>Add data and dataset collection.<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from collections import namedtuple
Data = namedtuple('Data', ['X', 'Y', 'y', ])
Dataset = namedtuple('Dataset', ['unlabel', 'train', 'test', ])
|
Add data and dataset collection.#!/usr/bin/env python
# -*- coding: utf-8 -*-
from collections import namedtuple
Data = namedtuple('Data', ['X', 'Y', 'y', ])
Dataset = namedtuple('Dataset', ['unlabel', 'train', 'test', ])
|
<commit_before><commit_msg>Add data and dataset collection.<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
from collections import namedtuple
Data = namedtuple('Data', ['X', 'Y', 'y', ])
Dataset = namedtuple('Dataset', ['unlabel', 'train', 'test', ])
|
|
dcc0d0373b6cb72a62c3279ae753f7cf0663f63b
|
heroku.py
|
heroku.py
|
#!/usr/bin/env python
from evesrp import create_app
from evesrp.killmail import CRESTMail, ShipURLMixin
import evesrp.auth.testauth
from os import environ as env
from binascii import unhexlify
skel_url = 'https://wiki.eveonline.com/en/wiki/{name}'
class EOWikiCREST(CRESTMail, ShipURLMixin(skel_url)): pass
app = create_app()
app.config['SQLALCHEMY_DATABASE_URI'] = env['DATABASE_URL']
app.config['SECRET_KEY'] = unhexlify(env['SECRET_KEY'])
app.config['USER_AGENT_EMAIL'] = 'paxswill@paxswill.com'
app.config['AUTH_METHODS'] = ['evesrp.auth.testauth.TestAuth']
app.config['CORE_AUTH_PRIVATE_KEY'] = env.get('CORE_PRIVATE_KEY')
app.config['CORE_AUTH_PUBLIC_KEY'] = env.get('CORE_PUBLIC_KEY')
app.config['CORE_AUTH_IDENTIFIER'] = env.get('CORE_IDENTIFIER')
app.config['KILLMAIL_SOURCES'] = [EOWikiCREST]
if env.get('DEBUG') is not None:
app.debug = True
if __name__ == '__main__':
print("Creating databases...")
app.extensions['sqlalchemy'].db.create_all(app=app)
|
#!/usr/bin/env python
from evesrp import create_app
from evesrp.killmail import CRESTMail, ShipURLMixin
from evesrp.auth.testauth import TestAuth
from os import environ as env
from binascii import unhexlify
skel_url = 'https://wiki.eveonline.com/en/wiki/{name}'
class EOWikiCREST(CRESTMail, ShipURLMixin(skel_url)): pass
app = create_app()
app.config['SQLALCHEMY_DATABASE_URI'] = env['DATABASE_URL']
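# SECRET_KEY is supplied hex-encoded in the environment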
app.config['SECRET_KEY'] = unhexlify(env['SECRET_KEY'])
app.config['USER_AGENT_EMAIL'] = 'paxswill@paxswill.com'
app.config['AUTH_METHODS'] = [TestAuth()]
app.config['CORE_AUTH_PRIVATE_KEY'] = env.get('CORE_PRIVATE_KEY')
app.config['CORE_AUTH_PUBLIC_KEY'] = env.get('CORE_PUBLIC_KEY')
app.config['CORE_AUTH_IDENTIFIER'] = env.get('CORE_IDENTIFIER')
app.config['KILLMAIL_SOURCES'] = [EOWikiCREST]
if env.get('DEBUG') is not None:
app.debug = True
if __name__ == '__main__':
# So we get the database tables for these
from evesrp.auth.testauth import TestUser, TestGroup
print("Creating databases...")
app.extensions['sqlalchemy'].db.create_all(app=app)
|
Use new AuthMethod configuration for Heroku
|
Use new AuthMethod configuration for Heroku
|
Python
|
bsd-2-clause
|
eskwire/evesrp,paxswill/evesrp,eskwire/evesrp,eskwire/evesrp,paxswill/evesrp,eskwire/evesrp,paxswill/evesrp
|
#!/usr/bin/env python
from evesrp import create_app
from evesrp.killmail import CRESTMail, ShipURLMixin
import evesrp.auth.testauth
from os import environ as env
from binascii import unhexlify
skel_url = 'https://wiki.eveonline.com/en/wiki/{name}'
class EOWikiCREST(CRESTMail, ShipURLMixin(skel_url)): pass
app = create_app()
app.config['SQLALCHEMY_DATABASE_URI'] = env['DATABASE_URL']
app.config['SECRET_KEY'] = unhexlify(env['SECRET_KEY'])
app.config['USER_AGENT_EMAIL'] = 'paxswill@paxswill.com'
app.config['AUTH_METHODS'] = ['evesrp.auth.testauth.TestAuth']
app.config['CORE_AUTH_PRIVATE_KEY'] = env.get('CORE_PRIVATE_KEY')
app.config['CORE_AUTH_PUBLIC_KEY'] = env.get('CORE_PUBLIC_KEY')
app.config['CORE_AUTH_IDENTIFIER'] = env.get('CORE_IDENTIFIER')
app.config['KILLMAIL_SOURCES'] = [EOWikiCREST]
if env.get('DEBUG') is not None:
app.debug = True
if __name__ == '__main__':
print("Creating databases...")
app.extensions['sqlalchemy'].db.create_all(app=app)
Use new AuthMethod configuration for Heroku
|
#!/usr/bin/env python
from evesrp import create_app
from evesrp.killmail import CRESTMail, ShipURLMixin
from evesrp.auth.testauth import TestAuth
from os import environ as env
from binascii import unhexlify
skel_url = 'https://wiki.eveonline.com/en/wiki/{name}'
class EOWikiCREST(CRESTMail, ShipURLMixin(skel_url)): pass
app = create_app()
app.config['SQLALCHEMY_DATABASE_URI'] = env['DATABASE_URL']
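# SECRET_KEY is supplied hex-encoded in the environment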
app.config['SECRET_KEY'] = unhexlify(env['SECRET_KEY'])
app.config['USER_AGENT_EMAIL'] = 'paxswill@paxswill.com'
app.config['AUTH_METHODS'] = [TestAuth()]
app.config['CORE_AUTH_PRIVATE_KEY'] = env.get('CORE_PRIVATE_KEY')
app.config['CORE_AUTH_PUBLIC_KEY'] = env.get('CORE_PUBLIC_KEY')
app.config['CORE_AUTH_IDENTIFIER'] = env.get('CORE_IDENTIFIER')
app.config['KILLMAIL_SOURCES'] = [EOWikiCREST]
if env.get('DEBUG') is not None:
app.debug = True
if __name__ == '__main__':
# So we get the database tables for these
from evesrp.auth.testauth import TestUser, TestGroup
print("Creating databases...")
app.extensions['sqlalchemy'].db.create_all(app=app)
|
<commit_before>#!/usr/bin/env python
from evesrp import create_app
from evesrp.killmail import CRESTMail, ShipURLMixin
import evesrp.auth.testauth
from os import environ as env
from binascii import unhexlify
skel_url = 'https://wiki.eveonline.com/en/wiki/{name}'
class EOWikiCREST(CRESTMail, ShipURLMixin(skel_url)): pass
app = create_app()
app.config['SQLALCHEMY_DATABASE_URI'] = env['DATABASE_URL']
app.config['SECRET_KEY'] = unhexlify(env['SECRET_KEY'])
app.config['USER_AGENT_EMAIL'] = 'paxswill@paxswill.com'
app.config['AUTH_METHODS'] = ['evesrp.auth.testauth.TestAuth']
app.config['CORE_AUTH_PRIVATE_KEY'] = env.get('CORE_PRIVATE_KEY')
app.config['CORE_AUTH_PUBLIC_KEY'] = env.get('CORE_PUBLIC_KEY')
app.config['CORE_AUTH_IDENTIFIER'] = env.get('CORE_IDENTIFIER')
app.config['KILLMAIL_SOURCES'] = [EOWikiCREST]
if env.get('DEBUG') is not None:
app.debug = True
if __name__ == '__main__':
print("Creating databases...")
app.extensions['sqlalchemy'].db.create_all(app=app)
<commit_msg>Use new AuthMethod configuration for Heroku<commit_after>
|
#!/usr/bin/env python
from evesrp import create_app
from evesrp.killmail import CRESTMail, ShipURLMixin
from evesrp.auth.testauth import TestAuth
from os import environ as env
from binascii import unhexlify
skel_url = 'https://wiki.eveonline.com/en/wiki/{name}'
class EOWikiCREST(CRESTMail, ShipURLMixin(skel_url)): pass
app = create_app()
app.config['SQLALCHEMY_DATABASE_URI'] = env['DATABASE_URL']
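# SECRET_KEY is supplied hex-encoded in the environment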
app.config['SECRET_KEY'] = unhexlify(env['SECRET_KEY'])
app.config['USER_AGENT_EMAIL'] = 'paxswill@paxswill.com'
app.config['AUTH_METHODS'] = [TestAuth()]
app.config['CORE_AUTH_PRIVATE_KEY'] = env.get('CORE_PRIVATE_KEY')
app.config['CORE_AUTH_PUBLIC_KEY'] = env.get('CORE_PUBLIC_KEY')
app.config['CORE_AUTH_IDENTIFIER'] = env.get('CORE_IDENTIFIER')
app.config['KILLMAIL_SOURCES'] = [EOWikiCREST]
if env.get('DEBUG') is not None:
app.debug = True
if __name__ == '__main__':
# So we get the database tables for these
from evesrp.auth.testauth import TestUser, TestGroup
print("Creating databases...")
app.extensions['sqlalchemy'].db.create_all(app=app)
|
#!/usr/bin/env python
from evesrp import create_app
from evesrp.killmail import CRESTMail, ShipURLMixin
import evesrp.auth.testauth
from os import environ as env
from binascii import unhexlify
skel_url = 'https://wiki.eveonline.com/en/wiki/{name}'
class EOWikiCREST(CRESTMail, ShipURLMixin(skel_url)): pass
app = create_app()
app.config['SQLALCHEMY_DATABASE_URI'] = env['DATABASE_URL']
app.config['SECRET_KEY'] = unhexlify(env['SECRET_KEY'])
app.config['USER_AGENT_EMAIL'] = 'paxswill@paxswill.com'
app.config['AUTH_METHODS'] = ['evesrp.auth.testauth.TestAuth']
app.config['CORE_AUTH_PRIVATE_KEY'] = env.get('CORE_PRIVATE_KEY')
app.config['CORE_AUTH_PUBLIC_KEY'] = env.get('CORE_PUBLIC_KEY')
app.config['CORE_AUTH_IDENTIFIER'] = env.get('CORE_IDENTIFIER')
app.config['KILLMAIL_SOURCES'] = [EOWikiCREST]
if env.get('DEBUG') is not None:
app.debug = True
if __name__ == '__main__':
print("Creating databases...")
app.extensions['sqlalchemy'].db.create_all(app=app)
Use new AuthMethod configuration for Heroku#!/usr/bin/env python
from evesrp import create_app
from evesrp.killmail import CRESTMail, ShipURLMixin
from evesrp.auth.testauth import TestAuth
from os import environ as env
from binascii import unhexlify
skel_url = 'https://wiki.eveonline.com/en/wiki/{name}'
class EOWikiCREST(CRESTMail, ShipURLMixin(skel_url)): pass
app = create_app()
app.config['SQLALCHEMY_DATABASE_URI'] = env['DATABASE_URL']
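# SECRET_KEY is supplied hex-encoded in the environment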
app.config['SECRET_KEY'] = unhexlify(env['SECRET_KEY'])
app.config['USER_AGENT_EMAIL'] = 'paxswill@paxswill.com'
app.config['AUTH_METHODS'] = [TestAuth()]
app.config['CORE_AUTH_PRIVATE_KEY'] = env.get('CORE_PRIVATE_KEY')
app.config['CORE_AUTH_PUBLIC_KEY'] = env.get('CORE_PUBLIC_KEY')
app.config['CORE_AUTH_IDENTIFIER'] = env.get('CORE_IDENTIFIER')
app.config['KILLMAIL_SOURCES'] = [EOWikiCREST]
if env.get('DEBUG') is not None:
app.debug = True
if __name__ == '__main__':
# So we get the database tables for these
from evesrp.auth.testauth import TestUser, TestGroup
print("Creating databases...")
app.extensions['sqlalchemy'].db.create_all(app=app)
|
<commit_before>#!/usr/bin/env python
from evesrp import create_app
from evesrp.killmail import CRESTMail, ShipURLMixin
import evesrp.auth.testauth
from os import environ as env
from binascii import unhexlify
skel_url = 'https://wiki.eveonline.com/en/wiki/{name}'
class EOWikiCREST(CRESTMail, ShipURLMixin(skel_url)): pass
app = create_app()
app.config['SQLALCHEMY_DATABASE_URI'] = env['DATABASE_URL']
app.config['SECRET_KEY'] = unhexlify(env['SECRET_KEY'])
app.config['USER_AGENT_EMAIL'] = 'paxswill@paxswill.com'
app.config['AUTH_METHODS'] = ['evesrp.auth.testauth.TestAuth']
app.config['CORE_AUTH_PRIVATE_KEY'] = env.get('CORE_PRIVATE_KEY')
app.config['CORE_AUTH_PUBLIC_KEY'] = env.get('CORE_PUBLIC_KEY')
app.config['CORE_AUTH_IDENTIFIER'] = env.get('CORE_IDENTIFIER')
app.config['KILLMAIL_SOURCES'] = [EOWikiCREST]
if env.get('DEBUG') is not None:
app.debug = True
if __name__ == '__main__':
print("Creating databases...")
app.extensions['sqlalchemy'].db.create_all(app=app)
<commit_msg>Use new AuthMethod configuration for Heroku<commit_after>#!/usr/bin/env python
from evesrp import create_app
from evesrp.killmail import CRESTMail, ShipURLMixin
from evesrp.auth.testauth import TestAuth
from os import environ as env
from binascii import unhexlify
skel_url = 'https://wiki.eveonline.com/en/wiki/{name}'
class EOWikiCREST(CRESTMail, ShipURLMixin(skel_url)): pass
app = create_app()
app.config['SQLALCHEMY_DATABASE_URI'] = env['DATABASE_URL']
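# SECRET_KEY is supplied hex-encoded in the environment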
app.config['SECRET_KEY'] = unhexlify(env['SECRET_KEY'])
app.config['USER_AGENT_EMAIL'] = 'paxswill@paxswill.com'
app.config['AUTH_METHODS'] = [TestAuth()]
app.config['CORE_AUTH_PRIVATE_KEY'] = env.get('CORE_PRIVATE_KEY')
app.config['CORE_AUTH_PUBLIC_KEY'] = env.get('CORE_PUBLIC_KEY')
app.config['CORE_AUTH_IDENTIFIER'] = env.get('CORE_IDENTIFIER')
app.config['KILLMAIL_SOURCES'] = [EOWikiCREST]
if env.get('DEBUG') is not None:
app.debug = True
if __name__ == '__main__':
# So we get the database tables for these
from evesrp.auth.testauth import TestUser, TestGroup
print("Creating databases...")
app.extensions['sqlalchemy'].db.create_all(app=app)
|
efbf92af38a6bcaa87327b9c5fc44680888a6793
|
src/diamond/handler/Handler.py
|
src/diamond/handler/Handler.py
|
# coding=utf-8
import logging
import threading
import traceback
class Handler(object):
"""
Handlers process metrics that are collected by Collectors.
"""
def __init__(self, config=None):
"""
Create a new instance of the Handler class
"""
# Initialize Log
self.log = logging.getLogger('diamond')
# Initialize Data
self.config = config
# Initialize Lock
self.lock = threading.Condition(threading.Lock())
def _process(self, metric):
"""
Decorator for processing handlers with a lock, catching exceptions
"""
try:
self.log.debug("Running Handler %s locked" % (self))
with self.lock:
self.process(metric)
except Exception:
self.log.error(traceback.format_exc())
finally:
self.log.debug("Unlocked Handler %s" % (self))
def process(self, metric):
"""
Process a metric
Should be overridden in subclasses
"""
raise NotImplementedError
def flush(self):
"""
Flush metrics
Optional: Should be overridden in subclasses
"""
pass
|
# coding=utf-8
import logging
import threading
import traceback
class Handler(object):
"""
Handlers process metrics that are collected by Collectors.
"""
def __init__(self, config=None):
"""
Create a new instance of the Handler class
"""
# Initialize Log
self.log = logging.getLogger('diamond')
# Initialize Data
self.config = config
# Initialize Lock
self.lock = threading.Condition(threading.Lock())
def _process(self, metric):
"""
Decorator for processing handlers with a lock, catching exceptions
"""
        self.log.debug("Running Handler %s locked" % (self))
        self.lock.acquire()
        # Python 2.4 has no unified try/except/finally, so nest the blocks
        # and release the lock exactly once, whether process() raised or not.
        try:
            try:
                self.process(metric)
            except Exception:
                self.log.error(traceback.format_exc())
        finally:
            self.lock.release()
            self.log.debug("Unlocked Handler %s" % (self))
def process(self, metric):
"""
Process a metric
Should be overridden in subclasses
"""
raise NotImplementedError
def flush(self):
"""
Flush metrics
Optional: Should be overridden in subclasses
"""
pass
|
Move away from using the with statement for 2.4 support
|
Move away from using the with statement for 2.4 support
|
Python
|
mit
|
Netuitive/Diamond,codepython/Diamond,Netuitive/netuitive-diamond,Ensighten/Diamond,Nihn/Diamond-1,Clever/Diamond,hamelg/Diamond,Ensighten/Diamond,szibis/Diamond,Precis/Diamond,Slach/Diamond,anandbhoraskar/Diamond,TAKEALOT/Diamond,eMerzh/Diamond-1,CYBERBUGJR/Diamond,jriguera/Diamond,Netuitive/Diamond,sebbrandt87/Diamond,dcsquared13/Diamond,tellapart/Diamond,stuartbfox/Diamond,hvnsweeting/Diamond,sebbrandt87/Diamond,thardie/Diamond,russss/Diamond,cannium/Diamond,saucelabs/Diamond,stuartbfox/Diamond,jaingaurav/Diamond,sebbrandt87/Diamond,EzyInsights/Diamond,disqus/Diamond,eMerzh/Diamond-1,zoidbergwill/Diamond,skbkontur/Diamond,TinLe/Diamond,socialwareinc/Diamond,thardie/Diamond,h00dy/Diamond,MediaMath/Diamond,Precis/Diamond,TinLe/Diamond,h00dy/Diamond,MichaelDoyle/Diamond,jaingaurav/Diamond,h00dy/Diamond,datafiniti/Diamond,CYBERBUGJR/Diamond,sebbrandt87/Diamond,metamx/Diamond,mfriedenhagen/Diamond,joel-airspring/Diamond,szibis/Diamond,Basis/Diamond,actmd/Diamond,anandbhoraskar/Diamond,TAKEALOT/Diamond,codepython/Diamond,timchenxiaoyu/Diamond,Slach/Diamond,rtoma/Diamond,datafiniti/Diamond,datafiniti/Diamond,rtoma/Diamond,russss/Diamond,krbaker/Diamond,cannium/Diamond,Netuitive/Diamond,krbaker/Diamond,Ensighten/Diamond,timchenxiaoyu/Diamond,Slach/Diamond,actmd/Diamond,acquia/Diamond,cannium/Diamond,hvnsweeting/Diamond,tusharmakkar08/Diamond,Clever/Diamond,jumping/Diamond,ceph/Diamond,ceph/Diamond,acquia/Diamond,zoidbergwill/Diamond,jriguera/Diamond,works-mobile/Diamond,MichaelDoyle/Diamond,Clever/Diamond,jriguera/Diamond,disqus/Diamond,ramjothikumar/Diamond,MediaMath/Diamond,Ormod/Diamond,bmhatfield/Diamond,ramjothikumar/Diamond,eMerzh/Diamond-1,stuartbfox/Diamond,EzyInsights/Diamond,Ormod/Diamond,socialwareinc/Diamond,tusharmakkar08/Diamond,eMerzh/Diamond-1,codepython/Diamond,tellapart/Diamond,signalfx/Diamond,Slach/Diamond,TAKEALOT/Diamond,thardie/Diamond,dcsquared13/Diamond,Nihn/Diamond-1,saucelabs/Diamond,mfriedenhagen/Diamond,tellapart/Diamond,jumping/Diamond,datafiniti/Diamond,Nihn/Diamond-1,Ormod/Diamond,python-diamond/Diamond,saucelabs/Diamond,ceph/Diamond,zoidbergwill/Diamond,acquia/Diamond,socialwareinc/Diamond,hvnsweeting/Diamond,ramjothikumar/Diamond,mzupan/Diamond,tuenti/Diamond,krbaker/Diamond,Basis/Diamond,mzupan/Diamond,timchenxiaoyu/Diamond,skbkontur/Diamond,MichaelDoyle/Diamond,codepython/Diamond,tellapart/Diamond,Basis/Diamond,saucelabs/Diamond,MichaelDoyle/Diamond,russss/Diamond,disqus/Diamond,tuenti/Diamond,metamx/Diamond,TinLe/Diamond,joel-airspring/Diamond,Clever/Diamond,tuenti/Diamond,signalfx/Diamond,Precis/Diamond,dcsquared13/Diamond,skbkontur/Diamond,krbaker/Diamond,CYBERBUGJR/Diamond,Ormod/Diamond,Netuitive/netuitive-diamond,anandbhoraskar/Diamond,h00dy/Diamond,Netuitive/netuitive-diamond,Ensighten/Diamond,Ssawa/Diamond,TAKEALOT/Diamond,tusharmakkar08/Diamond,jaingaurav/Diamond,CYBERBUGJR/Diamond,actmd/Diamond,mfriedenhagen/Diamond,skbkontur/Diamond,gg7/diamond,rtoma/Diamond,hamelg/Diamond,Ssawa/Diamond,signalfx/Diamond,stuartbfox/Diamond,timchenxiaoyu/Diamond,gg7/diamond,russss/Diamond,hvnsweeting/Diamond,thardie/Diamond,cannium/Diamond,python-diamond/Diamond,szibis/Diamond,EzyInsights/Diamond,metamx/Diamond,zoidbergwill/Diamond,jumping/Diamond,Nihn/Diamond-1,MediaMath/Diamond,ceph/Diamond,mfriedenhagen/Diamond,Ssawa/Diamond,signalfx/Diamond,bmhatfield/Diamond,tuenti/Diamond,Netuitive/Diamond,acquia/Diamond,hamelg/Diamond,Netuitive/netuitive-diamond,rtoma/Diamond,dcsquared13/Diamond,joel-airspring/Diamond,janisz/Diamond-1,Ssawa/Diamond,EzyInsights/Diamond,TinLe/Diamo
nd,bmhatfield/Diamond,python-diamond/Diamond,works-mobile/Diamond,actmd/Diamond,Precis/Diamond,gg7/diamond,ramjothikumar/Diamond,gg7/diamond,socialwareinc/Diamond,joel-airspring/Diamond,jriguera/Diamond,mzupan/Diamond,bmhatfield/Diamond,tusharmakkar08/Diamond,works-mobile/Diamond,szibis/Diamond,anandbhoraskar/Diamond,janisz/Diamond-1,Basis/Diamond,jaingaurav/Diamond,mzupan/Diamond,MediaMath/Diamond,janisz/Diamond-1,works-mobile/Diamond,janisz/Diamond-1,hamelg/Diamond,jumping/Diamond
|
# coding=utf-8
import logging
import threading
import traceback
class Handler(object):
"""
Handlers process metrics that are collected by Collectors.
"""
def __init__(self, config=None):
"""
Create a new instance of the Handler class
"""
# Initialize Log
self.log = logging.getLogger('diamond')
# Initialize Data
self.config = config
# Initialize Lock
self.lock = threading.Condition(threading.Lock())
def _process(self, metric):
"""
Decorator for processing handlers with a lock, catching exceptions
"""
try:
self.log.debug("Running Handler %s locked" % (self))
with self.lock:
self.process(metric)
except Exception:
self.log.error(traceback.format_exc())
finally:
self.log.debug("Unlocked Handler %s" % (self))
def process(self, metric):
"""
Process a metric
Should be overridden in subclasses
"""
raise NotImplementedError
def flush(self):
"""
Flush metrics
Optional: Should be overridden in subclasses
"""
pass
Move away from using the with statement for 2.4 support
|
# coding=utf-8
import logging
import threading
import traceback
class Handler(object):
"""
Handlers process metrics that are collected by Collectors.
"""
def __init__(self, config=None):
"""
Create a new instance of the Handler class
"""
# Initialize Log
self.log = logging.getLogger('diamond')
# Initialize Data
self.config = config
# Initialize Lock
self.lock = threading.Condition(threading.Lock())
def _process(self, metric):
"""
Decorator for processing handlers with a lock, catching exceptions
"""
try:
    self.log.debug("Running Handler %s locked" % (self))
    self.lock.acquire()
    try:
        self.process(metric)
    finally:
        # release exactly once; a second release in an outer
        # finally, as before, errors on the success path
        self.lock.release()
except Exception:
    self.log.error(traceback.format_exc())
self.log.debug("Unlocked Handler %s" % (self))
def process(self, metric):
"""
Process a metric
Should be overridden in subclasses
"""
raise NotImplementedError
def flush(self):
"""
Flush metrics
Optional: Should be overridden in subclasses
"""
pass
|
<commit_before># coding=utf-8
import logging
import threading
import traceback
class Handler(object):
"""
Handlers process metrics that are collected by Collectors.
"""
def __init__(self, config=None):
"""
Create a new instance of the Handler class
"""
# Initialize Log
self.log = logging.getLogger('diamond')
# Initialize Data
self.config = config
# Initialize Lock
self.lock = threading.Condition(threading.Lock())
def _process(self, metric):
"""
Decorator for processing handlers with a lock, catching exceptions
"""
try:
self.log.debug("Running Handler %s locked" % (self))
with self.lock:
self.process(metric)
except Exception:
self.log.error(traceback.format_exc())
finally:
self.log.debug("Unlocked Handler %s" % (self))
def process(self, metric):
"""
Process a metric
Should be overridden in subclasses
"""
raise NotImplementedError
def flush(self):
"""
Flush metrics
Optional: Should be overridden in subclasses
"""
pass
<commit_msg>Move away from using the with statement for 2.4 support<commit_after>
|
# coding=utf-8
import logging
import threading
import traceback
class Handler(object):
"""
Handlers process metrics that are collected by Collectors.
"""
def __init__(self, config=None):
"""
Create a new instance of the Handler class
"""
# Initialize Log
self.log = logging.getLogger('diamond')
# Initialize Data
self.config = config
# Initialize Lock
self.lock = threading.Condition(threading.Lock())
def _process(self, metric):
"""
Decorator for processing handlers with a lock, catching exceptions
"""
try:
    self.log.debug("Running Handler %s locked" % (self))
    self.lock.acquire()
    try:
        self.process(metric)
    finally:
        # release exactly once; a second release in an outer
        # finally, as before, errors on the success path
        self.lock.release()
except Exception:
    self.log.error(traceback.format_exc())
self.log.debug("Unlocked Handler %s" % (self))
def process(self, metric):
"""
Process a metric
Should be overridden in subclasses
"""
raise NotImplementedError
def flush(self):
"""
Flush metrics
Optional: Should be overridden in subclasses
"""
pass
|
# coding=utf-8
import logging
import threading
import traceback
class Handler(object):
"""
Handlers process metrics that are collected by Collectors.
"""
def __init__(self, config=None):
"""
Create a new instance of the Handler class
"""
# Initialize Log
self.log = logging.getLogger('diamond')
# Initialize Data
self.config = config
# Initialize Lock
self.lock = threading.Condition(threading.Lock())
def _process(self, metric):
"""
Decorator for processing handlers with a lock, catching exceptions
"""
try:
self.log.debug("Running Handler %s locked" % (self))
with self.lock:
self.process(metric)
except Exception:
self.log.error(traceback.format_exc())
finally:
self.log.debug("Unlocked Handler %s" % (self))
def process(self, metric):
"""
Process a metric
Should be overridden in subclasses
"""
raise NotImplementedError
def flush(self):
"""
Flush metrics
Optional: Should be overridden in subclasses
"""
pass
Move away from using the with statement for 2.4 support# coding=utf-8
import logging
import threading
import traceback
class Handler(object):
"""
Handlers process metrics that are collected by Collectors.
"""
def __init__(self, config=None):
"""
Create a new instance of the Handler class
"""
# Initialize Log
self.log = logging.getLogger('diamond')
# Initialize Data
self.config = config
# Initialize Lock
self.lock = threading.Condition(threading.Lock())
def _process(self, metric):
"""
Decorator for processing handlers with a lock, catching exceptions
"""
try:
    self.log.debug("Running Handler %s locked" % (self))
    self.lock.acquire()
    try:
        self.process(metric)
    finally:
        # release exactly once; a second release in an outer
        # finally, as before, errors on the success path
        self.lock.release()
except Exception:
    self.log.error(traceback.format_exc())
self.log.debug("Unlocked Handler %s" % (self))
def process(self, metric):
"""
Process a metric
Should be overridden in subclasses
"""
raise NotImplementedError
def flush(self):
"""
Flush metrics
Optional: Should be overridden in subclasses
"""
pass
|
<commit_before># coding=utf-8
import logging
import threading
import traceback
class Handler(object):
"""
Handlers process metrics that are collected by Collectors.
"""
def __init__(self, config=None):
"""
Create a new instance of the Handler class
"""
# Initialize Log
self.log = logging.getLogger('diamond')
# Initialize Data
self.config = config
# Initialize Lock
self.lock = threading.Condition(threading.Lock())
def _process(self, metric):
"""
Decorator for processing handlers with a lock, catching exceptions
"""
try:
self.log.debug("Running Handler %s locked" % (self))
with self.lock:
self.process(metric)
except Exception:
self.log.error(traceback.format_exc())
finally:
self.log.debug("Unlocked Handler %s" % (self))
def process(self, metric):
"""
Process a metric
Should be overridden in subclasses
"""
raise NotImplementedError
def flush(self):
"""
Flush metrics
Optional: Should be overridden in subclasses
"""
pass
<commit_msg>Move away from using the with statement for 2.4 support<commit_after># coding=utf-8
import logging
import threading
import traceback
class Handler(object):
"""
Handlers process metrics that are collected by Collectors.
"""
def __init__(self, config=None):
"""
Create a new instance of the Handler class
"""
# Initialize Log
self.log = logging.getLogger('diamond')
# Initialize Data
self.config = config
# Initialize Lock
self.lock = threading.Condition(threading.Lock())
def _process(self, metric):
"""
Decorator for processing handlers with a lock, catching exceptions
"""
try:
    self.log.debug("Running Handler %s locked" % (self))
    self.lock.acquire()
    try:
        self.process(metric)
    finally:
        # release exactly once; a second release in an outer
        # finally, as before, errors on the success path
        self.lock.release()
except Exception:
    self.log.error(traceback.format_exc())
self.log.debug("Unlocked Handler %s" % (self))
def process(self, metric):
"""
Process a metric
Should be overridden in subclasses
"""
raise NotImplementedError
def flush(self):
"""
Flush metrics
Optional: Should be overridden in subclasses
"""
pass
|
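The point of the restructure above is that the lock is released on exactly one code path. A minimal, self-contained sketch of the same discipline, using a hypothetical PrintHandler rather than any real Diamond class:

import logging
import threading
import traceback

logging.basicConfig(level=logging.DEBUG)

class PrintHandler(object):
    """Toy handler: prints each metric inside the critical section."""
    def __init__(self):
        self.log = logging.getLogger('diamond')
        self.lock = threading.Condition(threading.Lock())

    def _process(self, metric):
        try:
            self.lock.acquire()
            try:
                print(metric)          # stand-in for real processing
            finally:
                self.lock.release()    # the only release, on every path
        except Exception:
            self.log.error(traceback.format_exc())

PrintHandler()._process('servers.host1.cpu.total.idle 99.0')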
9c1ae3592c5a376433b4048442837cb40a7af552
|
Detect_Face_Sides.py
|
Detect_Face_Sides.py
|
#scan through each row
#when you hit a high intensity pixel store x
#when you hit a low intensity pixel for more than like 10 pixels set xfinal
#scan each row and store all of the size values in a matrix
#scan through the matrix, when the values remain smooth for a while
#then designate a point for the top right and top left at x init and xfinal
import numpy as np
def get_leftside_average(self):
    """Return the median column of the left-most bright pixel per row."""
    width = self.size[0]
    height = self.size[1]
    left_most_points = []
    for row in range(height):
        for column in range(width):
            # getpixel takes an (x, y) tuple: x is the column, y the row;
            # self.size above implies self is the grayscale image itself
            if self.getpixel((column, row)) > 200:
                left_most_points.append(column)
                break
    return np.median(left_most_points)
|
Add detection of left side of face
|
Add detection of left side of face
|
Python
|
mit
|
anassinator/codejam,anassinator/codejam-2014
|
Add detection of left side of face
|
#scan through each row
#when you hit a high intensity pixel store x
#when you hit a low intensity pixel for more than like 10 pixels set xfinal
#scan each row and store all of the size values in a matrix
#scan through the matrix, when the values remain smooth for a while
#then designate a point for the top right and top left at x init and xfinal
import numpy as np
def get_leftside_average(self):
    """Return the median column of the left-most bright pixel per row."""
    width = self.size[0]
    height = self.size[1]
    left_most_points = []
    for row in range(height):
        for column in range(width):
            # getpixel takes an (x, y) tuple: x is the column, y the row;
            # self.size above implies self is the grayscale image itself
            if self.getpixel((column, row)) > 200:
                left_most_points.append(column)
                break
    return np.median(left_most_points)
|
<commit_before><commit_msg>Add detection of left side of face<commit_after>
|
#scan through each row
#when you hit a high intensity pixel store x
#when you hit a low intensity pixel for more than like 10 pixels set xfinal
#scan each row and store all of the size values in a matrix
#scan through the matrix, when the values remain smooth for a while
#then designate a point for the top right and top left at x init and xfinal
import numpy as np
def get_leftside_average(self):
    """Return the median column of the left-most bright pixel per row."""
    width = self.size[0]
    height = self.size[1]
    left_most_points = []
    for row in range(height):
        for column in range(width):
            # getpixel takes an (x, y) tuple: x is the column, y the row;
            # self.size above implies self is the grayscale image itself
            if self.getpixel((column, row)) > 200:
                left_most_points.append(column)
                break
    return np.median(left_most_points)
|
Add detection of left side of face#scan through each row
#when you hit a high intensity pixel store x
#when you hit a low intensity pixel for more than like 10 pixels set xfinal
#scan each row and store all of the size values in a matrix
#scan through the matrix, when the values remain smooth for a while
#then designate a point for the top right and top left at x init and xfinal
import numpy as np
def get_leftside_average(self):
    """Return the median column of the left-most bright pixel per row."""
    width = self.size[0]
    height = self.size[1]
    left_most_points = []
    for row in range(height):
        for column in range(width):
            # getpixel takes an (x, y) tuple: x is the column, y the row;
            # self.size above implies self is the grayscale image itself
            if self.getpixel((column, row)) > 200:
                left_most_points.append(column)
                break
    return np.median(left_most_points)
|
<commit_before><commit_msg>Add detection of left side of face<commit_after>#scan through each row
#when you hit a high intensity pixel store x
#when you hit a low intensity pixel for more than like 10 pixels set xfinal
#scan each row and store all of the size values in a matrix
#scan through the matrix, when the values remain smooth for a while
#then designate a point for the top right and top left at x init and xfinal
import numpy as np
def get_leftside_average(self):
    """Return the median column of the left-most bright pixel per row."""
    width = self.size[0]
    height = self.size[1]
    left_most_points = []
    for row in range(height):
        for column in range(width):
            # getpixel takes an (x, y) tuple: x is the column, y the row;
            # self.size above implies self is the grayscale image itself
            if self.getpixel((column, row)) > 200:
                left_most_points.append(column)
                break
    return np.median(left_most_points)
|
|
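The nested loop above makes a Python-level call per pixel; once the image is a numpy array, the same left-edge scan vectorizes to one pass per row. A rough sketch, assuming a 2-D grayscale array (the threshold of 200 is carried over from the code above):

import numpy as np

def leftmost_bright_median(gray, threshold=200):
    """Median column of the first pixel above threshold in each row."""
    mask = gray > threshold                          # boolean image
    rows_with_hit = mask.any(axis=1)                 # skip all-dark rows
    first_cols = mask[rows_with_hit].argmax(axis=1)  # first True per row
    return np.median(first_cols)

# e.g. leftmost_bright_median(np.asarray(image.convert('L')))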
b07b12b2645e394c5a85a3585512acf7bdfb05b7
|
euler023.py
|
euler023.py
|
#!/usr/bin/python
from math import floor, sqrt
LIMIT = 28134
def isAbundant(n):
max_t = floor(sqrt(n)) + 1
sum_d = 1
for i in range(2, max_t):
if n % i == 0:
sum_d += i + n // i
if i == n / i:
sum_d -= i
return sum_d > n
""" Main """
abd_l = [0] * LIMIT
for i in range(12, LIMIT):
if isAbundant(i):
abd_l[i] = 1
sum_abd = 0
for i in range(1, LIMIT):
abd = 0
for j in range(12, (i // 2) + 1):
if abd_l[j] and abd_l[i - j]:
abd = 1
break
if not abd:
sum_abd += i
print(sum_abd)
|
Add solution for problem 23, it's quite fast
|
Add solution for problem 23, it's quite fast
|
Python
|
mit
|
cifvts/PyEuler
|
Add solution for problem 23, it's quite fast
|
#!/usr/bin/python
from math import floor, sqrt
LIMIT = 28134
def isAbundant(n):
max_t = floor(sqrt(n)) + 1
sum_d = 1
for i in range(2, max_t):
if n % i == 0:
sum_d += i + n // i
if i == n / i:
sum_d -= i
return sum_d > n
""" Main """
abd_l = [0] * LIMIT
for i in range(12, LIMIT):
if isAbundant(i):
abd_l[i] = 1
sum_abd = 0
for i in range(1, LIMIT):
abd = 0
for j in range(12, (i // 2) + 1):
if abd_l[j] and abd_l[i - j]:
abd = 1
break
if not abd:
sum_abd += i
print(sum_abd)
|
<commit_before><commit_msg>Add solution for problem 23, it's quite fast<commit_after>
|
#!/usr/bin/python
from math import floor, sqrt
LIMIT = 28134
def isAbundant(n):
max_t = floor(sqrt(n)) + 1
sum_d = 1
for i in range(2, max_t):
if n % i == 0:
sum_d += i + n // i
if i == n / i:
sum_d -= i
return sum_d > n
""" Main """
abd_l = [0] * LIMIT
for i in range(12, LIMIT):
if isAbundant(i):
abd_l[i] = 1
sum_abd = 0
for i in range(1, LIMIT):
abd = 0
for j in range(12, (i // 2) + 1):
if abd_l[j] and abd_l[i - j]:
abd = 1
break
if not abd:
sum_abd += i
print(sum_abd)
|
Add solution for problem 23, it's quite fast#!/usr/bin/python
from math import floor, sqrt
LIMIT = 28134
def isAbundant(n):
max_t = floor(sqrt(n)) + 1
sum_d = 1
for i in range(2, max_t):
if n % i == 0:
sum_d += i + n // i
if i == n / i:
sum_d -= i
return sum_d > n
""" Main """
abd_l = [0] * LIMIT
for i in range(12, LIMIT):
if isAbundant(i):
abd_l[i] = 1
sum_abd = 0
for i in range(1, LIMIT):
abd = 0
for j in range(12, (i // 2) + 1):
if abd_l[j] and abd_l[i - j]:
abd = 1
break
if not abd:
sum_abd += i
print(sum_abd)
|
<commit_before><commit_msg>Add solution for problem 23, it's quite fast<commit_after>#!/usr/bin/python
from math import floor, sqrt
LIMIT = 28134
def isAbundant(n):
max_t = floor(sqrt(n)) + 1
sum_d = 1
for i in range(2, max_t):
if n % i == 0:
sum_d += i + n // i
if i == n / i:
sum_d -= i
return sum_d > n
""" Main """
abd_l = [0] * LIMIT
for i in range(12, LIMIT):
if isAbundant(i):
abd_l[i] = 1
sum_abd = 0
for i in range(1, LIMIT):
abd = 0
for j in range(12, (i // 2) + 1):
if abd_l[j] and abd_l[i - j]:
abd = 1
break
if not abd:
sum_abd += i
print(sum_abd)
|
|
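The committed solution trial-divides every number up to the limit; sieving the proper-divisor sums is considerably faster and lets the loop use the conventional bound of 28123 (every integer above it is a sum of two abundant numbers). A sketch of that variant, not the committed code:

LIMIT = 28124

# sieve proper-divisor sums: add each divisor d to all its multiples
div_sum = [1] * LIMIT
div_sum[0] = div_sum[1] = 0
for d in range(2, LIMIT // 2 + 1):
    for m in range(2 * d, LIMIT, d):
        div_sum[m] += d

abundant = [n for n in range(12, LIMIT) if div_sum[n] > n]
is_sum = [False] * LIMIT
for i, a in enumerate(abundant):
    for b in abundant[i:]:
        if a + b >= LIMIT:
            break
        is_sum[a + b] = True

print(sum(n for n in range(1, LIMIT) if not is_sum[n]))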
5b3a001af9ff992d061f880d6350292250fd8687
|
apps/explorer/tests/test_views.py
|
apps/explorer/tests/test_views.py
|
from django.core.urlresolvers import reverse
from apps.core.factories import PIXELER_PASSWORD, PixelerFactory
from apps.core.tests import CoreFixturesTestCase
from apps.core.management.commands.make_development_fixtures import (
make_development_fixtures
)
class PixelSetListViewTestCase(CoreFixturesTestCase):
def setUp(self):
self.user = PixelerFactory(
is_active=True,
is_staff=True,
is_superuser=True,
)
self.client.login(
username=self.user.username,
password=PIXELER_PASSWORD,
)
self.url = reverse('explorer:pixelset_list')
def test_renders_pixelset_list_template(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'explorer/pixelset_list.html')
def test_renders_empty_message(self):
response = self.client.get(self.url)
expected = (
'<td colspan="8" class="empty">'
'No pixel set has been submitted yet'
'</td>'
)
self.assertContains(response, expected, html=True)
def test_renders_pixelset_list(self):
make_development_fixtures(n_pixel_sets=12)
response = self.client.get(self.url)
self.assertContains(
response,
'<tr class="pixelset">',
count=10
)
|
Add tests for the pixelset list view
|
Add tests for the pixelset list view
|
Python
|
bsd-3-clause
|
Candihub/pixel,Candihub/pixel,Candihub/pixel,Candihub/pixel,Candihub/pixel
|
Add tests for the pixelset list view
|
from django.core.urlresolvers import reverse
from apps.core.factories import PIXELER_PASSWORD, PixelerFactory
from apps.core.tests import CoreFixturesTestCase
from apps.core.management.commands.make_development_fixtures import (
make_development_fixtures
)
class PixelSetListViewTestCase(CoreFixturesTestCase):
def setUp(self):
self.user = PixelerFactory(
is_active=True,
is_staff=True,
is_superuser=True,
)
self.client.login(
username=self.user.username,
password=PIXELER_PASSWORD,
)
self.url = reverse('explorer:pixelset_list')
def test_renders_pixelset_list_template(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'explorer/pixelset_list.html')
def test_renders_empty_message(self):
response = self.client.get(self.url)
expected = (
'<td colspan="8" class="empty">'
'No pixel set has been submitted yet'
'</td>'
)
self.assertContains(response, expected, html=True)
def test_renders_pixelset_list(self):
make_development_fixtures(n_pixel_sets=12)
response = self.client.get(self.url)
self.assertContains(
response,
'<tr class="pixelset">',
count=10
)
|
<commit_before><commit_msg>Add tests for the pixelset list view<commit_after>
|
from django.core.urlresolvers import reverse
from apps.core.factories import PIXELER_PASSWORD, PixelerFactory
from apps.core.tests import CoreFixturesTestCase
from apps.core.management.commands.make_development_fixtures import (
make_development_fixtures
)
class PixelSetListViewTestCase(CoreFixturesTestCase):
def setUp(self):
self.user = PixelerFactory(
is_active=True,
is_staff=True,
is_superuser=True,
)
self.client.login(
username=self.user.username,
password=PIXELER_PASSWORD,
)
self.url = reverse('explorer:pixelset_list')
def test_renders_pixelset_list_template(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'explorer/pixelset_list.html')
def test_renders_empty_message(self):
response = self.client.get(self.url)
expected = (
'<td colspan="8" class="empty">'
'No pixel set has been submitted yet'
'</td>'
)
self.assertContains(response, expected, html=True)
def test_renders_pixelset_list(self):
make_development_fixtures(n_pixel_sets=12)
response = self.client.get(self.url)
self.assertContains(
response,
'<tr class="pixelset">',
count=10
)
|
Add tests for the pixelset list viewfrom django.core.urlresolvers import reverse
from apps.core.factories import PIXELER_PASSWORD, PixelerFactory
from apps.core.tests import CoreFixturesTestCase
from apps.core.management.commands.make_development_fixtures import (
make_development_fixtures
)
class PixelSetListViewTestCase(CoreFixturesTestCase):
def setUp(self):
self.user = PixelerFactory(
is_active=True,
is_staff=True,
is_superuser=True,
)
self.client.login(
username=self.user.username,
password=PIXELER_PASSWORD,
)
self.url = reverse('explorer:pixelset_list')
def test_renders_pixelset_list_template(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'explorer/pixelset_list.html')
def test_renders_empty_message(self):
response = self.client.get(self.url)
expected = (
'<td colspan="8" class="empty">'
'No pixel set has been submitted yet'
'</td>'
)
self.assertContains(response, expected, html=True)
def test_renders_pixelset_list(self):
make_development_fixtures(n_pixel_sets=12)
response = self.client.get(self.url)
self.assertContains(
response,
'<tr class="pixelset">',
count=10
)
|
<commit_before><commit_msg>Add tests for the pixelset list view<commit_after>from django.core.urlresolvers import reverse
from apps.core.factories import PIXELER_PASSWORD, PixelerFactory
from apps.core.tests import CoreFixturesTestCase
from apps.core.management.commands.make_development_fixtures import (
make_development_fixtures
)
class PixelSetListViewTestCase(CoreFixturesTestCase):
def setUp(self):
self.user = PixelerFactory(
is_active=True,
is_staff=True,
is_superuser=True,
)
self.client.login(
username=self.user.username,
password=PIXELER_PASSWORD,
)
self.url = reverse('explorer:pixelset_list')
def test_renders_pixelset_list_template(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'explorer/pixelset_list.html')
def test_renders_empty_message(self):
response = self.client.get(self.url)
expected = (
'<td colspan="8" class="empty">'
'No pixel set has been submitted yet'
'</td>'
)
self.assertContains(response, expected, html=True)
def test_renders_pixelset_list(self):
make_development_fixtures(n_pixel_sets=12)
response = self.client.get(self.url)
self.assertContains(
response,
'<tr class="pixelset">',
count=10
)
|
|
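The final assertion (12 fixtures in, 10 rows rendered) only holds if the view paginates. A sketch of the kind of class-based view that would satisfy the test; the model name and import path are assumptions, not taken from the Candihub/pixel source:

from django.views.generic import ListView

from apps.core.models import PixelSet  # hypothetical import path

class PixelSetListView(ListView):
    model = PixelSet
    template_name = 'explorer/pixelset_list.html'
    paginate_by = 10  # why count=10 even with 12 fixtures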
14c953533b135e014164277a0c33d8115866de6a
|
mochi/utils/path_helper.py
|
mochi/utils/path_helper.py
|
"""Helpers for working with paths an similar.
"""
import os
class TempDir(object):
"""Switch to a temporary directory and back.
This is a context manager.
usage:
# in orig_dir
with TempDir('/path/to/temp_dir'):
# in temp_dir
do_something_in_temp_dir()
# back in orig_dir
"""
# pylint: disable=too-few-public-methods
def __init__(self, temp_dir):
self.temp_dir = temp_dir
def __enter__(self):
# pylint: disable=attribute-defined-outside-init
self.orig_dir = os.getcwd()
os.chdir(self.temp_dir)
def __exit__(self, *args):
os.chdir(self.orig_dir)
|
Add context manager for temp dir.
|
Add context manager for temp dir.
|
Python
|
mit
|
slideclick/mochi,pya/mochi,i2y/mochi,slideclick/mochi,pya/mochi,i2y/mochi
|
Add context manager for temp dir.
|
"""Helpers for working with paths an similar.
"""
import os
class TempDir(object):
"""Switch to a temporary directory and back.
This is a context manager.
usage:
# in orig_dir
with TempDir('/path/to/temp_dir'):
# in temp_dir
do_something_in_temp_dir()
# back in orig_dir
"""
# pylint: disable=too-few-public-methods
def __init__(self, temp_dir):
self.temp_dir = temp_dir
def __enter__(self):
# pylint: disable=attribute-defined-outside-init
self.orig_dir = os.getcwd()
os.chdir(self.temp_dir)
def __exit__(self, *args):
os.chdir(self.orig_dir)
|
<commit_before><commit_msg>Add context manager for temp dir.<commit_after>
|
"""Helpers for working with paths an similar.
"""
import os
class TempDir(object):
"""Switch to a temporary directory and back.
This is a context manager.
usage:
# in orig_dir
with TempDir('/path/to/temp_dir'):
# in temp_dir
do_something_in_temp_dir()
# back in orig_dir
"""
# pylint: disable=too-few-public-methods
def __init__(self, temp_dir):
self.temp_dir = temp_dir
def __enter__(self):
# pylint: disable=attribute-defined-outside-init
self.orig_dir = os.getcwd()
os.chdir(self.temp_dir)
def __exit__(self, *args):
os.chdir(self.orig_dir)
|
Add context manager for temp dir."""Helpers for working with paths and similar.
"""
import os
class TempDir(object):
"""Switch to a temporary directory and back.
This is a context manager.
usage:
# in orig_dir
with TempDir('/path/to/temp_dir'):
# in temp_dir
do_something_in_temp_dir()
# back in orig_dir
"""
# pylint: disable=too-few-public-methods
def __init__(self, temp_dir):
self.temp_dir = temp_dir
def __enter__(self):
# pylint: disable=attribute-defined-outside-init
self.orig_dir = os.getcwd()
os.chdir(self.temp_dir)
def __exit__(self, *args):
os.chdir(self.orig_dir)
|
<commit_before><commit_msg>Add context manager for temp dir.<commit_after>"""Helpers for working with paths and similar.
"""
import os
class TempDir(object):
"""Switch to a temporary directory and back.
This is a context manager.
usage:
# in orig_dir
with TempDir('/path/to/temp_dir'):
# in temp_dir
do_something_in_temp_dir()
# back in orig_dir
"""
# pylint: disable=too-few-public-methods
def __init__(self, temp_dir):
self.temp_dir = temp_dir
def __enter__(self):
# pylint: disable=attribute-defined-outside-init
self.orig_dir = os.getcwd()
os.chdir(self.temp_dir)
def __exit__(self, *args):
os.chdir(self.orig_dir)
|
|
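In practice TempDir pairs with tempfile so the target directory actually exists; a small runnable example against the module above:

import os
import tempfile

from mochi.utils.path_helper import TempDir

tmp = tempfile.mkdtemp()
print(os.getcwd())        # original working directory
with TempDir(tmp):
    print(os.getcwd())    # inside tmp
print(os.getcwd())        # back where we started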
1a86f43428e9f05ade880b047a0b194ca776bfad
|
lib/util.py
|
lib/util.py
|
def op_cmp(op1, op2):
"""
Compare an operation by active and then opid.
"""
if op1['active'] != op2['active']:
return -1 if op1['active'] else 1
return cmp(op1['opid'], op2['opid'])
|
Add the classic missing file.
|
Add the classic missing file.
|
Python
|
apache-2.0
|
beaufour/mtop
|
Add the classic missing file.
|
def op_cmp(op1, op2):
"""
Compare an operation by active and then opid.
"""
if op1['active'] != op2['active']:
return -1 if op1['active'] else 1
return cmp(op1['opid'], op2['opid'])
|
<commit_before><commit_msg>Add the classic missing file.<commit_after>
|
def op_cmp(op1, op2):
"""
Compare an operation by active and then opid.
"""
if op1['active'] != op2['active']:
return -1 if op1['active'] else 1
return cmp(op1['opid'], op2['opid'])
|
Add the classic missing file.def op_cmp(op1, op2):
"""
Compare an operation by active and then opid.
"""
if op1['active'] != op2['active']:
return -1 if op1['active'] else 1
return cmp(op1['opid'], op2['opid'])
|
<commit_before><commit_msg>Add the classic missing file.<commit_after>def op_cmp(op1, op2):
"""
Compare an operation by active and then opid.
"""
if op1['active'] != op2['active']:
return -1 if op1['active'] else 1
return cmp(op1['opid'], op2['opid'])
|
|
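cmp() and comparator-style sorting are Python 2 only; under Python 3 the same ordering goes through functools.cmp_to_key, or more idiomatically a key function. A sketch of both, preserving active-first then opid-ascending:

from functools import cmp_to_key

def op_cmp(op1, op2):
    if op1['active'] != op2['active']:
        return -1 if op1['active'] else 1
    # (a > b) - (a < b) replaces cmp(a, b) in Python 3
    return (op1['opid'] > op2['opid']) - (op1['opid'] < op2['opid'])

ops = [{'active': False, 'opid': 2}, {'active': True, 'opid': 1}]
ops.sort(key=cmp_to_key(op_cmp))                         # comparator style
ops.sort(key=lambda op: (not op['active'], op['opid']))  # key style, same order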
ab0492c1c71bf9e77a0327efa2c8eca5be9ae728
|
tests/formatter/test_yamler.py
|
tests/formatter/test_yamler.py
|
import unittest, argparse
from echolalia.formatter.yamler import Formatter
class YamlerTestCase(unittest.TestCase):
def setUp(self):
self.parser = argparse.ArgumentParser()
self.data = [{'char': chr(i), 'order': i - 96} for i in xrange(97, 100)]
self.formatter = Formatter()
def test_add_args(self):
new_parser = self.formatter.add_args(self.parser)
self.assertEqual(new_parser, self.parser)
def test_marshall_header(self):
new_parser = self.formatter.add_args(self.parser)
args = new_parser.parse_args([])
result = self.formatter.marshall(args, self.data)
expect = "- char: a\n order: 1\n- char: b\n order: 2\n- char: c\n order: 3\n"
self.assertEqual(result, expect)
|
Add tests for formatter yaml
|
Add tests for formatter yaml
|
Python
|
mit
|
eiri/echolalia-prototype
|
Add tests for formatter yaml
|
import unittest, argparse
from echolalia.formatter.yamler import Formatter
class YamlerTestCase(unittest.TestCase):
def setUp(self):
self.parser = argparse.ArgumentParser()
self.data = [{'char': chr(i), 'order': i - 96} for i in xrange(97, 100)]
self.formatter = Formatter()
def test_add_args(self):
new_parser = self.formatter.add_args(self.parser)
self.assertEqual(new_parser, self.parser)
def test_marshall_header(self):
new_parser = self.formatter.add_args(self.parser)
args = new_parser.parse_args([])
result = self.formatter.marshall(args, self.data)
expect = "- char: a\n order: 1\n- char: b\n order: 2\n- char: c\n order: 3\n"
self.assertEqual(result, expect)
|
<commit_before><commit_msg>Add tests for formatter yaml<commit_after>
|
import unittest, argparse
from echolalia.formatter.yamler import Formatter
class YamlerTestCase(unittest.TestCase):
def setUp(self):
self.parser = argparse.ArgumentParser()
self.data = [{'char': chr(i), 'order': i - 96} for i in xrange(97, 100)]
self.formatter = Formatter()
def test_add_args(self):
new_parser = self.formatter.add_args(self.parser)
self.assertEqual(new_parser, self.parser)
def test_marshall_header(self):
new_parser = self.formatter.add_args(self.parser)
args = new_parser.parse_args([])
result = self.formatter.marshall(args, self.data)
expect = "- char: a\n order: 1\n- char: b\n order: 2\n- char: c\n order: 3\n"
self.assertEqual(result, expect)
|
Add tests for formatter yamlimport unittest, argparse
from echolalia.formatter.yamler import Formatter
class YamlerTestCase(unittest.TestCase):
def setUp(self):
self.parser = argparse.ArgumentParser()
self.data = [{'char': chr(i), 'order': i - 96} for i in xrange(97, 100)]
self.formatter = Formatter()
def test_add_args(self):
new_parser = self.formatter.add_args(self.parser)
self.assertEqual(new_parser, self.parser)
def test_marshall_header(self):
new_parser = self.formatter.add_args(self.parser)
args = new_parser.parse_args([])
result = self.formatter.marshall(args, self.data)
expect = "- char: a\n order: 1\n- char: b\n order: 2\n- char: c\n order: 3\n"
self.assertEqual(result, expect)
|
<commit_before><commit_msg>Add tests for formatter yaml<commit_after>import unittest, argparse
from echolalia.formatter.yamler import Formatter
class YamlerTestCase(unittest.TestCase):
def setUp(self):
self.parser = argparse.ArgumentParser()
self.data = [{'char': chr(i), 'order': i - 96} for i in xrange(97, 100)]
self.formatter = Formatter()
def test_add_args(self):
new_parser = self.formatter.add_args(self.parser)
self.assertEqual(new_parser, self.parser)
def test_marshall_header(self):
new_parser = self.formatter.add_args(self.parser)
args = new_parser.parse_args([])
result = self.formatter.marshall(args, self.data)
expect = "- char: a\n order: 1\n- char: b\n order: 2\n- char: c\n order: 3\n"
self.assertEqual(result, expect)
|
|
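The expected string in the test is exactly what PyYAML emits for a list of dicts in block style (two-space indent under each "- "), so a minimal Formatter that passes it could be as small as this (a sketch, not the echolalia source):

import yaml

class Formatter(object):
    def add_args(self, parser):
        return parser  # no yaml-specific CLI options

    def marshall(self, args, data):
        # block style ("- char: a\n  order: 1\n"), keys sorted by default
        return yaml.dump(data, default_flow_style=False)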
e271d355e057f72c85efe3f1a407c55e54faca74
|
migrations/008_convert_ga_buckets_to_utc.py
|
migrations/008_convert_ga_buckets_to_utc.py
|
"""
Convert Google Analytics buckets from Europe/London to UTC
"""
import base64  # used by fix_id's data_id helper below
import logging
import copy
from itertools import imap, ifilter
from datetime import timedelta
from backdrop.core.records import Record
from backdrop.core.timeutils import utc
log = logging.getLogger(__name__)
GA_BUCKETS_TO_MIGRATE = [
"carers_allowance_journey",
"deposit_foreign_marriage_journey",
"pay_foreign_marriage_certificates_journey",
"pay_legalisation_drop_off_journey",
"pay_legalisation_post_journey",
"pay_register_birth_abroad_journey",
"pay_register_death_abroad_journey",
"lpa_journey",
"licensing_journey",
]
def fix_timestamp(document):
"""Return a new dict with the _timestamp field fixed
"""
document = copy.deepcopy(document)
document['_timestamp'] = document['_timestamp'].replace(tzinfo=None) + \
timedelta(hours=1)
return document
def strip_internal_fields(document):
"""Return a new dict with all internal fields removed
Leaves _timestamp and _id in place
"""
def allowed_field(key):
return not key.startswith('_') or key in ['_timestamp', '_id']
return dict(
(key, value) for key, value in document.items() if allowed_field(key))
def is_bst(document):
"""Return true if a document looks like it's BST"""
return document['_timestamp'].hour == 23
def create_record(document):
"""Return a dict with internal fields applied"""
return Record(document).to_mongo()
def fix_id(document):
"""Return a new dict with the _id field recalculated"""
def _format(timestamp):
return utc(timestamp).strftime("%Y%m%d%H%M%S")  # utc() from timeutils; to_utc was undefined
def data_id(data_type, timestamp, period):
"""_id generation function copied from backdrop-ga-collector"""
return base64.urlsafe_b64encode("_".join(
[data_type, _format(timestamp), period]))
document = copy.deepcopy(document)
document['_id'] = data_id(
document['dataType'], document['_timestamp'],
document['timeSpan'])
return document
def up(db):
for name in GA_BUCKETS_TO_MIGRATE:
collection = db.get_repository(name)
documents = collection.find({})
documents = ifilter(is_bst, documents)
documents = imap(strip_internal_fields, documents)
documents = imap(fix_timestamp, documents)
documents = map(create_record, documents)
if len(documents) > 0:
log.info("Convert GA timezone: {0}".format(name))
map(collection.save, documents)
|
Add migration to convert GA timezone to UTC
|
Add migration to convert GA timezone to UTC
We are ignoring the timezone from GA requests so previous data needs to
be converted to account for this.
See https://www.pivotaltracker.com/story/show/62779118 for details.
|
Python
|
mit
|
alphagov/backdrop,alphagov/backdrop,alphagov/backdrop
|
Add migration to convert GA timezone to UTC
We are ignoring the timezone from GA requests so previous data needs to
be converted to account for this.
See https://www.pivotaltracker.com/story/show/62779118 for details.
|
"""
Convert Google Analytics buckets from Europe/London to UTC
"""
import base64  # used by fix_id's data_id helper below
import logging
import copy
from itertools import imap, ifilter
from datetime import timedelta
from backdrop.core.records import Record
from backdrop.core.timeutils import utc
log = logging.getLogger(__name__)
GA_BUCKETS_TO_MIGRATE = [
"carers_allowance_journey",
"deposit_foreign_marriage_journey",
"pay_foreign_marriage_certificates_journey",
"pay_legalisation_drop_off_journey",
"pay_legalisation_post_journey",
"pay_register_birth_abroad_journey",
"pay_register_death_abroad_journey",
"lpa_journey",
"licensing_journey",
]
def fix_timestamp(document):
"""Return a new dict with the _timestamp field fixed
"""
document = copy.deepcopy(document)
document['_timestamp'] = document['_timestamp'].replace(tzinfo=None) + \
timedelta(hours=1)
return document
def strip_internal_fields(document):
"""Return a new dict with all internal fields removed
Leaves _timestamp and _id in place
"""
def allowed_field(key):
return not key.startswith('_') or key in ['_timestamp', '_id']
return dict(
(key, value) for key, value in document.items() if allowed_field(key))
def is_bst(document):
"""Return true if a document looks like it's BST"""
return document['_timestamp'].hour == 23
def create_record(document):
"""Return a dict with internal fields applied"""
return Record(document).to_mongo()
def fix_id(document):
"""Return a new dict with the _id field recalculated"""
def _format(timestamp):
return utc(timestamp).strftime("%Y%m%d%H%M%S")  # utc() from timeutils; to_utc was undefined
def data_id(data_type, timestamp, period):
"""_id generation function copied from backdrop-ga-collector"""
return base64.urlsafe_b64encode("_".join(
[data_type, _format(timestamp), period]))
document = copy.deepcopy(document)
document['_id'] = data_id(
document['dataType'], document['_timestamp'],
document['timeSpan'])
return document
def up(db):
for name in GA_BUCKETS_TO_MIGRATE:
collection = db.get_repository(name)
documents = collection.find({})
documents = ifilter(is_bst, documents)
documents = imap(strip_internal_fields, documents)
documents = imap(fix_timestamp, documents)
documents = map(create_record, documents)
if len(documents) > 0:
log.info("Convert GA timezone: {0}".format(name))
map(collection.save, documents)
|
<commit_before><commit_msg>Add migration to convert GA timezone to UTC
We are ignoring the timezone from GA requests so previous data needs to
be converted to account for this.
See https://www.pivotaltracker.com/story/show/62779118 for details.<commit_after>
|
"""
Convert Google Analytics buckets from Europe/London to UTC
"""
import base64  # used by fix_id's data_id helper below
import logging
import copy
from itertools import imap, ifilter
from datetime import timedelta
from backdrop.core.records import Record
from backdrop.core.timeutils import utc
log = logging.getLogger(__name__)
GA_BUCKETS_TO_MIGRATE = [
"carers_allowance_journey",
"deposit_foreign_marriage_journey",
"pay_foreign_marriage_certificates_journey",
"pay_legalisation_drop_off_journey",
"pay_legalisation_post_journey",
"pay_register_birth_abroad_journey",
"pay_register_death_abroad_journey",
"lpa_journey",
"licensing_journey",
]
def fix_timestamp(document):
"""Return a new dict with the _timestamp field fixed
"""
document = copy.deepcopy(document)
document['_timestamp'] = document['_timestamp'].replace(tzinfo=None) + \
timedelta(hours=1)
return document
def strip_internal_fields(document):
"""Return a new dict with all internal fields removed
Leaves _timestamp and _id in place
"""
def allowed_field(key):
return not key.startswith('_') or key in ['_timestamp', '_id']
return dict(
(key, value) for key, value in document.items() if allowed_field(key))
def is_bst(document):
"""Return true if a document looks like it's BST"""
return document['_timestamp'].hour == 23
def create_record(document):
"""Return a dict with internal fields applied"""
return Record(document).to_mongo()
def fix_id(document):
"""Return a new dict with the _id field recalculated"""
def _format(timestamp):
return utc(timestamp).strftime("%Y%m%d%H%M%S")  # utc() from timeutils; to_utc was undefined
def data_id(data_type, timestamp, period):
"""_id generation function copied from backdrop-ga-collector"""
return base64.urlsafe_b64encode("_".join(
[data_type, _format(timestamp), period]))
document = copy.deepcopy(document)
document['_id'] = data_id(
document['dataType'], document['_timestamp'],
document['timeSpan'])
return document
def up(db):
for name in GA_BUCKETS_TO_MIGRATE:
collection = db.get_repository(name)
documents = collection.find({})
documents = ifilter(is_bst, documents)
documents = imap(strip_internal_fields, documents)
documents = imap(fix_timestamp, documents)
documents = map(create_record, documents)
if len(documents) > 0:
log.info("Convert GA timezone: {0}".format(name))
map(collection.save, documents)
|
Add migration to convert GA timezone to UTC
We are ignoring the timezone from GA requests so previous data needs to
be converted to account for this.
See https://www.pivotaltracker.com/story/show/62779118 for details."""
Convert Google Analytics buckets from Europe/London to UTC
"""
import base64  # used by fix_id's data_id helper below
import logging
import copy
from itertools import imap, ifilter
from datetime import timedelta
from backdrop.core.records import Record
from backdrop.core.timeutils import utc
log = logging.getLogger(__name__)
GA_BUCKETS_TO_MIGRATE = [
"carers_allowance_journey",
"deposit_foreign_marriage_journey",
"pay_foreign_marriage_certificates_journey",
"pay_legalisation_drop_off_journey",
"pay_legalisation_post_journey",
"pay_register_birth_abroad_journey",
"pay_register_death_abroad_journey",
"lpa_journey",
"licensing_journey",
]
def fix_timestamp(document):
"""Return a new dict with the _timestamp field fixed
"""
document = copy.deepcopy(document)
document['_timestamp'] = document['_timestamp'].replace(tzinfo=None) + \
timedelta(hours=1)
return document
def strip_internal_fields(document):
"""Return a new dict with all internal fields removed
Leaves _timestamp and _id in place
"""
def allowed_field(key):
return not key.startswith('_') or key in ['_timestamp', '_id']
return dict(
(key, value) for key, value in document.items() if allowed_field(key))
def is_bst(document):
"""Return true if a document looks like it's BST"""
return document['_timestamp'].hour == 23
def create_record(document):
"""Return a dict with internal fields applied"""
return Record(document).to_mongo()
def fix_id(document):
"""Return a new dict with the _id field recalculated"""
def _format(timestamp):
return utc(timestamp).strftime("%Y%m%d%H%M%S")  # utc() from timeutils; to_utc was undefined
def data_id(data_type, timestamp, period):
"""_id generation function copied from backdrop-ga-collector"""
return base64.urlsafe_b64encode("_".join(
[data_type, _format(timestamp), period]))
document = copy.deepcopy(document)
document['_id'] = data_id(
document['dataType'], document['_timestamp'],
document['timeSpan'])
return document
def up(db):
for name in GA_BUCKETS_TO_MIGRATE:
collection = db.get_repository(name)
documents = collection.find({})
documents = ifilter(is_bst, documents)
documents = imap(strip_internal_fields, documents)
documents = imap(fix_timestamp, documents)
documents = map(create_record, documents)
if len(documents) > 0:
log.info("Convert GA timezone: {0}".format(name))
map(collection.save, documents)
|
<commit_before><commit_msg>Add migration to convert GA timezone to UTC
We are ignoring the timezone from GA requests so previous data needs to
be converted to account for this.
See https://www.pivotaltracker.com/story/show/62779118 for details.<commit_after>"""
Convert Google Analytics buckets from Europe/London to UTC
"""
import base64  # used by fix_id's data_id helper below
import logging
import copy
from itertools import imap, ifilter
from datetime import timedelta
from backdrop.core.records import Record
from backdrop.core.timeutils import utc
log = logging.getLogger(__name__)
GA_BUCKETS_TO_MIGRATE = [
"carers_allowance_journey",
"deposit_foreign_marriage_journey",
"pay_foreign_marriage_certificates_journey",
"pay_legalisation_drop_off_journey",
"pay_legalisation_post_journey",
"pay_register_birth_abroad_journey",
"pay_register_death_abroad_journey",
"lpa_journey",
"licensing_journey",
]
def fix_timestamp(document):
"""Return a new dict with the _timestamp field fixed
"""
document = copy.deepcopy(document)
document['_timestamp'] = document['_timestamp'].replace(tzinfo=None) + \
timedelta(hours=1)
return document
def strip_internal_fields(document):
"""Return a new dict with all internal fields removed
Leaves _timestamp and _id in place
"""
def allowed_field(key):
return not key.startswith('_') or key in ['_timestamp', '_id']
return dict(
(key, value) for key, value in document.items() if allowed_field(key))
def is_bst(document):
"""Return true if a document looks like it's BST"""
return document['_timestamp'].hour == 23
def create_record(document):
"""Return a dict with internal fields applied"""
return Record(document).to_mongo()
def fix_id(document):
"""Return a new dict with the _id field recalculated"""
def _format(timestamp):
return utc(timestamp).strftime("%Y%m%d%H%M%S")  # utc() from timeutils; to_utc was undefined
def data_id(data_type, timestamp, period):
"""_id generation function copied from backdrop-ga-collector"""
return base64.urlsafe_b64encode("_".join(
[data_type, _format(timestamp), period]))
document = copy.deepcopy(document)
document['_id'] = data_id(
document['dataType'], document['_timestamp'],
document['timeSpan'])
return document
def up(db):
for name in GA_BUCKETS_TO_MIGRATE:
collection = db.get_repository(name)
documents = collection.find({})
documents = ifilter(is_bst, documents)
documents = imap(strip_internal_fields, documents)
documents = imap(fix_timestamp, documents)
documents = map(create_record, documents)
if len(documents) > 0:
log.info("Convert GA timezone: {0}".format(name))
map(collection.save, documents)
|
|
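The is_bst/fix_timestamp pair encodes the core assumption: a Europe/London midnight stored naively reads back as 23:00 UTC the previous day, so adding an hour restores the intended daily bucket. A quick standalone check of that arithmetic, reusing the record's own function:

import copy
from datetime import datetime, timedelta

def fix_timestamp(document):
    document = copy.deepcopy(document)
    document['_timestamp'] = document['_timestamp'].replace(tzinfo=None) + \
        timedelta(hours=1)
    return document

doc = {'_timestamp': datetime(2013, 6, 30, 23, 0)}  # hour == 23, i.e. looks BST
print(fix_timestamp(doc)['_timestamp'])             # 2013-07-01 00:00:00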
94e906d938cb860ff8744326ade4d65297805cc2
|
deleteAllJobs.py
|
deleteAllJobs.py
|
#!/usr/bin/env python
############################################################################
# #
# Copyright 2014 Prelert Ltd #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
############################################################################
'''
Delete all the jobs in the Engine API.
Request a list of jobs configured in the API then
delete them one at a time using the job id.
Be careful with this one you can't change your mind afterwards.
'''
import argparse
import sys
import json
import logging
import time
from prelert.engineApiClient import EngineApiClient
# defaults
HOST = 'localhost'
PORT = 8080
BASE_URL = 'engine/v0.3'
def parseArguments():
parser = argparse.ArgumentParser()
parser.add_argument("--host", help="The Prelert Engine API host, defaults to "
+ HOST, default=HOST)
parser.add_argument("--port", help="The Prelert Engine API port, defaults to "
+ str(PORT), default=PORT)
return parser.parse_args()
def main():
args = parseArguments()
host = args.host
port = args.port
base_url = BASE_URL
# Create the REST API client
engine_client = EngineApiClient(host, base_url, port)
while True:
(http_status_code, response) = engine_client.getJobs()
if http_status_code != 200:
print (http_status_code, json.dumps(response))
break
jobs = response['documents']
if (len(jobs) == 0):
print "Deleted all jobs"
break
print "Deleting %d jobs" % (len(jobs)),
for job in jobs:
(http_status_code, response) = engine_client.delete(job['id'])
if http_status_code != 200:
print (http_status_code, json.dumps(response))
else:
sys.stdout.write('.')
sys.stdout.flush()
print
if __name__ == "__main__":
main()
|
Add delete all jobs script
|
Add delete all jobs script
|
Python
|
apache-2.0
|
prelert/engine-python,pemontto/engine-python
|
Add delete all jobs script
|
#!/usr/bin/env python
############################################################################
# #
# Copyright 2014 Prelert Ltd #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
############################################################################
'''
Delete all the jobs in the Engine API.
Request a list of jobs configured in the API then
delete them one at a time using the job id.
Be careful with this one you can't change your mind afterwards.
'''
import argparse
import sys
import json
import logging
import time
from prelert.engineApiClient import EngineApiClient
# defaults
HOST = 'localhost'
PORT = 8080
BASE_URL = 'engine/v0.3'
def parseArguments():
parser = argparse.ArgumentParser()
parser.add_argument("--host", help="The Prelert Engine API host, defaults to "
+ HOST, default=HOST)
parser.add_argument("--port", help="The Prelert Engine API port, defaults to "
+ str(PORT), default=PORT)
return parser.parse_args()
def main():
args = parseArguments()
host = args.host
port = args.port
base_url = BASE_URL
# Create the REST API client
engine_client = EngineApiClient(host, base_url, port)
while True:
(http_status_code, response) = engine_client.getJobs()
if http_status_code != 200:
print (http_status_code, json.dumps(response))
break
jobs = response['documents']
if (len(jobs) == 0):
print "Deleted all jobs"
break
print "Deleting %d jobs" % (len(jobs)),
for job in jobs:
(http_status_code, response) = engine_client.delete(job['id'])
if http_status_code != 200:
print (http_status_code, json.dumps(response))
else:
sys.stdout.write('.')
sys.stdout.flush()
print
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add delete all jobs script<commit_after>
|
#!/usr/bin/env python
############################################################################
# #
# Copyright 2014 Prelert Ltd #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
############################################################################
'''
Delete all the jobs in the Engine API.
Request a list of jobs configured in the API then
delete them one at a time using the job id.
Be careful with this one you can't change your mind afterwards.
'''
import argparse
import sys
import json
import logging
import time
from prelert.engineApiClient import EngineApiClient
# defaults
HOST = 'localhost'
PORT = 8080
BASE_URL = 'engine/v0.3'
def parseArguments():
parser = argparse.ArgumentParser()
parser.add_argument("--host", help="The Prelert Engine API host, defaults to "
+ HOST, default=HOST)
parser.add_argument("--port", help="The Prelert Engine API port, defaults to "
+ str(PORT), default=PORT)
return parser.parse_args()
def main():
args = parseArguments()
host = args.host
port = args.port
base_url = BASE_URL
# Create the REST API client
engine_client = EngineApiClient(host, base_url, port)
while True:
(http_status_code, response) = engine_client.getJobs()
if http_status_code != 200:
print (http_status_code, json.dumps(response))
break
jobs = response['documents']
if (len(jobs) == 0):
print "Deleted all jobs"
break
print "Deleting %d jobs" % (len(jobs)),
for job in jobs:
(http_status_code, response) = engine_client.delete(job['id'])
if http_status_code != 200:
print (http_status_code, json.dumps(response))
else:
sys.stdout.write('.')
sys.stdout.flush()
print
if __name__ == "__main__":
main()
|
Add delete all jobs script#!/usr/bin/env python
############################################################################
# #
# Copyright 2014 Prelert Ltd #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
############################################################################
'''
Delete all the jobs in the Engine API.
Request a list of jobs configured in the API then
delete them one at a time using the job id.
Be careful with this one you can't change your mind afterwards.
'''
import argparse
import sys
import json
import logging
import time
from prelert.engineApiClient import EngineApiClient
# defaults
HOST = 'localhost'
PORT = 8080
BASE_URL = 'engine/v0.3'
def parseArguments():
parser = argparse.ArgumentParser()
parser.add_argument("--host", help="The Prelert Engine API host, defaults to "
+ HOST, default=HOST)
parser.add_argument("--port", help="The Prelert Engine API port, defaults to "
+ str(PORT), default=PORT)
return parser.parse_args()
def main():
args = parseArguments()
host = args.host
port = args.port
base_url = BASE_URL
# Create the REST API client
engine_client = EngineApiClient(host, base_url, port)
while True:
(http_status_code, response) = engine_client.getJobs()
if http_status_code != 200:
print (http_status_code, json.dumps(response))
break
jobs = response['documents']
if (len(jobs) == 0):
print "Deleted all jobs"
break
print "Deleting %d jobs" % (len(jobs)),
for job in jobs:
(http_status_code, response) = engine_client.delete(job['id'])
if http_status_code != 200:
print (http_status_code, json.dumps(response))
else:
sys.stdout.write('.')
sys.stdout.flush()
print
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add delete all jobs script<commit_after>#!/usr/bin/env python
############################################################################
# #
# Copyright 2014 Prelert Ltd #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
############################################################################
'''
Delete all the jobs in the Engine API.
Request a list of jobs configured in the API then
delete them one at a time using the job id.
Be careful with this one you can't change your mind afterwards.
'''
import argparse
import sys
import json
import logging
import time
from prelert.engineApiClient import EngineApiClient
# defaults
HOST = 'localhost'
PORT = 8080
BASE_URL = 'engine/v0.3'
def parseArguments():
parser = argparse.ArgumentParser()
parser.add_argument("--host", help="The Prelert Engine API host, defaults to "
+ HOST, default=HOST)
parser.add_argument("--port", help="The Prelert Engine API port, defaults to "
+ str(PORT), default=PORT)
return parser.parse_args()
def main():
args = parseArguments()
host = args.host
port = args.port
base_url = BASE_URL
# Create the REST API client
engine_client = EngineApiClient(host, base_url, port)
while True:
(http_status_code, response) = engine_client.getJobs()
if http_status_code != 200:
print (http_status_code, json.dumps(response))
break
jobs = response['documents']
if (len(jobs) == 0):
print "Deleted all jobs"
break
print "Deleting %d jobs" % (len(jobs)),
for job in jobs:
(http_status_code, response) = engine_client.delete(job['id'])
if http_status_code != 200:
print (http_status_code, json.dumps(response))
else:
sys.stdout.write('.')
sys.stdout.flush()
print
if __name__ == "__main__":
main()
|
|
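The script is Python 2 (bare print statements); the delete loop ports to Python 3 as below, with the EngineApiClient methods taken as-is from the script and everything else unchanged:

import json
import sys

def delete_all(engine_client):
    """Same loop as main(), Python 3 syntax."""
    while True:
        status, response = engine_client.getJobs()
        if status != 200:
            print(status, json.dumps(response))
            break
        jobs = response['documents']
        if not jobs:
            print("Deleted all jobs")
            break
        print("Deleting %d jobs" % len(jobs), end='')
        for job in jobs:
            status, response = engine_client.delete(job['id'])
            if status != 200:
                print(status, json.dumps(response))
            else:
                sys.stdout.write('.')
                sys.stdout.flush()
        print()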
4d50bb1140d8108c642ed89534bad1cb568123bb
|
examples/subprocess_evaluator.py
|
examples/subprocess_evaluator.py
|
import argparse
import os
from subprocess import PIPE, Popen
# insert your client_token into sigopt_creds.py
# otherwise you'll see "This endpoint requires an authenticated user" errors
from sigopt_creds import client_token
from sigopt.interface import Connection
class SubProcessEvaluator(object):
def __init__(self, filename):
assert os.path.isfile(filename)
self.filename = filename
# Take a suggestion from sigopt and evaluate your function
# Sends command line arguments to your executable file with the same names as the
# parameters of your experiment. Expected output is one line containing a float that
# is your function evaluated at the suggested assignments.
# For example, if your filename is test and you have one double parameter with suggested
# value 11.05, this script will run
# ./test --x=11.05
def evaluate_metric(self, assignments):
arguments = [
'--{}={}'.format(param_name, assignment)
for param_name, assignment
in assignments.to_json().iteritems()
]
process = Popen(['./{}'.format(self.filename)] + arguments, stdout=PIPE)
(stdoutdata,stderrdata) = process.communicate()
return float(stdoutdata.strip())
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--filename', required=True, help="The name of the executable file containing the "
"function whose parameters you would like to optimize. Should accept parameters as command line argument and "
"output only the evaluated metric at the suggested point.")
parser.add_argument('--experiment_id', required=True, help="The parameters of this experiment should be the "
"same type and name of the command line arguments to your executable file.")
the_args = parser.parse_args()
connection = Connection(client_token=client_token)
experiment = connection.experiments(the_args.experiment_id).fetch()
connection.experiments(the_args.experiment_id).suggestions().delete(state="open")
evaluator = SubProcessEvaluator(the_args.filename)
# In a loop: receive a suggestion, evaluate the metric, report an observation
while True:
suggestion = connection.experiments(experiment.id).suggestions().create()
print('Evaluating at suggested assignments: {0}'.format(suggestion.assignments))
value = evaluator.evaluate_metric(suggestion.assignments)
print('Reporting observation of value: {0}'.format(value))
connection.experiments(experiment.id).observations().create(
suggestion=suggestion.id,
value=value,
)
|
Create an example script that opens executable in subprocess and evaluates
|
Create an example script that opens executable in subprocess and evaluates
|
Python
|
mit
|
sigopt/sigopt-python,sigopt/sigopt-python
|
Create an example script that opens executable in subprocess and evaluates
|
import argparse
import os
from subprocess import PIPE, Popen
# insert your client_token into sigopt_creds.py
# otherwise you'll see "This endpoint requires an authenticated user" errors
from sigopt_creds import client_token
from sigopt.interface import Connection
class SubProcessEvaluator(object):
def __init__(self, filename):
assert os.path.isfile(filename)
self.filename = filename
# Take a suggestion from sigopt and evaluate your function
# Sends command line arguments to your executable file with the same names as the
# parameters of your experiment. Expected output is one line containing a float that
# is your function evaluated at the suggested assignments.
# For example, if your filename is test and you have one double parameter with suggested
# value 11.05, this script will run
# ./test --x=11.05
def evaluate_metric(self, assignments):
arguments = [
'--{}={}'.format(param_name, assignment)
for param_name, assignment
            in assignments.to_json().items()
]
process = Popen(['./{}'.format(self.filename)] + arguments, stdout=PIPE)
(stdoutdata,stderrdata) = process.communicate()
return float(stdoutdata.strip())
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--filename', required=True, help="The name of the executable file containing the "
"function whose parameters you would like to optimize. Should accept parameters as command line argument and "
"output only the evaluated metric at the suggested point.")
parser.add_argument('--experiment_id', required=True, help="The parameters of this experiment should be the "
"same type and name of the command line arguments to your executable file.")
the_args = parser.parse_args()
connection = Connection(client_token=client_token)
experiment = connection.experiments(the_args.experiment_id).fetch()
connection.experiments(the_args.experiment_id).suggestions().delete(state="open")
evaluator = SubProcessEvaluator(the_args.filename)
# In a loop: receive a suggestion, evaluate the metric, report an observation
while True:
suggestion = connection.experiments(experiment.id).suggestions().create()
print('Evaluating at suggested assignments: {0}'.format(suggestion.assignments))
value = evaluator.evaluate_metric(suggestion.assignments)
print('Reporting observation of value: {0}'.format(value))
connection.experiments(experiment.id).observations().create(
suggestion=suggestion.id,
value=value,
)
|
<commit_before><commit_msg>Create an example script that opens executable in subprocess and evaluates<commit_after>
|
import argparse
import os
from subprocess import PIPE, Popen
# insert your client_token into sigopt_creds.py
# otherwise you'll see "This endpoint requires an authenticated user" errors
from sigopt_creds import client_token
from sigopt.interface import Connection
class SubProcessEvaluator(object):
def __init__(self, filename):
assert os.path.isfile(filename)
self.filename = filename
# Take a suggestion from sigopt and evaluate your function
# Sends command line arguments to your executable file with the same names as the
# parameters of your experiment. Expected output is one line containing a float that
# is your function evaluated at the suggested assignments.
# For example, if your filename is test and you have one double parameter with suggested
# value 11.05, this script will run
# ./test --x=11.05
def evaluate_metric(self, assignments):
arguments = [
'--{}={}'.format(param_name, assignment)
for param_name, assignment
            in assignments.to_json().items()
]
process = Popen(['./{}'.format(self.filename)] + arguments, stdout=PIPE)
(stdoutdata,stderrdata) = process.communicate()
return float(stdoutdata.strip())
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--filename', required=True, help="The name of the executable file containing the "
"function whose parameters you would like to optimize. Should accept parameters as command line argument and "
"output only the evaluated metric at the suggested point.")
parser.add_argument('--experiment_id', required=True, help="The parameters of this experiment should be the "
"same type and name of the command line arguments to your executable file.")
the_args = parser.parse_args()
connection = Connection(client_token=client_token)
experiment = connection.experiments(the_args.experiment_id).fetch()
connection.experiments(the_args.experiment_id).suggestions().delete(state="open")
evaluator = SubProcessEvaluator(the_args.filename)
# In a loop: receive a suggestion, evaluate the metric, report an observation
while True:
suggestion = connection.experiments(experiment.id).suggestions().create()
print('Evaluating at suggested assignments: {0}'.format(suggestion.assignments))
value = evaluator.evaluate_metric(suggestion.assignments)
print('Reporting observation of value: {0}'.format(value))
connection.experiments(experiment.id).observations().create(
suggestion=suggestion.id,
value=value,
)
|
Create an example script that opens executable in subprocess and evaluates
import argparse
import os
from subprocess import PIPE, Popen
# insert your client_token into sigopt_creds.py
# otherwise you'll see "This endpoint requires an authenticated user" errors
from sigopt_creds import client_token
from sigopt.interface import Connection
class SubProcessEvaluator(object):
def __init__(self, filename):
assert os.path.isfile(filename)
self.filename = filename
# Take a suggestion from sigopt and evaluate your function
# Sends command line arguments to your executable file with the same names as the
# parameters of your experiment. Expected output is one line containing a float that
# is your function evaluated at the suggested assignments.
# For example, if your filename is test and you have one double parameter with suggested
# value 11.05, this script will run
# ./test --x=11.05
def evaluate_metric(self, assignments):
arguments = [
'--{}={}'.format(param_name, assignment)
for param_name, assignment
            in assignments.to_json().items()
]
process = Popen(['./{}'.format(self.filename)] + arguments, stdout=PIPE)
(stdoutdata,stderrdata) = process.communicate()
return float(stdoutdata.strip())
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--filename', required=True, help="The name of the executable file containing the "
"function whose parameters you would like to optimize. Should accept parameters as command line argument and "
"output only the evaluated metric at the suggested point.")
parser.add_argument('--experiment_id', required=True, help="The parameters of this experiment should be the "
"same type and name of the command line arguments to your executable file.")
the_args = parser.parse_args()
connection = Connection(client_token=client_token)
experiment = connection.experiments(the_args.experiment_id).fetch()
connection.experiments(the_args.experiment_id).suggestions().delete(state="open")
evaluator = SubProcessEvaluator(the_args.filename)
# In a loop: receive a suggestion, evaluate the metric, report an observation
while True:
suggestion = connection.experiments(experiment.id).suggestions().create()
print('Evaluating at suggested assignments: {0}'.format(suggestion.assignments))
value = evaluator.evaluate_metric(suggestion.assignments)
print('Reporting observation of value: {0}'.format(value))
connection.experiments(experiment.id).observations().create(
suggestion=suggestion.id,
value=value,
)
|
<commit_before><commit_msg>Create an example script that opens executable in subprocess and evaluates<commit_after>import argparse
import os
from subprocess import PIPE, Popen
# insert your client_token into sigopt_creds.py
# otherwise you'll see "This endpoint requires an authenticated user" errors
from sigopt_creds import client_token
from sigopt.interface import Connection
class SubProcessEvaluator(object):
def __init__(self, filename):
assert os.path.isfile(filename)
self.filename = filename
# Take a suggestion from sigopt and evaluate your function
# Sends command line arguments to your executable file with the same names as the
# parameters of your experiment. Expected output is one line containing a float that
# is your function evaluated at the suggested assignments.
# For example, if your filename is test and you have one double parameter with suggested
# value 11.05, this script will run
# ./test --x=11.05
def evaluate_metric(self, assignments):
arguments = [
'--{}={}'.format(param_name, assignment)
for param_name, assignment
            in assignments.to_json().items()
]
process = Popen(['./{}'.format(self.filename)] + arguments, stdout=PIPE)
(stdoutdata,stderrdata) = process.communicate()
return float(stdoutdata.strip())
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--filename', required=True, help="The name of the executable file containing the "
"function whose parameters you would like to optimize. Should accept parameters as command line argument and "
"output only the evaluated metric at the suggested point.")
parser.add_argument('--experiment_id', required=True, help="The parameters of this experiment should be the "
"same type and name of the command line arguments to your executable file.")
the_args = parser.parse_args()
connection = Connection(client_token=client_token)
experiment = connection.experiments(the_args.experiment_id).fetch()
connection.experiments(the_args.experiment_id).suggestions().delete(state="open")
evaluator = SubProcessEvaluator(the_args.filename)
# In a loop: receive a suggestion, evaluate the metric, report an observation
while True:
suggestion = connection.experiments(experiment.id).suggestions().create()
print('Evaluating at suggested assignments: {0}'.format(suggestion.assignments))
value = evaluator.evaluate_metric(suggestion.assignments)
print('Reporting observation of value: {0}'.format(value))
connection.experiments(experiment.id).observations().create(
suggestion=suggestion.id,
value=value,
)
|
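A note for readers trying this driver out: it assumes an executable that accepts each experiment parameter as a long command-line flag and prints a single float. A minimal compatible objective might look like the sketch below. The flag name x and the quadratic metric are illustrative assumptions, not part of the SigOpt example itself, and the file would need a shebang plus the executable bit set, since the driver invokes it as ./filename.
# test.py -- hypothetical objective for the subprocess driver above.
# Assumes one experiment parameter named 'x'; the metric itself is made up.
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--x', type=float, required=True)
args = parser.parse_args()
# Print exactly one line containing the metric value, as the driver expects.
print(-(args.x - 3.0) ** 2)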
|
5246ec4a98bca8c087d820b3ad10525ca9d6e1f2
|
wagtailnews/blocks.py
|
wagtailnews/blocks.py
|
from __future__ import absolute_import, unicode_literals
from django.utils.functional import cached_property
from wagtail.wagtailcore.blocks import ChooserBlock
class NewsChooserBlock(ChooserBlock):
def __init__(self, target_model, **kwargs):
super(NewsChooserBlock, self).__init__(**kwargs)
self.target_model = target_model
@cached_property
def widget(self):
from wagtailnews.widgets import AdminNewsChooser
return AdminNewsChooser(self.target_model)
class Meta:
icon = "grip"
|
Add NewsChooserBlock for choosing news items in StreamFields
|
Add NewsChooserBlock for choosing news items in StreamFields
|
Python
|
bsd-2-clause
|
takeflight/wagtailnews,takeflight/wagtailnews,takeflight/wagtailnews,takeflight/wagtailnews
|
Add NewsChooserBlock for choosing news items in StreamFields
|
from __future__ import absolute_import, unicode_literals
from django.utils.functional import cached_property
from wagtail.wagtailcore.blocks import ChooserBlock
class NewsChooserBlock(ChooserBlock):
def __init__(self, target_model, **kwargs):
super(NewsChooserBlock, self).__init__(**kwargs)
self.target_model = target_model
@cached_property
def widget(self):
from wagtailnews.widgets import AdminNewsChooser
return AdminNewsChooser(self.target_model)
class Meta:
icon = "grip"
|
<commit_before><commit_msg>Add NewsChooserBlock for choosing news items in StreamFields<commit_after>
|
from __future__ import absolute_import, unicode_literals
from django.utils.functional import cached_property
from wagtail.wagtailcore.blocks import ChooserBlock
class NewsChooserBlock(ChooserBlock):
def __init__(self, target_model, **kwargs):
super(NewsChooserBlock, self).__init__(**kwargs)
self.target_model = target_model
@cached_property
def widget(self):
from wagtailnews.widgets import AdminNewsChooser
return AdminNewsChooser(self.target_model)
class Meta:
icon = "grip"
|
Add NewsChooserBlock for choosing news items in StreamFields
from __future__ import absolute_import, unicode_literals
from django.utils.functional import cached_property
from wagtail.wagtailcore.blocks import ChooserBlock
class NewsChooserBlock(ChooserBlock):
def __init__(self, target_model, **kwargs):
super(NewsChooserBlock, self).__init__(**kwargs)
self.target_model = target_model
@cached_property
def widget(self):
from wagtailnews.widgets import AdminNewsChooser
return AdminNewsChooser(self.target_model)
class Meta:
icon = "grip"
|
<commit_before><commit_msg>Add NewsChooserBlock for choosing news items in StreamFields<commit_after>from __future__ import absolute_import, unicode_literals
from django.utils.functional import cached_property
from wagtail.wagtailcore.blocks import ChooserBlock
class NewsChooserBlock(ChooserBlock):
def __init__(self, target_model, **kwargs):
super(NewsChooserBlock, self).__init__(**kwargs)
self.target_model = target_model
@cached_property
def widget(self):
from wagtailnews.widgets import AdminNewsChooser
return AdminNewsChooser(self.target_model)
class Meta:
icon = "grip"
|
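To make the block's role concrete, a hedged usage sketch follows: wiring NewsChooserBlock into a page's StreamField. The HomePage model and the NewsItem target model are illustrative assumptions, not part of wagtailnews.
# Hypothetical usage sketch; HomePage and news.models.NewsItem are assumed names.
from wagtail.wagtailcore.fields import StreamField
from wagtail.wagtailcore.models import Page
from wagtailnews.blocks import NewsChooserBlock
from news.models import NewsItem  # assumed news item model
class HomePage(Page):
    body = StreamField([
        ('news_item', NewsChooserBlock(target_model=NewsItem)),
    ])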
|
4fbcf7b251253062465a286829b97c910b124c06
|
gem/migrations/0027_set_site_settings_correctly.py
|
gem/migrations/0027_set_site_settings_correctly.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from molo.profiles.models import UserProfilesSettings
from wagtail.wagtailcore.models import Site
def set_site_settings(apps, schema_editor):
for site in Site.objects.all():
settings = UserProfilesSettings.for_site(site)
settings.show_security_question_fields = True
settings.security_questions_required = True
settings.num_security_questions = 2
settings.activate_display_name = True
settings.capture_display_name_on_reg = True
settings.activate_gender = True
settings.capture_gender_on_reg = True
settings.save()
def unset_site_settings(apps, schema_editor):
# We don't know what we should be unsetting the
# site settings to here. It might be safest to
# do a no-op.
pass
class Migration(migrations.Migration):
dependencies = [
('gem', '0026_migrate_security_answers_off_gem_profile'),
('profiles', '0013_add_location_gender_education_level_fields'),
]
operations = [
migrations.RunPython(
set_site_settings,
unset_site_settings,
),
]
|
Add migration to set UserProfilesSettings correctly
|
Add migration to set UserProfilesSettings correctly
|
Python
|
bsd-2-clause
|
praekelt/molo-gem,praekelt/molo-gem,praekelt/molo-gem
|
Add migration to set UserProfilesSettings correctly
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from molo.profiles.models import UserProfilesSettings
from wagtail.wagtailcore.models import Site
def set_site_settings(apps, schema_editor):
for site in Site.objects.all():
settings = UserProfilesSettings.for_site(site)
settings.show_security_question_fields = True
settings.security_questions_required = True
settings.num_security_questions = 2
settings.activate_display_name = True
settings.capture_display_name_on_reg = True
settings.activate_gender = True
settings.capture_gender_on_reg = True
settings.save()
def unset_site_settings(apps, schema_editor):
# We don't know what we should be unsetting the
# site settings to here. It might be safest to
# do a no-op.
pass
class Migration(migrations.Migration):
dependencies = [
('gem', '0026_migrate_security_answers_off_gem_profile'),
('profiles', '0013_add_location_gender_education_level_fields'),
]
operations = [
migrations.RunPython(
set_site_settings,
unset_site_settings,
),
]
|
<commit_before><commit_msg>Add migration to set UserProfilesSettings correctly<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from molo.profiles.models import UserProfilesSettings
from wagtail.wagtailcore.models import Site
def set_site_settings(apps, schema_editor):
for site in Site.objects.all():
settings = UserProfilesSettings.for_site(site)
settings.show_security_question_fields = True
settings.security_questions_required = True
settings.num_security_questions = 2
settings.activate_display_name = True
settings.capture_display_name_on_reg = True
settings.activate_gender = True
settings.capture_gender_on_reg = True
settings.save()
def unset_site_settings(apps, schema_editor):
# We don't know what we should be unsetting the
# site settings to here. It might be safest to
# do a no-op.
pass
class Migration(migrations.Migration):
dependencies = [
('gem', '0026_migrate_security_answers_off_gem_profile'),
('profiles', '0013_add_location_gender_education_level_fields'),
]
operations = [
migrations.RunPython(
set_site_settings,
unset_site_settings,
),
]
|
Add migration to set UserProfilesSettings correctly
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from molo.profiles.models import UserProfilesSettings
from wagtail.wagtailcore.models import Site
def set_site_settings(apps, schema_editor):
for site in Site.objects.all():
settings = UserProfilesSettings.for_site(site)
settings.show_security_question_fields = True
settings.security_questions_required = True
settings.num_security_questions = 2
settings.activate_display_name = True
settings.capture_display_name_on_reg = True
settings.activate_gender = True
settings.capture_gender_on_reg = True
settings.save()
def unset_site_settings(apps, schema_editor):
# We don't know what we should be unsetting the
# site settings to here. It might be safest to
# do a no-op.
pass
class Migration(migrations.Migration):
dependencies = [
('gem', '0026_migrate_security_answers_off_gem_profile'),
('profiles', '0013_add_location_gender_education_level_fields'),
]
operations = [
migrations.RunPython(
set_site_settings,
unset_site_settings,
),
]
|
<commit_before><commit_msg>Add migration to set UserProfilesSettings correctly<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from molo.profiles.models import UserProfilesSettings
from wagtail.wagtailcore.models import Site
def set_site_settings(apps, schema_editor):
for site in Site.objects.all():
settings = UserProfilesSettings.for_site(site)
settings.show_security_question_fields = True
settings.security_questions_required = True
settings.num_security_questions = 2
settings.activate_display_name = True
settings.capture_display_name_on_reg = True
settings.activate_gender = True
settings.capture_gender_on_reg = True
settings.save()
def unset_site_settings(apps, schema_editor):
# We don't know what we should be unsetting the
# site settings to here. It might be safest to
# do a no-op.
pass
class Migration(migrations.Migration):
dependencies = [
('gem', '0026_migrate_security_answers_off_gem_profile'),
('profiles', '0013_add_location_gender_education_level_fields'),
]
operations = [
migrations.RunPython(
set_site_settings,
unset_site_settings,
),
]
|
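Since the reverse step here is deliberately a no-op, Django's built-in migrations.RunPython.noop (available since Django 1.8) expresses the same intent without a custom function:
    # Same operations list, using the built-in no-op reverse:
    operations = [
        migrations.RunPython(
            set_site_settings,
            migrations.RunPython.noop,
        ),
    ]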
|
161973b337c9574d95b1b4c71c802f6d9a2d6d62
|
core/migrations/0005_merge_20191122_0450.py
|
core/migrations/0005_merge_20191122_0450.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-11-22 04:50
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0004_auto_20190715_0912'),
('core', '0004_auto_20190222_1451'),
]
operations = [
]
|
Merge migrations file to resolve conflicts
|
Fix: Merge migrations file to resolve conflicts
|
Python
|
bsd-2-clause
|
lapo-luchini/pinry,lapo-luchini/pinry,pinry/pinry,pinry/pinry,lapo-luchini/pinry,pinry/pinry,pinry/pinry,lapo-luchini/pinry
|
Fix: Merge migrations file to resolve conflicts
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-11-22 04:50
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0004_auto_20190715_0912'),
('core', '0004_auto_20190222_1451'),
]
operations = [
]
|
<commit_before><commit_msg>Fix: Merge migrations file to resolve conflicts<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-11-22 04:50
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0004_auto_20190715_0912'),
('core', '0004_auto_20190222_1451'),
]
operations = [
]
|
Fix: Merge migrations file to resolve conflicts
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-11-22 04:50
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0004_auto_20190715_0912'),
('core', '0004_auto_20190222_1451'),
]
operations = [
]
|
<commit_before><commit_msg>Fix: Merge migrations file to resolve conflicts<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-11-22 04:50
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0004_auto_20190715_0912'),
('core', '0004_auto_20190222_1451'),
]
operations = [
]
|
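Merge migrations like this one carry no operations; they only give the migration graph a single leaf again after two branches each added a migration numbered 0004. They are normally generated rather than hand-written, for example with:
python manage.py makemigrations core --merge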
|
84fc02af75bec18b1c436741453762c793700ea3
|
tests/test_middleware_key_auth.py
|
tests/test_middleware_key_auth.py
|
import json
from rest_framework import status
from rest_framework.test import APITestCase
from django.contrib.auth import get_user_model
from api_bouncer.models import Api, Consumer
User = get_user_model()
class KeyAuthMiddlewareTests(APITestCase):
def setUp(self):
self.superuser = User.objects.create_superuser(
'john',
'john@localhost.local',
'john123john'
)
self.example_api = Api.objects.create(
name='httpbin',
hosts=['httpbin.org'],
upstream_url='https://httpbin.org'
)
self.key_auth_url = '/apis/{}/plugins/'.format(self.example_api.name)
self.consumer = Consumer.objects.create(username='django')
self.consumer_key_url = (
'/consumers/{}/key-auth/'.format(self.consumer.username)
)
def test_bounce_api_authorization_ok(self):
"""
Ensure we can perform requests on an api using a valid key.
"""
self.client.login(username='john', password='john123john')
self.client.post(self.key_auth_url)
response = self.client.post(self.consumer_key_url)
self.client.logout()
apikey = response.data['key']
url = '/get?msg=Bounce'
self.client.credentials(HTTP_HOST='httpbin.org', HTTP_APIKEY=apikey)
response = self.client.get(url)
content = response.content.decode('utf-8')
data = json.loads(content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(data['args']['msg'], 'Bounce')
def test_bounce_api_authorization_invalid(self):
"""
Ensure we can't perform requests on an api without using a valid key.
"""
self.client.login(username='john', password='john123john')
self.client.post(self.key_auth_url, {'name': 'key-auth'})
response = self.client.post(self.consumer_key_url)
self.client.logout()
apikey = 'you_know_nothing'
url = '/get'
self.client.credentials(HTTP_HOST='httpbin.org', HTTP_APIKEY=apikey)
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
|
Add test cases for KeyAuthMiddleware, Test apikey authentication
|
Add test cases for KeyAuthMiddleware, Test apikey authentication
|
Python
|
apache-2.0
|
menecio/django-api-bouncer
|
Add test cases for KeyAuthMiddleware, Test apikey authentication
|
import json
from rest_framework import status
from rest_framework.test import APITestCase
from django.contrib.auth import get_user_model
from api_bouncer.models import Api, Consumer
User = get_user_model()
class KeyAuthMiddlewareTests(APITestCase):
def setUp(self):
self.superuser = User.objects.create_superuser(
'john',
'john@localhost.local',
'john123john'
)
self.example_api = Api.objects.create(
name='httpbin',
hosts=['httpbin.org'],
upstream_url='https://httpbin.org'
)
self.key_auth_url = '/apis/{}/plugins/'.format(self.example_api.name)
self.consumer = Consumer.objects.create(username='django')
self.consumer_key_url = (
'/consumers/{}/key-auth/'.format(self.consumer.username)
)
def test_bounce_api_authorization_ok(self):
"""
Ensure we can perform requests on an api using a valid key.
"""
self.client.login(username='john', password='john123john')
self.client.post(self.key_auth_url)
response = self.client.post(self.consumer_key_url)
self.client.logout()
apikey = response.data['key']
url = '/get?msg=Bounce'
self.client.credentials(HTTP_HOST='httpbin.org', HTTP_APIKEY=apikey)
response = self.client.get(url)
content = response.content.decode('utf-8')
data = json.loads(content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(data['args']['msg'], 'Bounce')
def test_bounce_api_authorization_invalid(self):
"""
Ensure we can't perform requests on an api without using a valid key.
"""
self.client.login(username='john', password='john123john')
self.client.post(self.key_auth_url, {'name': 'key-auth'})
response = self.client.post(self.consumer_key_url)
self.client.logout()
apikey = 'you_know_nothing'
url = '/get'
self.client.credentials(HTTP_HOST='httpbin.org', HTTP_APIKEY=apikey)
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
|
<commit_before><commit_msg>Add test cases for KeyAuthMiddleware, Test apikey authentication<commit_after>
|
import json
from rest_framework import status
from rest_framework.test import APITestCase
from django.contrib.auth import get_user_model
from api_bouncer.models import Api, Consumer
User = get_user_model()
class KeyAuthMiddlewareTests(APITestCase):
def setUp(self):
self.superuser = User.objects.create_superuser(
'john',
'john@localhost.local',
'john123john'
)
self.example_api = Api.objects.create(
name='httpbin',
hosts=['httpbin.org'],
upstream_url='https://httpbin.org'
)
self.key_auth_url = '/apis/{}/plugins/'.format(self.example_api.name)
self.consumer = Consumer.objects.create(username='django')
self.consumer_key_url = (
'/consumers/{}/key-auth/'.format(self.consumer.username)
)
def test_bounce_api_authorization_ok(self):
"""
Ensure we can perform requests on an api using a valid key.
"""
self.client.login(username='john', password='john123john')
self.client.post(self.key_auth_url)
response = self.client.post(self.consumer_key_url)
self.client.logout()
apikey = response.data['key']
url = '/get?msg=Bounce'
self.client.credentials(HTTP_HOST='httpbin.org', HTTP_APIKEY=apikey)
response = self.client.get(url)
content = response.content.decode('utf-8')
data = json.loads(content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(data['args']['msg'], 'Bounce')
def test_bounce_api_authorization_invalid(self):
"""
Ensure we can't perform requests on an api without using a valid key.
"""
self.client.login(username='john', password='john123john')
self.client.post(self.key_auth_url, {'name': 'key-auth'})
response = self.client.post(self.consumer_key_url)
self.client.logout()
apikey = 'you_know_nothing'
url = '/get'
self.client.credentials(HTTP_HOST='httpbin.org', HTTP_APIKEY=apikey)
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
|
Add test cases for KeyAuthMiddleware, Test apikey authentication
import json
from rest_framework import status
from rest_framework.test import APITestCase
from django.contrib.auth import get_user_model
from api_bouncer.models import Api, Consumer
User = get_user_model()
class KeyAuthMiddlewareTests(APITestCase):
def setUp(self):
self.superuser = User.objects.create_superuser(
'john',
'john@localhost.local',
'john123john'
)
self.example_api = Api.objects.create(
name='httpbin',
hosts=['httpbin.org'],
upstream_url='https://httpbin.org'
)
self.key_auth_url = '/apis/{}/plugins/'.format(self.example_api.name)
self.consumer = Consumer.objects.create(username='django')
self.consumer_key_url = (
'/consumers/{}/key-auth/'.format(self.consumer.username)
)
def test_bounce_api_authorization_ok(self):
"""
Ensure we can perform requests on an api using a valid key.
"""
self.client.login(username='john', password='john123john')
self.client.post(self.key_auth_url)
response = self.client.post(self.consumer_key_url)
self.client.logout()
apikey = response.data['key']
url = '/get?msg=Bounce'
self.client.credentials(HTTP_HOST='httpbin.org', HTTP_APIKEY=apikey)
response = self.client.get(url)
content = response.content.decode('utf-8')
data = json.loads(content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(data['args']['msg'], 'Bounce')
def test_bounce_api_authorization_invalid(self):
"""
Ensure we can't perform requests on an api without using a valid key.
"""
self.client.login(username='john', password='john123john')
self.client.post(self.key_auth_url, {'name': 'key-auth'})
response = self.client.post(self.consumer_key_url)
self.client.logout()
apikey = 'you_know_nothing'
url = '/get'
self.client.credentials(HTTP_HOST='httpbin.org', HTTP_APIKEY=apikey)
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
|
<commit_before><commit_msg>Add test cases for KeyAuthMiddleware, Test apikey authentication<commit_after>import json
from rest_framework import status
from rest_framework.test import APITestCase
from django.contrib.auth import get_user_model
from api_bouncer.models import Api, Consumer
User = get_user_model()
class KeyAuthMiddlewareTests(APITestCase):
def setUp(self):
self.superuser = User.objects.create_superuser(
'john',
'john@localhost.local',
'john123john'
)
self.example_api = Api.objects.create(
name='httpbin',
hosts=['httpbin.org'],
upstream_url='https://httpbin.org'
)
self.key_auth_url = '/apis/{}/plugins/'.format(self.example_api.name)
self.consumer = Consumer.objects.create(username='django')
self.consumer_key_url = (
'/consumers/{}/key-auth/'.format(self.consumer.username)
)
def test_bounce_api_authorization_ok(self):
"""
Ensure we can perform requests on an api using a valid key.
"""
self.client.login(username='john', password='john123john')
self.client.post(self.key_auth_url)
response = self.client.post(self.consumer_key_url)
self.client.logout()
apikey = response.data['key']
url = '/get?msg=Bounce'
self.client.credentials(HTTP_HOST='httpbin.org', HTTP_APIKEY=apikey)
response = self.client.get(url)
content = response.content.decode('utf-8')
data = json.loads(content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(data['args']['msg'], 'Bounce')
def test_bounce_api_authorization_invalid(self):
"""
Ensure we can't perform requests on an api without using a valid key.
"""
self.client.login(username='john', password='john123john')
self.client.post(self.key_auth_url, {'name': 'key-auth'})
response = self.client.post(self.consumer_key_url)
self.client.logout()
apikey = 'you_know_nothing'
url = '/get'
self.client.credentials(HTTP_HOST='httpbin.org', HTTP_APIKEY=apikey)
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
|
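For orientation, a rough sketch of the kind of middleware these tests exercise is shown below. This is an illustration only, not api_bouncer's actual implementation; the new-style middleware signature and the ConsumerKey model name are assumptions.
# Illustrative sketch only; not the project's real KeyAuthMiddleware.
from django.http import JsonResponse
class KeyAuthMiddleware(object):
    def __init__(self, get_response):
        self.get_response = get_response
    def __call__(self, request):
        apikey = request.META.get('HTTP_APIKEY')
        if not self._key_is_valid(apikey):
            return JsonResponse({'detail': 'Invalid API key'}, status=403)
        return self.get_response(request)
    def _key_is_valid(self, apikey):
        from api_bouncer.models import ConsumerKey  # assumed model name
        return bool(apikey) and ConsumerKey.objects.filter(key=apikey).exists()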
|
0e86d708c2a1a266f1ba5b5f6a28e2105d291d66
|
generate_stats.py
|
generate_stats.py
|
import argparse
import datetime
import time
import psutil
def system_stats(duration):
    for _ in range(duration):
        now = datetime.datetime.isoformat(datetime.datetime.utcnow())
        cpu = psutil.cpu_percent()
        mem = psutil.virtual_memory().percent
        net = psutil.net_io_counters()
        yield now, cpu, mem, net.bytes_sent, net.bytes_recv
        time.sleep(1)
def main(duration=60, outfile='systemstats.csv', quiet=False):
    with open(outfile, 'w') as f:
        line = 'time,cpu_usage,memory_usage,bytes_sent,bytes_received'
        if not quiet:
            print(line)
        f.write(line + '\n')
        for snapshot in system_stats(duration):
            line = '{},{},{},{},{}'.format(*snapshot)
            if not quiet:
                print(line)
            f.write(line + '\n')
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Gather data about the system and record it in a file')
    parser.add_argument('-d', '--duration', default=60, type=int)
    parser.add_argument('--output-file', default='systemstats.csv')
    parser.add_argument('-q', '--quiet', action='store_true')
    args = parser.parse_args()
    main(args.duration, args.output_file, args.quiet)
|
Add script to collect data from the system
|
Add script to collect data from the system
|
Python
|
cc0-1.0
|
timClicks/make-for-data
|
Add script to collect data from the system
|
import argparse
import datetime
import time
import psutil
def system_stats(duration):
    for _ in range(duration):
        now = datetime.datetime.isoformat(datetime.datetime.utcnow())
        cpu = psutil.cpu_percent()
        mem = psutil.virtual_memory().percent
        net = psutil.net_io_counters()
        yield now, cpu, mem, net.bytes_sent, net.bytes_recv
        time.sleep(1)
def main(duration=60, outfile='systemstats.csv', quiet=False):
    with open(outfile, 'w') as f:
        line = 'time,cpu_usage,memory_usage,bytes_sent,bytes_received'
        if not quiet:
            print(line)
        f.write(line + '\n')
        for snapshot in system_stats(duration):
            line = '{},{},{},{},{}'.format(*snapshot)
            if not quiet:
                print(line)
            f.write(line + '\n')
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Gather data about the system and record it in a file')
    parser.add_argument('-d', '--duration', default=60, type=int)
    parser.add_argument('--output-file', default='systemstats.csv')
    parser.add_argument('-q', '--quiet', action='store_true')
    args = parser.parse_args()
    main(args.duration, args.output_file, args.quiet)
|
<commit_before><commit_msg>Add script to collect data from the system<commit_after>
|
import argparse
import datetime
import time
import psutil
def system_stats(duration):
    for _ in range(duration):
        now = datetime.datetime.isoformat(datetime.datetime.utcnow())
        cpu = psutil.cpu_percent()
        mem = psutil.virtual_memory().percent
        net = psutil.net_io_counters()
        yield now, cpu, mem, net.bytes_sent, net.bytes_recv
        time.sleep(1)
def main(duration=60, outfile='systemstats.csv', quiet=False):
    with open(outfile, 'w') as f:
        line = 'time,cpu_usage,memory_usage,bytes_sent,bytes_received'
        if not quiet:
            print(line)
        f.write(line + '\n')
        for snapshot in system_stats(duration):
            line = '{},{},{},{},{}'.format(*snapshot)
            if not quiet:
                print(line)
            f.write(line + '\n')
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Gather data about the system and record it in a file')
    parser.add_argument('-d', '--duration', default=60, type=int)
    parser.add_argument('--output-file', default='systemstats.csv')
    parser.add_argument('-q', '--quiet', action='store_true')
    args = parser.parse_args()
    main(args.duration, args.output_file, args.quiet)
|
Add script to collect data from the system
import argparse
import datetime
import time
import psutil
def system_stats(duration):
    for _ in range(duration):
        now = datetime.datetime.isoformat(datetime.datetime.utcnow())
        cpu = psutil.cpu_percent()
        mem = psutil.virtual_memory().percent
        net = psutil.net_io_counters()
        yield now, cpu, mem, net.bytes_sent, net.bytes_recv
        time.sleep(1)
def main(duration=60, outfile='systemstats.csv', quiet=False):
    with open(outfile, 'w') as f:
        line = 'time,cpu_usage,memory_usage,bytes_sent,bytes_received'
        if not quiet:
            print(line)
        f.write(line + '\n')
        for snapshot in system_stats(duration):
            line = '{},{},{},{},{}'.format(*snapshot)
            if not quiet:
                print(line)
            f.write(line + '\n')
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Gather data about the system and record it in a file')
    parser.add_argument('-d', '--duration', default=60, type=int)
    parser.add_argument('--output-file', default='systemstats.csv')
    parser.add_argument('-q', '--quiet', action='store_true')
    args = parser.parse_args()
    main(args.duration, args.output_file, args.quiet)
|
<commit_before><commit_msg>Add script to collect data from the system<commit_after>import argparse
import datetime
import time
import psutil
def system_stats(duration):
    for _ in range(duration):
        now = datetime.datetime.isoformat(datetime.datetime.utcnow())
        cpu = psutil.cpu_percent()
        mem = psutil.virtual_memory().percent
        net = psutil.net_io_counters()
        yield now, cpu, mem, net.bytes_sent, net.bytes_recv
        time.sleep(1)
def main(duration=60, outfile='systemstats.csv', quiet=False):
    with open(outfile, 'w') as f:
        line = 'time,cpu_usage,memory_usage,bytes_sent,bytes_received'
        if not quiet:
            print(line)
        f.write(line + '\n')
        for snapshot in system_stats(duration):
            line = '{},{},{},{},{}'.format(*snapshot)
            if not quiet:
                print(line)
            f.write(line + '\n')
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Gather data about the system and record it in a file')
    parser.add_argument('-d', '--duration', default=60, type=int)
    parser.add_argument('--output-file', default='systemstats.csv')
    parser.add_argument('-q', '--quiet', action='store_true')
    args = parser.parse_args()
    main(args.duration, args.output_file, args.quiet)
|
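The script's CSV output is easy to consume afterwards. A short post-processing sketch, using the default file name produced above:
# Compute the average CPU usage recorded by the stats script.
import csv
with open('systemstats.csv') as f:
    rows = list(csv.DictReader(f))
avg_cpu = sum(float(r['cpu_usage']) for r in rows) / len(rows)
print('average cpu usage: {:.1f}%'.format(avg_cpu))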
|
7cd265eede78f5c9a50086776aca2eee593e28bc
|
common/djangoapps/student/migrations/0008_default_enrollment_honor.py
|
common/djangoapps/student/migrations/0008_default_enrollment_honor.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('student', '0008_auto_20161117_1209'),
]
operations = [
migrations.AlterField(
model_name='courseenrollment',
name='mode',
field=models.CharField(default='honor', max_length=100),
),
migrations.AlterField(
model_name='historicalcourseenrollment',
name='mode',
field=models.CharField(default='honor', max_length=100),
),
]
|
Add missing migration, student course enrollment
|
Add missing migration, student course enrollment
This was fixed by running `makemigrations`, which we presumably should
have done when we made the original change [1].
This has probably been broken all this time, potentially as far back as
December 2016.
Despite earlier suspicion, it's no longer clear that this will fix the
issue we've seen w/ mis-enrollments.
I had originally thought:
> While the software default course mode was correctly set to `honor`,
> the database default was still incorrectly set to `audit`. This meant
> that while most enrollments were registered successfully, any that
> followed a code path that neglected to pass in a value for course mode
> would instead rely on the database's default, which is incorrect for
> our cases.
Regardless, we'll need to correct any erroneous entries with the following SQL:
```sql
UPDATE
student_courseenrollment
SET
mode = 'honor'
WHERE
mode = 'audit'
;
```
- [1] d00bb9792086602470f1a8833013df3cf433a546
|
Python
|
agpl-3.0
|
Stanford-Online/edx-platform,Stanford-Online/edx-platform,Stanford-Online/edx-platform,Stanford-Online/edx-platform
|
Add missing migration, student course enrollment
This was fixed by running `makemigrations`, which we presumably should
have done when we made the original change [1].
This has probably been broken all this time, potentially as far back as
December 2016.
Despite earlier suspicion, it's no longer clear that this will fix the
issue we've seen w/ mis-enrollments.
I had originally thought:
> While the software default course mode was correctly set to `honor`,
> the database default was still incorrectly set to `audit`. This meant
> that while most enrollments were registered successfully, any that
> followed a code path that neglected to pass in a value for course mode
> would instead rely on the database's default, which is incorrect for
> our cases.
Regardless, we'll need to correct any erroneous entries with the following SQL:
```sql
UPDATE
student_courseenrollment
SET
mode = 'honor'
WHERE
mode = 'audit'
;
```
- [1] d00bb9792086602470f1a8833013df3cf433a546
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('student', '0008_auto_20161117_1209'),
]
operations = [
migrations.AlterField(
model_name='courseenrollment',
name='mode',
field=models.CharField(default='honor', max_length=100),
),
migrations.AlterField(
model_name='historicalcourseenrollment',
name='mode',
field=models.CharField(default='honor', max_length=100),
),
]
|
<commit_before><commit_msg>Add missing migration, student course enrollment
This was fixed by running `makemigrations`, which we presumably should
have done when we made the original change [1].
This has probably been broken all this time, potentially as far back as
December 2016.
Despite earlier suspicion, it's no longer clear that this will fix the
issue we've seen w/ mis-enrollments.
I had originally thought:
> While the software default course mode was correctly set to `honor`,
> the database default was still incorrectly set to `audit`. This meant
> that while most enrollments were registered successfully, any that
> followed a code path that neglected to pass in a value for course mode
> would instead rely on the database's default, which is incorrect for
> our cases.
Regardless, we'll need to correct any erroneous entries with the following SQL:
```sql
UPDATE
student_courseenrollment
SET
mode = 'honor'
WHERE
mode = 'audit'
;
```
- [1] d00bb9792086602470f1a8833013df3cf433a546<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('student', '0008_auto_20161117_1209'),
]
operations = [
migrations.AlterField(
model_name='courseenrollment',
name='mode',
field=models.CharField(default='honor', max_length=100),
),
migrations.AlterField(
model_name='historicalcourseenrollment',
name='mode',
field=models.CharField(default='honor', max_length=100),
),
]
|
Add missing migration, student course enrollment
This was fixed by running `makemigrations`, which we presumably should
have done when we made the original change [1].
This has probably been broken all this time, potentially as far back as
December 2016.
Despite earlier suspicion, it's no longer clear that this will fix the
issue we've seen w/ mis-enrollments.
I had originally thought:
> While the software default course mode was correctly set to `honor`,
> the database default was still incorrectly set to `audit`. This meant
> that while most enrollments were registered successfully, any that
> followed a code path that neglected to pass in a value for course mode
> would instead rely on the database's default, which is incorrect for
> our cases.
Regardless, we'll need to correct any erroneous entries with the following SQL:
```sql
UPDATE
student_courseenrollment
SET
mode = 'honor'
WHERE
mode = 'audit'
;
```
- [1] d00bb9792086602470f1a8833013df3cf433a546
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('student', '0008_auto_20161117_1209'),
]
operations = [
migrations.AlterField(
model_name='courseenrollment',
name='mode',
field=models.CharField(default='honor', max_length=100),
),
migrations.AlterField(
model_name='historicalcourseenrollment',
name='mode',
field=models.CharField(default='honor', max_length=100),
),
]
|
<commit_before><commit_msg>Add missing migration, student course enrollment
This was fixed by running `makemigrations`, which we presumably should
have done when we made the original change [1].
This has probably been broken all this time, potentially as far back as
December 2016.
Despite earlier suspicion, it's no longer clear that this will fix the
issue we've seen w/ mis-enrollments.
I had originally thought:
> While the software default course mode was correctly set to `honor`,
> the database default was still incorrectly set to `audit`. This meant
> that while most enrollments were registered successfully, any that
> followed a code path that neglected to pass in a value for course mode
> would instead rely on the database's default, which is incorrect for
> our cases.
Regardless, we'll need to correct any erroneous entries with the following SQL:
```sql
UPDATE
student_courseenrollment
SET
mode = 'honor'
WHERE
mode = 'audit'
;
```
- [1] d00bb9792086602470f1a8833013df3cf433a546<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('student', '0008_auto_20161117_1209'),
]
operations = [
migrations.AlterField(
model_name='courseenrollment',
name='mode',
field=models.CharField(default='honor', max_length=100),
),
migrations.AlterField(
model_name='historicalcourseenrollment',
name='mode',
field=models.CharField(default='honor', max_length=100),
),
]
|
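The corrective SQL in the commit message could equally be shipped as a data migration so the fix runs everywhere the code is deployed. A hedged sketch follows; the migration number and dependency below are placeholders, not actual edx-platform migrations.
# Hypothetical follow-up data migration; names and numbers are placeholders.
from django.db import migrations
def fix_audit_enrollments(apps, schema_editor):
    CourseEnrollment = apps.get_model('student', 'CourseEnrollment')
    CourseEnrollment.objects.filter(mode='audit').update(mode='honor')
class Migration(migrations.Migration):
    dependencies = [
        ('student', '0008_default_enrollment_honor'),
    ]
    operations = [
        migrations.RunPython(fix_audit_enrollments, migrations.RunPython.noop),
    ]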
|
4138baea40ad25635967f7382f082256099da4d2
|
examples/head.py
|
examples/head.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from examples import common
from examples import session
def run_get(opts):
sess = session.make_session(opts)
cls = common.find_resource_cls(opts)
data = common.get_data_option(opts)
obj = cls.new(**data)
obj.head(sess)
print(str(obj))
return
if __name__ == "__main__":
opts = common.setup()
sys.exit(common.main(opts, run_get))
|
Add example script for HEAD requests
|
Add example script for HEAD requests
Change-Id: I3d2421f1fb6e0db02e01789edc78ca432e34975f
|
Python
|
apache-2.0
|
stackforge/python-openstacksdk,dudymas/python-openstacksdk,dudymas/python-openstacksdk,briancurtin/python-openstacksdk,briancurtin/python-openstacksdk,mtougeron/python-openstacksdk,dtroyer/python-openstacksdk,dtroyer/python-openstacksdk,mtougeron/python-openstacksdk,openstack/python-openstacksdk,openstack/python-openstacksdk,stackforge/python-openstacksdk
|
Add example script for HEAD requests
Change-Id: I3d2421f1fb6e0db02e01789edc78ca432e34975f
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from examples import common
from examples import session
def run_get(opts):
sess = session.make_session(opts)
cls = common.find_resource_cls(opts)
data = common.get_data_option(opts)
obj = cls.new(**data)
obj.head(sess)
print(str(obj))
return
if __name__ == "__main__":
opts = common.setup()
sys.exit(common.main(opts, run_get))
|
<commit_before><commit_msg>Add example script for HEAD requests
Change-Id: I3d2421f1fb6e0db02e01789edc78ca432e34975f<commit_after>
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from examples import common
from examples import session
def run_get(opts):
sess = session.make_session(opts)
cls = common.find_resource_cls(opts)
data = common.get_data_option(opts)
obj = cls.new(**data)
obj.head(sess)
print(str(obj))
return
if __name__ == "__main__":
opts = common.setup()
sys.exit(common.main(opts, run_get))
|
Add example script for HEAD requests
Change-Id: I3d2421f1fb6e0db02e01789edc78ca432e34975f
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from examples import common
from examples import session
def run_get(opts):
sess = session.make_session(opts)
cls = common.find_resource_cls(opts)
data = common.get_data_option(opts)
obj = cls.new(**data)
obj.head(sess)
print(str(obj))
return
if __name__ == "__main__":
opts = common.setup()
sys.exit(common.main(opts, run_get))
|
<commit_before><commit_msg>Add example script for HEAD requests
Change-Id: I3d2421f1fb6e0db02e01789edc78ca432e34975f<commit_after># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from examples import common
from examples import session
def run_get(opts):
sess = session.make_session(opts)
cls = common.find_resource_cls(opts)
data = common.get_data_option(opts)
obj = cls.new(**data)
obj.head(sess)
print(str(obj))
return
if __name__ == "__main__":
opts = common.setup()
sys.exit(common.main(opts, run_get))
|
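As background on the verb itself: a HEAD request returns the same headers a GET would, but no body, so it is a cheap way to read an object's metadata. A generic sketch with the requests library, with an illustrative URL:
# Plain HEAD request; only headers come back, never a body.
import requests
resp = requests.head('https://example.com/v1/objects/abc')
print(resp.status_code)
print(resp.headers.get('Content-Length'))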
|
2ef2ebf54e16ba1426dd370c6e3d656e8e2f97dd
|
pyUpdate.py
|
pyUpdate.py
|
#!/usr/bin/python3
# Import modules
from os import system as do
from time import sleep
# Set Variables
upCache = " "
upSys = " "
remAuto = " "
# define how to get config
def getConfig(filename):
    import imp
    global data
    data = imp.load_source('data', filename)
# path to "config" file
getConfig('/home/carson/.pyUpdate/updaterConfig.txt')
if data.autoCache == 0:
    while upCache not in ("y", "n"):
        upCache = input("Update cache? (Recommended) y/n")
    if upCache == "y":
        do("clear")
        do("sudo apt-get update")
    if upCache == "n":
        print("Not updating cache")
        sleep(3)
if data.autoCache == 1:
    do("clear")
    do("sudo apt-get update --force-yes")
if data.autoUpdate == 0:
    while upSys not in ("y", "n"):
        upSys = input("Update system? (Recommended) y/n")
    if upSys == "y":
        do("clear")
        do("sudo apt-get upgrade --force-yes")
    if upSys == "n":
        print("Not updating system")
        sleep(3)
if data.autoUpdate == 1:
    do("clear")
    do("sudo apt-get upgrade --force-yes")
if data.autoRemove == 0:
    while remAuto not in ("y", "n"):
        remAuto = input("Remove automatically installed packages? (Recommended) y/n")
    if remAuto == "y":
        do("clear")
        do("sudo apt-get autoremove")
    if remAuto == "n":
        print("Not removing automatically installed packages")
        sleep(3)
if data.autoRemove == 1:
    do("clear")
    do("sudo apt-get autoremove --force-yes")
|
Create in 63 glorious lines
|
Create in 63 glorious lines
|
Python
|
mit
|
hyperdriveguy/pyUpdate,hyperdriveguy/pyUpdate
|
Create in 63 glorious lines
|
#!/usr/bin/python3
# Import modules
from os import system as do
from time import sleep
# Set Variables
upCache = " "
upSys = " "
remAuto = " "
# define how to get config
def getConfig(filename):
    import imp
    global data
    data = imp.load_source('data', filename)
# path to "config" file
getConfig('/home/carson/.pyUpdate/updaterConfig.txt')
if data.autoCache == 0:
    while upCache not in ("y", "n"):
        upCache = input("Update cache? (Recommended) y/n")
    if upCache == "y":
        do("clear")
        do("sudo apt-get update")
    if upCache == "n":
        print("Not updating cache")
        sleep(3)
if data.autoCache == 1:
    do("clear")
    do("sudo apt-get update --force-yes")
if data.autoUpdate == 0:
    while upSys not in ("y", "n"):
        upSys = input("Update system? (Recommended) y/n")
    if upSys == "y":
        do("clear")
        do("sudo apt-get upgrade --force-yes")
    if upSys == "n":
        print("Not updating system")
        sleep(3)
if data.autoUpdate == 1:
    do("clear")
    do("sudo apt-get upgrade --force-yes")
if data.autoRemove == 0:
    while remAuto not in ("y", "n"):
        remAuto = input("Remove automatically installed packages? (Recommended) y/n")
    if remAuto == "y":
        do("clear")
        do("sudo apt-get autoremove")
    if remAuto == "n":
        print("Not removing automatically installed packages")
        sleep(3)
if data.autoRemove == 1:
    do("clear")
    do("sudo apt-get autoremove --force-yes")
|
<commit_before><commit_msg>Create in 63 glorious lines<commit_after>
|
#!/usr/bin/python3
# Import modules
from os import system as do
from time import sleep
# Set Variables
upCache = " "
upSys = " "
remAuto = " "
# define how to get config
def getConfig(filename):
    import imp
    global data
    data = imp.load_source('data', filename)
# path to "config" file
getConfig('/home/carson/.pyUpdate/updaterConfig.txt')
if data.autoCache == 0:
    while upCache not in ("y", "n"):
        upCache = input("Update cache? (Recommended) y/n")
    if upCache == "y":
        do("clear")
        do("sudo apt-get update")
    if upCache == "n":
        print("Not updating cache")
        sleep(3)
if data.autoCache == 1:
    do("clear")
    do("sudo apt-get update --force-yes")
if data.autoUpdate == 0:
    while upSys not in ("y", "n"):
        upSys = input("Update system? (Recommended) y/n")
    if upSys == "y":
        do("clear")
        do("sudo apt-get upgrade --force-yes")
    if upSys == "n":
        print("Not updating system")
        sleep(3)
if data.autoUpdate == 1:
    do("clear")
    do("sudo apt-get upgrade --force-yes")
if data.autoRemove == 0:
    while remAuto not in ("y", "n"):
        remAuto = input("Remove automatically installed packages? (Recommended) y/n")
    if remAuto == "y":
        do("clear")
        do("sudo apt-get autoremove")
    if remAuto == "n":
        print("Not removing automatically installed packages")
        sleep(3)
if data.autoRemove == 1:
    do("clear")
    do("sudo apt-get autoremove --force-yes")
|
Create in 63 glorious lines
#!/usr/bin/python3
# Import modules
from os import system as do
from time import sleep
# Set Variables
upCache = " "
upSys = " "
remAuto = " "
# define how to get config
def getConfig(filename):
    import imp
    global data
    data = imp.load_source('data', filename)
# path to "config" file
getConfig('/home/carson/.pyUpdate/updaterConfig.txt')
if data.autoCache == 0:
    while upCache not in ("y", "n"):
        upCache = input("Update cache? (Recommended) y/n")
    if upCache == "y":
        do("clear")
        do("sudo apt-get update")
    if upCache == "n":
        print("Not updating cache")
        sleep(3)
if data.autoCache == 1:
    do("clear")
    do("sudo apt-get update --force-yes")
if data.autoUpdate == 0:
    while upSys not in ("y", "n"):
        upSys = input("Update system? (Recommended) y/n")
    if upSys == "y":
        do("clear")
        do("sudo apt-get upgrade --force-yes")
    if upSys == "n":
        print("Not updating system")
        sleep(3)
if data.autoUpdate == 1:
    do("clear")
    do("sudo apt-get upgrade --force-yes")
if data.autoRemove == 0:
    while remAuto not in ("y", "n"):
        remAuto = input("Remove automatically installed packages? (Recommended) y/n")
    if remAuto == "y":
        do("clear")
        do("sudo apt-get autoremove")
    if remAuto == "n":
        print("Not removing automatically installed packages")
        sleep(3)
if data.autoRemove == 1:
    do("clear")
    do("sudo apt-get autoremove --force-yes")
|
<commit_before><commit_msg>Create in 63 glorious lines<commit_after>#!/usr/bin/python3
# Import modules
from os import system as do
from time import sleep
# Set Variables
upCache = " "
upSys = " "
remAuto = " "
# define how to get config
def getConfig(filename):
    import imp
    global data
    data = imp.load_source('data', filename)
# path to "config" file
getConfig('/home/carson/.pyUpdate/updaterConfig.txt')
if data.autoCache == 0:
    while upCache not in ("y", "n"):
        upCache = input("Update cache? (Recommended) y/n")
    if upCache == "y":
        do("clear")
        do("sudo apt-get update")
    if upCache == "n":
        print("Not updating cache")
        sleep(3)
if data.autoCache == 1:
    do("clear")
    do("sudo apt-get update --force-yes")
if data.autoUpdate == 0:
    while upSys not in ("y", "n"):
        upSys = input("Update system? (Recommended) y/n")
    if upSys == "y":
        do("clear")
        do("sudo apt-get upgrade --force-yes")
    if upSys == "n":
        print("Not updating system")
        sleep(3)
if data.autoUpdate == 1:
    do("clear")
    do("sudo apt-get upgrade --force-yes")
if data.autoRemove == 0:
    while remAuto not in ("y", "n"):
        remAuto = input("Remove automatically installed packages? (Recommended) y/n")
    if remAuto == "y":
        do("clear")
        do("sudo apt-get autoremove")
    if remAuto == "n":
        print("Not removing automatically installed packages")
        sleep(3)
if data.autoRemove == 1:
    do("clear")
    do("sudo apt-get autoremove --force-yes")
|
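The three prompt loops in this script follow one pattern, so a small helper could collapse them. A sketch:
# Possible refactor: one reusable yes/no prompt instead of three loops.
def ask_yes_no(question):
    answer = ""
    while answer not in ("y", "n"):
        answer = input(question + " y/n ")
    return answer == "y"
# e.g.: if ask_yes_no("Update cache? (Recommended)"): do("sudo apt-get update")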
|
733a9ef22a1c9479ee4d2debcfc92fe2d09a7aa5
|
pytask/helpers/__init__.py
|
pytask/helpers/__init__.py
|
"""Package containing the helper functions that may be used through out
the site.
"""
__authors__ = [
'"Madhusudan.C.S" <madhusudancs@fossee.in>',
]
|
Create a separate package to hold all the PyTask sitewide helpers.
|
Create a separate package to hold all the PyTask sitewide helpers.
|
Python
|
agpl-3.0
|
madhusudancs/pytask,madhusudancs/pytask,madhusudancs/pytask
|
Create a separate package to hold all the PyTask sitewide helpers.
|
"""Package containing the helper functions that may be used through out
the site.
"""
__authors__ = [
'"Madhusudan.C.S" <madhusudancs@fossee.in>',
]
|
<commit_before><commit_msg>Create a separate package to hold all the PyTask sitewide helpers.<commit_after>
|
"""Package containing the helper functions that may be used through out
the site.
"""
__authors__ = [
'"Madhusudan.C.S" <madhusudancs@fossee.in>',
]
|
Create a separate package to hold all the PyTask sitewide helpers."""Package containing the helper functions that may be used throughout
the site.
"""
__authors__ = [
'"Madhusudan.C.S" <madhusudancs@fossee.in>',
]
|
<commit_before><commit_msg>Create a separate package to hold all the PyTask sitewide helpers.<commit_after>"""Package containing the helper functions that may be used throughout
the site.
"""
__authors__ = [
'"Madhusudan.C.S" <madhusudancs@fossee.in>',
]
|
|
6f7d8055613c8b0542dca5185cdc46e37a96942a
|
tests/test_status_page.py
|
tests/test_status_page.py
|
import subprocess
import unittest
from aws_status.status_page import StatusPage
class TestStatusPage(unittest.TestCase):
def test_number_of_detected_feeds(self):
command = "curl -s http://status.aws.amazon.com/|grep rss|sort -u|wc -l"
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
output, errors = p.communicate()
expected_feeds = int(output)
self.assertEqual(expected_feeds, len(StatusPage().rss_urls))
|
Add integration test for number of detected RSS feeds
|
Add integration test for number of detected RSS feeds
|
Python
|
mit
|
jbbarth/aws-status,jbbarth/aws-status
|
Add integration test for number of detected RSS feeds
|
import subprocess
import unittest
from aws_status.status_page import StatusPage
class TestStatusPage(unittest.TestCase):
def test_number_of_detected_feeds(self):
command = "curl -s http://status.aws.amazon.com/|grep rss|sort -u|wc -l"
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
output, errors = p.communicate()
expected_feeds = int(output)
self.assertEqual(expected_feeds, len(StatusPage().rss_urls))
|
<commit_before><commit_msg>Add integration test for number of detected RSS feeds<commit_after>
|
import subprocess
import unittest
from aws_status.status_page import StatusPage
class TestStatusPage(unittest.TestCase):
def test_number_of_detected_feeds(self):
command = "curl -s http://status.aws.amazon.com/|grep rss|sort -u|wc -l"
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
output, errors = p.communicate()
expected_feeds = int(output)
self.assertEqual(expected_feeds, len(StatusPage().rss_urls))
|
Add integration test for number of detected RSS feedsimport subprocess
import unittest
from aws_status.status_page import StatusPage
class TestStatusPage(unittest.TestCase):
def test_number_of_detected_feeds(self):
command = "curl -s http://status.aws.amazon.com/|grep rss|sort -u|wc -l"
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
output, errors = p.communicate()
expected_feeds = int(output)
self.assertEqual(expected_feeds, len(StatusPage().rss_urls))
|
<commit_before><commit_msg>Add integration test for number of detected RSS feeds<commit_after>import subprocess
import unittest
from aws_status.status_page import StatusPage
class TestStatusPage(unittest.TestCase):
def test_number_of_detected_feeds(self):
command = "curl -s http://status.aws.amazon.com/|grep rss|sort -u|wc -l"
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
output, errors = p.communicate()
expected_feeds = int(output)
self.assertEqual(expected_feeds, len(StatusPage().rss_urls))
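If the assertion fails, it helps to see which feeds the library actually detected before comparing counts. A minimal debugging sketch, assuming only the StatusPage().rss_urls attribute already exercised by the test:

from aws_status.status_page import StatusPage

# Print every RSS feed URL the library knows about, so the list can be
# compared by eye against the links on the live status page
for url in StatusPage().rss_urls:
    print(url)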
|
|
81c2b0abcb5cec457e30c0b0d071212304f3da1d
|
scikits/learn/externals/setup.py
|
scikits/learn/externals/setup.py
|
# -*- coding: utf-8 -*-
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
    config = Configuration('externals', parent_package, top_path)
config.add_subpackage('joblib')
config.add_subpackage('joblib/test')
return config
|
Make sure that joblib does get installed.
|
BUG: Make sure that joblib does get installed.
|
Python
|
bsd-3-clause
|
mayblue9/scikit-learn,terkkila/scikit-learn,fabianp/scikit-learn,ningchi/scikit-learn,amueller/scikit-learn,glemaitre/scikit-learn,aflaxman/scikit-learn,larsmans/scikit-learn,ishanic/scikit-learn,Fireblend/scikit-learn,moutai/scikit-learn,nikitasingh981/scikit-learn,cybernet14/scikit-learn,xwolf12/scikit-learn,tdhopper/scikit-learn,voxlol/scikit-learn,MechCoder/scikit-learn,anntzer/scikit-learn,jmschrei/scikit-learn,florian-f/sklearn,abimannans/scikit-learn,aminert/scikit-learn,jmschrei/scikit-learn,mugizico/scikit-learn,abhishekgahlot/scikit-learn,henridwyer/scikit-learn,LiaoPan/scikit-learn,bigdataelephants/scikit-learn,zorojean/scikit-learn,larsmans/scikit-learn,MartinDelzant/scikit-learn,loli/semisupervisedforests,ephes/scikit-learn,MatthieuBizien/scikit-learn,AlexandreAbraham/scikit-learn,vshtanko/scikit-learn,wzbozon/scikit-learn,mjudsp/Tsallis,kagayakidan/scikit-learn,AlexandreAbraham/scikit-learn,macks22/scikit-learn,ogrisel/scikit-learn,alexeyum/scikit-learn,mattilyra/scikit-learn,DSLituiev/scikit-learn,lbishal/scikit-learn,lesteve/scikit-learn,nmayorov/scikit-learn,loli/semisupervisedforests,PatrickChrist/scikit-learn,gotomypc/scikit-learn,LohithBlaze/scikit-learn,lin-credible/scikit-learn,sinhrks/scikit-learn,Aasmi/scikit-learn,heli522/scikit-learn,bhargav/scikit-learn,huzq/scikit-learn,kagayakidan/scikit-learn,ngoix/OCRF,quheng/scikit-learn,anirudhjayaraman/scikit-learn,ephes/scikit-learn,NelisVerhoef/scikit-learn,rahul-c1/scikit-learn,fzalkow/scikit-learn,sumspr/scikit-learn,bnaul/scikit-learn,ominux/scikit-learn,pnedunuri/scikit-learn,liberatorqjw/scikit-learn,florian-f/sklearn,rexshihaoren/scikit-learn,roxyboy/scikit-learn,shenzebang/scikit-learn,giorgiop/scikit-learn,Titan-C/scikit-learn,zorroblue/scikit-learn,hitszxp/scikit-learn,espg/scikit-learn,theoryno3/scikit-learn,zhenv5/scikit-learn,zuku1985/scikit-learn,liyu1990/sklearn,466152112/scikit-learn,joernhees/scikit-learn,simon-pepin/scikit-learn,deepesch/scikit-learn,NunoEdgarGub1/scikit-learn,liyu1990/sklearn,mattilyra/scikit-learn,MatthieuBizien/scikit-learn,vortex-ape/scikit-learn,aabadie/scikit-learn,vybstat/scikit-learn,billy-inn/scikit-learn,liberatorqjw/scikit-learn,wazeerzulfikar/scikit-learn,fabioticconi/scikit-learn,nesterione/scikit-learn,ElDeveloper/scikit-learn,0x0all/scikit-learn,themrmax/scikit-learn,ilyes14/scikit-learn,madjelan/scikit-learn,Djabbz/scikit-learn,qifeigit/scikit-learn,beepee14/scikit-learn,samzhang111/scikit-learn,nomadcube/scikit-learn,evgchz/scikit-learn,tmhm/scikit-learn,sergeyf/scikit-learn,khkaminska/scikit-learn,Obus/scikit-learn,rrohan/scikit-learn,henrykironde/scikit-learn,alexeyum/scikit-learn,jpautom/scikit-learn,Vimos/scikit-learn,liberatorqjw/scikit-learn,aetilley/scikit-learn,ycaihua/scikit-learn,ishanic/scikit-learn,mojoboss/scikit-learn,thilbern/scikit-learn,hdmetor/scikit-learn,mikebenfield/scikit-learn,jblackburne/scikit-learn,cybernet14/scikit-learn,mjudsp/Tsallis,JosmanPS/scikit-learn,LiaoPan/scikit-learn,jorik041/scikit-learn,Lawrence-Liu/scikit-learn,robbymeals/scikit-learn,betatim/scikit-learn,xiaoxiamii/scikit-learn,gclenaghan/scikit-learn,dingocuster/scikit-learn,mfjb/scikit-learn,fabioticconi/scikit-learn,beepee14/scikit-learn,jpautom/scikit-learn,belltailjp/scikit-learn,zaxtax/scikit-learn,xyguo/scikit-learn,theoryno3/scikit-learn,3manuek/scikit-learn,lucidfrontier45/scikit-learn,shusenl/scikit-learn,pythonvietnam/scikit-learn,equialgo/scikit-learn,robbymeals/scikit-learn,kmike/scikit-learn,pompiduskus/scikit-learn,harshaneelhg/scikit-learn,eg-zhang/scikit-learn,Ach
uth17/scikit-learn,0asa/scikit-learn,hitszxp/scikit-learn,spallavolu/scikit-learn,michigraber/scikit-learn,0x0all/scikit-learn,sonnyhu/scikit-learn,vermouthmjl/scikit-learn,kashif/scikit-learn,cl4rke/scikit-learn,jayflo/scikit-learn,equialgo/scikit-learn,sergeyf/scikit-learn,pythonvietnam/scikit-learn,bigdataelephants/scikit-learn,pianomania/scikit-learn,hainm/scikit-learn,0x0all/scikit-learn,CVML/scikit-learn,kjung/scikit-learn,beepee14/scikit-learn,robin-lai/scikit-learn,henrykironde/scikit-learn,loli/sklearn-ensembletrees,tmhm/scikit-learn,herilalaina/scikit-learn,qifeigit/scikit-learn,jorge2703/scikit-learn,adamgreenhall/scikit-learn,krez13/scikit-learn,aetilley/scikit-learn,NunoEdgarGub1/scikit-learn,AIML/scikit-learn,pnedunuri/scikit-learn,cwu2011/scikit-learn,vinayak-mehta/scikit-learn,hlin117/scikit-learn,wazeerzulfikar/scikit-learn,yyjiang/scikit-learn,rohanp/scikit-learn,aewhatley/scikit-learn,mayblue9/scikit-learn,jjx02230808/project0223,AnasGhrab/scikit-learn,loli/sklearn-ensembletrees,walterreade/scikit-learn,AlexandreAbraham/scikit-learn,mblondel/scikit-learn,wanggang3333/scikit-learn,petosegan/scikit-learn,Windy-Ground/scikit-learn,aminert/scikit-learn,AIML/scikit-learn,ilo10/scikit-learn,shikhardb/scikit-learn,MartinDelzant/scikit-learn,harshaneelhg/scikit-learn,Obus/scikit-learn,q1ang/scikit-learn,f3r/scikit-learn,kjung/scikit-learn,rsivapr/scikit-learn,bthirion/scikit-learn,smartscheduling/scikit-learn-categorical-tree,florian-f/sklearn,thilbern/scikit-learn,tdhopper/scikit-learn,imaculate/scikit-learn,CforED/Machine-Learning,lenovor/scikit-learn,hrjn/scikit-learn,devanshdalal/scikit-learn,clemkoa/scikit-learn,scikit-learn/scikit-learn,rrohan/scikit-learn,ivannz/scikit-learn,shikhardb/scikit-learn,shenzebang/scikit-learn,depet/scikit-learn,sinhrks/scikit-learn,NelisVerhoef/scikit-learn,mehdidc/scikit-learn,xwolf12/scikit-learn,tomlof/scikit-learn,Jimmy-Morzaria/scikit-learn,mugizico/scikit-learn,ssaeger/scikit-learn,hsiaoyi0504/scikit-learn,NunoEdgarGub1/scikit-learn,ChanChiChoi/scikit-learn,fredhusser/scikit-learn,q1ang/scikit-learn,AlexanderFabisch/scikit-learn,elkingtonmcb/scikit-learn,jorge2703/scikit-learn,adamgreenhall/scikit-learn,jseabold/scikit-learn,shusenl/scikit-learn,bigdataelephants/scikit-learn,h2educ/scikit-learn,marcocaccin/scikit-learn,fbagirov/scikit-learn,bthirion/scikit-learn,alexsavio/scikit-learn,nmayorov/scikit-learn,alexsavio/scikit-learn,jayflo/scikit-learn,Aasmi/scikit-learn,phdowling/scikit-learn,akionakamura/scikit-learn,clemkoa/scikit-learn,thilbern/scikit-learn,rahul-c1/scikit-learn,jblackburne/scikit-learn,ivannz/scikit-learn,cauchycui/scikit-learn,mjgrav2001/scikit-learn,kmike/scikit-learn,deepesch/scikit-learn,walterreade/scikit-learn,treycausey/scikit-learn,altairpearl/scikit-learn,Aasmi/scikit-learn,lin-credible/scikit-learn,ningchi/scikit-learn,shyamalschandra/scikit-learn,abimannans/scikit-learn,RPGOne/scikit-learn,alexsavio/scikit-learn,deepesch/scikit-learn,AlexRobson/scikit-learn,jm-begon/scikit-learn,meduz/scikit-learn,nmayorov/scikit-learn,massmutual/scikit-learn,AnasGhrab/scikit-learn,rohanp/scikit-learn,bhargav/scikit-learn,AlexanderFabisch/scikit-learn,mhue/scikit-learn,ltiao/scikit-learn,jm-begon/scikit-learn,pypot/scikit-learn,liyu1990/sklearn,RomainBrault/scikit-learn,carrillo/scikit-learn,nomadcube/scikit-learn,shahankhatch/scikit-learn,frank-tancf/scikit-learn,huobaowangxi/scikit-learn,elkingtonmcb/scikit-learn,meduz/scikit-learn,anurag313/scikit-learn,xyguo/scikit-learn,abhishekkrthakur/scikit-learn,zhenv5/scikit-learn,nd
ingwall/scikit-learn,manhhomienbienthuy/scikit-learn,liangz0707/scikit-learn,glouppe/scikit-learn,thientu/scikit-learn,Garrett-R/scikit-learn,ashhher3/scikit-learn,aewhatley/scikit-learn,jlegendary/scikit-learn,rrohan/scikit-learn,pypot/scikit-learn,carrillo/scikit-learn,nelson-liu/scikit-learn,0asa/scikit-learn,hrjn/scikit-learn,mwv/scikit-learn,moutai/scikit-learn,ashhher3/scikit-learn,tawsifkhan/scikit-learn,procoder317/scikit-learn,h2educ/scikit-learn,alvarofierroclavero/scikit-learn,spallavolu/scikit-learn,lesteve/scikit-learn,Vimos/scikit-learn,mattgiguere/scikit-learn,jayflo/scikit-learn,fzalkow/scikit-learn,devanshdalal/scikit-learn,Jimmy-Morzaria/scikit-learn,kylerbrown/scikit-learn,vshtanko/scikit-learn,jorik041/scikit-learn,ClimbsRocks/scikit-learn,cdegroc/scikit-learn,mattgiguere/scikit-learn,thientu/scikit-learn,gotomypc/scikit-learn,kashif/scikit-learn,eg-zhang/scikit-learn,arabenjamin/scikit-learn,robbymeals/scikit-learn,aetilley/scikit-learn,JsNoNo/scikit-learn,lucidfrontier45/scikit-learn,yunfeilu/scikit-learn,mjudsp/Tsallis,sgenoud/scikit-learn,rahul-c1/scikit-learn,xwolf12/scikit-learn,treycausey/scikit-learn,quheng/scikit-learn,JsNoNo/scikit-learn,alvarofierroclavero/scikit-learn,cauchycui/scikit-learn,thientu/scikit-learn,aewhatley/scikit-learn,ndingwall/scikit-learn,pkruskal/scikit-learn,jmetzen/scikit-learn,Clyde-fare/scikit-learn,tawsifkhan/scikit-learn,mxjl620/scikit-learn,manashmndl/scikit-learn,yyjiang/scikit-learn,YinongLong/scikit-learn,treycausey/scikit-learn,r-mart/scikit-learn,mjudsp/Tsallis,fredhusser/scikit-learn,mojoboss/scikit-learn,equialgo/scikit-learn,vivekmishra1991/scikit-learn,arahuja/scikit-learn,0x0all/scikit-learn,bikong2/scikit-learn,depet/scikit-learn,jereze/scikit-learn,hsuantien/scikit-learn,zihua/scikit-learn,equialgo/scikit-learn,MechCoder/scikit-learn,zuku1985/scikit-learn,robin-lai/scikit-learn,xavierwu/scikit-learn,TomDLT/scikit-learn,russel1237/scikit-learn,massmutual/scikit-learn,nvoron23/scikit-learn,hugobowne/scikit-learn,mfjb/scikit-learn,RPGOne/scikit-learn,kaichogami/scikit-learn,TomDLT/scikit-learn,ngoix/OCRF,mrshu/scikit-learn,Barmaley-exe/scikit-learn,kashif/scikit-learn,xzh86/scikit-learn,murali-munna/scikit-learn,jjx02230808/project0223,sumspr/scikit-learn,andaag/scikit-learn,vinayak-mehta/scikit-learn,IndraVikas/scikit-learn,zorroblue/scikit-learn,alexsavio/scikit-learn,yonglehou/scikit-learn,ldirer/scikit-learn,phdowling/scikit-learn,MartinSavc/scikit-learn,jmschrei/scikit-learn,hugobowne/scikit-learn,potash/scikit-learn,adamgreenhall/scikit-learn,OshynSong/scikit-learn,shenzebang/scikit-learn,mxjl620/scikit-learn,fyffyt/scikit-learn,hsuantien/scikit-learn,hdmetor/scikit-learn,samzhang111/scikit-learn,ankurankan/scikit-learn,AlexanderFabisch/scikit-learn,PatrickChrist/scikit-learn,Barmaley-exe/scikit-learn,eg-zhang/scikit-learn,shangwuhencc/scikit-learn,bikong2/scikit-learn,arahuja/scikit-learn,idlead/scikit-learn,ZENGXH/scikit-learn,MartinDelzant/scikit-learn,jjx02230808/project0223,maheshakya/scikit-learn,maheshakya/scikit-learn,rsivapr/scikit-learn,vibhorag/scikit-learn,hlin117/scikit-learn,shahankhatch/scikit-learn,nhejazi/scikit-learn,imaculate/scikit-learn,krez13/scikit-learn,Djabbz/scikit-learn,Myasuka/scikit-learn,maheshakya/scikit-learn,ElDeveloper/scikit-learn,rishikksh20/scikit-learn,zorroblue/scikit-learn,akionakamura/scikit-learn,dsullivan7/scikit-learn,kmike/scikit-learn,YinongLong/scikit-learn,mlyundin/scikit-learn,ilyes14/scikit-learn,bhargav/scikit-learn,shangwuhencc/scikit-learn,glemaitre/scikit-learn,fran
k-tancf/scikit-learn,themrmax/scikit-learn,costypetrisor/scikit-learn,xubenben/scikit-learn,massmutual/scikit-learn,Adai0808/scikit-learn,q1ang/scikit-learn,0asa/scikit-learn,saiwing-yeung/scikit-learn,adamgreenhall/scikit-learn,RPGOne/scikit-learn,MechCoder/scikit-learn,jereze/scikit-learn,moutai/scikit-learn,roxyboy/scikit-learn,DSLituiev/scikit-learn,kagayakidan/scikit-learn,aflaxman/scikit-learn,walterreade/scikit-learn,costypetrisor/scikit-learn,Barmaley-exe/scikit-learn,jaidevd/scikit-learn,liangz0707/scikit-learn,xyguo/scikit-learn,imaculate/scikit-learn,AlexRobson/scikit-learn,larsmans/scikit-learn,simon-pepin/scikit-learn,arjoly/scikit-learn,vigilv/scikit-learn,russel1237/scikit-learn,procoder317/scikit-learn,JPFrancoia/scikit-learn,mhue/scikit-learn,vivekmishra1991/scikit-learn,kevin-intel/scikit-learn,jzt5132/scikit-learn,manashmndl/scikit-learn,jorik041/scikit-learn,h2educ/scikit-learn,poryfly/scikit-learn,wzbozon/scikit-learn,sarahgrogan/scikit-learn,bthirion/scikit-learn,amueller/scikit-learn,sumspr/scikit-learn,robin-lai/scikit-learn,fabianp/scikit-learn,sergeyf/scikit-learn,eickenberg/scikit-learn,ningchi/scikit-learn,HolgerPeters/scikit-learn,RayMick/scikit-learn,nesterione/scikit-learn,siutanwong/scikit-learn,Jimmy-Morzaria/scikit-learn,vshtanko/scikit-learn,aflaxman/scikit-learn,voxlol/scikit-learn,ashhher3/scikit-learn,sgenoud/scikit-learn,amueller/scikit-learn,0asa/scikit-learn,vibhorag/scikit-learn,petosegan/scikit-learn,BiaDarkia/scikit-learn,Vimos/scikit-learn,anirudhjayaraman/scikit-learn,wanggang3333/scikit-learn,rishikksh20/scikit-learn,zihua/scikit-learn,jseabold/scikit-learn,ankurankan/scikit-learn,dhruv13J/scikit-learn,sarahgrogan/scikit-learn,shusenl/scikit-learn,BiaDarkia/scikit-learn,poryfly/scikit-learn,mrshu/scikit-learn,AIML/scikit-learn,IndraVikas/scikit-learn,eickenberg/scikit-learn,ogrisel/scikit-learn,xzh86/scikit-learn,ChanderG/scikit-learn,ominux/scikit-learn,pypot/scikit-learn,evgchz/scikit-learn,saiwing-yeung/scikit-learn,jakirkham/scikit-learn,manashmndl/scikit-learn,vermouthmjl/scikit-learn,mrshu/scikit-learn,IshankGulati/scikit-learn,ominux/scikit-learn,ssaeger/scikit-learn,Garrett-R/scikit-learn,DSLituiev/scikit-learn,murali-munna/scikit-learn,IssamLaradji/scikit-learn,lazywei/scikit-learn,hainm/scikit-learn,huzq/scikit-learn,BiaDarkia/scikit-learn,Obus/scikit-learn,siutanwong/scikit-learn,vivekmishra1991/scikit-learn,russel1237/scikit-learn,tosolveit/scikit-learn,mwv/scikit-learn,loli/sklearn-ensembletrees,luo66/scikit-learn,pianomania/scikit-learn,trungnt13/scikit-learn,cwu2011/scikit-learn,massmutual/scikit-learn,eg-zhang/scikit-learn,vortex-ape/scikit-learn,henrykironde/scikit-learn,robin-lai/scikit-learn,cl4rke/scikit-learn,depet/scikit-learn,MartinSavc/scikit-learn,thilbern/scikit-learn,anirudhjayaraman/scikit-learn,Myasuka/scikit-learn,ltiao/scikit-learn,fabioticconi/scikit-learn,ycaihua/scikit-learn,depet/scikit-learn,TomDLT/scikit-learn,RachitKansal/scikit-learn,mhdella/scikit-learn,BiaDarkia/scikit-learn,manhhomienbienthuy/scikit-learn,ishanic/scikit-learn,abhishekgahlot/scikit-learn,rvraghav93/scikit-learn,pratapvardhan/scikit-learn,abimannans/scikit-learn,vybstat/scikit-learn,huobaowangxi/scikit-learn,arahuja/scikit-learn,trankmichael/scikit-learn,mblondel/scikit-learn,mblondel/scikit-learn,sanketloke/scikit-learn,AlexanderFabisch/scikit-learn,PatrickChrist/scikit-learn,spallavolu/scikit-learn,jaidevd/scikit-learn,ahoyosid/scikit-learn,belltailjp/scikit-learn,Adai0808/scikit-learn,RomainBrault/scikit-learn,iismd17/scikit-learn,siut
anwong/scikit-learn,herilalaina/scikit-learn,altairpearl/scikit-learn,Barmaley-exe/scikit-learn,alexeyum/scikit-learn,ZenDevelopmentSystems/scikit-learn,tosolveit/scikit-learn,yyjiang/scikit-learn,yask123/scikit-learn,khkaminska/scikit-learn,etkirsch/scikit-learn,lin-credible/scikit-learn,Fireblend/scikit-learn,mblondel/scikit-learn,ky822/scikit-learn,theoryno3/scikit-learn,RPGOne/scikit-learn,vigilv/scikit-learn,tawsifkhan/scikit-learn,chrisburr/scikit-learn,zaxtax/scikit-learn,appapantula/scikit-learn,xuewei4d/scikit-learn,vinayak-mehta/scikit-learn,LohithBlaze/scikit-learn,lesteve/scikit-learn,ahoyosid/scikit-learn,mlyundin/scikit-learn,rsivapr/scikit-learn,akionakamura/scikit-learn,pratapvardhan/scikit-learn,frank-tancf/scikit-learn,olologin/scikit-learn,shusenl/scikit-learn,sanketloke/scikit-learn,xzh86/scikit-learn,billy-inn/scikit-learn,jzt5132/scikit-learn,kaichogami/scikit-learn,Sentient07/scikit-learn,jlegendary/scikit-learn,Adai0808/scikit-learn,rsivapr/scikit-learn,nvoron23/scikit-learn,glouppe/scikit-learn,hsiaoyi0504/scikit-learn,mwv/scikit-learn,TomDLT/scikit-learn,mrshu/scikit-learn,yask123/scikit-learn,yonglehou/scikit-learn,marcocaccin/scikit-learn,lenovor/scikit-learn,jkarnows/scikit-learn,carrillo/scikit-learn,cybernet14/scikit-learn,arabenjamin/scikit-learn,ndingwall/scikit-learn,jpautom/scikit-learn,hitszxp/scikit-learn,rajat1994/scikit-learn,wlamond/scikit-learn,billy-inn/scikit-learn,belltailjp/scikit-learn,CVML/scikit-learn,shyamalschandra/scikit-learn,gclenaghan/scikit-learn,fabianp/scikit-learn,etkirsch/scikit-learn,joshloyal/scikit-learn,ilo10/scikit-learn,nrhine1/scikit-learn,Clyde-fare/scikit-learn,smartscheduling/scikit-learn-categorical-tree,mlyundin/scikit-learn,amueller/scikit-learn,IssamLaradji/scikit-learn,q1ang/scikit-learn,vortex-ape/scikit-learn,appapantula/scikit-learn,olologin/scikit-learn,glemaitre/scikit-learn,bthirion/scikit-learn,xzh86/scikit-learn,glennq/scikit-learn,fabioticconi/scikit-learn,xuewei4d/scikit-learn,fengzhyuan/scikit-learn,giorgiop/scikit-learn,PatrickOReilly/scikit-learn,poryfly/scikit-learn,clemkoa/scikit-learn,ElDeveloper/scikit-learn,Srisai85/scikit-learn,PatrickOReilly/scikit-learn,JeanKossaifi/scikit-learn,yonglehou/scikit-learn,jorge2703/scikit-learn,abimannans/scikit-learn,nikitasingh981/scikit-learn,mehdidc/scikit-learn,samuel1208/scikit-learn,imaculate/scikit-learn,raghavrv/scikit-learn,mhdella/scikit-learn,CforED/Machine-Learning,icdishb/scikit-learn,fredhusser/scikit-learn,Djabbz/scikit-learn,nomadcube/scikit-learn,JosmanPS/scikit-learn,vibhorag/scikit-learn,IshankGulati/scikit-learn,Vimos/scikit-learn,CVML/scikit-learn,jakirkham/scikit-learn,jorik041/scikit-learn,lbishal/scikit-learn,anntzer/scikit-learn,mxjl620/scikit-learn,B3AU/waveTree,altairpearl/scikit-learn,manhhomienbienthuy/scikit-learn,hitszxp/scikit-learn,rexshihaoren/scikit-learn,arjoly/scikit-learn,JPFrancoia/scikit-learn,lin-credible/scikit-learn,JosmanPS/scikit-learn,victorbergelin/scikit-learn,abhishekgahlot/scikit-learn,xavierwu/scikit-learn,RayMick/scikit-learn,hsuantien/scikit-learn,cdegroc/scikit-learn,PatrickOReilly/scikit-learn,UNR-AERIAL/scikit-learn,ycaihua/scikit-learn,Windy-Ground/scikit-learn,Achuth17/scikit-learn,CforED/Machine-Learning,RachitKansal/scikit-learn,larsmans/scikit-learn,lucidfrontier45/scikit-learn,nikitasingh981/scikit-learn,ChanderG/scikit-learn,ogrisel/scikit-learn,treycausey/scikit-learn,huobaowangxi/scikit-learn,smartscheduling/scikit-learn-categorical-tree,trungnt13/scikit-learn,eickenberg/scikit-learn,cdegroc/scikit-lear
n,hsiaoyi0504/scikit-learn,dsquareindia/scikit-learn,MartinDelzant/scikit-learn,PatrickOReilly/scikit-learn,hlin117/scikit-learn,Lawrence-Liu/scikit-learn,carrillo/scikit-learn,krez13/scikit-learn,f3r/scikit-learn,maheshakya/scikit-learn,gotomypc/scikit-learn,ClimbsRocks/scikit-learn,Clyde-fare/scikit-learn,MohammedWasim/scikit-learn,ChanChiChoi/scikit-learn,hugobowne/scikit-learn,LohithBlaze/scikit-learn,abhishekgahlot/scikit-learn,jaidevd/scikit-learn,olologin/scikit-learn,rishikksh20/scikit-learn,mojoboss/scikit-learn,luo66/scikit-learn,glemaitre/scikit-learn,fbagirov/scikit-learn,PrashntS/scikit-learn,hrjn/scikit-learn,samzhang111/scikit-learn,fzalkow/scikit-learn,kaichogami/scikit-learn,wlamond/scikit-learn,herilalaina/scikit-learn,Titan-C/scikit-learn,wazeerzulfikar/scikit-learn,chrisburr/scikit-learn,idlead/scikit-learn,eickenberg/scikit-learn,vigilv/scikit-learn,evgchz/scikit-learn,devanshdalal/scikit-learn,vivekmishra1991/scikit-learn,RomainBrault/scikit-learn,pythonvietnam/scikit-learn,sumspr/scikit-learn,nesterione/scikit-learn,krez13/scikit-learn,schets/scikit-learn,HolgerPeters/scikit-learn,arjoly/scikit-learn,rrohan/scikit-learn,roxyboy/scikit-learn,waterponey/scikit-learn,jblackburne/scikit-learn,aewhatley/scikit-learn,untom/scikit-learn,samzhang111/scikit-learn,ZENGXH/scikit-learn,IshankGulati/scikit-learn,anurag313/scikit-learn,russel1237/scikit-learn,jakobworldpeace/scikit-learn,maheshakya/scikit-learn,ClimbsRocks/scikit-learn,appapantula/scikit-learn,vigilv/scikit-learn,DonBeo/scikit-learn,rahuldhote/scikit-learn,mattilyra/scikit-learn,evgchz/scikit-learn,vibhorag/scikit-learn,Myasuka/scikit-learn,macks22/scikit-learn,anirudhjayaraman/scikit-learn,3manuek/scikit-learn,ndingwall/scikit-learn,pompiduskus/scikit-learn,Clyde-fare/scikit-learn,djgagne/scikit-learn,bnaul/scikit-learn,mikebenfield/scikit-learn,deepesch/scikit-learn,Nyker510/scikit-learn,mjgrav2001/scikit-learn,btabibian/scikit-learn,vinayak-mehta/scikit-learn,jm-begon/scikit-learn,kjung/scikit-learn,beepee14/scikit-learn,depet/scikit-learn,themrmax/scikit-learn,Windy-Ground/scikit-learn,qifeigit/scikit-learn,sanketloke/scikit-learn,kashif/scikit-learn,MohammedWasim/scikit-learn,icdishb/scikit-learn,NelisVerhoef/scikit-learn,saiwing-yeung/scikit-learn,saiwing-yeung/scikit-learn,ZENGXH/scikit-learn,dsullivan7/scikit-learn,joernhees/scikit-learn,siutanwong/scikit-learn,vortex-ape/scikit-learn,bnaul/scikit-learn,davidgbe/scikit-learn,scikit-learn/scikit-learn,fredhusser/scikit-learn,rajat1994/scikit-learn,MatthieuBizien/scikit-learn,zihua/scikit-learn,qifeigit/scikit-learn,hsuantien/scikit-learn,ankurankan/scikit-learn,espg/scikit-learn,cainiaocome/scikit-learn,lbishal/scikit-learn,IshankGulati/scikit-learn,shahankhatch/scikit-learn,JsNoNo/scikit-learn,samuel1208/scikit-learn,hainm/scikit-learn,YinongLong/scikit-learn,bigdataelephants/scikit-learn,pkruskal/scikit-learn,nvoron23/scikit-learn,liyu1990/sklearn,ldirer/scikit-learn,IndraVikas/scikit-learn,mugizico/scikit-learn,lbishal/scikit-learn,mayblue9/scikit-learn,heli522/scikit-learn,ahoyosid/scikit-learn,wlamond/scikit-learn,ivannz/scikit-learn,toastedcornflakes/scikit-learn,lucidfrontier45/scikit-learn,sergeyf/scikit-learn,chrisburr/scikit-learn,ngoix/OCRF,plissonf/scikit-learn,sinhrks/scikit-learn,hlin117/scikit-learn,jmetzen/scikit-learn,marcocaccin/scikit-learn,lesteve/scikit-learn,shikhardb/scikit-learn,davidgbe/scikit-learn,dingocuster/scikit-learn,0asa/scikit-learn,cauchycui/scikit-learn,Srisai85/scikit-learn,meduz/scikit-learn,ldirer/scikit-learn,466152112
/scikit-learn,elkingtonmcb/scikit-learn,nhejazi/scikit-learn,DonBeo/scikit-learn,zorojean/scikit-learn,andaag/scikit-learn,herilalaina/scikit-learn,wazeerzulfikar/scikit-learn,costypetrisor/scikit-learn,ChanderG/scikit-learn,raghavrv/scikit-learn,pratapvardhan/scikit-learn,untom/scikit-learn,michigraber/scikit-learn,OshynSong/scikit-learn,MohammedWasim/scikit-learn,procoder317/scikit-learn,mattgiguere/scikit-learn,michigraber/scikit-learn,vshtanko/scikit-learn,thientu/scikit-learn,tawsifkhan/scikit-learn,ssaeger/scikit-learn,rvraghav93/scikit-learn,huobaowangxi/scikit-learn,andrewnc/scikit-learn,kylerbrown/scikit-learn,tomlof/scikit-learn,djgagne/scikit-learn,arahuja/scikit-learn,florian-f/sklearn,arjoly/scikit-learn,sgenoud/scikit-learn,ankurankan/scikit-learn,pianomania/scikit-learn,clemkoa/scikit-learn,ltiao/scikit-learn,macks22/scikit-learn,anntzer/scikit-learn,kevin-intel/scikit-learn,mhdella/scikit-learn,fabianp/scikit-learn,appapantula/scikit-learn,kagayakidan/scikit-learn,sarahgrogan/scikit-learn,RayMick/scikit-learn,dsullivan7/scikit-learn,betatim/scikit-learn,RachitKansal/scikit-learn,Lawrence-Liu/scikit-learn,pkruskal/scikit-learn,aminert/scikit-learn,giorgiop/scikit-learn,YinongLong/scikit-learn,dsullivan7/scikit-learn,djgagne/scikit-learn,xavierwu/scikit-learn,joshloyal/scikit-learn,schets/scikit-learn,quheng/scikit-learn,mehdidc/scikit-learn,anurag313/scikit-learn,phdowling/scikit-learn,terkkila/scikit-learn,trankmichael/scikit-learn,B3AU/waveTree,jkarnows/scikit-learn,mattilyra/scikit-learn,Titan-C/scikit-learn,MartinSavc/scikit-learn,PatrickChrist/scikit-learn,abhishekkrthakur/scikit-learn,etkirsch/scikit-learn,gclenaghan/scikit-learn,mjgrav2001/scikit-learn,ElDeveloper/scikit-learn,khkaminska/scikit-learn,JPFrancoia/scikit-learn,466152112/scikit-learn,mugizico/scikit-learn,justincassidy/scikit-learn,stylianos-kampakis/scikit-learn,eickenberg/scikit-learn,voxlol/scikit-learn,trungnt13/scikit-learn,h2educ/scikit-learn,abhishekkrthakur/scikit-learn,gotomypc/scikit-learn,ngoix/OCRF,giorgiop/scikit-learn,jkarnows/scikit-learn,scikit-learn/scikit-learn,jereze/scikit-learn,tdhopper/scikit-learn,jseabold/scikit-learn,cybernet14/scikit-learn,CVML/scikit-learn,mfjb/scikit-learn,sgenoud/scikit-learn,dingocuster/scikit-learn,ky822/scikit-learn,lucidfrontier45/scikit-learn,rahuldhote/scikit-learn,Sentient07/scikit-learn,Akshay0724/scikit-learn,chrsrds/scikit-learn,fengzhyuan/scikit-learn,xubenben/scikit-learn,RachitKansal/scikit-learn,jayflo/scikit-learn,JeanKossaifi/scikit-learn,arabenjamin/scikit-learn,xwolf12/scikit-learn,cainiaocome/scikit-learn,IssamLaradji/scikit-learn,fengzhyuan/scikit-learn,aetilley/scikit-learn,bikong2/scikit-learn,hugobowne/scikit-learn,nvoron23/scikit-learn,justincassidy/scikit-learn,AnasGhrab/scikit-learn,ky822/scikit-learn,waterponey/scikit-learn,arabenjamin/scikit-learn,procoder317/scikit-learn,voxlol/scikit-learn,dsquareindia/scikit-learn,rajat1994/scikit-learn,gclenaghan/scikit-learn,murali-munna/scikit-learn,lazywei/scikit-learn,Aasmi/scikit-learn,mojoboss/scikit-learn,toastedcornflakes/scikit-learn,mhue/scikit-learn,abhishekkrthakur/scikit-learn,henridwyer/scikit-learn,pompiduskus/scikit-learn,mhue/scikit-learn,UNR-AERIAL/scikit-learn,mjgrav2001/scikit-learn,theoryno3/scikit-learn,kjung/scikit-learn,henrykironde/scikit-learn,tmhm/scikit-learn,Nyker510/scikit-learn,shenzebang/scikit-learn,nelson-liu/scikit-learn,ZenDevelopmentSystems/scikit-learn,luo66/scikit-learn,moutai/scikit-learn,victorbergelin/scikit-learn,pv/scikit-learn,aflaxman/scikit-learn,gle
nnq/scikit-learn,plissonf/scikit-learn,AlexRobson/scikit-learn,mikebenfield/scikit-learn,nrhine1/scikit-learn,betatim/scikit-learn,Akshay0724/scikit-learn,nelson-liu/scikit-learn,harshaneelhg/scikit-learn,jakobworldpeace/scikit-learn,dingocuster/scikit-learn,ChanChiChoi/scikit-learn,Akshay0724/scikit-learn,jlegendary/scikit-learn,MartinSavc/scikit-learn,kmike/scikit-learn,yanlend/scikit-learn,ZENGXH/scikit-learn,ngoix/OCRF,nelson-liu/scikit-learn,mwv/scikit-learn,potash/scikit-learn,luo66/scikit-learn,fbagirov/scikit-learn,justincassidy/scikit-learn,pv/scikit-learn,mattgiguere/scikit-learn,manashmndl/scikit-learn,rahuldhote/scikit-learn,loli/sklearn-ensembletrees,JPFrancoia/scikit-learn,vermouthmjl/scikit-learn,DSLituiev/scikit-learn,IndraVikas/scikit-learn,ngoix/OCRF,marcocaccin/scikit-learn,joernhees/scikit-learn,yunfeilu/scikit-learn,rvraghav93/scikit-learn,jorge2703/scikit-learn,xiaoxiamii/scikit-learn,wzbozon/scikit-learn,mrshu/scikit-learn,pv/scikit-learn,etkirsch/scikit-learn,abhishekgahlot/scikit-learn,hainm/scikit-learn,vybstat/scikit-learn,zihua/scikit-learn,schets/scikit-learn,madjelan/scikit-learn,hsiaoyi0504/scikit-learn,victorbergelin/scikit-learn,ClimbsRocks/scikit-learn,ominux/scikit-learn,henridwyer/scikit-learn,MatthieuBizien/scikit-learn,belltailjp/scikit-learn,cainiaocome/scikit-learn,khkaminska/scikit-learn,hrjn/scikit-learn,AnasGhrab/scikit-learn,rahuldhote/scikit-learn,Fireblend/scikit-learn,plissonf/scikit-learn,anntzer/scikit-learn,stylianos-kampakis/scikit-learn,fyffyt/scikit-learn,zorojean/scikit-learn,lenovor/scikit-learn,schets/scikit-learn,ankurankan/scikit-learn,Nyker510/scikit-learn,xyguo/scikit-learn,wanggang3333/scikit-learn,r-mart/scikit-learn,ky822/scikit-learn,mikebenfield/scikit-learn,sinhrks/scikit-learn,scikit-learn/scikit-learn,madjelan/scikit-learn,mayblue9/scikit-learn,pkruskal/scikit-learn,fbagirov/scikit-learn,PrashntS/scikit-learn,olologin/scikit-learn,ltiao/scikit-learn,CforED/Machine-Learning,trankmichael/scikit-learn,JosmanPS/scikit-learn,alexeyum/scikit-learn,pompiduskus/scikit-learn,ZenDevelopmentSystems/scikit-learn,kevin-intel/scikit-learn,Sentient07/scikit-learn,heli522/scikit-learn,huzq/scikit-learn,aabadie/scikit-learn,xavierwu/scikit-learn,nikitasingh981/scikit-learn,PrashntS/scikit-learn,jseabold/scikit-learn,jpautom/scikit-learn,waterponey/scikit-learn,yanlend/scikit-learn,RayMick/scikit-learn,davidgbe/scikit-learn,pratapvardhan/scikit-learn,stylianos-kampakis/scikit-learn,idlead/scikit-learn,untom/scikit-learn,r-mart/scikit-learn,icdishb/scikit-learn,fyffyt/scikit-learn,mhdella/scikit-learn,rexshihaoren/scikit-learn,ycaihua/scikit-learn,cauchycui/scikit-learn,f3r/scikit-learn,anurag313/scikit-learn,jakobworldpeace/scikit-learn,lenovor/scikit-learn,DonBeo/scikit-learn,jkarnows/scikit-learn,yask123/scikit-learn,dhruv13J/scikit-learn,djgagne/scikit-learn,loli/semisupervisedforests,HolgerPeters/scikit-learn,petosegan/scikit-learn,JeanKossaifi/scikit-learn,plissonf/scikit-learn,yonglehou/scikit-learn,ChanderG/scikit-learn,samuel1208/scikit-learn,akionakamura/scikit-learn,alvarofierroclavero/scikit-learn,sonnyhu/scikit-learn,mattilyra/scikit-learn,dsquareindia/scikit-learn,jlegendary/scikit-learn,macks22/scikit-learn,btabibian/scikit-learn,joernhees/scikit-learn,potash/scikit-learn,loli/semisupervisedforests,wzbozon/scikit-learn,madjelan/scikit-learn,pv/scikit-learn,rsivapr/scikit-learn,rohanp/scikit-learn,Lawrence-Liu/scikit-learn,nesterione/scikit-learn,kylerbrown/scikit-learn,Titan-C/scikit-learn,trungnt13/scikit-learn,AlexRobson/scik
it-learn,vybstat/scikit-learn,rahul-c1/scikit-learn,zhenv5/scikit-learn,kmike/scikit-learn,HolgerPeters/scikit-learn,Garrett-R/scikit-learn,icdishb/scikit-learn,kevin-intel/scikit-learn,pythonvietnam/scikit-learn,fzalkow/scikit-learn,jmetzen/scikit-learn,elkingtonmcb/scikit-learn,bnaul/scikit-learn,chrsrds/scikit-learn,phdowling/scikit-learn,andrewnc/scikit-learn,altairpearl/scikit-learn,zaxtax/scikit-learn,ilo10/scikit-learn,yanlend/scikit-learn,tomlof/scikit-learn,iismd17/scikit-learn,ephes/scikit-learn,lazywei/scikit-learn,justincassidy/scikit-learn,vermouthmjl/scikit-learn,jzt5132/scikit-learn,mehdidc/scikit-learn,Djabbz/scikit-learn,costypetrisor/scikit-learn,betatim/scikit-learn,zaxtax/scikit-learn,spallavolu/scikit-learn,aabadie/scikit-learn,iismd17/scikit-learn,Obus/scikit-learn,mlyundin/scikit-learn,ephes/scikit-learn,LiaoPan/scikit-learn,Achuth17/scikit-learn,jblackburne/scikit-learn,ldirer/scikit-learn,pnedunuri/scikit-learn,LiaoPan/scikit-learn,terkkila/scikit-learn,Garrett-R/scikit-learn,xubenben/scikit-learn,Jimmy-Morzaria/scikit-learn,tomlof/scikit-learn,michigraber/scikit-learn,xiaoxiamii/scikit-learn,roxyboy/scikit-learn,sonnyhu/scikit-learn,shikhardb/scikit-learn,kaichogami/scikit-learn,larsmans/scikit-learn,waterponey/scikit-learn,espg/scikit-learn,murali-munna/scikit-learn,PrashntS/scikit-learn,shangwuhencc/scikit-learn,ycaihua/scikit-learn,andrewnc/scikit-learn,heli522/scikit-learn,hdmetor/scikit-learn,tosolveit/scikit-learn,Fireblend/scikit-learn,jakobworldpeace/scikit-learn,ssaeger/scikit-learn,nhejazi/scikit-learn,tosolveit/scikit-learn,btabibian/scikit-learn,evgchz/scikit-learn,rajat1994/scikit-learn,petosegan/scikit-learn,stylianos-kampakis/scikit-learn,466152112/scikit-learn,manhhomienbienthuy/scikit-learn,NelisVerhoef/scikit-learn,JeanKossaifi/scikit-learn,shangwuhencc/scikit-learn,ZenDevelopmentSystems/scikit-learn,jjx02230808/project0223,raghavrv/scikit-learn,robbymeals/scikit-learn,alvarofierroclavero/scikit-learn,cwu2011/scikit-learn,yask123/scikit-learn,trankmichael/scikit-learn,Myasuka/scikit-learn,ogrisel/scikit-learn,Adai0808/scikit-learn,jaidevd/scikit-learn,B3AU/waveTree,jmetzen/scikit-learn,glennq/scikit-learn,glouppe/scikit-learn,jakirkham/scikit-learn,Nyker510/scikit-learn,yyjiang/scikit-learn,cainiaocome/scikit-learn,florian-f/sklearn,nmayorov/scikit-learn,samuel1208/scikit-learn,tmhm/scikit-learn,liberatorqjw/scikit-learn,potash/scikit-learn,simon-pepin/scikit-learn,zhenv5/scikit-learn,wanggang3333/scikit-learn,zuku1985/scikit-learn,sanketloke/scikit-learn,pypot/scikit-learn,liangz0707/scikit-learn,liangz0707/scikit-learn,zorroblue/scikit-learn,devanshdalal/scikit-learn,ilyes14/scikit-learn,xiaoxiamii/scikit-learn,mfjb/scikit-learn,dhruv13J/scikit-learn,toastedcornflakes/scikit-learn,ahoyosid/scikit-learn,espg/scikit-learn,dsquareindia/scikit-learn,bhargav/scikit-learn,Srisai85/scikit-learn,ishanic/scikit-learn,terkkila/scikit-learn,huzq/scikit-learn,aabadie/scikit-learn,glennq/scikit-learn,meduz/scikit-learn,3manuek/scikit-learn,xubenben/scikit-learn,OshynSong/scikit-learn,treycausey/scikit-learn,rexshihaoren/scikit-learn,toastedcornflakes/scikit-learn,cdegroc/scikit-learn,frank-tancf/scikit-learn,0x0all/scikit-learn,JsNoNo/scikit-learn,Srisai85/scikit-learn,lazywei/scikit-learn,Windy-Ground/scikit-learn,jm-begon/scikit-learn,nrhine1/scikit-learn,ivannz/scikit-learn,untom/scikit-learn,raghavrv/scikit-learn,cl4rke/scikit-learn,chrisburr/scikit-learn,ilyes14/scikit-learn,andaag/scikit-learn,mxjl620/scikit-learn,shyamalschandra/scikit-learn,idlead/sc
ikit-learn,andrewnc/scikit-learn,fyffyt/scikit-learn,joshloyal/scikit-learn,MechCoder/scikit-learn,DonBeo/scikit-learn,xuewei4d/scikit-learn,henridwyer/scikit-learn,sgenoud/scikit-learn,Garrett-R/scikit-learn,andaag/scikit-learn,jereze/scikit-learn,walterreade/scikit-learn,pianomania/scikit-learn,jakirkham/scikit-learn,yunfeilu/scikit-learn,themrmax/scikit-learn,3manuek/scikit-learn,nhejazi/scikit-learn,yunfeilu/scikit-learn,Sentient07/scikit-learn,smartscheduling/scikit-learn-categorical-tree,loli/sklearn-ensembletrees,aminert/scikit-learn,quheng/scikit-learn,rishikksh20/scikit-learn,shyamalschandra/scikit-learn,bikong2/scikit-learn,UNR-AERIAL/scikit-learn,simon-pepin/scikit-learn,MohammedWasim/scikit-learn,zuku1985/scikit-learn,B3AU/waveTree,RomainBrault/scikit-learn,wlamond/scikit-learn,iismd17/scikit-learn,glouppe/scikit-learn,AlexandreAbraham/scikit-learn,mjudsp/Tsallis,davidgbe/scikit-learn,hdmetor/scikit-learn,joshloyal/scikit-learn,ashhher3/scikit-learn,ilo10/scikit-learn,AIML/scikit-learn,sarahgrogan/scikit-learn,UNR-AERIAL/scikit-learn,cl4rke/scikit-learn,billy-inn/scikit-learn,IssamLaradji/scikit-learn,btabibian/scikit-learn,chrsrds/scikit-learn,f3r/scikit-learn,B3AU/waveTree,rohanp/scikit-learn,victorbergelin/scikit-learn,LohithBlaze/scikit-learn,ChanChiChoi/scikit-learn,shahankhatch/scikit-learn,dhruv13J/scikit-learn,yanlend/scikit-learn,pnedunuri/scikit-learn,r-mart/scikit-learn,tdhopper/scikit-learn,harshaneelhg/scikit-learn,poryfly/scikit-learn,xuewei4d/scikit-learn,NunoEdgarGub1/scikit-learn,zorojean/scikit-learn,kylerbrown/scikit-learn,ningchi/scikit-learn,rvraghav93/scikit-learn,nrhine1/scikit-learn,sonnyhu/scikit-learn,OshynSong/scikit-learn,nomadcube/scikit-learn,chrsrds/scikit-learn,fengzhyuan/scikit-learn,cwu2011/scikit-learn,jzt5132/scikit-learn,Akshay0724/scikit-learn,Achuth17/scikit-learn,jmschrei/scikit-learn,hitszxp/scikit-learn
|
BUG: Make sure that joblib does get installed.
|
# -*- coding: utf-8 -*-
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
    config = Configuration('externals', parent_package, top_path)
config.add_subpackage('joblib')
config.add_subpackage('joblib/test')
return config
|
<commit_before><commit_msg>BUG: Make sure that joblib does get installed.<commit_after>
|
# -*- coding: utf-8 -*-
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
    config = Configuration('externals', parent_package, top_path)
config.add_subpackage('joblib')
config.add_subpackage('joblib/test')
return config
|
BUG: Make sure that joblib does get installed.# -*- coding: utf-8 -*-
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
    config = Configuration('externals', parent_package, top_path)
config.add_subpackage('joblib')
config.add_subpackage('joblib/test')
return config
|
<commit_before><commit_msg>BUG: Make sure that joblib does get installed.<commit_after># -*- coding: utf-8 -*-
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
    config = Configuration('externals', parent_package, top_path)
config.add_subpackage('joblib')
config.add_subpackage('joblib/test')
return config
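For context, a configuration function like this is normally consumed by numpy.distutils at install time. A minimal sketch of that usual pattern, not taken verbatim from the scikit-learn sources:

if __name__ == '__main__':
    from numpy.distutils.core import setup
    # Hand the Configuration dict to setup() so the 'joblib' and
    # 'joblib/test' subpackages are actually picked up and installed
    setup(**configuration(top_path='').todict())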
|
|
ab242c4d4fe2aa3bc7ce5a44eb7bd1ffcc5a922e
|
bin/2000/crosswalk_msa_block.py
|
bin/2000/crosswalk_msa_block.py
|
"""crosswalk_msa_block.py
Extract the crosswalk between 2000 msa and blocks
"""
import os
import csv
import fiona
#
# Import data
#
## MSA to counties crosswalk
# county_to_msa = {county: msa}
county_to_msa = {}
with open('data/2000/crosswalks/msa_county.csv', 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
county = rows[1]
msa = rows[0]
county_to_msa[county] = msa
## Import list of states
states = []
with open('data/state_numbers.csv', 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
states.append(rows[0])
## Import all block ids
blocks = []
for st in states:
    path = 'data/2000/shp/states/%s/blocks.shp'%st
    with fiona.open(path, 'r', 'ESRI Shapefile') as f:
        for record in f:
            blocks.append(record['properties']['BLKIDFP00'])
#
# Group by MSA
#
msa_group = {}
for b in blocks:
county = b[:5]
if county in county_to_msa:
msa = county_to_msa[county]
if msa not in msa_group:
msa_group[msa] = []
msa_group[msa].append(b)
#
# Save the crosswalk
#
with open('data/2000/crosswalks/msa_block.csv', 'w') as output:
output.write('MSA FIP\tBLOCK FIP\n')
    for msa in msa_group:
        ## Remove duplicates
        bs = list(set(msa_group[msa]))
for b in bs:
output.write('%s\t%s\n'%(msa, b))
|
Add script to extract the crosswalk between msas and blocks
|
Add script to extract the crosswalk between msas and blocks
|
Python
|
bsd-2-clause
|
scities/2000-us-metro-atlas
|
Add script to extract the crosswalk between msas and blocks
|
"""crosswalk_msa_block.py
Extract the crosswalk between 2000 msa and blocks
"""
import os
import csv
import fiona
#
# Import data
#
## MSA to counties crosswalk
# county_to_msa = {county: msa}
county_to_msa = {}
with open('data/2000/crosswalks/msa_county.csv', 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
county = rows[1]
msa = rows[0]
county_to_msa[county] = msa
## Import list of states
states = []
with open('data/state_numbers.csv', 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
states.append(rows[0])
## Import all block ids
blocks = []
for st in states:
    path = 'data/2000/shp/states/%s/blocks.shp'%st
    with fiona.open(path, 'r', 'ESRI Shapefile') as f:
        for record in f:
            blocks.append(record['properties']['BLKIDFP00'])
#
# Group by MSA
#
msa_group = {}
for b in blocks:
county = b[:5]
if county in county_to_msa:
msa = county_to_msa[county]
if msa not in msa_group:
msa_group[msa] = []
msa_group[msa].append(b)
#
# Save the crosswalk
#
with open('data/2000/crosswalks/msa_block.csv', 'w') as output:
output.write('MSA FIP\tBLOCK FIP\n')
    for msa in msa_group:
        ## Remove duplicates
        bs = list(set(msa_group[msa]))
for b in bs:
output.write('%s\t%s\n'%(msa, b))
|
<commit_before><commit_msg>Add script to extract the crosswalk between msas and blocks<commit_after>
|
"""crosswalk_msa_block.py
Extract the crosswalk between 2000 msa and blocks
"""
import os
import csv
import fiona
#
# Import data
#
## MSA to counties crosswalk
# county_to_msa = {county: msa}
county_to_msa = {}
with open('data/2000/crosswalks/msa_county.csv', 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
county = rows[1]
msa = rows[0]
county_to_msa[county] = msa
## Import list of states
states = []
with open('data/state_numbers.csv', 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
states.append(rows[0])
## Import all block ids
blocks = []
for st in states:
    path = 'data/2000/shp/states/%s/blocks.shp'%st
    with fiona.open(path, 'r', 'ESRI Shapefile') as f:
        for record in f:
            blocks.append(record['properties']['BLKIDFP00'])
#
# Group by MSA
#
msa_group = {}
for b in blocks:
county = b[:5]
if county in county_to_msa:
msa = county_to_msa[county]
if msa not in msa_group:
msa_group[msa] = []
msa_group[msa].append(b)
#
# Save the crosswalk
#
with open('data/2000/crosswalks/msa_block.csv', 'w') as output:
output.write('MSA FIP\tBLOCK FIP\n')
    for msa in msa_group:
        ## Remove duplicates
        bs = list(set(msa_group[msa]))
for b in bs:
output.write('%s\t%s\n'%(msa, b))
|
Add script to extract the crosswalk between msas and blocks"""crosswalk_msa_block.py
Extract the crosswalk between 2000 msa and blocks
"""
import os
import csv
import fiona
#
# Import data
#
## MSA to counties crosswalk
# county_to_msa = {county: msa}
county_to_msa = {}
with open('data/2000/crosswalks/msa_county.csv', 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
county = rows[1]
msa = rows[0]
county_to_msa[county] = msa
## Import list of states
states = []
with open('data/state_numbers.csv', 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
states.append(rows[0])
## Import all block ids
blocks = []
for st in states:
    path = 'data/2000/shp/states/%s/blocks.shp'%st
    with fiona.open(path, 'r', 'ESRI Shapefile') as f:
        for record in f:
            blocks.append(record['properties']['BLKIDFP00'])
#
# Group by MSA
#
msa_group = {}
for b in blocks:
county = b[:5]
if county in county_to_msa:
msa = county_to_msa[county]
if msa not in msa_group:
msa_group[msa] = []
msa_group[msa].append(b)
#
# Save the crosswalk
#
with open('data/2000/crosswalks/msa_block.csv', 'w') as output:
output.write('MSA FIP\tBLOCK FIP\n')
    for msa in msa_group:
        ## Remove duplicates
        bs = list(set(msa_group[msa]))
for b in bs:
output.write('%s\t%s\n'%(msa, b))
|
<commit_before><commit_msg>Add script to extract the crosswalk between msas and blocks<commit_after>"""crosswalk_msa_block.py
Extract the crosswalk between 2000 msa and blocks
"""
import os
import csv
import fiona
#
# Import data
#
## MSA to counties crosswalk
# county_to_msa = {county: msa}
county_to_msa = {}
with open('data/2000/crosswalks/msa_county.csv', 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
county = rows[1]
msa = rows[0]
county_to_msa[county] = msa
## Import list of states
states = []
with open('data/state_numbers.csv', 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
states.append(rows[0])
## Import all block ids
blocks = []
for st in states:
    path = 'data/2000/shp/states/%s/blocks.shp'%st
    with fiona.open(path, 'r', 'ESRI Shapefile') as f:
        for record in f:
            blocks.append(record['properties']['BLKIDFP00'])
#
# Group by MSA
#
msa_group = {}
for b in blocks:
county = b[:5]
if county in county_to_msa:
msa = county_to_msa[county]
if msa not in msa_group:
msa_group[msa] = []
msa_group[msa].append(b)
#
# Save the crosswalk
#
with open('data/2000/crosswalks/msa_block.csv', 'w') as output:
output.write('MSA FIP\tBLOCK FIP\n')
    for msa in msa_group:
        ## Remove duplicates
        bs = list(set(msa_group[msa]))
for b in bs:
output.write('%s\t%s\n'%(msa, b))
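Both crosswalk files are plain tab-separated text with a single header row. A quick sanity check of the output, sketched in the same Python 2 style as the script; the FIPS codes in the comments are made up, only the column layout comes from the code above:

import csv

# Peek at the first rows of the generated crosswalk
with open('data/2000/crosswalks/msa_block.csv', 'r') as f:
    reader = csv.reader(f, delimiter='\t')
    print reader.next()   # header: ['MSA FIP', 'BLOCK FIP']
    print reader.next()   # e.g. ['0040', '010010201001000'] (illustrative)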
|
|
4b531ec853036b18f3014d310803f81e4fa511b8
|
openedx/tests/xblock_integration/test_external_xblocks.py
|
openedx/tests/xblock_integration/test_external_xblocks.py
|
"""
This will run tests on all XBlocks in the `xblock.test.v0`
entrypoint. Did you notice something about that entry point? It ends
with a v0. That means this is not finished. At some point, we might
stop running v0 tests, replacing them with test case failures, and
run v1 tests only.
That be the dragon here.
"""
import pkg_resources
class DuplicateXBlockTest(Exception):
'''
This exception is shown if there are multiple entry points with the same
class name for a test. In most cases, this means you have two versions
of the same XBlock installed, or two XBlocks with namespace collisions. In
either case, it'd be nice to resolve (likely by renaming tests as they
come in, hopefully still being careful to catch collisions which might
    affect deployed XBlocks. See discussion at:
https://github.com/edx/edx-platform/pull/11032#discussion_r48097392).
'''
pass
class InvalidTestName(Exception):
'''
This means you have an entry point for a test that does not correspond
to a properly named test class. For example, if you cut-and-paste entry
points in `setup.py`, and forgot to repoint the class (so it points to
`DoneXBlock` instead of `TestDone`), or otherwise made an error, you
will see this exception.
'''
pass
xblock_loaded = False # pylint: disable=invalid-name
for entrypoint in pkg_resources.iter_entry_points(group="xblock.test.v0"): # pylint: disable=no-member
plugin = entrypoint.load()
classname = plugin.__name__
if classname in globals():
raise DuplicateXBlockTest(classname)
if not classname.startswith("Test"):
raise InvalidTestName("Test class should start with 'Test': " + classname)
# This should never happen, but while we're testing for class name
# validity, we figured it was okay to be a little overly defensive.
# See discussion at:
# https://github.com/edx/edx-platform/pull/11032#discussion_r48097392
if not classname.replace("_", "").isalnum():
raise InvalidTestName("Python variables should be letters, numbers, and underscores: " + classname)
globals()[classname] = plugin
print "Loading XBlock test: " + classname
xblock_loaded = True
|
Allow us to run tests from external XBlock repositories
|
Allow us to run tests from external XBlock repositories
|
Python
|
agpl-3.0
|
zhenzhai/edx-platform,zhenzhai/edx-platform,zhenzhai/edx-platform,zhenzhai/edx-platform,zhenzhai/edx-platform
|
Allow us to run tests from external XBlock repositories
|
"""
This will run tests on all XBlocks in the `xblock.test.v0`
entrypoint. Did you notice something about that entry point? It ends
with a v0. That means this is not finished. At some point, we might
stop running v0 tests, replacing them with test case failures, and
run v1 tests only.
That be the dragon here.
"""
import pkg_resources
class DuplicateXBlockTest(Exception):
'''
This exception is shown if there are multiple entry points with the same
class name for a test. In most cases, this means you have two versions
of the same XBlock installed, or two XBlocks with namespace collisions. In
either case, it'd be nice to resolve (likely by renaming tests as they
come in, hopefully still being careful to catch collisions which might
    affect deployed XBlocks. See discussion at:
https://github.com/edx/edx-platform/pull/11032#discussion_r48097392).
'''
pass
class InvalidTestName(Exception):
'''
This means you have an entry point for a test that does not correspond
to a properly named test class. For example, if you cut-and-paste entry
points in `setup.py`, and forgot to repoint the class (so it points to
`DoneXBlock` instead of `TestDone`), or otherwise made an error, you
will see this exception.
'''
pass
xblock_loaded = False # pylint: disable=invalid-name
for entrypoint in pkg_resources.iter_entry_points(group="xblock.test.v0"): # pylint: disable=no-member
plugin = entrypoint.load()
classname = plugin.__name__
if classname in globals():
raise DuplicateXBlockTest(classname)
if not classname.startswith("Test"):
raise InvalidTestName("Test class should start with 'Test': " + classname)
# This should never happen, but while we're testing for class name
# validity, we figured it was okay to be a little overly defensive.
# See discussion at:
# https://github.com/edx/edx-platform/pull/11032#discussion_r48097392
if not classname.replace("_", "").isalnum():
raise InvalidTestName("Python variables should be letters, numbers, and underscores: " + classname)
globals()[classname] = plugin
print "Loading XBlock test: " + classname
xblock_loaded = True
|
<commit_before><commit_msg>Allow us to run tests from external XBlock repositories<commit_after>
|
"""
This will run tests on all XBlocks in the `xblock.test.v0`
entrypoint. Did you notice something about that entry point? It ends
with a v0. That means this is not finished. At some point, we might
stop running v0 tests, replacing them with test case failures, and
run v1 tests only.
That be the dragon here.
"""
import pkg_resources
class DuplicateXBlockTest(Exception):
'''
This exception is shown if there are multiple entry points with the same
class name for a test. In most cases, this means you have two versions
of the same XBlock installed, or two XBlocks with namespace collisions. In
either case, it'd be nice to resolve (likely by renaming tests as they
come in, hopefully still being careful to catch collisions which might
    affect deployed XBlocks. See discussion at:
https://github.com/edx/edx-platform/pull/11032#discussion_r48097392).
'''
pass
class InvalidTestName(Exception):
'''
This means you have an entry point for a test that does not correspond
to a properly named test class. For example, if you cut-and-paste entry
points in `setup.py`, and forgot to repoint the class (so it points to
`DoneXBlock` instead of `TestDone`), or otherwise made an error, you
will see this exception.
'''
pass
xblock_loaded = False # pylint: disable=invalid-name
for entrypoint in pkg_resources.iter_entry_points(group="xblock.test.v0"): # pylint: disable=no-member
plugin = entrypoint.load()
classname = plugin.__name__
if classname in globals():
raise DuplicateXBlockTest(classname)
if not classname.startswith("Test"):
raise InvalidTestName("Test class should start with 'Test': " + classname)
# This should never happen, but while we're testing for class name
# validity, we figured it was okay to be a little overly defensive.
# See discussion at:
# https://github.com/edx/edx-platform/pull/11032#discussion_r48097392
if not classname.replace("_", "").isalnum():
raise InvalidTestName("Python variables should be letters, numbers, and underscores: " + classname)
globals()[classname] = plugin
print "Loading XBlock test: " + classname
xblock_loaded = True
|
Allow us to run tests from external XBlock repositories"""
This will run tests on all XBlocks in the `xblock.test.v0`
entrypoint. Did you notice something about that entry point? It ends
with a v0. That means this is not finished. At some point, we might
stop running v0 tests, replacing them with test case failures, and
run v1 tests only.
That be the dragon here.
"""
import pkg_resources
class DuplicateXBlockTest(Exception):
'''
This exception is shown if there are multiple entry points with the same
class name for a test. In most cases, this means you have two versions
of the same XBlock installed, or two XBlocks with namespace collisions. In
either case, it'd be nice to resolve (likely by renaming tests as they
come in, hopefully still being careful to catch collisions which might
    affect deployed XBlocks. See discussion at:
https://github.com/edx/edx-platform/pull/11032#discussion_r48097392).
'''
pass
class InvalidTestName(Exception):
'''
This means you have an entry point for a test that does not correspond
to a properly named test class. For example, if you cut-and-paste entry
points in `setup.py`, and forgot to repoint the class (so it points to
`DoneXBlock` instead of `TestDone`), or otherwise made an error, you
will see this exception.
'''
pass
xblock_loaded = False # pylint: disable=invalid-name
for entrypoint in pkg_resources.iter_entry_points(group="xblock.test.v0"): # pylint: disable=no-member
plugin = entrypoint.load()
classname = plugin.__name__
if classname in globals():
raise DuplicateXBlockTest(classname)
if not classname.startswith("Test"):
raise InvalidTestName("Test class should start with 'Test': " + classname)
# This should never happen, but while we're testing for class name
# validity, we figured it was okay to be a little overly defensive.
# See discussion at:
# https://github.com/edx/edx-platform/pull/11032#discussion_r48097392
if not classname.replace("_", "").isalnum():
raise InvalidTestName("Python variables should be letters, numbers, and underscores: " + classname)
globals()[classname] = plugin
print "Loading XBlock test: " + classname
xblock_loaded = True
|
<commit_before><commit_msg>Allow us to run tests from external XBlock repositories<commit_after>"""
This will run tests on all XBlocks in the `xblock.test.v0`
entrypoint. Did you notice something about that entry point? It ends
with a v0. That means this is not finished. At some point, we might
stop running v0 tests, replacing them with test case failures, and
run v1 tests only.
That be the dragon here.
"""
import pkg_resources
class DuplicateXBlockTest(Exception):
'''
This exception is shown if there are multiple entry points with the same
class name for a test. In most cases, this means you have two versions
of the same XBlock installed, or two XBlocks with namespace collisions. In
either case, it'd be nice to resolve (likely by renaming tests as they
come in, hopefully still being careful to catch collisions which might
    affect deployed XBlocks. See discussion at:
https://github.com/edx/edx-platform/pull/11032#discussion_r48097392).
'''
pass
class InvalidTestName(Exception):
'''
This means you have an entry point for a test that does not correspond
to a properly named test class. For example, if you cut-and-paste entry
points in `setup.py`, and forgot to repoint the class (so it points to
`DoneXBlock` instead of `TestDone`), or otherwise made an error, you
will see this exception.
'''
pass
xblock_loaded = False # pylint: disable=invalid-name
for entrypoint in pkg_resources.iter_entry_points(group="xblock.test.v0"): # pylint: disable=no-member
plugin = entrypoint.load()
classname = plugin.__name__
if classname in globals():
raise DuplicateXBlockTest(classname)
if not classname.startswith("Test"):
raise InvalidTestName("Test class should start with 'Test': " + classname)
# This should never happen, but while we're testing for class name
# validity, we figured it was okay to be a little overly defensive.
# See discussion at:
# https://github.com/edx/edx-platform/pull/11032#discussion_r48097392
if not classname.replace("_", "").isalnum():
raise InvalidTestName("Python variables should be letters, numbers, and underscores: " + classname)
globals()[classname] = plugin
print "Loading XBlock test: " + classname
xblock_loaded = True
|
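The loader above discovers tests through the `xblock.test.v0` entry point group. As a hedged illustration of how a test would register itself, a hypothetical XBlock package's `setup.py` might declare the following; the package and class names are invented for the example, not taken from the record above.
# Hypothetical setup.py fragment for an XBlock shipping a v0 test;
# names here are placeholders for illustration only.
from setuptools import setup

setup(
    name='done-xblock',
    version='0.1',
    packages=['done_xblock'],
    entry_points={
        'xblock.test.v0': [
            # The class name must start with "Test" to pass the loader's checks.
            'TestDone = done_xblock.tests:TestDone',
        ],
    },
)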
|
77e71d20d67af9965ca42c8e187614be1a9c7ffc
|
KineticToolkit/__init__.py
|
KineticToolkit/__init__.py
|
"""
Simple and extensible toolkit for theoretic chemical kinetics
=============================================================
In theoretic study of chemical kinetics, it is quite often that a large
amount of quantum chemical computation needs to be performed and recorded,
which could make the post-processing of the kinetics data quite cumbersome,
labour-intensive, and sometimes even error-prone. This package is designed to
make this process easier in a simple and extensible way. The computation
results can be stored in YAML files that act as database for research
projects. Utilities are provided in this package for both creating and
manipulating this YAML file and for the post-processing of the results.
This toolkit is designed to be simple, extensible, and flexible. In its core,
it only assumes the YAML file contains a list of data points, which are
mappings with keys being property names and values being property values. The
keys and values are not fixed to any particular set. Users only need to make
sure the properties stored in the mapping are consistent with the requirement
of the specific post-processing that is needed to be performed.
In this way, the archiving of the results is fully extensible. Also by using
YAML as the storage engine, rather than using some binary formats like SQLite
or formats that is essentially human-unreadable like XML, the YAML file can
be easily edited manually or put under version control.
Public modules of this package includes
.. autosummary::
:toctree:
datadb
propnames
energetics
gauinter
All the public symbols of these modules are exported to the root package. So
only the root package needed to be imported for common usage.
"""
|
Add the main package module file
|
Add the main package module file
A basic introduction to the utility and philosophy of the package is
added to the package docstring.
|
Python
|
mit
|
tschijnmo/KineticsToolkit
|
Add the main package module file
A basic introduction to the utility and philosophy of the package is
added to the package docstring.
|
"""
Simple and extensible toolkit for theoretic chemical kinetics
=============================================================
In theoretic study of chemical kinetics, it is quite often that a large
amount of quantum chemical computation needs to be performed and recorded,
which could make the post-processing of the kinetics data quite cumbersome,
labour-intensive, and sometimes even error-prone. This package is designed to
make this process easier in a simple and extensible way. The computation
results can be stored in YAML files that act as database for research
projects. Utilities are provided in this package for both creating and
manipulating this YAML file and for the post-processing of the results.
This toolkit is designed to be simple, extensible, and flexible. In its core,
it only assumes the YAML file contains a list of data points, which are
mappings with keys being property names and values being property values. The
keys and values are not fixed to any particular set. Users only need to make
sure the properties stored in the mapping are consistent with the requirement
of the specific post-processing that is needed to be performed.
In this way, the archiving of the results is fully extensible. Also by using
YAML as the storage engine, rather than using some binary formats like SQLite
or formats that is essentially human-unreadable like XML, the YAML file can
be easily edited manually or put under version control.
Public modules of this package includes
.. autosummary::
:toctree:
datadb
propnames
energetics
gauinter
All the public symbols of these modules are exported to the root package. So
only the root package needed to be imported for common usage.
"""
|
<commit_before><commit_msg>Add the main package module file
A basic introduction to the utility and philosophy of the package is
added to the package docstring.<commit_after>
|
"""
Simple and extensible toolkit for theoretic chemical kinetics
=============================================================
In theoretic study of chemical kinetics, it is quite often that a large
amount of quantum chemical computation needs to be performed and recorded,
which could make the post-processing of the kinetics data quite cumbersome,
labour-intensive, and sometimes even error-prone. This package is designed to
make this process easier in a simple and extensible way. The computation
results can be stored in YAML files that act as database for research
projects. Utilities are provided in this package for both creating and
manipulating this YAML file and for the post-processing of the results.
This toolkit is designed to be simple, extensible, and flexible. In its core,
it only assumes the YAML file contains a list of data points, which are
mappings with keys being property names and values being property values. The
keys and values are not fixed to any particular set. Users only need to make
sure the properties stored in the mapping are consistent with the requirement
of the specific post-processing that is needed to be performed.
In this way, the archiving of the results is fully extensible. Also by using
YAML as the storage engine, rather than using some binary formats like SQLite
or formats that is essentially human-unreadable like XML, the YAML file can
be easily edited manually or put under version control.
Public modules of this package includes
.. autosummary::
:toctree:
datadb
propnames
energetics
gauinter
All the public symbols of these modules are exported to the root package. So
only the root package needed to be imported for common usage.
"""
|
Add the main package module file
A basic introduction to the utility and philosophy of the package is
added to the package docstring."""
Simple and extensible toolkit for theoretic chemical kinetics
=============================================================
In the theoretical study of chemical kinetics, it is quite common that a large
amount of quantum chemical computation needs to be performed and recorded,
which can make the post-processing of the kinetics data quite cumbersome,
labour-intensive, and sometimes even error-prone. This package is designed to
make this process easier in a simple and extensible way. The computation
results can be stored in YAML files that act as databases for research
projects. Utilities are provided in this package both for creating and
manipulating this YAML file and for the post-processing of the results.
This toolkit is designed to be simple, extensible, and flexible. At its core,
it only assumes that the YAML file contains a list of data points, which are
mappings with keys being property names and values being property values. The
keys and values are not fixed to any particular set. Users only need to make
sure the properties stored in the mapping are consistent with the requirements
of the specific post-processing that needs to be performed.
In this way, the archiving of the results is fully extensible. Also, by using
YAML as the storage engine, rather than binary formats like SQLite or formats
that are essentially human-unreadable like XML, the YAML file can be easily
edited manually or put under version control.
Public modules of this package include
.. autosummary::
    :toctree:
    datadb
    propnames
    energetics
    gauinter
All the public symbols of these modules are exported to the root package, so
only the root package needs to be imported for common usage.
"""
|
<commit_before><commit_msg>Add the main package module file
A basic introduction to the utility and philosophy of the package is
added to the package docstring.<commit_after>"""
Simple and extensible toolkit for theoretic chemical kinetics
=============================================================
In the theoretical study of chemical kinetics, it is quite common that a large
amount of quantum chemical computation needs to be performed and recorded,
which can make the post-processing of the kinetics data quite cumbersome,
labour-intensive, and sometimes even error-prone. This package is designed to
make this process easier in a simple and extensible way. The computation
results can be stored in YAML files that act as databases for research
projects. Utilities are provided in this package both for creating and
manipulating this YAML file and for the post-processing of the results.
This toolkit is designed to be simple, extensible, and flexible. At its core,
it only assumes that the YAML file contains a list of data points, which are
mappings with keys being property names and values being property values. The
keys and values are not fixed to any particular set. Users only need to make
sure the properties stored in the mapping are consistent with the requirements
of the specific post-processing that needs to be performed.
In this way, the archiving of the results is fully extensible. Also, by using
YAML as the storage engine, rather than binary formats like SQLite or formats
that are essentially human-unreadable like XML, the YAML file can be easily
edited manually or put under version control.
Public modules of this package include
.. autosummary::
    :toctree:
    datadb
    propnames
    energetics
    gauinter
All the public symbols of these modules are exported to the root package, so
only the root package needs to be imported for common usage.
"""
|
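Since the docstring only assumes a list of mappings, a minimal sketch of reading and extending such a file could look like the following. PyYAML is an assumed dependency, and the file name and property keys are hypothetical, since the format deliberately fixes neither.
# Minimal sketch, assuming PyYAML is installed; 'points.yml' and the
# property names are placeholders, as the format fixes neither.
import yaml

with open('points.yml') as f:
    points = yaml.safe_load(f) or []  # a list of property-name -> value mappings

points.append({'species': 'CH3', 'energy': -39.726})  # any keys are allowed

with open('points.yml', 'w') as f:
    yaml.safe_dump(points, f)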
|
18275a566f23d4387ffbd1d19e5cd8d6d449dad9
|
corehq/apps/app_manager/management/commands/applications_with_add_ons.py
|
corehq/apps/app_manager/management/commands/applications_with_add_ons.py
|
from __future__ import absolute_import
import csv
from django.core.management.base import BaseCommand
from corehq import toggles
from corehq.apps.app_manager.models import Domain
class Command(BaseCommand):
help = """
    Checks whether an add-on is enabled, or was ever enabled, for applications under all domains
    or under a specific domain if a domain name is passed
    Also checks whether the toggle ENABLE_ALL_ADD_ONS is enabled for domains
"""
def add_arguments(self, parser):
parser.add_argument('--domain', type=str)
parser.add_argument('add_on_name')
@staticmethod
def _iter_domains(options):
if options.get('domain'):
yield Domain.get_by_name(options['domain'])
else:
domain_ids = [
result['id'] for result in
Domain.get_db().view(
"domain/domains", reduce=False, include_docs=False
).all()
]
print("Count of domains : %s" % len(domain_ids))
for domain_id in domain_ids:
yield Domain.get(domain_id)
def handle(self, add_on_name, *args, **options):
with open("apps_with_feature_%s.csv" % add_on_name, "w") as csvfile:
writer = csv.DictWriter(csvfile,
fieldnames=[
'domain', 'application_id', 'app_name',
'all_add_ons_enabled', 'status'
])
writer.writeheader()
for domain in self._iter_domains(options):
for application in domain.full_applications(include_builds=False):
if not application.is_remote_app():
all_add_ons_enabled = toggles.ENABLE_ALL_ADD_ONS.enabled(domain.name)
if add_on_name in application.add_ons or all_add_ons_enabled:
writer.writerow({
'domain': domain.name,
'application_id': application.get_id,
'app_name': application.name,
'all_add_ons_enabled': all_add_ons_enabled,
'status': application.add_ons.get('custom_icon_badges')
})
|
Add command to find applications enabled for an add on
|
Add command to find applications enabled for an add on [skip ci]
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
|
Add command to find applications enabled for an add on [skip ci]
|
from __future__ import absolute_import
import csv
from django.core.management.base import BaseCommand
from corehq import toggles
from corehq.apps.app_manager.models import Domain
class Command(BaseCommand):
help = """
    Checks whether an add-on is enabled, or was ever enabled, for applications under all domains
    or under a specific domain if a domain name is passed
    Also checks whether the toggle ENABLE_ALL_ADD_ONS is enabled for domains
"""
def add_arguments(self, parser):
parser.add_argument('--domain', type=str)
parser.add_argument('add_on_name')
@staticmethod
def _iter_domains(options):
if options.get('domain'):
yield Domain.get_by_name(options['domain'])
else:
domain_ids = [
result['id'] for result in
Domain.get_db().view(
"domain/domains", reduce=False, include_docs=False
).all()
]
print("Count of domains : %s" % len(domain_ids))
for domain_id in domain_ids:
yield Domain.get(domain_id)
def handle(self, add_on_name, *args, **options):
with open("apps_with_feature_%s.csv" % add_on_name, "w") as csvfile:
writer = csv.DictWriter(csvfile,
fieldnames=[
'domain', 'application_id', 'app_name',
'all_add_ons_enabled', 'status'
])
writer.writeheader()
for domain in self._iter_domains(options):
for application in domain.full_applications(include_builds=False):
if not application.is_remote_app():
all_add_ons_enabled = toggles.ENABLE_ALL_ADD_ONS.enabled(domain.name)
if add_on_name in application.add_ons or all_add_ons_enabled:
writer.writerow({
'domain': domain.name,
'application_id': application.get_id,
'app_name': application.name,
'all_add_ons_enabled': all_add_ons_enabled,
'status': application.add_ons.get('custom_icon_badges')
})
|
<commit_before><commit_msg>Add command to find applications enabled for an add on [skip ci]<commit_after>
|
from __future__ import absolute_import
import csv
from django.core.management.base import BaseCommand
from corehq import toggles
from corehq.apps.app_manager.models import Domain
class Command(BaseCommand):
help = """
    Checks whether an add-on is enabled, or was ever enabled, for applications under all domains
    or under a specific domain if a domain name is passed
    Also checks whether the toggle ENABLE_ALL_ADD_ONS is enabled for domains
"""
def add_arguments(self, parser):
parser.add_argument('--domain', type=str)
parser.add_argument('add_on_name')
@staticmethod
def _iter_domains(options):
if options.get('domain'):
yield Domain.get_by_name(options['domain'])
else:
domain_ids = [
result['id'] for result in
Domain.get_db().view(
"domain/domains", reduce=False, include_docs=False
).all()
]
print("Count of domains : %s" % len(domain_ids))
for domain_id in domain_ids:
yield Domain.get(domain_id)
def handle(self, add_on_name, *args, **options):
with open("apps_with_feature_%s.csv" % add_on_name, "w") as csvfile:
writer = csv.DictWriter(csvfile,
fieldnames=[
'domain', 'application_id', 'app_name',
'all_add_ons_enabled', 'status'
])
writer.writeheader()
for domain in self._iter_domains(options):
for application in domain.full_applications(include_builds=False):
if not application.is_remote_app():
all_add_ons_enabled = toggles.ENABLE_ALL_ADD_ONS.enabled(domain.name)
if add_on_name in application.add_ons or all_add_ons_enabled:
writer.writerow({
'domain': domain.name,
'application_id': application.get_id,
'app_name': application.name,
'all_add_ons_enabled': all_add_ons_enabled,
'status': application.add_ons.get('custom_icon_badges')
})
|
Add command to find applications enabled for an add on [skip ci]from __future__ import absolute_import
import csv
from django.core.management.base import BaseCommand
from corehq import toggles
from corehq.apps.app_manager.models import Domain
class Command(BaseCommand):
help = """
    Checks whether an add-on is enabled, or was ever enabled, for applications under all domains
    or under a specific domain if a domain name is passed
    Also checks whether the toggle ENABLE_ALL_ADD_ONS is enabled for domains
"""
def add_arguments(self, parser):
parser.add_argument('--domain', type=str)
parser.add_argument('add_on_name')
@staticmethod
def _iter_domains(options):
if options.get('domain'):
yield Domain.get_by_name(options['domain'])
else:
domain_ids = [
result['id'] for result in
Domain.get_db().view(
"domain/domains", reduce=False, include_docs=False
).all()
]
print("Count of domains : %s" % len(domain_ids))
for domain_id in domain_ids:
yield Domain.get(domain_id)
def handle(self, add_on_name, *args, **options):
with open("apps_with_feature_%s.csv" % add_on_name, "w") as csvfile:
writer = csv.DictWriter(csvfile,
fieldnames=[
'domain', 'application_id', 'app_name',
'all_add_ons_enabled', 'status'
])
writer.writeheader()
for domain in self._iter_domains(options):
for application in domain.full_applications(include_builds=False):
if not application.is_remote_app():
all_add_ons_enabled = toggles.ENABLE_ALL_ADD_ONS.enabled(domain.name)
if add_on_name in application.add_ons or all_add_ons_enabled:
writer.writerow({
'domain': domain.name,
'application_id': application.get_id,
'app_name': application.name,
'all_add_ons_enabled': all_add_ons_enabled,
'status': application.add_ons.get('custom_icon_badges')
})
|
<commit_before><commit_msg>Add command to find applications enabled for an add on [skip ci]<commit_after>from __future__ import absolute_import
import csv
from django.core.management.base import BaseCommand
from corehq import toggles
from corehq.apps.app_manager.models import Domain
class Command(BaseCommand):
help = """
    Checks whether an add-on is enabled, or was ever enabled, for applications under all domains
    or under a specific domain if a domain name is passed
    Also checks whether the toggle ENABLE_ALL_ADD_ONS is enabled for domains
"""
def add_arguments(self, parser):
parser.add_argument('--domain', type=str)
parser.add_argument('add_on_name')
@staticmethod
def _iter_domains(options):
if options.get('domain'):
yield Domain.get_by_name(options['domain'])
else:
domain_ids = [
result['id'] for result in
Domain.get_db().view(
"domain/domains", reduce=False, include_docs=False
).all()
]
print("Count of domains : %s" % len(domain_ids))
for domain_id in domain_ids:
yield Domain.get(domain_id)
def handle(self, add_on_name, *args, **options):
with open("apps_with_feature_%s.csv" % add_on_name, "w") as csvfile:
writer = csv.DictWriter(csvfile,
fieldnames=[
'domain', 'application_id', 'app_name',
'all_add_ons_enabled', 'status'
])
writer.writeheader()
for domain in self._iter_domains(options):
for application in domain.full_applications(include_builds=False):
if not application.is_remote_app():
all_add_ons_enabled = toggles.ENABLE_ALL_ADD_ONS.enabled(domain.name)
if add_on_name in application.add_ons or all_add_ons_enabled:
writer.writerow({
'domain': domain.name,
'application_id': application.get_id,
'app_name': application.name,
'all_add_ons_enabled': all_add_ons_enabled,
'status': application.add_ons.get('custom_icon_badges')
})
|
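For reference, a management command like this can also be invoked programmatically through Django's standard call_command helper. The add-on and domain names below are made-up placeholders, not values from the record.
# Hedged usage sketch via Django's management API; the add-on and
# domain names are hypothetical.
from django.core.management import call_command

# Writes apps_with_feature_custom_icon_badges.csv in the working directory.
call_command('applications_with_add_ons', 'custom_icon_badges',
             domain='example-domain')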
|
8b7f48d9b077377ebaeed8c57ab02b705e0ba556
|
test_game_parser.py
|
test_game_parser.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
from lxml import html
from parsers.team_parser import TeamParser
from parsers.game_parser import GameParser
def test_2016():
url = "http://www.nhl.com/scores/htmlreports/20162017/ES020776.HTM"
tp = TeamParser(get_data(url))
tp.load_data()
tp.create_teams()
gp = GameParser(get_data(url), None)
gp.load_data()
gp.create_game(tp.teams)
assert gp.game_data == [
'Saturday, February 4, 2017', 'Attendance 19,092 at Amalie Arena',
'Start 7:08 EST; End 10:09 EST', 'Game 0776', 'Final']
def test_playoff_game():
url = "http://www.nhl.com/scores/htmlreports/20122013/ES030325.HTM"
tp = TeamParser(get_data(url))
tp.load_data()
tp.create_teams()
gp = GameParser(2, get_data(url), None)
gp.load_data()
gp.create_game(tp.teams)
assert gp.game_data == [
'Saturday, June 8, 2013', 'Attendance 22,237 at United Center',
'Start 7:20 CDT; End 11:02 CDT', 'Game 0325', 'Final']
def get_data(url):
r = requests.get(url)
return html.fromstring(r.text)
if __name__ == '__main__':
test_2016()
test_playoff_game()
|
Add tests for game parser item
|
Add tests for game parser item
|
Python
|
mit
|
leaffan/pynhldb
|
Add tests for game parser item
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
from lxml import html
from parsers.team_parser import TeamParser
from parsers.game_parser import GameParser
def test_2016():
url = "http://www.nhl.com/scores/htmlreports/20162017/ES020776.HTM"
tp = TeamParser(get_data(url))
tp.load_data()
tp.create_teams()
gp = GameParser(get_data(url), None)
gp.load_data()
gp.create_game(tp.teams)
assert gp.game_data == [
'Saturday, February 4, 2017', 'Attendance 19,092 at Amalie Arena',
'Start 7:08 EST; End 10:09 EST', 'Game 0776', 'Final']
def test_playoff_game():
url = "http://www.nhl.com/scores/htmlreports/20122013/ES030325.HTM"
tp = TeamParser(get_data(url))
tp.load_data()
tp.create_teams()
gp = GameParser(2, get_data(url), None)
gp.load_data()
gp.create_game(tp.teams)
assert gp.game_data == [
'Saturday, June 8, 2013', 'Attendance 22,237 at United Center',
'Start 7:20 CDT; End 11:02 CDT', 'Game 0325', 'Final']
def get_data(url):
r = requests.get(url)
return html.fromstring(r.text)
if __name__ == '__main__':
test_2016()
test_playoff_game()
|
<commit_before><commit_msg>Add tests for game parser item<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
from lxml import html
from parsers.team_parser import TeamParser
from parsers.game_parser import GameParser
def test_2016():
url = "http://www.nhl.com/scores/htmlreports/20162017/ES020776.HTM"
tp = TeamParser(get_data(url))
tp.load_data()
tp.create_teams()
gp = GameParser(get_data(url), None)
gp.load_data()
gp.create_game(tp.teams)
assert gp.game_data == [
'Saturday, February 4, 2017', 'Attendance 19,092 at Amalie Arena',
'Start 7:08 EST; End 10:09 EST', 'Game 0776', 'Final']
def test_playoff_game():
url = "http://www.nhl.com/scores/htmlreports/20122013/ES030325.HTM"
tp = TeamParser(get_data(url))
tp.load_data()
tp.create_teams()
gp = GameParser(2, get_data(url), None)
gp.load_data()
gp.create_game(tp.teams)
assert gp.game_data == [
'Saturday, June 8, 2013', 'Attendance 22,237 at United Center',
'Start 7:20 CDT; End 11:02 CDT', 'Game 0325', 'Final']
def get_data(url):
r = requests.get(url)
return html.fromstring(r.text)
if __name__ == '__main__':
test_2016()
test_playoff_game()
|
Add tests for game parser item#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
from lxml import html
from parsers.team_parser import TeamParser
from parsers.game_parser import GameParser
def test_2016():
url = "http://www.nhl.com/scores/htmlreports/20162017/ES020776.HTM"
tp = TeamParser(get_data(url))
tp.load_data()
tp.create_teams()
gp = GameParser(get_data(url), None)
gp.load_data()
gp.create_game(tp.teams)
assert gp.game_data == [
'Saturday, February 4, 2017', 'Attendance 19,092 at Amalie Arena',
'Start 7:08 EST; End 10:09 EST', 'Game 0776', 'Final']
def test_playoff_game():
url = "http://www.nhl.com/scores/htmlreports/20122013/ES030325.HTM"
tp = TeamParser(get_data(url))
tp.load_data()
tp.create_teams()
gp = GameParser(2, get_data(url), None)
gp.load_data()
gp.create_game(tp.teams)
assert gp.game_data == [
'Saturday, June 8, 2013', 'Attendance 22,237 at United Center',
'Start 7:20 CDT; End 11:02 CDT', 'Game 0325', 'Final']
def get_data(url):
r = requests.get(url)
return html.fromstring(r.text)
if __name__ == '__main__':
test_2016()
test_playoff_game()
|
<commit_before><commit_msg>Add tests for game parser item<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
from lxml import html
from parsers.team_parser import TeamParser
from parsers.game_parser import GameParser
def test_2016():
url = "http://www.nhl.com/scores/htmlreports/20162017/ES020776.HTM"
tp = TeamParser(get_data(url))
tp.load_data()
tp.create_teams()
gp = GameParser(get_data(url), None)
gp.load_data()
gp.create_game(tp.teams)
assert gp.game_data == [
'Saturday, February 4, 2017', 'Attendance 19,092 at Amalie Arena',
'Start 7:08 EST; End 10:09 EST', 'Game 0776', 'Final']
def test_playoff_game():
url = "http://www.nhl.com/scores/htmlreports/20122013/ES030325.HTM"
tp = TeamParser(get_data(url))
tp.load_data()
tp.create_teams()
gp = GameParser(2, get_data(url), None)
gp.load_data()
gp.create_game(tp.teams)
assert gp.game_data == [
'Saturday, June 8, 2013', 'Attendance 22,237 at United Center',
'Start 7:20 CDT; End 11:02 CDT', 'Game 0325', 'Final']
def get_data(url):
r = requests.get(url)
return html.fromstring(r.text)
if __name__ == '__main__':
test_2016()
test_playoff_game()
|
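The get_data helper above fetches and parses each report page with no error handling. A hedged, slightly more defensive variant is sketched below; the timeout value is an arbitrary example choice, not taken from the tests.
# Hedged sketch of a more defensive fetch helper; the timeout is an
# arbitrary example value, not part of the tests above.
import requests
from lxml import html

def get_data_safe(url, timeout=10):
    r = requests.get(url, timeout=timeout)
    r.raise_for_status()  # fail loudly instead of parsing an error page
    return html.fromstring(r.text)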
|
57b4d39749021305a2d5850e642537224d30611f
|
requests/hooks.py
|
requests/hooks.py
|
# -*- coding: utf-8 -*-
"""
requests.hooks
~~~~~~~~~~~~~~
This module provides the capabilities for the Requests hooks system.
Available hooks:
``args``:
A dictionary of the arguments being sent to Request().
``pre_request``:
The Request object, directly after being created.
``pre_send``:
The Request object, directly before being sent.
``post_request``:
The Request object, directly after being sent.
``response``:
The response generated from a Request.
"""
import traceback
HOOKS = ('args', 'pre_request', 'pre_send', 'post_request', 'response')
def dispatch_hook(key, hooks, hook_data):
"""Dispatches a hook dictionary on a given piece of data."""
hooks = hooks or dict()
if key in hooks:
hooks = hooks.get(key)
if hasattr(hooks, '__call__'):
hooks = [hooks]
for hook in hooks:
try:
_hook_data = hook(hook_data)
if _hook_data is not None:
hook_data = _hook_data
except Exception:
traceback.print_exc()
return hook_data
|
# -*- coding: utf-8 -*-
"""
requests.hooks
~~~~~~~~~~~~~~
This module provides the capabilities for the Requests hooks system.
Available hooks:
``args``:
A dictionary of the arguments being sent to Request().
``pre_request``:
The Request object, directly after being created.
``pre_send``:
The Request object, directly before being sent.
``post_request``:
The Request object, directly after being sent.
``response``:
The response generated from a Request.
"""
HOOKS = ('args', 'pre_request', 'pre_send', 'post_request', 'response')
def dispatch_hook(key, hooks, hook_data):
"""Dispatches a hook dictionary on a given piece of data."""
hooks = hooks or dict()
if key in hooks:
hooks = hooks.get(key)
if hasattr(hooks, '__call__'):
hooks = [hooks]
for hook in hooks:
_hook_data = hook(hook_data)
if _hook_data is not None:
hook_data = _hook_data
return hook_data
|
Remove exception eating from dispatch_hook.
|
Remove exception eating from dispatch_hook.
|
Python
|
isc
|
Bluehorn/requests,revolunet/requests,revolunet/requests,psf/requests
|
# -*- coding: utf-8 -*-
"""
requests.hooks
~~~~~~~~~~~~~~
This module provides the capabilities for the Requests hooks system.
Available hooks:
``args``:
A dictionary of the arguments being sent to Request().
``pre_request``:
The Request object, directly after being created.
``pre_send``:
The Request object, directly before being sent.
``post_request``:
The Request object, directly after being sent.
``response``:
The response generated from a Request.
"""
import traceback
HOOKS = ('args', 'pre_request', 'pre_send', 'post_request', 'response')
def dispatch_hook(key, hooks, hook_data):
"""Dispatches a hook dictionary on a given piece of data."""
hooks = hooks or dict()
if key in hooks:
hooks = hooks.get(key)
if hasattr(hooks, '__call__'):
hooks = [hooks]
for hook in hooks:
try:
_hook_data = hook(hook_data)
if _hook_data is not None:
hook_data = _hook_data
except Exception:
traceback.print_exc()
return hook_data
Remove exception eating from dispatch_hook.
|
# -*- coding: utf-8 -*-
"""
requests.hooks
~~~~~~~~~~~~~~
This module provides the capabilities for the Requests hooks system.
Available hooks:
``args``:
A dictionary of the arguments being sent to Request().
``pre_request``:
The Request object, directly after being created.
``pre_send``:
The Request object, directly before being sent.
``post_request``:
The Request object, directly after being sent.
``response``:
The response generated from a Request.
"""
HOOKS = ('args', 'pre_request', 'pre_send', 'post_request', 'response')
def dispatch_hook(key, hooks, hook_data):
"""Dispatches a hook dictionary on a given piece of data."""
hooks = hooks or dict()
if key in hooks:
hooks = hooks.get(key)
if hasattr(hooks, '__call__'):
hooks = [hooks]
for hook in hooks:
_hook_data = hook(hook_data)
if _hook_data is not None:
hook_data = _hook_data
return hook_data
|
<commit_before># -*- coding: utf-8 -*-
"""
requests.hooks
~~~~~~~~~~~~~~
This module provides the capabilities for the Requests hooks system.
Available hooks:
``args``:
A dictionary of the arguments being sent to Request().
``pre_request``:
The Request object, directly after being created.
``pre_send``:
The Request object, directly before being sent.
``post_request``:
The Request object, directly after being sent.
``response``:
The response generated from a Request.
"""
import traceback
HOOKS = ('args', 'pre_request', 'pre_send', 'post_request', 'response')
def dispatch_hook(key, hooks, hook_data):
"""Dispatches a hook dictionary on a given piece of data."""
hooks = hooks or dict()
if key in hooks:
hooks = hooks.get(key)
if hasattr(hooks, '__call__'):
hooks = [hooks]
for hook in hooks:
try:
_hook_data = hook(hook_data)
if _hook_data is not None:
hook_data = _hook_data
except Exception:
traceback.print_exc()
return hook_data
<commit_msg>Remove exception eating from dispatch_hook.<commit_after>
|
# -*- coding: utf-8 -*-
"""
requests.hooks
~~~~~~~~~~~~~~
This module provides the capabilities for the Requests hooks system.
Available hooks:
``args``:
A dictionary of the arguments being sent to Request().
``pre_request``:
The Request object, directly after being created.
``pre_send``:
The Request object, directly before being sent.
``post_request``:
The Request object, directly after being sent.
``response``:
The response generated from a Request.
"""
HOOKS = ('args', 'pre_request', 'pre_send', 'post_request', 'response')
def dispatch_hook(key, hooks, hook_data):
"""Dispatches a hook dictionary on a given piece of data."""
hooks = hooks or dict()
if key in hooks:
hooks = hooks.get(key)
if hasattr(hooks, '__call__'):
hooks = [hooks]
for hook in hooks:
_hook_data = hook(hook_data)
if _hook_data is not None:
hook_data = _hook_data
return hook_data
|
# -*- coding: utf-8 -*-
"""
requests.hooks
~~~~~~~~~~~~~~
This module provides the capabilities for the Requests hooks system.
Available hooks:
``args``:
A dictionary of the arguments being sent to Request().
``pre_request``:
The Request object, directly after being created.
``pre_send``:
The Request object, directly before being sent.
``post_request``:
The Request object, directly after being sent.
``response``:
The response generated from a Request.
"""
import traceback
HOOKS = ('args', 'pre_request', 'pre_send', 'post_request', 'response')
def dispatch_hook(key, hooks, hook_data):
"""Dispatches a hook dictionary on a given piece of data."""
hooks = hooks or dict()
if key in hooks:
hooks = hooks.get(key)
if hasattr(hooks, '__call__'):
hooks = [hooks]
for hook in hooks:
try:
_hook_data = hook(hook_data)
if _hook_data is not None:
hook_data = _hook_data
except Exception:
traceback.print_exc()
return hook_data
Remove exception eating from dispatch_hook.# -*- coding: utf-8 -*-
"""
requests.hooks
~~~~~~~~~~~~~~
This module provides the capabilities for the Requests hooks system.
Available hooks:
``args``:
A dictionary of the arguments being sent to Request().
``pre_request``:
The Request object, directly after being created.
``pre_send``:
The Request object, directly before being sent.
``post_request``:
The Request object, directly after being sent.
``response``:
The response generated from a Request.
"""
HOOKS = ('args', 'pre_request', 'pre_send', 'post_request', 'response')
def dispatch_hook(key, hooks, hook_data):
"""Dispatches a hook dictionary on a given piece of data."""
hooks = hooks or dict()
if key in hooks:
hooks = hooks.get(key)
if hasattr(hooks, '__call__'):
hooks = [hooks]
for hook in hooks:
_hook_data = hook(hook_data)
if _hook_data is not None:
hook_data = _hook_data
return hook_data
|
<commit_before># -*- coding: utf-8 -*-
"""
requests.hooks
~~~~~~~~~~~~~~
This module provides the capabilities for the Requests hooks system.
Available hooks:
``args``:
A dictionary of the arguments being sent to Request().
``pre_request``:
The Request object, directly after being created.
``pre_send``:
The Request object, directly before being sent.
``post_request``:
The Request object, directly after being sent.
``response``:
The response generated from a Request.
"""
import traceback
HOOKS = ('args', 'pre_request', 'pre_send', 'post_request', 'response')
def dispatch_hook(key, hooks, hook_data):
"""Dispatches a hook dictionary on a given piece of data."""
hooks = hooks or dict()
if key in hooks:
hooks = hooks.get(key)
if hasattr(hooks, '__call__'):
hooks = [hooks]
for hook in hooks:
try:
_hook_data = hook(hook_data)
if _hook_data is not None:
hook_data = _hook_data
except Exception:
traceback.print_exc()
return hook_data
<commit_msg>Remove exception eating from dispatch_hook.<commit_after># -*- coding: utf-8 -*-
"""
requests.hooks
~~~~~~~~~~~~~~
This module provides the capabilities for the Requests hooks system.
Available hooks:
``args``:
A dictionary of the arguments being sent to Request().
``pre_request``:
The Request object, directly after being created.
``pre_send``:
The Request object, directly before being sent.
``post_request``:
The Request object, directly after being sent.
``response``:
The response generated from a Request.
"""
HOOKS = ('args', 'pre_request', 'pre_send', 'post_request', 'response')
def dispatch_hook(key, hooks, hook_data):
"""Dispatches a hook dictionary on a given piece of data."""
hooks = hooks or dict()
if key in hooks:
hooks = hooks.get(key)
if hasattr(hooks, '__call__'):
hooks = [hooks]
for hook in hooks:
_hook_data = hook(hook_data)
if _hook_data is not None:
hook_data = _hook_data
return hook_data
|
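With the try/except removed, a raising hook now propagates to the caller instead of being swallowed. A minimal sketch of dispatching a hook through this helper follows; the hook function and payload are invented for illustration, not part of the requests API surface.
# Hypothetical caller of dispatch_hook; the hook and payload are
# invented for illustration.
def annotate(data):
    data['seen'] = True  # mutate in place; returning None keeps hook_data as-is

hooks = {'response': annotate}
payload = dispatch_hook('response', hooks, {'status': 200})
assert payload == {'status': 200, 'seen': True}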
48b1479a84fcb6d16d8aea8ca256d01e9898eeea
|
tools/fake_robot.py
|
tools/fake_robot.py
|
import json
from time import time
from threading import Timer
from tornado.ioloop import IOLoop
from tornado.web import Application
from tornado.websocket import WebSocketHandler
class RepeatedTimer(object):
def __init__(self, interval, function, *args, **kwargs):
self._timer = None
self.interval = interval
self.function = function
self.args = args
self.kwargs = kwargs
self.is_running = False
self.start()
def _run(self):
self.is_running = False
self.function(*self.args, **self.kwargs)
self.start()
def start(self):
if not self.is_running:
self._timer = Timer(self.interval, self._run)
self._timer.start()
self.is_running = True
def stop(self):
self._timer.cancel()
self.is_running = False
class FakeRobot(WebSocketHandler):
period = 1 / 10
verbose = False
def open(self):
if self.verbose:
print('WebSocket connection open.')
self.rt = RepeatedTimer(self.period, self.pub_state)
def on_message(self, message):
if self.verbose:
print('{}: Received {}'.format(time(), message))
self.handle_command(json.loads(message))
def on_close(self):
if self.verbose:
print('WebSocket closed {}.'.format(self.close_reason))
self.rt.stop()
def pub_state(self):
state = {
'modules': [
{
'alias': 'my_led',
'id': 2,
'type': 'led',
},
{
'alias': 'my_motor',
'id': 3,
'type': 'motor',
},
{
'alias': 'my_button',
'id': 4,
'type': 'button',
'value': 0,
},
{
'alias': 'my_potentiometer',
'id': 5,
'type': 'potard',
'value': 50,
},
{
'alias': 'my_relay',
'id': 6,
'type': 'relay',
},
{
'alias': 'my_distance',
'id': 7,
'type': 'distance',
'value': 12,
},
]
}
self.write_message(json.dumps(state))
def handle_command(self, message):
pass
if __name__ == '__main__':
port = 9342
FakeRobot.verbose = True
loop = IOLoop()
app = Application([
(r'/', FakeRobot)
])
app.listen(port)
url = 'ws://{}:{}'.format('127.0.0.1', port)
print('Fake robot serving on {}'.format(url))
loop.start()
|
Add fake robot utility tool.
|
Add fake robot utility tool.
|
Python
|
mit
|
pollen/pyrobus
|
Add fake robot utility tool.
|
import json
from time import time
from threading import Timer
from tornado.ioloop import IOLoop
from tornado.web import Application
from tornado.websocket import WebSocketHandler
class RepeatedTimer(object):
def __init__(self, interval, function, *args, **kwargs):
self._timer = None
self.interval = interval
self.function = function
self.args = args
self.kwargs = kwargs
self.is_running = False
self.start()
def _run(self):
self.is_running = False
self.function(*self.args, **self.kwargs)
self.start()
def start(self):
if not self.is_running:
self._timer = Timer(self.interval, self._run)
self._timer.start()
self.is_running = True
def stop(self):
self._timer.cancel()
self.is_running = False
class FakeRobot(WebSocketHandler):
period = 1 / 10
verbose = False
def open(self):
if self.verbose:
print('WebSocket connection open.')
self.rt = RepeatedTimer(self.period, self.pub_state)
def on_message(self, message):
if self.verbose:
print('{}: Received {}'.format(time(), message))
self.handle_command(json.loads(message))
def on_close(self):
if self.verbose:
print('WebSocket closed {}.'.format(self.close_reason))
self.rt.stop()
def pub_state(self):
state = {
'modules': [
{
'alias': 'my_led',
'id': 2,
'type': 'led',
},
{
'alias': 'my_motor',
'id': 3,
'type': 'motor',
},
{
'alias': 'my_button',
'id': 4,
'type': 'button',
'value': 0,
},
{
'alias': 'my_potentiometer',
'id': 5,
'type': 'potard',
'value': 50,
},
{
'alias': 'my_relay',
'id': 6,
'type': 'relay',
},
{
'alias': 'my_distance',
'id': 7,
'type': 'distance',
'value': 12,
},
]
}
self.write_message(json.dumps(state))
def handle_command(self, message):
pass
if __name__ == '__main__':
port = 9342
FakeRobot.verbose = True
loop = IOLoop()
app = Application([
(r'/', FakeRobot)
])
app.listen(port)
url = 'ws://{}:{}'.format('127.0.0.1', port)
print('Fake robot serving on {}'.format(url))
loop.start()
|
<commit_before><commit_msg>Add fake robot utility tool.<commit_after>
|
import json
from time import time
from threading import Timer
from tornado.ioloop import IOLoop
from tornado.web import Application
from tornado.websocket import WebSocketHandler
class RepeatedTimer(object):
def __init__(self, interval, function, *args, **kwargs):
self._timer = None
self.interval = interval
self.function = function
self.args = args
self.kwargs = kwargs
self.is_running = False
self.start()
def _run(self):
self.is_running = False
self.function(*self.args, **self.kwargs)
self.start()
def start(self):
if not self.is_running:
self._timer = Timer(self.interval, self._run)
self._timer.start()
self.is_running = True
def stop(self):
self._timer.cancel()
self.is_running = False
class FakeRobot(WebSocketHandler):
period = 1 / 10
verbose = False
def open(self):
if self.verbose:
print('WebSocket connection open.')
self.rt = RepeatedTimer(self.period, self.pub_state)
def on_message(self, message):
if self.verbose:
print('{}: Received {}'.format(time(), message))
self.handle_command(json.loads(message))
def on_close(self):
if self.verbose:
print('WebSocket closed {}.'.format(self.close_reason))
self.rt.stop()
def pub_state(self):
state = {
'modules': [
{
'alias': 'my_led',
'id': 2,
'type': 'led',
},
{
'alias': 'my_motor',
'id': 3,
'type': 'motor',
},
{
'alias': 'my_button',
'id': 4,
'type': 'button',
'value': 0,
},
{
'alias': 'my_potentiometer',
'id': 5,
'type': 'potard',
'value': 50,
},
{
'alias': 'my_relay',
'id': 6,
'type': 'relay',
},
{
'alias': 'my_distance',
'id': 7,
'type': 'distance',
'value': 12,
},
]
}
self.write_message(json.dumps(state))
def handle_command(self, message):
pass
if __name__ == '__main__':
port = 9342
FakeRobot.verbose = True
loop = IOLoop()
app = Application([
(r'/', FakeRobot)
])
app.listen(port)
url = 'ws://{}:{}'.format('127.0.0.1', port)
print('Fake robot serving on {}'.format(url))
loop.start()
|
Add fake robot utility tool.import json
from time import time
from threading import Timer
from tornado.ioloop import IOLoop
from tornado.web import Application
from tornado.websocket import WebSocketHandler
class RepeatedTimer(object):
def __init__(self, interval, function, *args, **kwargs):
self._timer = None
self.interval = interval
self.function = function
self.args = args
self.kwargs = kwargs
self.is_running = False
self.start()
def _run(self):
self.is_running = False
self.function(*self.args, **self.kwargs)
self.start()
def start(self):
if not self.is_running:
self._timer = Timer(self.interval, self._run)
self._timer.start()
self.is_running = True
def stop(self):
self._timer.cancel()
self.is_running = False
class FakeRobot(WebSocketHandler):
period = 1 / 10
verbose = False
def open(self):
if self.verbose:
print('WebSocket connection open.')
self.rt = RepeatedTimer(self.period, self.pub_state)
def on_message(self, message):
if self.verbose:
print('{}: Received {}'.format(time(), message))
self.handle_command(json.loads(message))
def on_close(self):
if self.verbose:
print('WebSocket closed {}.'.format(self.close_reason))
self.rt.stop()
def pub_state(self):
state = {
'modules': [
{
'alias': 'my_led',
'id': 2,
'type': 'led',
},
{
'alias': 'my_motor',
'id': 3,
'type': 'motor',
},
{
'alias': 'my_button',
'id': 4,
'type': 'button',
'value': 0,
},
{
'alias': 'my_potentiometer',
'id': 5,
'type': 'potard',
'value': 50,
},
{
'alias': 'my_relay',
'id': 6,
'type': 'relay',
},
{
'alias': 'my_distance',
'id': 7,
'type': 'distance',
'value': 12,
},
]
}
self.write_message(json.dumps(state))
def handle_command(self, message):
pass
if __name__ == '__main__':
port = 9342
FakeRobot.verbose = True
loop = IOLoop()
app = Application([
(r'/', FakeRobot)
])
app.listen(port)
url = 'ws://{}:{}'.format('127.0.0.1', port)
print('Fake robot serving on {}'.format(url))
loop.start()
|
<commit_before><commit_msg>Add fake robot utility tool.<commit_after>import json
from time import time
from threading import Timer
from tornado.ioloop import IOLoop
from tornado.web import Application
from tornado.websocket import WebSocketHandler
class RepeatedTimer(object):
def __init__(self, interval, function, *args, **kwargs):
self._timer = None
self.interval = interval
self.function = function
self.args = args
self.kwargs = kwargs
self.is_running = False
self.start()
def _run(self):
self.is_running = False
self.function(*self.args, **self.kwargs)
self.start()
def start(self):
if not self.is_running:
self._timer = Timer(self.interval, self._run)
self._timer.start()
self.is_running = True
def stop(self):
self._timer.cancel()
self.is_running = False
class FakeRobot(WebSocketHandler):
period = 1 / 10
verbose = False
def open(self):
if self.verbose:
print('WebSocket connection open.')
self.rt = RepeatedTimer(self.period, self.pub_state)
def on_message(self, message):
if self.verbose:
print('{}: Received {}'.format(time(), message))
self.handle_command(json.loads(message))
def on_close(self):
if self.verbose:
print('WebSocket closed {}.'.format(self.close_reason))
self.rt.stop()
def pub_state(self):
state = {
'modules': [
{
'alias': 'my_led',
'id': 2,
'type': 'led',
},
{
'alias': 'my_motor',
'id': 3,
'type': 'motor',
},
{
'alias': 'my_button',
'id': 4,
'type': 'button',
'value': 0,
},
{
'alias': 'my_potentiometer',
'id': 5,
'type': 'potard',
'value': 50,
},
{
'alias': 'my_relay',
'id': 6,
'type': 'relay',
},
{
'alias': 'my_distance',
'id': 7,
'type': 'distance',
'value': 12,
},
]
}
self.write_message(json.dumps(state))
def handle_command(self, message):
pass
if __name__ == '__main__':
port = 9342
FakeRobot.verbose = True
loop = IOLoop()
app = Application([
(r'/', FakeRobot)
])
app.listen(port)
url = 'ws://{}:{}'.format('127.0.0.1', port)
print('Fake robot serving on {}'.format(url))
loop.start()
|
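A hedged companion client for manually exercising the fake robot, assuming the same tornado installation provides the websocket_connect coroutine API; it reads one published state and exits.
# Hypothetical client for the fake robot above; assumes tornado's
# websocket_connect coroutine API is available.
import json
from tornado.ioloop import IOLoop
from tornado.websocket import websocket_connect

async def read_one_state(url='ws://127.0.0.1:9342'):
    conn = await websocket_connect(url)
    state = json.loads(await conn.read_message())
    print([m['alias'] for m in state['modules']])  # aliases from pub_state
    conn.close()

IOLoop.current().run_sync(read_one_state)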
|
46600be25ea95a0c823730694a7f79be453477b1
|
list_projects.py
|
list_projects.py
|
from yastlib import *
yast_id = 'id'
yast_password = 'password'
yast = Yast()
yast_hash = yast.login(yast_id, yast_password)
if yast_hash != False:
print 'Connected to yast.com'
projects = yast.getProjects()
nodes = projects.items()
for k, n in nodes:
print 'project ' + str(k) + ': ' + 'name: "' + n.name + '" parent: ' + str(n.parentId)
folders = yast.getFolders()
nodes = folders.items()
for k, n in nodes:
print 'folder: ' + str(k) + ': ' + 'name: "' + n.name + '" parent: ' + str(n.parentId)
else:
print 'Could not connect to yast.com'
|
Add script to list projects
|
Add script to list projects
|
Python
|
mit
|
jfitz/hours-reporter
|
Add script to list projects
|
from yastlib import *
yast_id = 'id'
yast_password = 'password'
yast = Yast()
yast_hash = yast.login(yast_id, yast_password)
if yast_hash != False:
print 'Connected to yast.com'
projects = yast.getProjects()
nodes = projects.items()
for k, n in nodes:
print 'project ' + str(k) + ': ' + 'name: "' + n.name + '" parent: ' + str(n.parentId)
folders = yast.getFolders()
nodes = folders.items()
for k, n in nodes:
print 'folder: ' + str(k) + ': ' + 'name: "' + n.name + '" parent: ' + str(n.parentId)
else:
print 'Could not connect to yast.com'
|
<commit_before><commit_msg>Add script to list projects<commit_after>
|
from yastlib import *
yast_id = 'id'
yast_password = 'password'
yast = Yast()
yast_hash = yast.login(yast_id, yast_password)
if yast_hash != False:
print 'Connected to yast.com'
projects = yast.getProjects()
nodes = projects.items()
for k, n in nodes:
print 'project ' + str(k) + ': ' + 'name: "' + n.name + '" parent: ' + str(n.parentId)
folders = yast.getFolders()
nodes = folders.items()
for k, n in nodes:
print 'folder: ' + str(k) + ': ' + 'name: "' + n.name + '" parent: ' + str(n.parentId)
else:
print 'Could not connect to yast.com'
|
Add script to list projectsfrom yastlib import *
yast_id = 'id'
yast_password = 'password'
yast = Yast()
yast_hash = yast.login(yast_id, yast_password)
if yast_hash != False:
print 'Connected to yast.com'
projects = yast.getProjects()
nodes = projects.items()
for k, n in nodes:
print 'project ' + str(k) + ': ' + 'name: "' + n.name + '" parent: ' + str(n.parentId)
folders = yast.getFolders()
nodes = folders.items()
for k, n in nodes:
print 'folder: ' + str(k) + ': ' + 'name: "' + n.name + '" parent: ' + str(n.parentId)
else:
print 'Could not connect to yast.com'
|
<commit_before><commit_msg>Add script to list projects<commit_after>from yastlib import *
yast_id = 'id'
yast_password = 'password'
yast = Yast()
yast_hash = yast.login(yast_id, yast_password)
if yast_hash != False:
print 'Connected to yast.com'
projects = yast.getProjects()
nodes = projects.items()
for k, n in nodes:
print 'project ' + str(k) + ': ' + 'name: "' + n.name + '" parent: ' + str(n.parentId)
folders = yast.getFolders()
nodes = folders.items()
for k, n in nodes:
print 'folder: ' + str(k) + ': ' + 'name: "' + n.name + '" parent: ' + str(n.parentId)
else:
print 'Could not connect to yast.com'
|
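Since both loops above only use each node's name and parentId, a hedged helper for grouping the fetched nodes into a parent-to-children map is sketched below; it relies on nothing beyond the attributes the script already reads.
# Hypothetical grouping helper; uses only the .name and .parentId
# attributes the script above already depends on.
from collections import defaultdict

def children_by_parent(nodes):
    tree = defaultdict(list)
    for node_id, node in nodes.items():
        tree[node.parentId].append((node_id, node.name))
    return tree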
|
7a4368bd4e3477300dd07408f622af4e55a106d2
|
src/test.py
|
src/test.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import unittest
from mst_tree_builder import *
from framenetreader import *
if __name__ == '__main__':
unittest.main()
|
Test all modules in one fell swoop
|
Test all modules in one fell swoop
git-svn-id: a2d0af3c19596d99b5c1e07a0b4fed4eaca14ddf@18173 7fff26f0-e11d-0410-b8d0-f4b6ff9b0dc5
|
Python
|
agpl-3.0
|
aymara/knowledgesrl,aymara/knowledgesrl
|
Test all modules in one fell swoop
git-svn-id: a2d0af3c19596d99b5c1e07a0b4fed4eaca14ddf@18173 7fff26f0-e11d-0410-b8d0-f4b6ff9b0dc5
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import unittest
from mst_tree_builder import *
from framenetreader import *
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Test all modules in one fell swoop
git-svn-id: a2d0af3c19596d99b5c1e07a0b4fed4eaca14ddf@18173 7fff26f0-e11d-0410-b8d0-f4b6ff9b0dc5<commit_after>
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import unittest
from mst_tree_builder import *
from framenetreader import *
if __name__ == '__main__':
unittest.main()
|
Test all modules in one fell swoop
git-svn-id: a2d0af3c19596d99b5c1e07a0b4fed4eaca14ddf@18173 7fff26f0-e11d-0410-b8d0-f4b6ff9b0dc5#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import unittest
from mst_tree_builder import *
from framenetreader import *
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Test all modules in one fell swoop
git-svn-id: a2d0af3c19596d99b5c1e07a0b4fed4eaca14ddf@18173 7fff26f0-e11d-0410-b8d0-f4b6ff9b0dc5<commit_after>#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import unittest
from mst_tree_builder import *
from framenetreader import *
if __name__ == '__main__':
unittest.main()
|
|
df298d5bb8925ce54a9cd400494831c975ca8578
|
experimental/directshow.py
|
experimental/directshow.py
|
#!/usr/bin/python
# $Id:$
# Play an audio file with DirectShow. Tested ok with MP3, WMA, MID, WAV, AU.
# Caveats:
# - Requires a filename (not from memory or stream yet). Looks like we need
# to manually implement a filter which provides an output IPin. Lot of
# work.
# - Theoretically can traverse the graph to get the output filter, which by
# default is supposed to implement IDirectSound3DBuffer, for positioned
# sounds. Untested.
# - Requires comtypes. Can work around this in future by implementing the
# small subset of comtypes ourselves (or including a snapshot of comtypes in
# pyglet).
import ctypes
from comtypes import client
import sys
import time
filename = sys.argv[1]
qedit = client.GetModule('qedit.dll') # DexterLib
quartz = client.GetModule('quartz.dll') #
CLSID_FilterGraph = '{e436ebb3-524f-11ce-9f53-0020af0ba770}'
filter_graph = client.CreateObject(CLSID_FilterGraph,
interface=qedit.IFilterGraph)
filter_builder = filter_graph.QueryInterface(qedit.IGraphBuilder)
filter_builder.RenderFile(filename, None)
media_control = filter_graph.QueryInterface(quartz.IMediaControl)
media_control.Run()
try:
# Look at IMediaEvent interface for EOS notification
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
# Need these because finalisers don't have enough context to clean up after
# themselves when script exits.
del media_control
del filter_builder
del filter_graph
|
Move win32 audio experiment to trunk.
|
Move win32 audio experiment to trunk.
git-svn-id: d4fdfcd4de20a449196f78acc655f735742cd30d@736 14d46d22-621c-0410-bb3d-6f67920f7d95
|
Python
|
bsd-3-clause
|
regular/pyglet-avbin-optimizations,regular/pyglet-avbin-optimizations,regular/pyglet-avbin-optimizations,regular/pyglet-avbin-optimizations
|
Move win32 audio experiment to trunk.
git-svn-id: d4fdfcd4de20a449196f78acc655f735742cd30d@736 14d46d22-621c-0410-bb3d-6f67920f7d95
|
#!/usr/bin/python
# $Id:$
# Play an audio file with DirectShow. Tested ok with MP3, WMA, MID, WAV, AU.
# Caveats:
# - Requires a filename (not from memory or stream yet). Looks like we need
# to manually implement a filter which provides an output IPin. Lot of
# work.
# - Theoretically can traverse the graph to get the output filter, which by
# default is supposed to implement IDirectSound3DBuffer, for positioned
# sounds. Untested.
# - Requires comtypes. Can work around this in future by implementing the
# small subset of comtypes ourselves (or including a snapshot of comtypes in
# pyglet).
import ctypes
from comtypes import client
import sys
import time
filename = sys.argv[1]
qedit = client.GetModule('qedit.dll') # DexterLib
quartz = client.GetModule('quartz.dll') #
CLSID_FilterGraph = '{e436ebb3-524f-11ce-9f53-0020af0ba770}'
filter_graph = client.CreateObject(CLSID_FilterGraph,
interface=qedit.IFilterGraph)
filter_builder = filter_graph.QueryInterface(qedit.IGraphBuilder)
filter_builder.RenderFile(filename, None)
media_control = filter_graph.QueryInterface(quartz.IMediaControl)
media_control.Run()
try:
# Look at IMediaEvent interface for EOS notification
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
# Need these because finalisers don't have enough context to clean up after
# themselves when script exits.
del media_control
del filter_builder
del filter_graph
|
<commit_before><commit_msg>Move win32 audio experiment to trunk.
git-svn-id: d4fdfcd4de20a449196f78acc655f735742cd30d@736 14d46d22-621c-0410-bb3d-6f67920f7d95<commit_after>
|
#!/usr/bin/python
# $Id:$
# Play an audio file with DirectShow. Tested ok with MP3, WMA, MID, WAV, AU.
# Caveats:
# - Requires a filename (not from memory or stream yet). Looks like we need
# to manually implement a filter which provides an output IPin. Lot of
# work.
# - Theoretically can traverse the graph to get the output filter, which by
# default is supposed to implement IDirectSound3DBuffer, for positioned
# sounds. Untested.
# - Requires comtypes. Can work around this in future by implementing the
# small subset of comtypes ourselves (or including a snapshot of comtypes in
# pyglet).
import ctypes
from comtypes import client
import sys
import time
filename = sys.argv[1]
qedit = client.GetModule('qedit.dll') # DexterLib
quartz = client.GetModule('quartz.dll') #
CLSID_FilterGraph = '{e436ebb3-524f-11ce-9f53-0020af0ba770}'
filter_graph = client.CreateObject(CLSID_FilterGraph,
interface=qedit.IFilterGraph)
filter_builder = filter_graph.QueryInterface(qedit.IGraphBuilder)
filter_builder.RenderFile(filename, None)
media_control = filter_graph.QueryInterface(quartz.IMediaControl)
media_control.Run()
try:
# Look at IMediaEvent interface for EOS notification
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
# Need these because finalisers don't have enough context to clean up after
# themselves when script exits.
del media_control
del filter_builder
del filter_graph
|
Move win32 audio experiment to trunk.
git-svn-id: d4fdfcd4de20a449196f78acc655f735742cd30d@736 14d46d22-621c-0410-bb3d-6f67920f7d95#!/usr/bin/python
# $Id:$
# Play an audio file with DirectShow. Tested ok with MP3, WMA, MID, WAV, AU.
# Caveats:
# - Requires a filename (not from memory or stream yet). Looks like we need
# to manually implement a filter which provides an output IPin. Lot of
# work.
# - Theoretically can traverse the graph to get the output filter, which by
# default is supposed to implement IDirectSound3DBuffer, for positioned
# sounds. Untested.
# - Requires comtypes. Can work around this in future by implementing the
# small subset of comtypes ourselves (or including a snapshot of comtypes in
# pyglet).
import ctypes
from comtypes import client
import sys
import time
filename = sys.argv[1]
qedit = client.GetModule('qedit.dll') # DexterLib
quartz = client.GetModule('quartz.dll') #
CLSID_FilterGraph = '{e436ebb3-524f-11ce-9f53-0020af0ba770}'
filter_graph = client.CreateObject(CLSID_FilterGraph,
interface=qedit.IFilterGraph)
filter_builder = filter_graph.QueryInterface(qedit.IGraphBuilder)
filter_builder.RenderFile(filename, None)
media_control = filter_graph.QueryInterface(quartz.IMediaControl)
media_control.Run()
try:
# Look at IMediaEvent interface for EOS notification
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
# Need these because finalisers don't have enough context to clean up after
# themselves when script exits.
del media_control
del filter_builder
del filter_graph
|
<commit_before><commit_msg>Move win32 audio experiment to trunk.
git-svn-id: d4fdfcd4de20a449196f78acc655f735742cd30d@736 14d46d22-621c-0410-bb3d-6f67920f7d95<commit_after>#!/usr/bin/python
# $Id:$
# Play an audio file with DirectShow. Tested ok with MP3, WMA, MID, WAV, AU.
# Caveats:
# - Requires a filename (not from memory or stream yet). Looks like we need
# to manually implement a filter which provides an output IPin. Lot of
# work.
# - Theoretically can traverse the graph to get the output filter, which by
# default is supposed to implement IDirectSound3DBuffer, for positioned
# sounds. Untested.
# - Requires comtypes. Can work around this in future by implementing the
# small subset of comtypes ourselves (or including a snapshot of comtypes in
# pyglet).
import ctypes
from comtypes import client
import sys
import time
filename = sys.argv[1]
qedit = client.GetModule('qedit.dll') # DexterLib
quartz = client.GetModule('quartz.dll') #
CLSID_FilterGraph = '{e436ebb3-524f-11ce-9f53-0020af0ba770}'
filter_graph = client.CreateObject(CLSID_FilterGraph,
interface=qedit.IFilterGraph)
filter_builder = filter_graph.QueryInterface(qedit.IGraphBuilder)
filter_builder.RenderFile(filename, None)
media_control = filter_graph.QueryInterface(quartz.IMediaControl)
media_control.Run()
try:
# Look at IMediaEvent interface for EOS notification
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
# Need these because finalisers don't have enough context to clean up after
# themselves when script exits.
del media_control
del filter_builder
del filter_graph
|
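Following the script's own comment about IMediaEvent, a hedged outline of replacing the sleep loop with end-of-stream waiting is sketched below. The exact comtypes call shape for the out parameter may differ, so treat this strictly as an outline rather than tested code.
# Hedged outline only: exact comtypes signatures for IMediaEvent may
# differ from this sketch.
EC_COMPLETE = 0x01  # DirectShow event code for end of stream

media_event = filter_graph.QueryInterface(quartz.IMediaEvent)
code = 0
while code != EC_COMPLETE:
    # WaitForCompletion takes a millisecond timeout and yields the event code.
    code = media_event.WaitForCompletion(1000)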
|
a9af0878b349238f72dda384d3084bc31518b0ab
|
set-nmea.py
|
set-nmea.py
|
#!/usr/bin/python
# Enable or disable the use of NMEA.
import ubx
import struct
import calendar
import os
import gobject
import logging
import sys
import socket
import time
loop = gobject.MainLoop()
def callback(ty, packet):
print("callback %s" % repr([ty, packet]))
if ty == "CFG-PRT":
if sys.argv[1] == "on":
# NMEA and UBX
packet[0]["In_proto_mask"] = 1 + 2
packet[0]["Out_proto_mask"] = 1 + 2
else:
# only UBX
packet[0]["In_proto_mask"] = 1
packet[0]["Out_proto_mask"] = 1
t.send("CFG-PRT", 20, packet)
elif ty == "ACK-ACK":
loop.quit()
return True
assert len(sys.argv) == 2
t = ubx.Parser(callback)
t.send("CFG-PRT", 0, [])
loop.run()
|
#!/usr/bin/python
# Enable or disable the use of NMEA.
import ubx
import struct
import calendar
import os
import gobject
import logging
import sys
import socket
import time
loop = gobject.MainLoop()
def callback(ty, packet):
print("callback %s" % repr([ty, packet]))
if ty == "CFG-PRT":
if sys.argv[1] == "on":
# NMEA and UBX
packet[1]["In_proto_mask"] = 1 + 2
packet[1]["Out_proto_mask"] = 1 + 2
else:
# only UBX
packet[1]["In_proto_mask"] = 1
packet[1]["Out_proto_mask"] = 1
t.send("CFG-PRT", 20, packet)
elif ty == "ACK-ACK":
loop.quit()
return True
assert len(sys.argv) == 2
t = ubx.Parser(callback)
t.send("CFG-PRT", 0, [])
loop.run()
|
Fix the script. It did not work at all before.
|
Fix the script. It did not work at all before.
|
Python
|
mit
|
berkeleyapplied/ubx,aanjhan/ubx,aanjhan/ubx,berkeleyapplied/ubx
|
#!/usr/bin/python
# Enable or disable the use of NMEA.
import ubx
import struct
import calendar
import os
import gobject
import logging
import sys
import socket
import time
loop = gobject.MainLoop()
def callback(ty, packet):
print("callback %s" % repr([ty, packet]))
if ty == "CFG-PRT":
if sys.argv[1] == "on":
# NMEA and UBX
packet[0]["In_proto_mask"] = 1 + 2
packet[0]["Out_proto_mask"] = 1 + 2
else:
# only UBX
packet[0]["In_proto_mask"] = 1
packet[0]["Out_proto_mask"] = 1
t.send("CFG-PRT", 20, packet)
elif ty == "ACK-ACK":
loop.quit()
return True
assert len(sys.argv) == 2
t = ubx.Parser(callback)
t.send("CFG-PRT", 0, [])
loop.run()
Fix the script. It did not work at all before.
|
#!/usr/bin/python
# Enable or disable the use of NMEA.
import ubx
import struct
import calendar
import os
import gobject
import logging
import sys
import socket
import time
loop = gobject.MainLoop()
def callback(ty, packet):
print("callback %s" % repr([ty, packet]))
if ty == "CFG-PRT":
if sys.argv[1] == "on":
# NMEA and UBX
packet[1]["In_proto_mask"] = 1 + 2
packet[1]["Out_proto_mask"] = 1 + 2
else:
# only UBX
packet[1]["In_proto_mask"] = 1
packet[1]["Out_proto_mask"] = 1
t.send("CFG-PRT", 20, packet)
elif ty == "ACK-ACK":
loop.quit()
return True
assert len(sys.argv) == 2
t = ubx.Parser(callback)
t.send("CFG-PRT", 0, [])
loop.run()
|
<commit_before>#!/usr/bin/python
# Enable or disable the use of NMEA.
import ubx
import struct
import calendar
import os
import gobject
import logging
import sys
import socket
import time
loop = gobject.MainLoop()
def callback(ty, packet):
print("callback %s" % repr([ty, packet]))
if ty == "CFG-PRT":
if sys.argv[1] == "on":
# NMEA and UBX
packet[0]["In_proto_mask"] = 1 + 2
packet[0]["Out_proto_mask"] = 1 + 2
else:
# only UBX
packet[0]["In_proto_mask"] = 1
packet[0]["Out_proto_mask"] = 1
t.send("CFG-PRT", 20, packet)
elif ty == "ACK-ACK":
loop.quit()
return True
assert len(sys.argv) == 2
t = ubx.Parser(callback)
t.send("CFG-PRT", 0, [])
loop.run()
<commit_msg>Fix the script. It did not work at all before.<commit_after>
|
#!/usr/bin/python
# Enable or disable the use of NMEA.
import ubx
import struct
import calendar
import os
import gobject
import logging
import sys
import socket
import time
loop = gobject.MainLoop()
def callback(ty, packet):
print("callback %s" % repr([ty, packet]))
if ty == "CFG-PRT":
if sys.argv[1] == "on":
# NMEA and UBX
packet[1]["In_proto_mask"] = 1 + 2
packet[1]["Out_proto_mask"] = 1 + 2
else:
# only UBX
packet[1]["In_proto_mask"] = 1
packet[1]["Out_proto_mask"] = 1
t.send("CFG-PRT", 20, packet)
elif ty == "ACK-ACK":
loop.quit()
return True
assert len(sys.argv) == 2
t = ubx.Parser(callback)
t.send("CFG-PRT", 0, [])
loop.run()
|
#!/usr/bin/python
# Enable or disable the use of NMEA.
import ubx
import struct
import calendar
import os
import gobject
import logging
import sys
import socket
import time
loop = gobject.MainLoop()
def callback(ty, packet):
print("callback %s" % repr([ty, packet]))
if ty == "CFG-PRT":
if sys.argv[1] == "on":
# NMEA and UBX
packet[0]["In_proto_mask"] = 1 + 2
packet[0]["Out_proto_mask"] = 1 + 2
else:
# only UBX
packet[0]["In_proto_mask"] = 1
packet[0]["Out_proto_mask"] = 1
t.send("CFG-PRT", 20, packet)
elif ty == "ACK-ACK":
loop.quit()
return True
assert len(sys.argv) == 2
t = ubx.Parser(callback)
t.send("CFG-PRT", 0, [])
loop.run()
Fix the script. It did not work at all before.#!/usr/bin/python
# Enable or disable the use of NMEA.
import ubx
import struct
import calendar
import os
import gobject
import logging
import sys
import socket
import time
loop = gobject.MainLoop()
def callback(ty, packet):
print("callback %s" % repr([ty, packet]))
if ty == "CFG-PRT":
if sys.argv[1] == "on":
# NMEA and UBX
packet[1]["In_proto_mask"] = 1 + 2
packet[1]["Out_proto_mask"] = 1 + 2
else:
# only UBX
packet[1]["In_proto_mask"] = 1
packet[1]["Out_proto_mask"] = 1
t.send("CFG-PRT", 20, packet)
elif ty == "ACK-ACK":
loop.quit()
return True
assert len(sys.argv) == 2
t = ubx.Parser(callback)
t.send("CFG-PRT", 0, [])
loop.run()
|
<commit_before>#!/usr/bin/python
# Enable or disable the use of NMEA.
import ubx
import struct
import calendar
import os
import gobject
import logging
import sys
import socket
import time
loop = gobject.MainLoop()
def callback(ty, packet):
print("callback %s" % repr([ty, packet]))
if ty == "CFG-PRT":
if sys.argv[1] == "on":
# NMEA and UBX
packet[0]["In_proto_mask"] = 1 + 2
packet[0]["Out_proto_mask"] = 1 + 2
else:
# only UBX
packet[0]["In_proto_mask"] = 1
packet[0]["Out_proto_mask"] = 1
t.send("CFG-PRT", 20, packet)
elif ty == "ACK-ACK":
loop.quit()
return True
assert len(sys.argv) == 2
t = ubx.Parser(callback)
t.send("CFG-PRT", 0, [])
loop.run()
<commit_msg>Fix the script. It did not work at all before.<commit_after>#!/usr/bin/python
# Enable or disable the use of NMEA.
import ubx
import struct
import calendar
import os
import gobject
import logging
import sys
import socket
import time
loop = gobject.MainLoop()
def callback(ty, packet):
print("callback %s" % repr([ty, packet]))
if ty == "CFG-PRT":
if sys.argv[1] == "on":
# NMEA and UBX
packet[1]["In_proto_mask"] = 1 + 2
packet[1]["Out_proto_mask"] = 1 + 2
else:
# only UBX
packet[1]["In_proto_mask"] = 1
packet[1]["Out_proto_mask"] = 1
t.send("CFG-PRT", 20, packet)
elif ty == "ACK-ACK":
loop.quit()
return True
assert len(sys.argv) == 2
t = ubx.Parser(callback)
t.send("CFG-PRT", 0, [])
loop.run()
|
a3084c3672a2dcfdadf22535540e9f843feb3561
|
busstops/management/commands/enhance_ni_stops.py
|
busstops/management/commands/enhance_ni_stops.py
|
"""Usage:
./manage.py enhance_ni_stops
"""
import requests
from time import sleep
from django.core.management.base import BaseCommand
from ...models import StopPoint
SESSION = requests.Session()
class Command(BaseCommand):
def handle(self, *args, **options):
for stop in StopPoint.objects.filter(atco_code__startswith='7000', service__current=True,
town=''):
response = SESSION.get('http://nominatim.openstreetmap.org/reverse', params={
'format': 'json',
'lon': stop.latlong.x,
'lat': stop.latlong.y
}).json()
print(stop.atco_code)
print(response)
stop.street = response['address']['road']
stop.town = response['address'].get('locality', '')
stop.save()
sleep(1)
|
Add reverse geocode command for NI
|
Add reverse geocode command for NI
|
Python
|
mpl-2.0
|
stev-0/bustimes.org.uk,jclgoodwin/bustimes.org.uk,jclgoodwin/bustimes.org.uk,stev-0/bustimes.org.uk,stev-0/bustimes.org.uk,stev-0/bustimes.org.uk,jclgoodwin/bustimes.org.uk,jclgoodwin/bustimes.org.uk,stev-0/bustimes.org.uk
|
Add reverse geocode command for NI
|
"""Usage:
./manage.py enhance_ni_stops
"""
import requests
from time import sleep
from django.core.management.base import BaseCommand
from ...models import StopPoint
SESSION = requests.Session()
class Command(BaseCommand):
def handle(self, *args, **options):
for stop in StopPoint.objects.filter(atco_code__startswith='7000', service__current=True,
town=''):
response = SESSION.get('http://nominatim.openstreetmap.org/reverse', params={
'format': 'json',
'lon': stop.latlong.x,
'lat': stop.latlong.y
}).json()
print(stop.atco_code)
print(response)
stop.street = response['address']['road']
stop.town = response['address'].get('locality', '')
stop.save()
sleep(1)
|
<commit_before><commit_msg>Add reverse geocode command for NI<commit_after>
|
"""Usage:
./manage.py enhance_ni_stops
"""
import requests
from time import sleep
from django.core.management.base import BaseCommand
from ...models import StopPoint
SESSION = requests.Session()
class Command(BaseCommand):
def handle(self, *args, **options):
for stop in StopPoint.objects.filter(atco_code__startswith='7000', service__current=True,
town=''):
response = SESSION.get('http://nominatim.openstreetmap.org/reverse', params={
'format': 'json',
'lon': stop.latlong.x,
'lat': stop.latlong.y
}).json()
print(stop.atco_code)
print(response)
stop.street = response['address']['road']
stop.town = response['address'].get('locality', '')
stop.save()
sleep(1)
|
Add reverse geocode command for NI"""Usage:
./manage.py enhance_ni_stops
"""
import requests
from time import sleep
from django.core.management.base import BaseCommand
from ...models import StopPoint
SESSION = requests.Session()
class Command(BaseCommand):
def handle(self, *args, **options):
for stop in StopPoint.objects.filter(atco_code__startswith='7000', service__current=True,
town=''):
response = SESSION.get('http://nominatim.openstreetmap.org/reverse', params={
'format': 'json',
'lon': stop.latlong.x,
'lat': stop.latlong.y
}).json()
print(stop.atco_code)
print(response)
stop.street = response['address']['road']
stop.town = response['address'].get('locality', '')
stop.save()
sleep(1)
|
<commit_before><commit_msg>Add reverse geocode command for NI<commit_after>"""Usage:
./manage.py enhance_ni_stops
"""
import requests
from time import sleep
from django.core.management.base import BaseCommand
from ...models import StopPoint
SESSION = requests.Session()
class Command(BaseCommand):
def handle(self, *args, **options):
for stop in StopPoint.objects.filter(atco_code__startswith='7000', service__current=True,
town=''):
response = SESSION.get('http://nominatim.openstreetmap.org/reverse', params={
'format': 'json',
'lon': stop.latlong.x,
'lat': stop.latlong.y
}).json()
print(stop.atco_code)
print(response)
stop.street = response['address']['road']
stop.town = response['address'].get('locality', '')
stop.save()
sleep(1)
|
|
88b2011e0f337f1d91192dc62cb051590914ab8e
|
globOpt/scripts/normal_distr.py
|
globOpt/scripts/normal_distr.py
|
import healpy as hp
import numpy as np
import matplotlib.pyplot as plt
def load_ply(path):
lines = []
verts = []
norms = []
f = open(path, "r")
for line in f:
lines.append(line)
if (lines[0] != "ply\n"):
return 0
i = 1
#get number of vertices
while (lines[i].split()[0] != 'element'):
i += 1
if (lines[i].split()[1] == 'vertex'):
nbV = int(lines[i].split()[2])
print str(nbV) + " vertices"
i += 1
#count number of properties: if 3 no normals, if 6 normals
nbP = 0
# while (lines[i].split()[0] == 'property'):
# nbP += 1
# i += 1
#if ((lines[i].split()[0] == "element") & (lines[i].split()[1] == "face")):
# nbF = int(lines[i].split()[2])
# print str(nbF) + " faces"
while (lines[i].split()[0] != 'end_header'):
i += 1
vstart = i + 1
invertedIndex = [[] for x in xrange(nbV)]
#read vertices and normals
for i in range(vstart,vstart+nbV):
vals = lines[i].split()
flist = map(float, vals)
verts.append(flist[0:3])
#if (nbP > 3):
norms.append(flist[0:3])
#if (nbP > 6):
# curvatures.append(flist[6])
f.close()
return verts, np.swapaxes(norms,0,1)
def normalized(a):
return a / np.linalg.norm(a, 2, 0)
nside = 32
verts, norms = load_ply( "/export/home/kandinsky/nmellado/git/globOpt/data/scenes-paper/bu_lansFull/cloudRGBNormal_patches_gaussSphere.ply")
mmap = np.zeros((hp.nside2npix(nside)))
mmap[ hp.vec2pix(nside, norms[0], norms[1], norms[2]) ] += 1
mmap = mmap
hp.cartview(mmap, nest=True, title="Mollview image NESTED")
plt.savefig('out_patches.png')
|
Add script to display normal distribution
|
Add script to display normal distribution
|
Python
|
apache-2.0
|
amonszpart/globOpt,NUAAXXY/globOpt,NUAAXXY/globOpt,amonszpart/globOpt,amonszpart/globOpt,amonszpart/globOpt,NUAAXXY/globOpt,amonszpart/globOpt,NUAAXXY/globOpt,NUAAXXY/globOpt,amonszpart/globOpt,NUAAXXY/globOpt
|
Add script to display normal distribution
|
import healpy as hp
import numpy as np
import matplotlib.pyplot as plt
def load_ply(path):
lines = []
verts = []
norms = []
f = open(path, "r")
for line in f:
lines.append(line)
if (lines[0] != "ply\n"):
return 0
i = 1
#get number of vertices
while (lines[i].split()[0] != 'element'):
i += 1
if (lines[i].split()[1] == 'vertex'):
nbV = int(lines[i].split()[2])
print str(nbV) + " vertices"
i += 1
#count number of properties: if 3 no normals, if 6 normals
nbP = 0
# while (lines[i].split()[0] == 'property'):
# nbP += 1
# i += 1
#if ((lines[i].split()[0] == "element") & (lines[i].split()[1] == "face")):
# nbF = int(lines[i].split()[2])
# print str(nbF) + " faces"
while (lines[i].split()[0] != 'end_header'):
i += 1
vstart = i + 1
invertedIndex = [[] for x in xrange(nbV)]
#read vertices and normals
for i in range(vstart,vstart+nbV):
vals = lines[i].split()
flist = map(float, vals)
verts.append(flist[0:3])
#if (nbP > 3):
norms.append(flist[0:3])
#if (nbP > 6):
# curvatures.append(flist[6])
f.close()
return verts, np.swapaxes(norms,0,1)
def normalized(a):
return a / np.linalg.norm(a, 2, 0)
nside = 32
verts, norms = load_ply( "/export/home/kandinsky/nmellado/git/globOpt/data/scenes-paper/bu_lansFull/cloudRGBNormal_patches_gaussSphere.ply")
mmap = np.zeros((hp.nside2npix(nside)))
mmap[ hp.vec2pix(nside, norms[0], norms[1], norms[2]) ] += 1
mmap = mmap
hp.cartview(mmap, nest=True, title="Mollview image NESTED")
plt.savefig('out_patches.png')
|
<commit_before><commit_msg>Add script to display normal distribution<commit_after>
|
import healpy as hp
import numpy as np
import matplotlib.pyplot as plt
def load_ply(path):
lines = []
verts = []
norms = []
f = open(path, "r")
for line in f:
lines.append(line)
if (lines[0] != "ply\n"):
return 0
i = 1
#get number of vertices
while (lines[i].split()[0] != 'element'):
i += 1
if (lines[i].split()[1] == 'vertex'):
nbV = int(lines[i].split()[2])
print str(nbV) + " vertices"
i += 1
#count number of properties: if 3 no normals, if 6 normals
nbP = 0
# while (lines[i].split()[0] == 'property'):
# nbP += 1
# i += 1
#if ((lines[i].split()[0] == "element") & (lines[i].split()[1] == "face")):
# nbF = int(lines[i].split()[2])
# print str(nbF) + " faces"
while (lines[i].split()[0] != 'end_header'):
i += 1
vstart = i + 1
invertedIndex = [[] for x in xrange(nbV)]
#read vertices and normals
for i in range(vstart,vstart+nbV):
vals = lines[i].split()
flist = map(float, vals)
verts.append(flist[0:3])
#if (nbP > 3):
norms.append(flist[0:3])
#if (nbP > 6):
# curvatures.append(flist[6])
f.close()
return verts, np.swapaxes(norms,0,1)
def normalized(a):
return a / np.linalg.norm(a, 2, 0)
nside = 32
verts, norms = load_ply( "/export/home/kandinsky/nmellado/git/globOpt/data/scenes-paper/bu_lansFull/cloudRGBNormal_patches_gaussSphere.ply")
mmap = np.zeros((hp.nside2npix(nside)))
mmap[ hp.vec2pix(nside, norms[0], norms[1], norms[2]) ] += 1
mmap = mmap
hp.cartview(mmap, nest=True, title="Mollview image NESTED")
plt.savefig('out_patches.png')
|
Add script to display normal distribution
import healpy as hp
import numpy as np
import matplotlib.pyplot as plt
def load_ply(path):
lines = []
verts = []
norms = []
f = open(path, "r")
for line in f:
lines.append(line)
if (lines[0] != "ply\n"):
return 0
i = 1
#get number of vertices
while (lines[i].split()[0] != 'element'):
i += 1
if (lines[i].split()[1] == 'vertex'):
nbV = int(lines[i].split()[2])
print str(nbV) + " vertices"
i += 1
#count number of properties: if 3 no normals, if 6 normals
nbP = 0
# while (lines[i].split()[0] == 'property'):
# nbP += 1
# i += 1
#if ((lines[i].split()[0] == "element") & (lines[i].split()[1] == "face")):
# nbF = int(lines[i].split()[2])
# print str(nbF) + " faces"
while (lines[i].split()[0] != 'end_header'):
i += 1
vstart = i + 1
invertedIndex = [[] for x in xrange(nbV)]
#read vertices and normals
for i in range(vstart,vstart+nbV):
vals = lines[i].split()
flist = map(float, vals)
verts.append(flist[0:3])
#if (nbP > 3):
norms.append(flist[0:3])
#if (nbP > 6):
# curvatures.append(flist[6])
f.close()
return verts, np.swapaxes(norms,0,1)
def normalized(a):
return a / np.linalg.norm(a, 2, 0)
nside = 32
verts, norms = load_ply( "/export/home/kandinsky/nmellado/git/globOpt/data/scenes-paper/bu_lansFull/cloudRGBNormal_patches_gaussSphere.ply")
mmap = np.zeros((hp.nside2npix(nside)))
mmap[ hp.vec2pix(nside, norms[0], norms[1], norms[2]) ] += 1
mmap = mmap
hp.cartview(mmap, nest=True, title="Mollview image NESTED")
plt.savefig('out_patches.png')
|
<commit_before><commit_msg>Add script to display normal distribution<commit_after>
import healpy as hp
import numpy as np
import matplotlib.pyplot as plt
def load_ply(path):
lines = []
verts = []
norms = []
f = open(path, "r")
for line in f:
lines.append(line)
if (lines[0] != "ply\n"):
return 0
i = 1
#get number of vertices
while (lines[i].split()[0] != 'element'):
i += 1
if (lines[i].split()[1] == 'vertex'):
nbV = int(lines[i].split()[2])
print str(nbV) + " vertices"
i += 1
#count number of properties: if 3 no normals, if 6 normals
nbP = 0
# while (lines[i].split()[0] == 'property'):
# nbP += 1
# i += 1
#if ((lines[i].split()[0] == "element") & (lines[i].split()[1] == "face")):
# nbF = int(lines[i].split()[2])
# print str(nbF) + " faces"
while (lines[i].split()[0] != 'end_header'):
i += 1
vstart = i + 1
invertedIndex = [[] for x in xrange(nbV)]
#read vertices and normals
for i in range(vstart,vstart+nbV):
vals = lines[i].split()
flist = map(float, vals)
verts.append(flist[0:3])
#if (nbP > 3):
norms.append(flist[0:3])
#if (nbP > 6):
# curvatures.append(flist[6])
f.close()
return verts, np.swapaxes(norms,0,1)
def normalized(a):
return a / np.linalg.norm(a, 2, 0)
nside = 32
verts, norms = load_ply( "/export/home/kandinsky/nmellado/git/globOpt/data/scenes-paper/bu_lansFull/cloudRGBNormal_patches_gaussSphere.ply")
mmap = np.zeros((hp.nside2npix(nside)))
mmap[ hp.vec2pix(nside, norms[0], norms[1], norms[2]) ] += 1
mmap = mmap
hp.cartview(mmap, nest=True, title="Mollview image NESTED")
plt.savefig('out_patches.png')
|
|
c786ae11e5dfb6166e3d94a64ac5efed5be2de78
|
DataWrangling/stream_csv.py
|
DataWrangling/stream_csv.py
|
"""Use a generator with numpy loadtxt"""
import numpy as np
def file_stream(file_name):
for line in open(file_name):
yield line
def demo_use():
file_name = 'census_abs2011_summary.csv'
# Skip the header, return 3 columns: the age, weekly rental and total family income
data = np.loadtxt(file_stream(file_name), delimiter=',', usecols=(0, 2, 3), skiprows=1)
print('the loaded data')
print(data.shape)
print(data)
if __name__ == '__main__':
demo_use()
|
Use a generator with numpy.loadtxt
|
Use a generator with numpy.loadtxt
|
Python
|
apache-2.0
|
chengsoonong/didbits
|
Use a generator with numpy.loadtxt
|
"""Use a generator with numpy loadtxt"""
import numpy as np
def file_stream(file_name):
for line in open(file_name):
yield line
def demo_use():
file_name = 'census_abs2011_summary.csv'
# Skip the header, return 3 columns: the age, weekly rental and total family income
data = np.loadtxt(file_stream(file_name), delimiter=',', usecols=(0, 2, 3), skiprows=1)
print('the loaded data')
print(data.shape)
print(data)
if __name__ == '__main__':
demo_use()
|
<commit_before><commit_msg>Use a generator with numpy.loadtxt<commit_after>
|
"""Use a generator with numpy loadtxt"""
import numpy as np
def file_stream(file_name):
for line in open(file_name):
yield line
def demo_use():
file_name = 'census_abs2011_summary.csv'
# Skip the header, return 3 columns: the age, weekly rental and total family income
data = np.loadtxt(file_stream(file_name), delimiter=',', usecols=(0, 2, 3), skiprows=1)
print('the loaded data')
print(data.shape)
print(data)
if __name__ == '__main__':
demo_use()
|
Use a generator with numpy.loadtxt"""Use a generator with numpy loadtxt"""
import numpy as np
def file_stream(file_name):
for line in open(file_name):
yield line
def demo_use():
file_name = 'census_abs2011_summary.csv'
# Skip the header, return 3 columns: the age, weekly rental and total family income
data = np.loadtxt(file_stream(file_name), delimiter=',', usecols=(0, 2, 3), skiprows=1)
print('the loaded data')
print(data.shape)
print(data)
if __name__ == '__main__':
demo_use()
|
<commit_before><commit_msg>Use a generator with numpy.loadtxt<commit_after>"""Use a generator with numpy loadtxt"""
import numpy as np
def file_stream(file_name):
for line in open(file_name):
yield line
def demo_use():
file_name = 'census_abs2011_summary.csv'
# Skip the header, return 3 columns: the age, weekly rental and total family income
data = np.loadtxt(file_stream(file_name), delimiter=',', usecols=(0, 2, 3), skiprows=1)
print('the loaded data')
print(data.shape)
print(data)
if __name__ == '__main__':
demo_use()
|
|
73b6f640d74f274ad00bfcdedbe9483d67db9936
|
larger_embedding.py
|
larger_embedding.py
|
"""
Embed motif in larger network
"""
import copy
import numpy as np
import networkx as nx
import scipy.stats as scis
import matplotlib.pyplot as plt
from tqdm import trange
from system import SDESystem
from solver import solve_system
from filters import filter_steady_state
def get_system(N, v_in=5, D=1):
assert N >= 3, 'Cannot add FFL'
graph = nx.barabasi_albert_graph(N, 1)
graph.add_edges_from([(0,1),(1,2),(0,2)]) # add FFL
jacobian = np.asarray(nx.to_numpy_matrix(graph))
np.fill_diagonal(jacobian, -1)
external_influence = np.ones(N) * v_in/N
fluctuation_vector = np.ones(N) * D/N
initial_state = np.ones(N)
# drive FFL
external_influence[0] = v_in
fluctuation_vector[0] = D
system = SDESystem(
jacobian, fluctuation_vector,
external_influence, initial_state)
return system
def simulate_system(sde_system, reps=100):
ode_system = copy.copy(sde_system)
ode_system.fluctuation_vector = np.zeros(sde_system.fluctuation_vector.shape)
corr_mats = []
for _ in trange(reps):
sde_sol = solve_system(sde_system)
ode_sol = solve_system(ode_system)
sol = ode_sol - sde_sol
sol_extract = sol.T[int(len(sol.T)*3/4):] # extract steady-state
plt.plot(sol_extract)
plt.show()
exit()
# if filter_steady_state(ode_sol.T[int(len(ode_sol.T)*3/4):]):
# continue
# compute correlations
dim = sol_extract.shape[1]
mat = np.empty((dim,dim))
for i in range(dim):
for j in range(dim):
xs, ys = sol_extract[:,i], sol_extract[:,j]
cc, pval = scis.pearsonr(xs, ys)
mat[i,j] = cc
corr_mats.append(mat)
return np.asarray(corr_mats)
def main():
syst = get_system(10)
corr_mats = simulate_system(syst)
import ipdb; ipdb.set_trace()
if __name__ == '__main__':
main()
|
Add unstable embedding in larger network
|
Add unstable embedding in larger network
|
Python
|
mit
|
kpj/SDEMotif,kpj/SDEMotif
|
Add unstable embedding in larger network
|
"""
Embed motif in larger network
"""
import copy
import numpy as np
import networkx as nx
import scipy.stats as scis
import matplotlib.pyplot as plt
from tqdm import trange
from system import SDESystem
from solver import solve_system
from filters import filter_steady_state
def get_system(N, v_in=5, D=1):
assert N >= 3, 'Cannot add FFL'
graph = nx.barabasi_albert_graph(N, 1)
graph.add_edges_from([(0,1),(1,2),(0,2)]) # add FFL
jacobian = np.asarray(nx.to_numpy_matrix(graph))
np.fill_diagonal(jacobian, -1)
external_influence = np.ones(N) * v_in/N
fluctuation_vector = np.ones(N) * D/N
initial_state = np.ones(N)
# drive FFL
external_influence[0] = v_in
fluctuation_vector[0] = D
system = SDESystem(
jacobian, fluctuation_vector,
external_influence, initial_state)
return system
def simulate_system(sde_system, reps=100):
ode_system = copy.copy(sde_system)
ode_system.fluctuation_vector = np.zeros(sde_system.fluctuation_vector.shape)
corr_mats = []
for _ in trange(reps):
sde_sol = solve_system(sde_system)
ode_sol = solve_system(ode_system)
sol = ode_sol - sde_sol
sol_extract = sol.T[int(len(sol.T)*3/4):] # extract steady-state
plt.plot(sol_extract)
plt.show()
exit()
# if filter_steady_state(ode_sol.T[int(len(ode_sol.T)*3/4):]):
# continue
# compute correlations
dim = sol_extract.shape[1]
mat = np.empty((dim,dim))
for i in range(dim):
for j in range(dim):
xs, ys = sol_extract[:,i], sol_extract[:,j]
cc, pval = scis.pearsonr(xs, ys)
mat[i,j] = cc
corr_mats.append(mat)
return np.asarray(corr_mats)
def main():
syst = get_system(10)
corr_mats = simulate_system(syst)
import ipdb; ipdb.set_trace()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add unstable embedding in larger network<commit_after>
|
"""
Embed motif in larger network
"""
import copy
import numpy as np
import networkx as nx
import scipy.stats as scis
import matplotlib.pyplot as plt
from tqdm import trange
from system import SDESystem
from solver import solve_system
from filters import filter_steady_state
def get_system(N, v_in=5, D=1):
assert N >= 3, 'Cannot add FFL'
graph = nx.barabasi_albert_graph(N, 1)
graph.add_edges_from([(0,1),(1,2),(0,2)]) # add FFL
jacobian = np.asarray(nx.to_numpy_matrix(graph))
np.fill_diagonal(jacobian, -1)
external_influence = np.ones(N) * v_in/N
fluctuation_vector = np.ones(N) * D/N
initial_state = np.ones(N)
# drive FFL
external_influence[0] = v_in
fluctuation_vector[0] = D
system = SDESystem(
jacobian, fluctuation_vector,
external_influence, initial_state)
return system
def simulate_system(sde_system, reps=100):
ode_system = copy.copy(sde_system)
ode_system.fluctuation_vector = np.zeros(sde_system.fluctuation_vector.shape)
corr_mats = []
for _ in trange(reps):
sde_sol = solve_system(sde_system)
ode_sol = solve_system(ode_system)
sol = ode_sol - sde_sol
sol_extract = sol.T[int(len(sol.T)*3/4):] # extract steady-state
plt.plot(sol_extract)
plt.show()
exit()
# if filter_steady_state(ode_sol.T[int(len(ode_sol.T)*3/4):]):
# continue
# compute correlations
dim = sol_extract.shape[1]
mat = np.empty((dim,dim))
for i in range(dim):
for j in range(dim):
xs, ys = sol_extract[:,i], sol_extract[:,j]
cc, pval = scis.pearsonr(xs, ys)
mat[i,j] = cc
corr_mats.append(mat)
return np.asarray(corr_mats)
def main():
syst = get_system(10)
corr_mats = simulate_system(syst)
import ipdb; ipdb.set_trace()
if __name__ == '__main__':
main()
|
Add unstable embedding in larger network"""
Embed motif in larger network
"""
import copy
import numpy as np
import networkx as nx
import scipy.stats as scis
import matplotlib.pyplot as plt
from tqdm import trange
from system import SDESystem
from solver import solve_system
from filters import filter_steady_state
def get_system(N, v_in=5, D=1):
assert N >= 3, 'Cannot add FFL'
graph = nx.barabasi_albert_graph(N, 1)
graph.add_edges_from([(0,1),(1,2),(0,2)]) # add FFL
jacobian = np.asarray(nx.to_numpy_matrix(graph))
np.fill_diagonal(jacobian, -1)
external_influence = np.ones(N) * v_in/N
fluctuation_vector = np.ones(N) * D/N
initial_state = np.ones(N)
# drive FFL
external_influence[0] = v_in
fluctuation_vector[0] = D
system = SDESystem(
jacobian, fluctuation_vector,
external_influence, initial_state)
return system
def simulate_system(sde_system, reps=100):
ode_system = copy.copy(sde_system)
ode_system.fluctuation_vector = np.zeros(sde_system.fluctuation_vector.shape)
corr_mats = []
for _ in trange(reps):
sde_sol = solve_system(sde_system)
ode_sol = solve_system(ode_system)
sol = ode_sol - sde_sol
sol_extract = sol.T[int(len(sol.T)*3/4):] # extract steady-state
plt.plot(sol_extract)
plt.show()
exit()
# if filter_steady_state(ode_sol.T[int(len(ode_sol.T)*3/4):]):
# continue
# compute correlations
dim = sol_extract.shape[1]
mat = np.empty((dim,dim))
for i in range(dim):
for j in range(dim):
xs, ys = sol_extract[:,i], sol_extract[:,j]
cc, pval = scis.pearsonr(xs, ys)
mat[i,j] = cc
corr_mats.append(mat)
return np.asarray(corr_mats)
def main():
syst = get_system(10)
corr_mats = simulate_system(syst)
import ipdb; ipdb.set_trace()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add unstable embedding in larger network<commit_after>"""
Embed motif in larger network
"""
import copy
import numpy as np
import networkx as nx
import scipy.stats as scis
import matplotlib.pyplot as plt
from tqdm import trange
from system import SDESystem
from solver import solve_system
from filters import filter_steady_state
def get_system(N, v_in=5, D=1):
assert N >= 3, 'Cannot add FFL'
graph = nx.barabasi_albert_graph(N, 1)
graph.add_edges_from([(0,1),(1,2),(0,2)]) # add FFL
jacobian = np.asarray(nx.to_numpy_matrix(graph))
np.fill_diagonal(jacobian, -1)
external_influence = np.ones(N) * v_in/N
fluctuation_vector = np.ones(N) * D/N
initial_state = np.ones(N)
# drive FFL
external_influence[0] = v_in
fluctuation_vector[0] = D
system = SDESystem(
jacobian, fluctuation_vector,
external_influence, initial_state)
return system
def simulate_system(sde_system, reps=100):
ode_system = copy.copy(sde_system)
ode_system.fluctuation_vector = np.zeros(sde_system.fluctuation_vector.shape)
corr_mats = []
for _ in trange(reps):
sde_sol = solve_system(sde_system)
ode_sol = solve_system(ode_system)
sol = ode_sol - sde_sol
sol_extract = sol.T[int(len(sol.T)*3/4):] # extract steady-state
plt.plot(sol_extract)
plt.show()
exit()
# if filter_steady_state(ode_sol.T[int(len(ode_sol.T)*3/4):]):
# continue
# compute correlations
dim = sol_extract.shape[1]
mat = np.empty((dim,dim))
for i in range(dim):
for j in range(dim):
xs, ys = sol_extract[:,i], sol_extract[:,j]
cc, pval = scis.pearsonr(xs, ys)
mat[i,j] = cc
corr_mats.append(mat)
return np.asarray(corr_mats)
def main():
syst = get_system(10)
corr_mats = simulate_system(syst)
import ipdb; ipdb.set_trace()
if __name__ == '__main__':
main()
|
|
a0a8e481eaaff042ad43ae3f40110f215b144c26
|
auth0/v2/test/test_jobs.py
|
auth0/v2/test/test_jobs.py
|
import unittest
import mock
from ..jobs import Jobs
class TestJobs(unittest.TestCase):
@mock.patch('auth0.v2.jobs.RestClient')
def test_get(self, mock_rc):
mock_instance = mock_rc.return_value
j = Jobs(domain='domain', jwt_token='jwttoken')
j.get('an-id')
mock_instance.get.assert_called_with(
'https://domain/api/v2/jobs/an-id',
)
@mock.patch('auth0.v2.jobs.RestClient')
def test_import_users(self, mock_rc):
mock_instance = mock_rc.return_value
j = Jobs(domain='domain', jwt_token='jwttoken')
j.import_users(connection_id='1234', file_obj={})
mock_instance.file_post.assert_called_with(
'https://domain/api/v2/jobs/users-imports',
data={'connection_id': '1234'},
files={'users': {}}
)
@mock.patch('auth0.v2.jobs.RestClient')
def test_verification_email(self, mock_rc):
mock_instance = mock_rc.return_value
j = Jobs(domain='domain', jwt_token='jwttoken')
j.send_verification_email({'a': 'b', 'c': 'd'})
mock_instance.post.assert_called_with(
'https://domain/api/v2/jobs/verification-email',
data={'a': 'b', 'c': 'd'}
)
|
Add unit tests for Jobs()
|
Add unit tests for Jobs()
|
Python
|
mit
|
auth0/auth0-python,auth0/auth0-python
|
Add unit tests for Jobs()
|
import unittest
import mock
from ..jobs import Jobs
class TestJobs(unittest.TestCase):
@mock.patch('auth0.v2.jobs.RestClient')
def test_get(self, mock_rc):
mock_instance = mock_rc.return_value
j = Jobs(domain='domain', jwt_token='jwttoken')
j.get('an-id')
mock_instance.get.assert_called_with(
'https://domain/api/v2/jobs/an-id',
)
@mock.patch('auth0.v2.jobs.RestClient')
def test_import_users(self, mock_rc):
mock_instance = mock_rc.return_value
j = Jobs(domain='domain', jwt_token='jwttoken')
j.import_users(connection_id='1234', file_obj={})
mock_instance.file_post.assert_called_with(
'https://domain/api/v2/jobs/users-imports',
data={'connection_id': '1234'},
files={'users': {}}
)
@mock.patch('auth0.v2.jobs.RestClient')
def test_verification_email(self, mock_rc):
mock_instance = mock_rc.return_value
j = Jobs(domain='domain', jwt_token='jwttoken')
j.send_verification_email({'a': 'b', 'c': 'd'})
mock_instance.post.assert_called_with(
'https://domain/api/v2/jobs/verification-email',
data={'a': 'b', 'c': 'd'}
)
|
<commit_before><commit_msg>Add unit tests for Jobs()<commit_after>
|
import unittest
import mock
from ..jobs import Jobs
class TestJobs(unittest.TestCase):
@mock.patch('auth0.v2.jobs.RestClient')
def test_get(self, mock_rc):
mock_instance = mock_rc.return_value
j = Jobs(domain='domain', jwt_token='jwttoken')
j.get('an-id')
mock_instance.get.assert_called_with(
'https://domain/api/v2/jobs/an-id',
)
@mock.patch('auth0.v2.jobs.RestClient')
def test_import_users(self, mock_rc):
mock_instance = mock_rc.return_value
j = Jobs(domain='domain', jwt_token='jwttoken')
j.import_users(connection_id='1234', file_obj={})
mock_instance.file_post.assert_called_with(
'https://domain/api/v2/jobs/users-imports',
data={'connection_id': '1234'},
files={'users': {}}
)
@mock.patch('auth0.v2.jobs.RestClient')
def test_verification_email(self, mock_rc):
mock_instance = mock_rc.return_value
j = Jobs(domain='domain', jwt_token='jwttoken')
j.send_verification_email({'a': 'b', 'c': 'd'})
mock_instance.post.assert_called_with(
'https://domain/api/v2/jobs/verification-email',
data={'a': 'b', 'c': 'd'}
)
|
Add unit tests for Jobs()import unittest
import mock
from ..jobs import Jobs
class TestJobs(unittest.TestCase):
@mock.patch('auth0.v2.jobs.RestClient')
def test_get(self, mock_rc):
mock_instance = mock_rc.return_value
j = Jobs(domain='domain', jwt_token='jwttoken')
j.get('an-id')
mock_instance.get.assert_called_with(
'https://domain/api/v2/jobs/an-id',
)
@mock.patch('auth0.v2.jobs.RestClient')
def test_import_users(self, mock_rc):
mock_instance = mock_rc.return_value
j = Jobs(domain='domain', jwt_token='jwttoken')
j.import_users(connection_id='1234', file_obj={})
mock_instance.file_post.assert_called_with(
'https://domain/api/v2/jobs/users-imports',
data={'connection_id': '1234'},
files={'users': {}}
)
@mock.patch('auth0.v2.jobs.RestClient')
def test_verification_email(self, mock_rc):
mock_instance = mock_rc.return_value
j = Jobs(domain='domain', jwt_token='jwttoken')
j.send_verification_email({'a': 'b', 'c': 'd'})
mock_instance.post.assert_called_with(
'https://domain/api/v2/jobs/verification-email',
data={'a': 'b', 'c': 'd'}
)
|
<commit_before><commit_msg>Add unit tests for Jobs()<commit_after>import unittest
import mock
from ..jobs import Jobs
class TestJobs(unittest.TestCase):
@mock.patch('auth0.v2.jobs.RestClient')
def test_get(self, mock_rc):
mock_instance = mock_rc.return_value
j = Jobs(domain='domain', jwt_token='jwttoken')
j.get('an-id')
mock_instance.get.assert_called_with(
'https://domain/api/v2/jobs/an-id',
)
@mock.patch('auth0.v2.jobs.RestClient')
def test_import_users(self, mock_rc):
mock_instance = mock_rc.return_value
j = Jobs(domain='domain', jwt_token='jwttoken')
j.import_users(connection_id='1234', file_obj={})
mock_instance.file_post.assert_called_with(
'https://domain/api/v2/jobs/users-imports',
data={'connection_id': '1234'},
files={'users': {}}
)
@mock.patch('auth0.v2.jobs.RestClient')
def test_verification_email(self, mock_rc):
mock_instance = mock_rc.return_value
j = Jobs(domain='domain', jwt_token='jwttoken')
j.send_verification_email({'a': 'b', 'c': 'd'})
mock_instance.post.assert_called_with(
'https://domain/api/v2/jobs/verification-email',
data={'a': 'b', 'c': 'd'}
)
|
|
1e867909e241777ab75abf19a160b83de50160d2
|
migrations/versions/34c2049aaee2_add_indexes_on_ads_for_numero_and_insee.py
|
migrations/versions/34c2049aaee2_add_indexes_on_ads_for_numero_and_insee.py
|
"""Add indexes on ADS for numero and insee
Revision ID: 34c2049aaee2
Revises: e187aca7c77a
Create Date: 2019-10-21 16:35:48.431148
"""
# revision identifiers, used by Alembic.
revision = '34c2049aaee2'
down_revision = 'e187aca7c77a'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
op.create_index('ads_insee_index', 'ADS', ['insee'], unique=False)
op.create_index('ads_numero_index', 'ADS', ['numero'], unique=False)
def downgrade():
op.drop_index('ads_numero_index', table_name='ADS')
op.drop_index('ads_insee_index', table_name='ADS')
|
Add migrations for indexes on ADS
|
Add migrations for indexes on ADS
|
Python
|
agpl-3.0
|
openmaraude/APITaxi,openmaraude/APITaxi
|
Add migrations for indexes on ADS
|
"""Add indexes on ADS for numero and insee
Revision ID: 34c2049aaee2
Revises: e187aca7c77a
Create Date: 2019-10-21 16:35:48.431148
"""
# revision identifiers, used by Alembic.
revision = '34c2049aaee2'
down_revision = 'e187aca7c77a'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
op.create_index('ads_insee_index', 'ADS', ['insee'], unique=False)
op.create_index('ads_numero_index', 'ADS', ['numero'], unique=False)
def downgrade():
op.drop_index('ads_numero_index', table_name='ADS')
op.drop_index('ads_insee_index', table_name='ADS')
|
<commit_before><commit_msg>Add migrations for indexes on ADS<commit_after>
|
"""Add indexes on ADS for numero and insee
Revision ID: 34c2049aaee2
Revises: e187aca7c77a
Create Date: 2019-10-21 16:35:48.431148
"""
# revision identifiers, used by Alembic.
revision = '34c2049aaee2'
down_revision = 'e187aca7c77a'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
op.create_index('ads_insee_index', 'ADS', ['insee'], unique=False)
op.create_index('ads_numero_index', 'ADS', ['numero'], unique=False)
def downgrade():
op.drop_index('ads_numero_index', table_name='ADS')
op.drop_index('ads_insee_index', table_name='ADS')
|
Add migrations for indexes on ADS"""Add indexes on ADS for numero and insee
Revision ID: 34c2049aaee2
Revises: e187aca7c77a
Create Date: 2019-10-21 16:35:48.431148
"""
# revision identifiers, used by Alembic.
revision = '34c2049aaee2'
down_revision = 'e187aca7c77a'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
op.create_index('ads_insee_index', 'ADS', ['insee'], unique=False)
op.create_index('ads_numero_index', 'ADS', ['numero'], unique=False)
def downgrade():
op.drop_index('ads_numero_index', table_name='ADS')
op.drop_index('ads_insee_index', table_name='ADS')
|
<commit_before><commit_msg>Add migrations for indexes on ADS<commit_after>"""Add indexes on ADS for numero and insee
Revision ID: 34c2049aaee2
Revises: e187aca7c77a
Create Date: 2019-10-21 16:35:48.431148
"""
# revision identifiers, used by Alembic.
revision = '34c2049aaee2'
down_revision = 'e187aca7c77a'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
op.create_index('ads_insee_index', 'ADS', ['insee'], unique=False)
op.create_index('ads_numero_index', 'ADS', ['numero'], unique=False)
def downgrade():
op.drop_index('ads_numero_index', table_name='ADS')
op.drop_index('ads_insee_index', table_name='ADS')
|
|
8e0c00254997e0223141bf02f599bce3eacca747
|
proc-mem.py
|
proc-mem.py
|
#!/usr/bin/env python
import os
import sys
def show_usage():
sys.stderr.write('''memio - Simple I/O for /proc/<pid>/mem
Dump /proc/<pid>/maps:
memio.py <pid>
Read from or write to (when some input is present on stdin) memory:
memio.py <pid> <start> (<end> | +<size>)
memio.py <pid> <hexrange>
<hexrange> is in /proc/<pid>/maps format (e.g., 00400000-004f2000).
''')
sys.exit(1)
def parse_args():
n_args = len(sys.argv)
start = end = None
pid = int(sys.argv[1], 0)
if n_args == 3:
start, end = map(lambda x: int(x, 16), sys.argv[2].split('-'))
elif n_args == 4:
start = int(sys.argv[2], 0)
if sys.argv[3][0] == '+':
end = start + int(sys.argv[3][1:], 0)
else:
end = int(sys.argv[3], 0)
return pid, start, end
def mem_io_range(pid, start, end, stream, read):
page_size = os.sysconf('SC_PAGE_SIZE')
mode = os.O_RDONLY if read else os.O_WRONLY
fd = os.open('/proc/{}/mem'.format(pid), mode)
os.lseek(fd, start, os.SEEK_SET)
to_do = end - start
while to_do > 0:
chunk_size = min(to_do, page_size)
if read:
data = os.read(fd, chunk_size)
stream.write(data)
else:
data = stream.read(chunk_size)
if not data:
to_do = 0
os.write(fd, data)
to_do -= chunk_size
os.close(fd)
def dump_maps(pid, sink):
with open('/proc/{}/maps'.format(pid)) as maps:
sink.write(maps.read())
def main():
if len(sys.argv) not in (2, 3, 4):
show_usage()
pid, start, end = parse_args()
if start and end:
if sys.stdin.isatty():
mem_io_range(pid, start, end, sys.stdout, True)
else:
mem_io_range(pid, start, end, sys.stdin, False)
else:
dump_maps(pid, sys.stdout)
if __name__ == '__main__':
main()
|
Add process memory usage script
|
Add process memory usage script
|
Python
|
mit
|
voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts
|
Add process memory usage script
|
#!/usr/bin/env python
import os
import sys
def show_usage():
sys.stderr.write('''memio - Simple I/O for /proc/<pid>/mem
Dump /proc/<pid>/maps:
memio.py <pid>
Read from or write to (when some input is present on stdin) memory:
memio.py <pid> <start> (<end> | +<size>)
memio.py <pid> <hexrange>
<hexrange> is in /proc/<pid>/maps format (e.g., 00400000-004f2000).
''')
sys.exit(1)
def parse_args():
n_args = len(sys.argv)
start = end = None
pid = int(sys.argv[1], 0)
if n_args == 3:
start, end = map(lambda x: int(x, 16), sys.argv[2].split('-'))
elif n_args == 4:
start = int(sys.argv[2], 0)
if sys.argv[3][0] == '+':
end = start + int(sys.argv[3][1:], 0)
else:
end = int(sys.argv[3], 0)
return pid, start, end
def mem_io_range(pid, start, end, stream, read):
page_size = os.sysconf('SC_PAGE_SIZE')
mode = os.O_RDONLY if read else os.O_WRONLY
fd = os.open('/proc/{}/mem'.format(pid), mode)
os.lseek(fd, start, os.SEEK_SET)
to_do = end - start
while to_do > 0:
chunk_size = min(to_do, page_size)
if read:
data = os.read(fd, chunk_size)
stream.write(data)
else:
data = stream.read(chunk_size)
if not data:
to_do = 0
os.write(fd, data)
to_do -= chunk_size
os.close(fd)
def dump_maps(pid, sink):
with open('/proc/{}/maps'.format(pid)) as maps:
sink.write(maps.read())
def main():
if len(sys.argv) not in (2, 3, 4):
show_usage()
pid, start, end = parse_args()
if start and end:
if sys.stdin.isatty():
mem_io_range(pid, start, end, sys.stdout, True)
else:
mem_io_range(pid, start, end, sys.stdin, False)
else:
dump_maps(pid, sys.stdout)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add process memory usage script<commit_after>
|
#!/usr/bin/env python
import os
import sys
def show_usage():
sys.stderr.write('''memio - Simple I/O for /proc/<pid>/mem
Dump /proc/<pid>/maps:
memio.py <pid>
Read from or write to (when some input is present on stdin) memory:
memio.py <pid> <start> (<end> | +<size>)
memio.py <pid> <hexrange>
<hexrange> is in /proc/<pid>/maps format (e.g., 00400000-004f2000).
''')
sys.exit(1)
def parse_args():
n_args = len(sys.argv)
start = end = None
pid = int(sys.argv[1], 0)
if n_args == 3:
start, end = map(lambda x: int(x, 16), sys.argv[2].split('-'))
elif n_args == 4:
start = int(sys.argv[2], 0)
if sys.argv[3][0] == '+':
end = start + int(sys.argv[3][1:], 0)
else:
end = int(sys.argv[3], 0)
return pid, start, end
def mem_io_range(pid, start, end, stream, read):
page_size = os.sysconf('SC_PAGE_SIZE')
mode = os.O_RDONLY if read else os.O_WRONLY
fd = os.open('/proc/{}/mem'.format(pid), mode)
os.lseek(fd, start, os.SEEK_SET)
to_do = end - start
while to_do > 0:
chunk_size = min(to_do, page_size)
if read:
data = os.read(fd, chunk_size)
stream.write(data)
else:
data = stream.read(chunk_size)
if not data:
to_do = 0
os.write(fd, data)
to_do -= chunk_size
os.close(fd)
def dump_maps(pid, sink):
with open('/proc/{}/maps'.format(pid)) as maps:
sink.write(maps.read())
def main():
if len(sys.argv) not in (2, 3, 4):
show_usage()
pid, start, end = parse_args()
if start and end:
if sys.stdin.isatty():
mem_io_range(pid, start, end, sys.stdout, True)
else:
mem_io_range(pid, start, end, sys.stdin, False)
else:
dump_maps(pid, sys.stdout)
if __name__ == '__main__':
main()
|
Add process memory usage script#!/usr/bin/env python
import os
import sys
def show_usage():
sys.stderr.write('''memio - Simple I/O for /proc/<pid>/mem
Dump /proc/<pid>/maps:
memio.py <pid>
Read from or write to (when some input is present on stdin) memory:
memio.py <pid> <start> (<end> | +<size>)
memio.py <pid> <hexrange>
<hexrange> is in /proc/<pid>/maps format (e.g., 00400000-004f2000).
''')
sys.exit(1)
def parse_args():
n_args = len(sys.argv)
start = end = None
pid = int(sys.argv[1], 0)
if n_args == 3:
start, end = map(lambda x: int(x, 16), sys.argv[2].split('-'))
elif n_args == 4:
start = int(sys.argv[2], 0)
if sys.argv[3][0] == '+':
end = start + int(sys.argv[3][1:], 0)
else:
end = int(sys.argv[3], 0)
return pid, start, end
def mem_io_range(pid, start, end, stream, read):
page_size = os.sysconf('SC_PAGE_SIZE')
mode = os.O_RDONLY if read else os.O_WRONLY
fd = os.open('/proc/{}/mem'.format(pid), mode)
os.lseek(fd, start, os.SEEK_SET)
to_do = end - start
while to_do > 0:
chunk_size = min(to_do, page_size)
if read:
data = os.read(fd, chunk_size)
stream.write(data)
else:
data = stream.read(chunk_size)
if not data:
to_do = 0
os.write(fd, data)
to_do -= chunk_size
os.close(fd)
def dump_maps(pid, sink):
with open('/proc/{}/maps'.format(pid)) as maps:
sink.write(maps.read())
def main():
if len(sys.argv) not in (2, 3, 4):
show_usage()
pid, start, end = parse_args()
if start and end:
if sys.stdin.isatty():
mem_io_range(pid, start, end, sys.stdout, True)
else:
mem_io_range(pid, start, end, sys.stdin, False)
else:
dump_maps(pid, sys.stdout)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add process memory usage script<commit_after>#!/usr/bin/env python
import os
import sys
def show_usage():
sys.stderr.write('''memio - Simple I/O for /proc/<pid>/mem
Dump /proc/<pid>/maps:
memio.py <pid>
Read from or write to (when some input is present on stdin) memory:
memio.py <pid> <start> (<end> | +<size>)
memio.py <pid> <hexrange>
<hexrange> is in /proc/<pid>/maps format (e.g., 00400000-004f2000).
''')
sys.exit(1)
def parse_args():
n_args = len(sys.argv)
start = end = None
pid = int(sys.argv[1], 0)
if n_args == 3:
start, end = map(lambda x: int(x, 16), sys.argv[2].split('-'))
elif n_args == 4:
start = int(sys.argv[2], 0)
if sys.argv[3][0] == '+':
end = start + int(sys.argv[3][1:], 0)
else:
end = int(sys.argv[3], 0)
return pid, start, end
def mem_io_range(pid, start, end, stream, read):
page_size = os.sysconf('SC_PAGE_SIZE')
mode = os.O_RDONLY if read else os.O_WRONLY
fd = os.open('/proc/{}/mem'.format(pid), mode)
os.lseek(fd, start, os.SEEK_SET)
to_do = end - start
while to_do > 0:
chunk_size = min(to_do, page_size)
if read:
data = os.read(fd, chunk_size)
stream.write(data)
else:
data = stream.read(chunk_size)
if not data:
to_do = 0
os.write(fd, data)
to_do -= chunk_size
os.close(fd)
def dump_maps(pid, sink):
with open('/proc/{}/maps'.format(pid)) as maps:
sink.write(maps.read())
def main():
if len(sys.argv) not in (2, 3, 4):
show_usage()
pid, start, end = parse_args()
if start and end:
if sys.stdin.isatty():
mem_io_range(pid, start, end, sys.stdout, True)
else:
mem_io_range(pid, start, end, sys.stdin, False)
else:
dump_maps(pid, sys.stdout)
if __name__ == '__main__':
main()
|
|
95ab6c0c23f756372a3e34e4386c15b4beaf0aa7
|
tests/UnreachableSymbolsRemove/SimpleTest.py
|
tests/UnreachableSymbolsRemove/SimpleTest.py
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy-transforms
"""
from unittest import main, TestCase
from grammpy import *
from grammpy_transforms import *
class SimpleTest(TestCase):
pass
if __name__ == '__main__':
main()
|
Add file for simple test of unreachable symbols remove
|
Add file for simple test of unreachable symbols remove
|
Python
|
mit
|
PatrikValkovic/grammpy
|
Add file for simple test of unreachable symbols remove
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy-transforms
"""
from unittest import main, TestCase
from grammpy import *
from grammpy_transforms import *
class SimpleTest(TestCase):
pass
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add file for simple test of unreachable symbols remove<commit_after>
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy-transforms
"""
from unittest import main, TestCase
from grammpy import *
from grammpy_transforms import *
class SimpleTest(TestCase):
pass
if __name__ == '__main__':
main()
|
Add file for simple test of unreachable symbols remove#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy-transforms
"""
from unittest import main, TestCase
from grammpy import *
from grammpy_transforms import *
class SimpleTest(TestCase):
pass
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add file for simple test of unreachable symbols remove<commit_after>#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy-transforms
"""
from unittest import main, TestCase
from grammpy import *
from grammpy_transforms import *
class SimpleTest(TestCase):
pass
if __name__ == '__main__':
main()
|
|
1bd5e8897991cec7d1c7f5c6a8320415841887e5
|
migrations/versions/0104_more_letter_orgs.py
|
migrations/versions/0104_more_letter_orgs.py
|
"""empty message
Revision ID: 0104_more_letter_orgs
Revises: 0103_add_historical_redact
Create Date: 2017-06-29 12:44:16.815039
"""
# revision identifiers, used by Alembic.
revision = '0104_more_letter_orgs'
down_revision = '0103_add_historical_redact'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from flask import current_app
def upgrade():
op.execute("""
INSERT INTO dvla_organisation VALUES
('003', 'Department for Work and Pensions'),
('004', 'Government Equalities Office')
""")
def downgrade():
# data migration, no downloads
pass
|
Add more organisations for letter branding
|
Add more organisations for letter branding
> The logos are now ready to go on DVLA side- so far we've got:
> 001 = HM Government
> 002 = OPG
> 003 = DWP
> 004 = GEO
|
Python
|
mit
|
alphagov/notifications-api,alphagov/notifications-api
|
Add more organisations for letter branding
> The logos are now ready to go on DVLA side- so far we've got:
> 001 = HM Government
> 002 = OPG
> 003 = DWP
> 004 = GEO
|
"""empty message
Revision ID: 0104_more_letter_orgs
Revises: 0103_add_historical_redact
Create Date: 2017-06-29 12:44:16.815039
"""
# revision identifiers, used by Alembic.
revision = '0104_more_letter_orgs'
down_revision = '0103_add_historical_redact'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from flask import current_app
def upgrade():
op.execute("""
INSERT INTO dvla_organisation VALUES
('003', 'Department for Work and Pensions'),
('004', 'Government Equalities Office')
""")
def downgrade():
# data migration, no downloads
pass
|
<commit_before><commit_msg>Add more organisations for letter branding
> The logos are now ready to go on DVLA side- so far we've got:
> 001 = HM Government
> 002 = OPG
> 003 = DWP
> 004 = GEO<commit_after>
|
"""empty message
Revision ID: 0104_more_letter_orgs
Revises: 0103_add_historical_redact
Create Date: 2017-06-29 12:44:16.815039
"""
# revision identifiers, used by Alembic.
revision = '0104_more_letter_orgs'
down_revision = '0103_add_historical_redact'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from flask import current_app
def upgrade():
op.execute("""
INSERT INTO dvla_organisation VALUES
('003', 'Department for Work and Pensions'),
('004', 'Government Equalities Office')
""")
def downgrade():
# data migration, no downloads
pass
|
Add more organisations for letter branding
> The logos are now ready to go on DVLA side- so far we've got:
> 001 = HM Government
> 002 = OPG
> 003 = DWP
> 004 = GEO"""empty message
Revision ID: 0104_more_letter_orgs
Revises: 0103_add_historical_redact
Create Date: 2017-06-29 12:44:16.815039
"""
# revision identifiers, used by Alembic.
revision = '0104_more_letter_orgs'
down_revision = '0103_add_historical_redact'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from flask import current_app
def upgrade():
op.execute("""
INSERT INTO dvla_organisation VALUES
('003', 'Department for Work and Pensions'),
('004', 'Government Equalities Office')
""")
def downgrade():
# data migration, no downloads
pass
|
<commit_before><commit_msg>Add more organisations for letter branding
> The logos are now ready to go on DVLA side- so far we've got:
> 001 = HM Government
> 002 = OPG
> 003 = DWP
> 004 = GEO<commit_after>"""empty message
Revision ID: 0104_more_letter_orgs
Revises: 0103_add_historical_redact
Create Date: 2017-06-29 12:44:16.815039
"""
# revision identifiers, used by Alembic.
revision = '0104_more_letter_orgs'
down_revision = '0103_add_historical_redact'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from flask import current_app
def upgrade():
op.execute("""
INSERT INTO dvla_organisation VALUES
('003', 'Department for Work and Pensions'),
('004', 'Government Equalities Office')
""")
def downgrade():
# data migration, no downloads
pass
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.