| commit (stringlengths, 40–40) | old_file (stringlengths, 4–118) | new_file (stringlengths, 4–118) | old_contents (stringlengths, 0–2.94k) | new_contents (stringlengths, 1–4.43k) | subject (stringlengths, 15–444) | message (stringlengths, 16–3.45k) | lang (stringclasses, 1 value) | license (stringclasses, 13 values) | repos (stringlengths, 5–43.2k) | prompt (stringlengths, 17–4.58k) | response (stringlengths, 1–4.43k) | prompt_tagged (stringlengths, 58–4.62k) | response_tagged (stringlengths, 1–4.43k) | text (stringlengths, 132–7.29k) | text_tagged (stringlengths, 173–7.33k) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
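Each row below is flattened: every cell value appears on its own run of lines, separated by `|` marker lines, pairing a file's pre- and post-commit contents with commit metadata; the `prompt`/`response` and `*_tagged` columns repackage the same contents for model training. A minimal sketch of loading and inspecting one row with the `datasets` library; the dataset path is a placeholder, not this dataset's actual name:

```python
# Hypothetical loading sketch; "user/commit-edits" is a placeholder path.
from datasets import load_dataset

ds = load_dataset("user/commit-edits", split="train")
row = ds[0]
print(row["subject"])              # one-line commit summary
print(row["old_file"])             # path of the file the commit touches
print(row["new_contents"][:200])   # start of the post-commit file body
```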
11bad7dcf3fa4d9fdf40eee49505fa55dc0243e8
|
src/collectors/users/users.py
|
src/collectors/users/users.py
|
# coding=utf-8
"""
Collects the number of users logged in and shells per user
#### Dependencies
* [pyutmp](http://software.clapper.org/pyutmp/)
"""
import diamond.collector
from pyutmp import UtmpFile
class UsersCollector(diamond.collector.Collector):
def get_default_config_help(self):
"""
Returns the default collector help text
"""
config_help = super(UsersCollector, self).get_default_config_help()
config_help.update({
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(UsersCollector, self).get_default_config()
config.update({
'path': 'users',
'method': 'Threaded',
})
return config
def collect(self):
metrics = {}
metrics['total'] = 0
for utmp in UtmpFile():
if utmp.ut_user_process:
metrics[utmp.ut_user] = metrics.get(utmp.ut_user, 0)+1
metrics['total'] = metrics['total']+1
for metric_name in metrics.keys():
self.publish(metric_name, metrics[metric_name])
return True
|
# coding=utf-8
"""
Collects the number of users logged in and shells per user
#### Dependencies
* [pyutmp](http://software.clapper.org/pyutmp/)
"""
import diamond.collector
from pyutmp import UtmpFile
class UsersCollector(diamond.collector.Collector):
def get_default_config_help(self):
"""
Returns the default collector help text
"""
config_help = super(UsersCollector, self).get_default_config_help()
config_help.update({
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(UsersCollector, self).get_default_config()
config.update({
'path': 'users',
'method': 'Threaded',
'utmp': None,
})
return config
def collect(self):
metrics = {}
metrics['total'] = 0
for utmp in UtmpFile(path=self.config['utmp']):
if utmp.ut_user_process:
metrics[utmp.ut_user] = metrics.get(utmp.ut_user, 0)+1
metrics['total'] = metrics['total']+1
for metric_name in metrics.keys():
self.publish(metric_name, metrics[metric_name])
return True
|
Add in a way to specify the utmp file path for unit testing
|
Add in a way to specify the utmp file path for unit testing
|
Python
|
mit
|
EzyInsights/Diamond,jumping/Diamond,acquia/Diamond,datafiniti/Diamond,thardie/Diamond,hvnsweeting/Diamond,thardie/Diamond,Basis/Diamond,saucelabs/Diamond,mfriedenhagen/Diamond,Ensighten/Diamond,ceph/Diamond,Slach/Diamond,h00dy/Diamond,ceph/Diamond,saucelabs/Diamond,Clever/Diamond,Netuitive/Diamond,sebbrandt87/Diamond,zoidbergwill/Diamond,TAKEALOT/Diamond,jumping/Diamond,Slach/Diamond,stuartbfox/Diamond,CYBERBUGJR/Diamond,h00dy/Diamond,stuartbfox/Diamond,EzyInsights/Diamond,datafiniti/Diamond,Netuitive/Diamond,eMerzh/Diamond-1,eMerzh/Diamond-1,Netuitive/netuitive-diamond,works-mobile/Diamond,MediaMath/Diamond,codepython/Diamond,actmd/Diamond,h00dy/Diamond,mfriedenhagen/Diamond,Ssawa/Diamond,sebbrandt87/Diamond,signalfx/Diamond,disqus/Diamond,jaingaurav/Diamond,anandbhoraskar/Diamond,tuenti/Diamond,Slach/Diamond,mfriedenhagen/Diamond,timchenxiaoyu/Diamond,russss/Diamond,ramjothikumar/Diamond,dcsquared13/Diamond,tellapart/Diamond,CYBERBUGJR/Diamond,TAKEALOT/Diamond,Clever/Diamond,zoidbergwill/Diamond,rtoma/Diamond,jriguera/Diamond,actmd/Diamond,CYBERBUGJR/Diamond,janisz/Diamond-1,anandbhoraskar/Diamond,signalfx/Diamond,eMerzh/Diamond-1,stuartbfox/Diamond,MediaMath/Diamond,russss/Diamond,saucelabs/Diamond,metamx/Diamond,MichaelDoyle/Diamond,gg7/diamond,cannium/Diamond,datafiniti/Diamond,tuenti/Diamond,janisz/Diamond-1,joel-airspring/Diamond,disqus/Diamond,Precis/Diamond,zoidbergwill/Diamond,rtoma/Diamond,TAKEALOT/Diamond,Ensighten/Diamond,mzupan/Diamond,metamx/Diamond,MichaelDoyle/Diamond,jaingaurav/Diamond,ramjothikumar/Diamond,h00dy/Diamond,actmd/Diamond,stuartbfox/Diamond,thardie/Diamond,tuenti/Diamond,dcsquared13/Diamond,TAKEALOT/Diamond,hvnsweeting/Diamond,zoidbergwill/Diamond,rtoma/Diamond,Slach/Diamond,russss/Diamond,Precis/Diamond,acquia/Diamond,MichaelDoyle/Diamond,skbkontur/Diamond,socialwareinc/Diamond,TinLe/Diamond,tellapart/Diamond,joel-airspring/Diamond,tusharmakkar08/Diamond,socialwareinc/Diamond,ceph/Diamond,hamelg/Diamond,tusharmakkar08/Diamond,joel-airspring/Diamond,python-diamond/Diamond,gg7/diamond,bmhatfield/Diamond,codepython/Diamond,jumping/Diamond,metamx/Diamond,timchenxiaoyu/Diamond,codepython/Diamond,Netuitive/netuitive-diamond,mzupan/Diamond,works-mobile/Diamond,bmhatfield/Diamond,Clever/Diamond,Ssawa/Diamond,EzyInsights/Diamond,tusharmakkar08/Diamond,joel-airspring/Diamond,Ensighten/Diamond,hamelg/Diamond,actmd/Diamond,jriguera/Diamond,Precis/Diamond,bmhatfield/Diamond,jaingaurav/Diamond,jriguera/Diamond,codepython/Diamond,CYBERBUGJR/Diamond,krbaker/Diamond,dcsquared13/Diamond,anandbhoraskar/Diamond,Nihn/Diamond-1,thardie/Diamond,Ssawa/Diamond,Nihn/Diamond-1,Ssawa/Diamond,sebbrandt87/Diamond,gg7/diamond,mfriedenhagen/Diamond,krbaker/Diamond,eMerzh/Diamond-1,sebbrandt87/Diamond,mzupan/Diamond,Basis/Diamond,Basis/Diamond,ramjothikumar/Diamond,signalfx/Diamond,Ormod/Diamond,skbkontur/Diamond,Ormod/Diamond,mzupan/Diamond,Basis/Diamond,Ormod/Diamond,dcsquared13/Diamond,TinLe/Diamond,hvnsweeting/Diamond,jaingaurav/Diamond,EzyInsights/Diamond,russss/Diamond,socialwareinc/Diamond,jumping/Diamond,hvnsweeting/Diamond,tellapart/Diamond,rtoma/Diamond,szibis/Diamond,Netuitive/netuitive-diamond,acquia/Diamond,Netuitive/Diamond,datafiniti/Diamond,skbkontur/Diamond,cannium/Diamond,ceph/Diamond,Netuitive/Diamond,tusharmakkar08/Diamond,Ensighten/Diamond,MichaelDoyle/Diamond,szibis/Diamond,szibis/Diamond,Clever/Diamond,tuenti/Diamond,TinLe/Diamond,signalfx/Diamond,Nihn/Diamond-1,krbaker/Diamond,MediaMath/Diamond,TinLe/Diamond,bmhatfield/Diamond,anandbhoraskar/Diamond,gg7/diamond,janisz/Diamond-1,jriguera/Diamond,works-mobile/Diamond,cannium/Diamond,saucelabs/Diamond,python-diamond/Diamond,hamelg/Diamond,python-diamond/Diamond,timchenxiaoyu/Diamond,Precis/Diamond,Ormod/Diamond,socialwareinc/Diamond,janisz/Diamond-1,krbaker/Diamond,tellapart/Diamond,szibis/Diamond,ramjothikumar/Diamond,hamelg/Diamond,skbkontur/Diamond,cannium/Diamond,Netuitive/netuitive-diamond,acquia/Diamond,disqus/Diamond
|
# coding=utf-8
"""
Collects the number of users logged in and shells per user
#### Dependencies
* [pyutmp](http://software.clapper.org/pyutmp/)
"""
import diamond.collector
from pyutmp import UtmpFile
class UsersCollector(diamond.collector.Collector):
def get_default_config_help(self):
"""
Returns the default collector help text
"""
config_help = super(UsersCollector, self).get_default_config_help()
config_help.update({
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(UsersCollector, self).get_default_config()
config.update({
'path': 'users',
'method': 'Threaded',
})
return config
def collect(self):
metrics = {}
metrics['total'] = 0
for utmp in UtmpFile():
if utmp.ut_user_process:
metrics[utmp.ut_user] = metrics.get(utmp.ut_user, 0)+1
metrics['total'] = metrics['total']+1
for metric_name in metrics.keys():
self.publish(metric_name, metrics[metric_name])
return True
Add in a way to specify the utmp file path for unit testing
|
# coding=utf-8
"""
Collects the number of users logged in and shells per user
#### Dependencies
* [pyutmp](http://software.clapper.org/pyutmp/)
"""
import diamond.collector
from pyutmp import UtmpFile
class UsersCollector(diamond.collector.Collector):
def get_default_config_help(self):
"""
Returns the default collector help text
"""
config_help = super(UsersCollector, self).get_default_config_help()
config_help.update({
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(UsersCollector, self).get_default_config()
config.update({
'path': 'users',
'method': 'Threaded',
'utmp': None,
})
return config
def collect(self):
metrics = {}
metrics['total'] = 0
for utmp in UtmpFile(path=self.config['utmp']):
if utmp.ut_user_process:
metrics[utmp.ut_user] = metrics.get(utmp.ut_user, 0)+1
metrics['total'] = metrics['total']+1
for metric_name in metrics.keys():
self.publish(metric_name, metrics[metric_name])
return True
|
<commit_before># coding=utf-8
"""
Collects the number of users logged in and shells per user
#### Dependencies
* [pyutmp](http://software.clapper.org/pyutmp/)
"""
import diamond.collector
from pyutmp import UtmpFile
class UsersCollector(diamond.collector.Collector):
def get_default_config_help(self):
"""
Returns the default collector help text
"""
config_help = super(UsersCollector, self).get_default_config_help()
config_help.update({
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(UsersCollector, self).get_default_config()
config.update({
'path': 'users',
'method': 'Threaded',
})
return config
def collect(self):
metrics = {}
metrics['total'] = 0
for utmp in UtmpFile():
if utmp.ut_user_process:
metrics[utmp.ut_user] = metrics.get(utmp.ut_user, 0)+1
metrics['total'] = metrics['total']+1
for metric_name in metrics.keys():
self.publish(metric_name, metrics[metric_name])
return True
<commit_msg>Add in a way to specify the utmp file path for unit testing<commit_after>
|
# coding=utf-8
"""
Collects the number of users logged in and shells per user
#### Dependencies
* [pyutmp](http://software.clapper.org/pyutmp/)
"""
import diamond.collector
from pyutmp import UtmpFile
class UsersCollector(diamond.collector.Collector):
def get_default_config_help(self):
"""
Returns the default collector help text
"""
config_help = super(UsersCollector, self).get_default_config_help()
config_help.update({
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(UsersCollector, self).get_default_config()
config.update({
'path': 'users',
'method': 'Threaded',
'utmp': None,
})
return config
def collect(self):
metrics = {}
metrics['total'] = 0
for utmp in UtmpFile(path=self.config['utmp']):
if utmp.ut_user_process:
metrics[utmp.ut_user] = metrics.get(utmp.ut_user, 0)+1
metrics['total'] = metrics['total']+1
for metric_name in metrics.keys():
self.publish(metric_name, metrics[metric_name])
return True
|
# coding=utf-8
"""
Collects the number of users logged in and shells per user
#### Dependencies
* [pyutmp](http://software.clapper.org/pyutmp/)
"""
import diamond.collector
from pyutmp import UtmpFile
class UsersCollector(diamond.collector.Collector):
def get_default_config_help(self):
"""
Returns the default collector help text
"""
config_help = super(UsersCollector, self).get_default_config_help()
config_help.update({
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(UsersCollector, self).get_default_config()
config.update({
'path': 'users',
'method': 'Threaded',
})
return config
def collect(self):
metrics = {}
metrics['total'] = 0
for utmp in UtmpFile():
if utmp.ut_user_process:
metrics[utmp.ut_user] = metrics.get(utmp.ut_user, 0)+1
metrics['total'] = metrics['total']+1
for metric_name in metrics.keys():
self.publish(metric_name, metrics[metric_name])
return True
Add in a way to specify the utmp file path for unit testing# coding=utf-8
"""
Collects the number of users logged in and shells per user
#### Dependencies
* [pyutmp](http://software.clapper.org/pyutmp/)
"""
import diamond.collector
from pyutmp import UtmpFile
class UsersCollector(diamond.collector.Collector):
def get_default_config_help(self):
"""
Returns the default collector help text
"""
config_help = super(UsersCollector, self).get_default_config_help()
config_help.update({
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(UsersCollector, self).get_default_config()
config.update({
'path': 'users',
'method': 'Threaded',
'utmp': None,
})
return config
def collect(self):
metrics = {}
metrics['total'] = 0
for utmp in UtmpFile(path=self.config['utmp']):
if utmp.ut_user_process:
metrics[utmp.ut_user] = metrics.get(utmp.ut_user, 0)+1
metrics['total'] = metrics['total']+1
for metric_name in metrics.keys():
self.publish(metric_name, metrics[metric_name])
return True
|
<commit_before># coding=utf-8
"""
Collects the number of users logged in and shells per user
#### Dependencies
* [pyutmp](http://software.clapper.org/pyutmp/)
"""
import diamond.collector
from pyutmp import UtmpFile
class UsersCollector(diamond.collector.Collector):
def get_default_config_help(self):
"""
Returns the default collector help text
"""
config_help = super(UsersCollector, self).get_default_config_help()
config_help.update({
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(UsersCollector, self).get_default_config()
config.update({
'path': 'users',
'method': 'Threaded',
})
return config
def collect(self):
metrics = {}
metrics['total'] = 0
for utmp in UtmpFile():
if utmp.ut_user_process:
metrics[utmp.ut_user] = metrics.get(utmp.ut_user, 0)+1
metrics['total'] = metrics['total']+1
for metric_name in metrics.keys():
self.publish(metric_name, metrics[metric_name])
return True
<commit_msg>Add in a way to specify the utmp file path for unit testing<commit_after># coding=utf-8
"""
Collects the number of users logged in and shells per user
#### Dependencies
* [pyutmp](http://software.clapper.org/pyutmp/)
"""
import diamond.collector
from pyutmp import UtmpFile
class UsersCollector(diamond.collector.Collector):
def get_default_config_help(self):
"""
Returns the default collector help text
"""
config_help = super(UsersCollector, self).get_default_config_help()
config_help.update({
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(UsersCollector, self).get_default_config()
config.update({
'path': 'users',
'method': 'Threaded',
'utmp': None,
})
return config
def collect(self):
metrics = {}
metrics['total'] = 0
for utmp in UtmpFile(path=self.config['utmp']):
if utmp.ut_user_process:
metrics[utmp.ut_user] = metrics.get(utmp.ut_user, 0)+1
metrics['total'] = metrics['total']+1
for metric_name in metrics.keys():
self.publish(metric_name, metrics[metric_name])
return True
|
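The row above threads a new `utmp` config key (defaulting to `None`) through to `UtmpFile`, so a test can substitute a recorded fixture for the live system utmp file. A minimal sketch of the idea; the fixture path and config plumbing are illustrative assumptions, not part of the dataset row:

```python
# With the commit applied, UtmpFile receives the configured path; None keeps
# pyutmp's system default, while a test can inject a fixture file instead.
from pyutmp import UtmpFile

def open_utmp(config):
    # Mirrors the collect() call above: UtmpFile(path=self.config['utmp'])
    return UtmpFile(path=config['utmp'])

production_config = {'utmp': None}                 # default behaviour
test_config = {'utmp': 'fixtures/utmp.sample'}     # hypothetical fixture path
```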
f8f59ff7a33a0a702e04ef6fc700c14467de9adf
|
hardware/sense_hat/turn_off_lights.py
|
hardware/sense_hat/turn_off_lights.py
|
# based on https://www.raspberrypi.org/learning/sense-hat-marble-maze/worksheet/
from sense_hat import SenseHat
from time import sleep
sense = SenseHat()
sense.clear()
|
Add script to turn off all lights on Sense Hat
|
Add script to turn off all lights on Sense Hat
|
Python
|
mit
|
claremacrae/raspi_code,claremacrae/raspi_code,claremacrae/raspi_code
|
Add script to turn off all lights on Sense Hat
|
# based on https://www.raspberrypi.org/learning/sense-hat-marble-maze/worksheet/
from sense_hat import SenseHat
from time import sleep
sense = SenseHat()
sense.clear()
|
<commit_before><commit_msg>Add script to turn off all lights on Sense Hat<commit_after>
|
# based on https://www.raspberrypi.org/learning/sense-hat-marble-maze/worksheet/
from sense_hat import SenseHat
from time import sleep
sense = SenseHat()
sense.clear()
|
Add script to turn off all lights on Sense Hat# based on https://www.raspberrypi.org/learning/sense-hat-marble-maze/worksheet/
from sense_hat import SenseHat
from time import sleep
sense = SenseHat()
sense.clear()
|
<commit_before><commit_msg>Add script to turn off all lights on Sense Hat<commit_after># based on https://www.raspberrypi.org/learning/sense-hat-marble-maze/worksheet/
from sense_hat import SenseHat
from time import sleep
sense = SenseHat()
sense.clear()
|
|
b9ec3b4096fa0541a40d8f1f46b79b3726489e19
|
src/gensim/test/test_utils.py
|
src/gensim/test/test_utils.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking various utils functions
"""
import logging
import unittest
from gensim import utils
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
level=logging.WARNING)
class TestIsCorpus(unittest.TestCase):
def test_None(self):
# test None
print utils.isCorpus(None)
result = utils.isCorpus(None)
expected = (False, None)
self.assertEqual(expected, result)
def test_simple_lists_of_tuples(self):
# test list words
# one document, one word
potentialCorpus = [[(0, 4.)]]
result = utils.isCorpus(potentialCorpus)
expected = (True, potentialCorpus)
self.assertEqual(expected, result)
# one document, several words
potentialCorpus = [[(0, 4.), (1, 2.)]]
result = utils.isCorpus(potentialCorpus)
expected = (True, potentialCorpus)
self.assertEqual(expected, result)
potentialCorpus = [[(0, 4.), (1, 2.), (2, 5.), (3, 8.)]]
result = utils.isCorpus(potentialCorpus)
expected = (True, potentialCorpus)
self.assertEqual(expected, result)
# several documents, one word
potentialCorpus = [[(0, 4.)], [(1, 2.)]]
result = utils.isCorpus(potentialCorpus)
expected = (True, potentialCorpus)
self.assertEqual(expected, result)
potentialCorpus = [[(0, 4.)], [(1, 2.)], [(2, 5.)], [(3, 8.)]]
result = utils.isCorpus(potentialCorpus)
expected = (True, potentialCorpus)
self.assertEqual(expected, result)
def test_int_tuples(self):
potentialCorpus = [[(0, 4)]]
result = utils.isCorpus(potentialCorpus)
expected = (True, potentialCorpus)
self.assertEqual(expected, result)
def test_invalid_formats(self):
# test invalid formats
# these are no corpus, because they do not consists of 2-tuples with
# the form(int, float).
potentials = list()
potentials.append(["human"])
potentials.append("human")
potentials.append(["human", "star"])
potentials.append([1, 2, 3, 4, 5, 5])
potentials.append([[(0, 'string')]])
for noCorpus in potentials:
result = utils.isCorpus(noCorpus)
expected = (False, noCorpus)
self.assertEqual(expected, result)
if __name__ == '__main__':
logging.root.setLevel(logging.WARNING)
unittest.main()
|
Add tests for isCorpus() in utils.py.
|
Add tests for isCorpus() in utils.py.
|
Python
|
lgpl-2.1
|
jaksmid/gensim,zachmayer/gensim,markroxor/gensim,zachmayer/gensim,loisaidasam/gensim,ziky90/gensim,akutuzov/gensim,tzoiker/gensim,ziky90/gensim,olavurmortensen/gensim,ziky90/gensim,tzoiker/gensim,mattilyra/gensim,manasRK/gensim,jaksmid/gensim,mattilyra/gensim,isohyt/gensim,gojomo/gensim,quole/gensim,pombredanne/gensim,piskvorky/gensim,davechallis/gensim,isohyt/gensim,macks22/gensim,bhargavvader/gensim,gojomo/gensim,robotcator/gensim,gojomo/gensim,RaRe-Technologies/gensim,piskvorky/gensim,laic/gensim,quole/gensim,bhargavvader/gensim,markroxor/gensim,macks22/gensim,gojomo/gensim,RaRe-Technologies/gensim,akutuzov/gensim,Kreiswolke/gensim,summanlp/gensim,Kreiswolke/gensim,ELind77/gensim,markroxor/gensim,summanlp/gensim,manasRK/gensim,davechallis/gensim,RaRe-Technologies/gensim,laic/gensim,Kreiswolke/gensim,tzoiker/gensim,ELind77/gensim,pombredanne/gensim,jaksmid/gensim,bhargavvader/gensim,olavurmortensen/gensim,RaRe-Technologies/gensim,dsquareindia/gensim,olavurmortensen/gensim,zachmayer/gensim,piskvorky/gensim,isohyt/gensim,quole/gensim,robotcator/gensim,robotcator/gensim,laic/gensim,pombredanne/gensim,manasRK/gensim,akutuzov/gensim,loisaidasam/gensim,macks22/gensim,dsquareindia/gensim,davechallis/gensim,mattilyra/gensim,dsquareindia/gensim,ELind77/gensim,summanlp/gensim,loisaidasam/gensim
|
Add tests for isCorpus() in utils.py.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking various utils functions
"""
import logging
import unittest
from gensim import utils
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
level=logging.WARNING)
class TestIsCorpus(unittest.TestCase):
def test_None(self):
# test None
print utils.isCorpus(None)
result = utils.isCorpus(None)
expected = (False, None)
self.assertEqual(expected, result)
def test_simple_lists_of_tuples(self):
# test list words
# one document, one word
potentialCorpus = [[(0, 4.)]]
result = utils.isCorpus(potentialCorpus)
expected = (True, potentialCorpus)
self.assertEqual(expected, result)
# one document, several words
potentialCorpus = [[(0, 4.), (1, 2.)]]
result = utils.isCorpus(potentialCorpus)
expected = (True, potentialCorpus)
self.assertEqual(expected, result)
potentialCorpus = [[(0, 4.), (1, 2.), (2, 5.), (3, 8.)]]
result = utils.isCorpus(potentialCorpus)
expected = (True, potentialCorpus)
self.assertEqual(expected, result)
# several documents, one word
potentialCorpus = [[(0, 4.)], [(1, 2.)]]
result = utils.isCorpus(potentialCorpus)
expected = (True, potentialCorpus)
self.assertEqual(expected, result)
potentialCorpus = [[(0, 4.)], [(1, 2.)], [(2, 5.)], [(3, 8.)]]
result = utils.isCorpus(potentialCorpus)
expected = (True, potentialCorpus)
self.assertEqual(expected, result)
def test_int_tuples(self):
potentialCorpus = [[(0, 4)]]
result = utils.isCorpus(potentialCorpus)
expected = (True, potentialCorpus)
self.assertEqual(expected, result)
def test_invalid_formats(self):
# test invalid formats
# these are no corpus, because they do not consists of 2-tuples with
# the form(int, float).
potentials = list()
potentials.append(["human"])
potentials.append("human")
potentials.append(["human", "star"])
potentials.append([1, 2, 3, 4, 5, 5])
potentials.append([[(0, 'string')]])
for noCorpus in potentials:
result = utils.isCorpus(noCorpus)
expected = (False, noCorpus)
self.assertEqual(expected, result)
if __name__ == '__main__':
logging.root.setLevel(logging.WARNING)
unittest.main()
|
<commit_before><commit_msg>Add tests for isCorpus() in utils.py.<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking various utils functions
"""
import logging
import unittest
from gensim import utils
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
level=logging.WARNING)
class TestIsCorpus(unittest.TestCase):
def test_None(self):
# test None
print utils.isCorpus(None)
result = utils.isCorpus(None)
expected = (False, None)
self.assertEqual(expected, result)
def test_simple_lists_of_tuples(self):
# test list words
# one document, one word
potentialCorpus = [[(0, 4.)]]
result = utils.isCorpus(potentialCorpus)
expected = (True, potentialCorpus)
self.assertEqual(expected, result)
# one document, several words
potentialCorpus = [[(0, 4.), (1, 2.)]]
result = utils.isCorpus(potentialCorpus)
expected = (True, potentialCorpus)
self.assertEqual(expected, result)
potentialCorpus = [[(0, 4.), (1, 2.), (2, 5.), (3, 8.)]]
result = utils.isCorpus(potentialCorpus)
expected = (True, potentialCorpus)
self.assertEqual(expected, result)
# several documents, one word
potentialCorpus = [[(0, 4.)], [(1, 2.)]]
result = utils.isCorpus(potentialCorpus)
expected = (True, potentialCorpus)
self.assertEqual(expected, result)
potentialCorpus = [[(0, 4.)], [(1, 2.)], [(2, 5.)], [(3, 8.)]]
result = utils.isCorpus(potentialCorpus)
expected = (True, potentialCorpus)
self.assertEqual(expected, result)
def test_int_tuples(self):
potentialCorpus = [[(0, 4)]]
result = utils.isCorpus(potentialCorpus)
expected = (True, potentialCorpus)
self.assertEqual(expected, result)
def test_invalid_formats(self):
# test invalid formats
# these are no corpus, because they do not consists of 2-tuples with
# the form(int, float).
potentials = list()
potentials.append(["human"])
potentials.append("human")
potentials.append(["human", "star"])
potentials.append([1, 2, 3, 4, 5, 5])
potentials.append([[(0, 'string')]])
for noCorpus in potentials:
result = utils.isCorpus(noCorpus)
expected = (False, noCorpus)
self.assertEqual(expected, result)
if __name__ == '__main__':
logging.root.setLevel(logging.WARNING)
unittest.main()
|
Add tests for isCorpus() in utils.py.#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking various utils functions
"""
import logging
import unittest
from gensim import utils
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
level=logging.WARNING)
class TestIsCorpus(unittest.TestCase):
def test_None(self):
# test None
print utils.isCorpus(None)
result = utils.isCorpus(None)
expected = (False, None)
self.assertEqual(expected, result)
def test_simple_lists_of_tuples(self):
# test list words
# one document, one word
potentialCorpus = [[(0, 4.)]]
result = utils.isCorpus(potentialCorpus)
expected = (True, potentialCorpus)
self.assertEqual(expected, result)
# one document, several words
potentialCorpus = [[(0, 4.), (1, 2.)]]
result = utils.isCorpus(potentialCorpus)
expected = (True, potentialCorpus)
self.assertEqual(expected, result)
potentialCorpus = [[(0, 4.), (1, 2.), (2, 5.), (3, 8.)]]
result = utils.isCorpus(potentialCorpus)
expected = (True, potentialCorpus)
self.assertEqual(expected, result)
# several documents, one word
potentialCorpus = [[(0, 4.)], [(1, 2.)]]
result = utils.isCorpus(potentialCorpus)
expected = (True, potentialCorpus)
self.assertEqual(expected, result)
potentialCorpus = [[(0, 4.)], [(1, 2.)], [(2, 5.)], [(3, 8.)]]
result = utils.isCorpus(potentialCorpus)
expected = (True, potentialCorpus)
self.assertEqual(expected, result)
def test_int_tuples(self):
potentialCorpus = [[(0, 4)]]
result = utils.isCorpus(potentialCorpus)
expected = (True, potentialCorpus)
self.assertEqual(expected, result)
def test_invalid_formats(self):
# test invalid formats
# these are no corpus, because they do not consists of 2-tuples with
# the form(int, float).
potentials = list()
potentials.append(["human"])
potentials.append("human")
potentials.append(["human", "star"])
potentials.append([1, 2, 3, 4, 5, 5])
potentials.append([[(0, 'string')]])
for noCorpus in potentials:
result = utils.isCorpus(noCorpus)
expected = (False, noCorpus)
self.assertEqual(expected, result)
if __name__ == '__main__':
logging.root.setLevel(logging.WARNING)
unittest.main()
|
<commit_before><commit_msg>Add tests for isCorpus() in utils.py.<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking various utils functions
"""
import logging
import unittest
from gensim import utils
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
level=logging.WARNING)
class TestIsCorpus(unittest.TestCase):
def test_None(self):
# test None
print utils.isCorpus(None)
result = utils.isCorpus(None)
expected = (False, None)
self.assertEqual(expected, result)
def test_simple_lists_of_tuples(self):
# test list words
# one document, one word
potentialCorpus = [[(0, 4.)]]
result = utils.isCorpus(potentialCorpus)
expected = (True, potentialCorpus)
self.assertEqual(expected, result)
# one document, several words
potentialCorpus = [[(0, 4.), (1, 2.)]]
result = utils.isCorpus(potentialCorpus)
expected = (True, potentialCorpus)
self.assertEqual(expected, result)
potentialCorpus = [[(0, 4.), (1, 2.), (2, 5.), (3, 8.)]]
result = utils.isCorpus(potentialCorpus)
expected = (True, potentialCorpus)
self.assertEqual(expected, result)
# several documents, one word
potentialCorpus = [[(0, 4.)], [(1, 2.)]]
result = utils.isCorpus(potentialCorpus)
expected = (True, potentialCorpus)
self.assertEqual(expected, result)
potentialCorpus = [[(0, 4.)], [(1, 2.)], [(2, 5.)], [(3, 8.)]]
result = utils.isCorpus(potentialCorpus)
expected = (True, potentialCorpus)
self.assertEqual(expected, result)
def test_int_tuples(self):
potentialCorpus = [[(0, 4)]]
result = utils.isCorpus(potentialCorpus)
expected = (True, potentialCorpus)
self.assertEqual(expected, result)
def test_invalid_formats(self):
# test invalid formats
# these are no corpus, because they do not consists of 2-tuples with
# the form(int, float).
potentials = list()
potentials.append(["human"])
potentials.append("human")
potentials.append(["human", "star"])
potentials.append([1, 2, 3, 4, 5, 5])
potentials.append([[(0, 'string')]])
for noCorpus in potentials:
result = utils.isCorpus(noCorpus)
expected = (False, noCorpus)
self.assertEqual(expected, result)
if __name__ == '__main__':
logging.root.setLevel(logging.WARNING)
unittest.main()
|
|
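The tests in this row pin down a contract for `isCorpus`: given any object, return an `(is_corpus, object)` pair. A simplified sketch of a function satisfying exactly the cases asserted above; the real gensim utility also has to handle generators and other edge cases this version ignores:

```python
def is_corpus_like(obj):
    """Return (bool, obj): True when obj looks like a corpus, i.e. an
    iterable of documents that are iterables of (int, number) 2-tuples."""
    if obj is None:
        return (False, None)
    try:
        doc = next(iter(obj))      # peek at the first document
        entry = next(iter(doc))    # peek at its first entry
    except (TypeError, StopIteration):
        return (False, obj)        # not iterable, or nothing to inspect
    if (isinstance(entry, tuple) and len(entry) == 2
            and isinstance(entry[0], int)
            and isinstance(entry[1], (int, float))):
        return (True, obj)
    return (False, obj)
```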
2003269e22361e201f93bb1dba1ca4b4572d2968
|
interestingness/diffTest.py
|
interestingness/diffTest.py
|
#!/usr/bin/env python
from optparse import OptionParser
import timedRun
import filecmp
def parseOptions(arguments):
parser = OptionParser()
parser.disable_interspersed_args()
parser.add_option('-t', '--timeout', type='int', action='store', dest='condTimeout',
default=120,
help='Optionally set the timeout. Defaults to "%default" seconds.')
parser.add_option('-a', '--a-arg', type='string', action='append', dest='aArgs',
default=[],
help='Set of extra arguments given to first run.')
parser.add_option('-b', '--b-arg', type='string', action='append', dest='bArgs',
default=[],
help='Set of extra arguments given to second run.')
options, args = parser.parse_args(arguments)
return options.condTimeout, options.aArgs, options.bArgs, args
def interesting(cliArgs, tempPrefix):
(timeout, aArgs, bArgs, args) = parseOptions(cliArgs)
aRuninfo = timedRun.timed_run(args[:1] + aArgs + args[1:], timeout, tempPrefix + "-a")
bRuninfo = timedRun.timed_run(args[:1] + bArgs + args[1:], timeout, tempPrefix + "-b")
timeString = " (1st Run: %.3f seconds) (2nd Run: %.3f seconds)" % (aRuninfo.elapsedtime, bRuninfo.elapsedtime)
if aRuninfo.sta != timedRun.TIMED_OUT and bRuninfo.sta != timedRun.TIMED_OUT:
if aRuninfo.rc != bRuninfo.rc:
print ("[Interesting] Different return code. (%d, %d) " % (aRuninfo.rc, bRuninfo.rc)) + timeString
return True
if not filecmp.cmp(aRuninfo.out, bRuninfo.out):
print "[Interesting] Different output. " + timeString
return True
if not filecmp.cmp(aRuninfo.err, bRuninfo.err):
print "[Interesting] Different error output. " + timeString
return True
else:
print "[Uninteresting] At least one test timed out." + timeString
return False
print "[Uninteresting] Identical behaviour." + timeString
return False
|
Add a new interesting test to check different behaviour based on command line switches.
|
Add a new interesting test to check different behaviour based on command line switches.
|
Python
|
mpl-2.0
|
nth10sd/lithium,nth10sd/lithium,MozillaSecurity/lithium,MozillaSecurity/lithium
|
Add a new interesting test to check different behaviour based on command line switches.
|
#!/usr/bin/env python
from optparse import OptionParser
import timedRun
import filecmp
def parseOptions(arguments):
parser = OptionParser()
parser.disable_interspersed_args()
parser.add_option('-t', '--timeout', type='int', action='store', dest='condTimeout',
default=120,
help='Optionally set the timeout. Defaults to "%default" seconds.')
parser.add_option('-a', '--a-arg', type='string', action='append', dest='aArgs',
default=[],
help='Set of extra arguments given to first run.')
parser.add_option('-b', '--b-arg', type='string', action='append', dest='bArgs',
default=[],
help='Set of extra arguments given to second run.')
options, args = parser.parse_args(arguments)
return options.condTimeout, options.aArgs, options.bArgs, args
def interesting(cliArgs, tempPrefix):
(timeout, aArgs, bArgs, args) = parseOptions(cliArgs)
aRuninfo = timedRun.timed_run(args[:1] + aArgs + args[1:], timeout, tempPrefix + "-a")
bRuninfo = timedRun.timed_run(args[:1] + bArgs + args[1:], timeout, tempPrefix + "-b")
timeString = " (1st Run: %.3f seconds) (2nd Run: %.3f seconds)" % (aRuninfo.elapsedtime, bRuninfo.elapsedtime)
if aRuninfo.sta != timedRun.TIMED_OUT and bRuninfo.sta != timedRun.TIMED_OUT:
if aRuninfo.rc != bRuninfo.rc:
print ("[Interesting] Different return code. (%d, %d) " % (aRuninfo.rc, bRuninfo.rc)) + timeString
return True
if not filecmp.cmp(aRuninfo.out, bRuninfo.out):
print "[Interesting] Different output. " + timeString
return True
if not filecmp.cmp(aRuninfo.err, bRuninfo.err):
print "[Interesting] Different error output. " + timeString
return True
else:
print "[Uninteresting] At least one test timed out." + timeString
return False
print "[Uninteresting] Identical behaviour." + timeString
return False
|
<commit_before><commit_msg>Add a new interesting test to check different behaviour based on command line switches.<commit_after>
|
#!/usr/bin/env python
from optparse import OptionParser
import timedRun
import filecmp
def parseOptions(arguments):
parser = OptionParser()
parser.disable_interspersed_args()
parser.add_option('-t', '--timeout', type='int', action='store', dest='condTimeout',
default=120,
help='Optionally set the timeout. Defaults to "%default" seconds.')
parser.add_option('-a', '--a-arg', type='string', action='append', dest='aArgs',
default=[],
help='Set of extra arguments given to first run.')
parser.add_option('-b', '--b-arg', type='string', action='append', dest='bArgs',
default=[],
help='Set of extra arguments given to second run.')
options, args = parser.parse_args(arguments)
return options.condTimeout, options.aArgs, options.bArgs, args
def interesting(cliArgs, tempPrefix):
(timeout, aArgs, bArgs, args) = parseOptions(cliArgs)
aRuninfo = timedRun.timed_run(args[:1] + aArgs + args[1:], timeout, tempPrefix + "-a")
bRuninfo = timedRun.timed_run(args[:1] + bArgs + args[1:], timeout, tempPrefix + "-b")
timeString = " (1st Run: %.3f seconds) (2nd Run: %.3f seconds)" % (aRuninfo.elapsedtime, bRuninfo.elapsedtime)
if aRuninfo.sta != timedRun.TIMED_OUT and bRuninfo.sta != timedRun.TIMED_OUT:
if aRuninfo.rc != bRuninfo.rc:
print ("[Interesting] Different return code. (%d, %d) " % (aRuninfo.rc, bRuninfo.rc)) + timeString
return True
if not filecmp.cmp(aRuninfo.out, bRuninfo.out):
print "[Interesting] Different output. " + timeString
return True
if not filecmp.cmp(aRuninfo.err, bRuninfo.err):
print "[Interesting] Different error output. " + timeString
return True
else:
print "[Uninteresting] At least one test timed out." + timeString
return False
print "[Uninteresting] Identical behaviour." + timeString
return False
|
Add a new interesting test to check different behaviour based on command line switches.#!/usr/bin/env python
from optparse import OptionParser
import timedRun
import filecmp
def parseOptions(arguments):
parser = OptionParser()
parser.disable_interspersed_args()
parser.add_option('-t', '--timeout', type='int', action='store', dest='condTimeout',
default=120,
help='Optionally set the timeout. Defaults to "%default" seconds.')
parser.add_option('-a', '--a-arg', type='string', action='append', dest='aArgs',
default=[],
help='Set of extra arguments given to first run.')
parser.add_option('-b', '--b-arg', type='string', action='append', dest='bArgs',
default=[],
help='Set of extra arguments given to second run.')
options, args = parser.parse_args(arguments)
return options.condTimeout, options.aArgs, options.bArgs, args
def interesting(cliArgs, tempPrefix):
(timeout, aArgs, bArgs, args) = parseOptions(cliArgs)
aRuninfo = timedRun.timed_run(args[:1] + aArgs + args[1:], timeout, tempPrefix + "-a")
bRuninfo = timedRun.timed_run(args[:1] + bArgs + args[1:], timeout, tempPrefix + "-b")
timeString = " (1st Run: %.3f seconds) (2nd Run: %.3f seconds)" % (aRuninfo.elapsedtime, bRuninfo.elapsedtime)
if aRuninfo.sta != timedRun.TIMED_OUT and bRuninfo.sta != timedRun.TIMED_OUT:
if aRuninfo.rc != bRuninfo.rc:
print ("[Interesting] Different return code. (%d, %d) " % (aRuninfo.rc, bRuninfo.rc)) + timeString
return True
if not filecmp.cmp(aRuninfo.out, bRuninfo.out):
print "[Interesting] Different output. " + timeString
return True
if not filecmp.cmp(aRuninfo.err, bRuninfo.err):
print "[Interesting] Different error output. " + timeString
return True
else:
print "[Uninteresting] At least one test timed out." + timeString
return False
print "[Uninteresting] Identical behaviour." + timeString
return False
|
<commit_before><commit_msg>Add a new interesting test to check different behaviour based on command line switches.<commit_after>#!/usr/bin/env python
from optparse import OptionParser
import timedRun
import filecmp
def parseOptions(arguments):
parser = OptionParser()
parser.disable_interspersed_args()
parser.add_option('-t', '--timeout', type='int', action='store', dest='condTimeout',
default=120,
help='Optionally set the timeout. Defaults to "%default" seconds.')
parser.add_option('-a', '--a-arg', type='string', action='append', dest='aArgs',
default=[],
help='Set of extra arguments given to first run.')
parser.add_option('-b', '--b-arg', type='string', action='append', dest='bArgs',
default=[],
help='Set of extra arguments given to second run.')
options, args = parser.parse_args(arguments)
return options.condTimeout, options.aArgs, options.bArgs, args
def interesting(cliArgs, tempPrefix):
(timeout, aArgs, bArgs, args) = parseOptions(cliArgs)
aRuninfo = timedRun.timed_run(args[:1] + aArgs + args[1:], timeout, tempPrefix + "-a")
bRuninfo = timedRun.timed_run(args[:1] + bArgs + args[1:], timeout, tempPrefix + "-b")
timeString = " (1st Run: %.3f seconds) (2nd Run: %.3f seconds)" % (aRuninfo.elapsedtime, bRuninfo.elapsedtime)
if aRuninfo.sta != timedRun.TIMED_OUT and bRuninfo.sta != timedRun.TIMED_OUT:
if aRuninfo.rc != bRuninfo.rc:
print ("[Interesting] Different return code. (%d, %d) " % (aRuninfo.rc, bRuninfo.rc)) + timeString
return True
if not filecmp.cmp(aRuninfo.out, bRuninfo.out):
print "[Interesting] Different output. " + timeString
return True
if not filecmp.cmp(aRuninfo.err, bRuninfo.err):
print "[Interesting] Different error output. " + timeString
return True
else:
print "[Uninteresting] At least one test timed out." + timeString
return False
print "[Uninteresting] Identical behaviour." + timeString
return False
|
|
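The splicing `args[:1] + aArgs + args[1:]` in `interesting()` keeps the target binary first and inserts each run's extra flags before the test case. A worked illustration with assumed values; the binary name and flag are hypothetical:

```python
args   = ['js', 'test.js']   # target binary followed by the test file
a_args = ['--no-jit']        # hypothetical extra flags for the first run
b_args = []                  # second run uses the binary's defaults

cmd_a = args[:1] + a_args + args[1:]   # ['js', '--no-jit', 'test.js']
cmd_b = args[:1] + b_args + args[1:]   # ['js', 'test.js']
```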
2327b809bb9ffbf53b259e374512e7868d5f17df
|
scripts/ensure_log_dates.py
|
scripts/ensure_log_dates.py
|
# -*- coding: utf-8 -*-
import datetime
import logging
import sys
from modularodm import Q
from website.models import NodeLog
from bson import ObjectId
from nose.tools import *
from tests.base import OsfTestCase
from tests.factories import NodeLogFactory
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def find_invalid_logs():
for log in NodeLog.find(Q('action', 'eq', NodeLog.WIKI_DELETED)):
# Derive UTC datetime object from ObjectId
id_date = ObjectId(log._id).generation_time
id_date = id_date.replace(tzinfo=None) - id_date.utcoffset()
if id_date > log.date:
yield log
def fix_invalid_log(log):
new_dt = ObjectId(log._id).generation_time
new_dt = new_dt.replace(tzinfo=None) - new_dt.utcoffset()
NodeLog._fields['date'].__set__(
log,
new_dt,
safe=False
)
log.save()
if __name__ == '__main__':
if 'dry' in sys.argv:
for log in find_invalid_logs():
print(log._id)
else:
for log in find_invalid_logs():
fix_invalid_log(log)
class TestEnsureLogDates(OsfTestCase):
def setUp(self):
super(TestEnsureLogDates, self).setUp()
self.good_log = NodeLogFactory(action=NodeLog.WIKI_DELETED)
self.bad_log = NodeLogFactory(action=NodeLog.WIKI_DELETED)
self.mongo = self.good_log._storage[0].db['nodelog']
self.mongo.update(
{'_id': self.bad_log._id},
{'$set': {'date': datetime.datetime.utcnow() - datetime.timedelta(weeks=52)}},
)
self.bad_log.reload()
def tearDown(self):
super(TestEnsureLogDates, self).tearDown()
def test_find_invalid_logs(self):
assert_equal(
1,
len(list(find_invalid_logs()))
)
def test_fix_invalid_log(self):
fix_invalid_log(self.bad_log)
assert_true(
self.good_log.date - self.bad_log.date < datetime.timedelta(seconds=1)
)
|
Add migration script for defective "delete wiki page" logs
|
Add migration script for defective "delete wiki page" logs
|
Python
|
apache-2.0
|
CenterForOpenScience/osf.io,sbt9uc/osf.io,jinluyuan/osf.io,zamattiac/osf.io,binoculars/osf.io,Nesiehr/osf.io,cosenal/osf.io,jeffreyliu3230/osf.io,GaryKriebel/osf.io,jmcarp/osf.io,samanehsan/osf.io,dplorimer/osf,caseyrygt/osf.io,rdhyee/osf.io,monikagrabowska/osf.io,jinluyuan/osf.io,TomHeatwole/osf.io,Nesiehr/osf.io,zkraime/osf.io,asanfilippo7/osf.io,billyhunt/osf.io,zachjanicki/osf.io,TomHeatwole/osf.io,cwisecarver/osf.io,caseyrollins/osf.io,binoculars/osf.io,RomanZWang/osf.io,rdhyee/osf.io,caseyrollins/osf.io,icereval/osf.io,mfraezz/osf.io,petermalcolm/osf.io,kch8qx/osf.io,MerlinZhang/osf.io,erinspace/osf.io,GageGaskins/osf.io,danielneis/osf.io,ckc6cz/osf.io,RomanZWang/osf.io,haoyuchen1992/osf.io,HalcyonChimera/osf.io,brianjgeiger/osf.io,fabianvf/osf.io,barbour-em/osf.io,brandonPurvis/osf.io,alexschiller/osf.io,alexschiller/osf.io,kch8qx/osf.io,Johnetordoff/osf.io,baylee-d/osf.io,HarryRybacki/osf.io,danielneis/osf.io,HalcyonChimera/osf.io,monikagrabowska/osf.io,jeffreyliu3230/osf.io,wearpants/osf.io,fabianvf/osf.io,MerlinZhang/osf.io,bdyetton/prettychart,Ghalko/osf.io,himanshuo/osf.io,wearpants/osf.io,cldershem/osf.io,njantrania/osf.io,cslzchen/osf.io,njantrania/osf.io,njantrania/osf.io,GaryKriebel/osf.io,mluo613/osf.io,KAsante95/osf.io,kwierman/osf.io,mluke93/osf.io,jeffreyliu3230/osf.io,crcresearch/osf.io,laurenrevere/osf.io,hmoco/osf.io,alexschiller/osf.io,sloria/osf.io,adlius/osf.io,SSJohns/osf.io,pattisdr/osf.io,Ghalko/osf.io,GageGaskins/osf.io,emetsger/osf.io,cwisecarver/osf.io,abought/osf.io,dplorimer/osf,caseyrollins/osf.io,billyhunt/osf.io,himanshuo/osf.io,kwierman/osf.io,mattclark/osf.io,crcresearch/osf.io,KAsante95/osf.io,zkraime/osf.io,jmcarp/osf.io,rdhyee/osf.io,barbour-em/osf.io,haoyuchen1992/osf.io,GageGaskins/osf.io,kch8qx/osf.io,chennan47/osf.io,reinaH/osf.io,Johnetordoff/osf.io,acshi/osf.io,crcresearch/osf.io,Ghalko/osf.io,abought/osf.io,samchrisinger/osf.io,dplorimer/osf,cosenal/osf.io,chrisseto/osf.io,Nesiehr/osf.io,samanehsan/osf.io,amyshi188/osf.io,CenterForOpenScience/osf.io,caneruguz/osf.io,monikagrabowska/osf.io,samanehsan/osf.io,kushG/osf.io,cwisecarver/osf.io,Johnetordoff/osf.io,jolene-esposito/osf.io,abought/osf.io,cldershem/osf.io,jnayak1/osf.io,mfraezz/osf.io,DanielSBrown/osf.io,Nesiehr/osf.io,TomBaxter/osf.io,zachjanicki/osf.io,lamdnhan/osf.io,HalcyonChimera/osf.io,mfraezz/osf.io,kwierman/osf.io,laurenrevere/osf.io,AndrewSallans/osf.io,jinluyuan/osf.io,arpitar/osf.io,asanfilippo7/osf.io,revanthkolli/osf.io,brianjgeiger/osf.io,reinaH/osf.io,GaryKriebel/osf.io,kushG/osf.io,laurenrevere/osf.io,danielneis/osf.io,brandonPurvis/osf.io,cslzchen/osf.io,samchrisinger/osf.io,cldershem/osf.io,chennan47/osf.io,caseyrygt/osf.io,caneruguz/osf.io,saradbowman/osf.io,bdyetton/prettychart,HarryRybacki/osf.io,kch8qx/osf.io,asanfilippo7/osf.io,kch8qx/osf.io,bdyetton/prettychart,arpitar/osf.io,lyndsysimon/osf.io,himanshuo/osf.io,RomanZWang/osf.io,SSJohns/osf.io,ZobairAlijan/osf.io,mluo613/osf.io,ticklemepierce/osf.io,RomanZWang/osf.io,RomanZWang/osf.io,zkraime/osf.io,zamattiac/osf.io,sloria/osf.io,Ghalko/osf.io,jeffreyliu3230/osf.io,zachjanicki/osf.io,leb2dg/osf.io,cslzchen/osf.io,billyhunt/osf.io,sloria/osf.io,petermalcolm/osf.io,chrisseto/osf.io,danielneis/osf.io,mattclark/osf.io,himanshuo/osf.io,ticklemepierce/osf.io,pattisdr/osf.io,adlius/osf.io,petermalcolm/osf.io,chrisseto/osf.io,felliott/osf.io,jolene-esposito/osf.io,alexschiller/osf.io,samanehsan/osf.io,petermalcolm/osf.io,DanielSBrown/osf.io,lamdnhan/osf.io,icereval/osf.io,arpitar/osf.io,jnayak1/osf.io,sbt9uc/osf.io,acshi/osf.io,emetsger/osf.io,ckc6cz/osf.io,rdhyee/osf.io,icereval/osf.io,doublebits/osf.io,hmoco/osf.io,hmoco/osf.io,brandonPurvis/osf.io,erinspace/osf.io,wearpants/osf.io,cslzchen/osf.io,ckc6cz/osf.io,ZobairAlijan/osf.io,KAsante95/osf.io,acshi/osf.io,haoyuchen1992/osf.io,alexschiller/osf.io,brianjgeiger/osf.io,mluke93/osf.io,ZobairAlijan/osf.io,doublebits/osf.io,adlius/osf.io,dplorimer/osf,CenterForOpenScience/osf.io,TomHeatwole/osf.io,KAsante95/osf.io,cwisecarver/osf.io,brandonPurvis/osf.io,emetsger/osf.io,fabianvf/osf.io,acshi/osf.io,doublebits/osf.io,aaxelb/osf.io,chennan47/osf.io,GaryKriebel/osf.io,ticklemepierce/osf.io,samchrisinger/osf.io,billyhunt/osf.io,cosenal/osf.io,mluo613/osf.io,saradbowman/osf.io,emetsger/osf.io,mfraezz/osf.io,HalcyonChimera/osf.io,aaxelb/osf.io,jinluyuan/osf.io,caseyrygt/osf.io,jolene-esposito/osf.io,reinaH/osf.io,bdyetton/prettychart,jnayak1/osf.io,doublebits/osf.io,asanfilippo7/osf.io,fabianvf/osf.io,revanthkolli/osf.io,leb2dg/osf.io,GageGaskins/osf.io,monikagrabowska/osf.io,jnayak1/osf.io,monikagrabowska/osf.io,billyhunt/osf.io,haoyuchen1992/osf.io,SSJohns/osf.io,TomHeatwole/osf.io,barbour-em/osf.io,chrisseto/osf.io,brandonPurvis/osf.io,CenterForOpenScience/osf.io,sbt9uc/osf.io,lyndsysimon/osf.io,lyndsysimon/osf.io,caseyrygt/osf.io,njantrania/osf.io,baylee-d/osf.io,abought/osf.io,mluo613/osf.io,HarryRybacki/osf.io,baylee-d/osf.io,amyshi188/osf.io,Johnetordoff/osf.io,AndrewSallans/osf.io,DanielSBrown/osf.io,KAsante95/osf.io,kushG/osf.io,adlius/osf.io,amyshi188/osf.io,mluo613/osf.io,acshi/osf.io,hmoco/osf.io,kushG/osf.io,MerlinZhang/osf.io,MerlinZhang/osf.io,zamattiac/osf.io,mluke93/osf.io,felliott/osf.io,mluke93/osf.io,sbt9uc/osf.io,ckc6cz/osf.io,samchrisinger/osf.io,ZobairAlijan/osf.io,zachjanicki/osf.io,leb2dg/osf.io,zkraime/osf.io,GageGaskins/osf.io,cosenal/osf.io,arpitar/osf.io,barbour-em/osf.io,jmcarp/osf.io,aaxelb/osf.io,lamdnhan/osf.io,lamdnhan/osf.io,TomBaxter/osf.io,reinaH/osf.io,brianjgeiger/osf.io,HarryRybacki/osf.io
|
Add migration script for defective "delete wiki page" logs
|
# -*- coding: utf-8 -*-
import datetime
import logging
import sys
from modularodm import Q
from website.models import NodeLog
from bson import ObjectId
from nose.tools import *
from tests.base import OsfTestCase
from tests.factories import NodeLogFactory
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def find_invalid_logs():
for log in NodeLog.find(Q('action', 'eq', NodeLog.WIKI_DELETED)):
# Derive UTC datetime object from ObjectId
id_date = ObjectId(log._id).generation_time
id_date = id_date.replace(tzinfo=None) - id_date.utcoffset()
if id_date > log.date:
yield log
def fix_invalid_log(log):
new_dt = ObjectId(log._id).generation_time
new_dt = new_dt.replace(tzinfo=None) - new_dt.utcoffset()
NodeLog._fields['date'].__set__(
log,
new_dt,
safe=False
)
log.save()
if __name__ == '__main__':
if 'dry' in sys.argv:
for log in find_invalid_logs():
print(log._id)
else:
for log in find_invalid_logs():
fix_invalid_log(log)
class TestEnsureLogDates(OsfTestCase):
def setUp(self):
super(TestEnsureLogDates, self).setUp()
self.good_log = NodeLogFactory(action=NodeLog.WIKI_DELETED)
self.bad_log = NodeLogFactory(action=NodeLog.WIKI_DELETED)
self.mongo = self.good_log._storage[0].db['nodelog']
self.mongo.update(
{'_id': self.bad_log._id},
{'$set': {'date': datetime.datetime.utcnow() - datetime.timedelta(weeks=52)}},
)
self.bad_log.reload()
def tearDown(self):
super(TestEnsureLogDates, self).tearDown()
def test_find_invalid_logs(self):
assert_equal(
1,
len(list(find_invalid_logs()))
)
def test_fix_invalid_log(self):
fix_invalid_log(self.bad_log)
assert_true(
self.good_log.date - self.bad_log.date < datetime.timedelta(seconds=1)
)
|
<commit_before><commit_msg>Add migration script for defective "delete wiki page" logs<commit_after>
|
# -*- coding: utf-8 -*-
import datetime
import logging
import sys
from modularodm import Q
from website.models import NodeLog
from bson import ObjectId
from nose.tools import *
from tests.base import OsfTestCase
from tests.factories import NodeLogFactory
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def find_invalid_logs():
for log in NodeLog.find(Q('action', 'eq', NodeLog.WIKI_DELETED)):
# Derive UTC datetime object from ObjectId
id_date = ObjectId(log._id).generation_time
id_date = id_date.replace(tzinfo=None) - id_date.utcoffset()
if id_date > log.date:
yield log
def fix_invalid_log(log):
new_dt = ObjectId(log._id).generation_time
new_dt = new_dt.replace(tzinfo=None) - new_dt.utcoffset()
NodeLog._fields['date'].__set__(
log,
new_dt,
safe=False
)
log.save()
if __name__ == '__main__':
if 'dry' in sys.argv:
for log in find_invalid_logs():
print(log._id)
else:
for log in find_invalid_logs():
fix_invalid_log(log)
class TestEnsureLogDates(OsfTestCase):
def setUp(self):
super(TestEnsureLogDates, self).setUp()
self.good_log = NodeLogFactory(action=NodeLog.WIKI_DELETED)
self.bad_log = NodeLogFactory(action=NodeLog.WIKI_DELETED)
self.mongo = self.good_log._storage[0].db['nodelog']
self.mongo.update(
{'_id': self.bad_log._id},
{'$set': {'date': datetime.datetime.utcnow() - datetime.timedelta(weeks=52)}},
)
self.bad_log.reload()
def tearDown(self):
super(TestEnsureLogDates, self).tearDown()
def test_find_invalid_logs(self):
assert_equal(
1,
len(list(find_invalid_logs()))
)
def test_fix_invalid_log(self):
fix_invalid_log(self.bad_log)
assert_true(
self.good_log.date - self.bad_log.date < datetime.timedelta(seconds=1)
)
|
Add migration script for defective "delete wiki page" logs# -*- coding: utf-8 -*-
import datetime
import logging
import sys
from modularodm import Q
from website.models import NodeLog
from bson import ObjectId
from nose.tools import *
from tests.base import OsfTestCase
from tests.factories import NodeLogFactory
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def find_invalid_logs():
for log in NodeLog.find(Q('action', 'eq', NodeLog.WIKI_DELETED)):
# Derive UTC datetime object from ObjectId
id_date = ObjectId(log._id).generation_time
id_date = id_date.replace(tzinfo=None) - id_date.utcoffset()
if id_date > log.date:
yield log
def fix_invalid_log(log):
new_dt = ObjectId(log._id).generation_time
new_dt = new_dt.replace(tzinfo=None) - new_dt.utcoffset()
NodeLog._fields['date'].__set__(
log,
new_dt,
safe=False
)
log.save()
if __name__ == '__main__':
if 'dry' in sys.argv:
for log in find_invalid_logs():
print(log._id)
else:
for log in find_invalid_logs():
fix_invalid_log(log)
class TestEnsureLogDates(OsfTestCase):
def setUp(self):
super(TestEnsureLogDates, self).setUp()
self.good_log = NodeLogFactory(action=NodeLog.WIKI_DELETED)
self.bad_log = NodeLogFactory(action=NodeLog.WIKI_DELETED)
self.mongo = self.good_log._storage[0].db['nodelog']
self.mongo.update(
{'_id': self.bad_log._id},
{'$set': {'date': datetime.datetime.utcnow() - datetime.timedelta(weeks=52)}},
)
self.bad_log.reload()
def tearDown(self):
super(TestEnsureLogDates, self).tearDown()
def test_find_invalid_logs(self):
assert_equal(
1,
len(list(find_invalid_logs()))
)
def test_fix_invalid_log(self):
fix_invalid_log(self.bad_log)
assert_true(
self.good_log.date - self.bad_log.date < datetime.timedelta(seconds=1)
)
|
<commit_before><commit_msg>Add migration script for defective "delete wiki page" logs<commit_after># -*- coding: utf-8 -*-
import datetime
import logging
import sys
from modularodm import Q
from website.models import NodeLog
from bson import ObjectId
from nose.tools import *
from tests.base import OsfTestCase
from tests.factories import NodeLogFactory
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def find_invalid_logs():
for log in NodeLog.find(Q('action', 'eq', NodeLog.WIKI_DELETED)):
# Derive UTC datetime object from ObjectId
id_date = ObjectId(log._id).generation_time
id_date = id_date.replace(tzinfo=None) - id_date.utcoffset()
if id_date > log.date:
yield log
def fix_invalid_log(log):
new_dt = ObjectId(log._id).generation_time
new_dt = new_dt.replace(tzinfo=None) - new_dt.utcoffset()
NodeLog._fields['date'].__set__(
log,
new_dt,
safe=False
)
log.save()
if __name__ == '__main__':
if 'dry' in sys.argv:
for log in find_invalid_logs():
print(log._id)
else:
for log in find_invalid_logs():
fix_invalid_log(log)
class TestEnsureLogDates(OsfTestCase):
def setUp(self):
super(TestEnsureLogDates, self).setUp()
self.good_log = NodeLogFactory(action=NodeLog.WIKI_DELETED)
self.bad_log = NodeLogFactory(action=NodeLog.WIKI_DELETED)
self.mongo = self.good_log._storage[0].db['nodelog']
self.mongo.update(
{'_id': self.bad_log._id},
{'$set': {'date': datetime.datetime.utcnow() - datetime.timedelta(weeks=52)}},
)
self.bad_log.reload()
def tearDown(self):
super(TestEnsureLogDates, self).tearDown()
def test_find_invalid_logs(self):
assert_equal(
1,
len(list(find_invalid_logs()))
)
def test_fix_invalid_log(self):
fix_invalid_log(self.bad_log)
assert_true(
self.good_log.date - self.bad_log.date < datetime.timedelta(seconds=1)
)
|
|
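The migration above treats the timestamp embedded in each log's ObjectId as ground truth for its creation time. A minimal sketch of that derivation; a freshly generated id stands in for a stored one:

```python
from bson import ObjectId

oid = ObjectId()                 # a new id embeds the current time in its first bytes
created = oid.generation_time    # timezone-aware UTC datetime
# Same normalisation as the script: strip tzinfo to get a naive UTC datetime.
naive_utc = created.replace(tzinfo=None) - created.utcoffset()
```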
fdb39ca0f3aee733646e2bd4e8021b4abbedcf52
|
tip/algorithms/sorting/quicksort.py
|
tip/algorithms/sorting/quicksort.py
|
"""Quick Sort Algorithm.
The Quick Sort is a recursive sort of order in range from O(n log n) with
the best pivots, to O(n2) with the worst pivots, where n is the number
of elements in the array.
Quicksort, also known as partition-exchange sort, uses these steps:
1 - Choose any element of the array to be the pivot.
2 - Divide all other elements (except the pivot) into two partitions.
3 - All elements less than the pivot must be in the first partition.
4 - All elements greater than the pivot must be in the second partition.
5 - Use recursion to sort both partitions.
6 - Join first sorted partition, the pivot, and the second sorted partition.
Quicksort has a reputation as the fastest sort. Optimized variants of quicksort
are common features of many languages and libraries. One often contrasts
quicksort with merge sort, because both sorts have an average time
of O(n log n).
"""
def partition(unsorted_list):
"""Partition array using a pivot value."""
return unsorted_list
def quick_sort(unsorted_list):
"""Sorts the input list using the quick sort algorithm.
>>> unsorted_list = [4, 5, 1, 6, 3]
>>> quick_sort(unsorted_list)
[1, 3, 4, 5, 6]
"""
unsorted_list = sorted(unsorted_list)
return unsorted_list
|
Add some info about Quick Sort
|
Add some info about Quick Sort
|
Python
|
unlicense
|
davidgasquez/tip
|
Add some info about Quick Sort
|
"""Quick Sort Algorithm.
The Quick Sort is a recursive sort of order in range from O(n log n) with
the best pivots, to O(n2) with the worst pivots, where n is the number
of elements in the array.
Quicksort, also known as partition-exchange sort, uses these steps:
1 - Choose any element of the array to be the pivot.
2 - Divide all other elements (except the pivot) into two partitions.
3 - All elements less than the pivot must be in the first partition.
4 - All elements greater than the pivot must be in the second partition.
5 - Use recursion to sort both partitions.
6 - Join first sorted partition, the pivot, and the second sorted partition.
Quicksort has a reputation as the fastest sort. Optimized variants of quicksort
are common features of many languages and libraries. One often contrasts
quicksort with merge sort, because both sorts have an average time
of O(n log n).
"""
def partition(unsorted_list):
"""Partition array using a pivot value."""
return unsorted_list
def quick_sort(unsorted_list):
"""Sorts the input list using the quick sort algorithm.
>>> unsorted_list = [4, 5, 1, 6, 3]
>>> quick_sort(unsorted_list)
[1, 3, 4, 5, 6]
"""
unsorted_list = sorted(unsorted_list)
return unsorted_list
|
<commit_before><commit_msg>Add some info about Quick Sort<commit_after>
|
"""Quick Sort Algorithm.
The Quick Sort is a recursive sort of order in range from O(n log n) with
the best pivots, to O(n2) with the worst pivots, where n is the number
of elements in the array.
Quicksort, also known as partition-exchange sort, uses these steps:
1 - Choose any element of the array to be the pivot.
2 - Divide all other elements (except the pivot) into two partitions.
3 - All elements less than the pivot must be in the first partition.
4 - All elements greater than the pivot must be in the second partition.
5 - Use recursion to sort both partitions.
6 - Join first sorted partition, the pivot, and the second sorted partition.
Quicksort has a reputation as the fastest sort. Optimized variants of quicksort
are common features of many languages and libraries. One often contrasts
quicksort with merge sort, because both sorts have an average time
of O(n log n).
"""
def partition(unsorted_list):
"""Partition array using a pivot value."""
return unsorted_list
def quick_sort(unsorted_list):
"""Sorts the input list using the quick sort algorithm.
>>> unsorted_list = [4, 5, 1, 6, 3]
>>> quick_sort(unsorted_list)
[1, 3, 4, 5, 6]
"""
unsorted_list = sorted(unsorted_list)
return unsorted_list
|
Add some info about Quick Sort"""Quick Sort Algorithm.
The Quick Sort is a recursive sort whose running time ranges from O(n log n)
with the best pivots to O(n^2) with the worst pivots, where n is the number
of elements in the array.
Quicksort, also known as partition-exchange sort, uses these steps:
1 - Choose any element of the array to be the pivot.
2 - Divide all other elements (except the pivot) into two partitions.
3 - All elements less than the pivot must be in the first partition.
4 - All elements greater than the pivot must be in the second partition.
5 - Use recursion to sort both partitions.
6 - Join first sorted partition, the pivot, and the second sorted partition.
Quicksort has a reputation as the fastest sort. Optimized variants of quicksort
are common features of many languages and libraries. One often contrasts
quicksort with merge sort, because both sorts have an average time
of O(n log n).
"""
def partition(unsorted_list):
"""Partition array using a pivot value."""
return unsorted_list
def quick_sort(unsorted_list):
"""Sorts the input list using the quick sort algorithm.
>>> unsorted_list = [4, 5, 1, 6, 3]
>>> quick_sort(unsorted_list)
[1, 3, 4, 5, 6]
"""
unsorted_list = sorted(unsorted_list)
return unsorted_list
|
<commit_before><commit_msg>Add some info about Quick Sort<commit_after>"""Quick Sort Algorithm.
The Quick Sort is a recursive sort whose running time ranges from O(n log n)
with the best pivots to O(n^2) with the worst pivots, where n is the number
of elements in the array.
Quicksort, also known as partition-exchange sort, uses these steps:
1 - Choose any element of the array to be the pivot.
2 - Divide all other elements (except the pivot) into two partitions.
3 - All elements less than the pivot must be in the first partition.
4 - All elements greater than the pivot must be in the second partition.
5 - Use recursion to sort both partitions.
6 - Join first sorted partition, the pivot, and the second sorted partition.
Quicksort has a reputation as the fastest sort. Optimized variants of quicksort
are common features of many languages and libraries. One often contrasts
quicksort with merge sort, because both sorts have an average time
of O(n log n).
"""
def partition(unsorted_list):
"""Partition array using a pivot value."""
return unsorted_list
def quick_sort(unsorted_list):
"""Sorts the input list using the quick sort algorithm.
>>> unsorted_list = [4, 5, 1, 6, 3]
>>> quick_sort(unsorted_list)
[1, 3, 4, 5, 6]
"""
unsorted_list = sorted(unsorted_list)
return unsorted_list
|
|
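A minimal runnable sketch of the partition-exchange scheme the docstring above describes; the function name is hypothetical and this is an illustration, not the committed code, which deliberately defers to Python's built-in sorted().

def quick_sort_sketch(items):
    """Recursive partition-exchange sort following the six steps above."""
    if len(items) <= 1:
        return items
    pivot, rest = items[0], items[1:]           # step 1: choose a pivot
    first = [x for x in rest if x <= pivot]     # steps 2-3: first partition
    second = [x for x in rest if x > pivot]     # step 4: second partition
    # steps 5-6: sort both partitions recursively, then join around the pivot
    return quick_sort_sketch(first) + [pivot] + quick_sort_sketch(second)

assert quick_sort_sketch([4, 5, 1, 6, 3]) == [1, 3, 4, 5, 6]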
7d27c40961fa80525d73942742fbe4ff8930be9a
|
python_script/get_token.py
|
python_script/get_token.py
|
from keystoneauth1.identity import v3
from keystoneauth1 import session
from config import *
auth = v3.Password(auth_url=AUTH_URL,
user_domain_name='default',
username=USERNAME,
password=PASSWORD,
project_domain_name='default',
project_name=PROJECT_NAME)
session = session.Session(auth=auth)
TOKEN = session.get_token()
|
Add missing get token file
|
Add missing get token file
|
Python
|
mit
|
daikk115/openstack_upgrade_test,daikk115/openstack_upgrade_test
|
Add missing get token file
|
from keystoneauth1.identity import v3
from keystoneauth1 import session
from config import *
auth = v3.Password(auth_url=AUTH_URL,
user_domain_name='default',
username=USERNAME,
password=PASSWORD,
project_domain_name='default',
project_name=PROJECT_NAME)
session = session.Session(auth=auth)
TOKEN = session.get_token()
|
<commit_before><commit_msg>Add missing get token file<commit_after>
|
from keystoneauth1.identity import v3
from keystoneauth1 import session
from config import *
auth = v3.Password(auth_url=AUTH_URL,
user_domain_name='default',
username=USERNAME,
password=PASSWORD,
project_domain_name='default',
project_name=PROJECT_NAME)
session = session.Session(auth=auth)
TOKEN = session.get_token()
|
Add missing get token filefrom keystoneauth1.identity import v3
from keystoneauth1 import session
from config import *
auth = v3.Password(auth_url=AUTH_URL,
user_domain_name='default',
username=USERNAME,
password=PASSWORD,
project_domain_name='default',
project_name=PROJECT_NAME)
session = session.Session(auth=auth)
TOKEN = session.get_token()
|
<commit_before><commit_msg>Add missing get token file<commit_after>from keystoneauth1.identity import v3
from keystoneauth1 import session
from config import *
auth = v3.Password(auth_url=AUTH_URL,
user_domain_name='default',
username=USERNAME,
password=PASSWORD,
project_domain_name='default',
project_name=PROJECT_NAME)
session = session.Session(auth=auth)
TOKEN = session.get_token()
|
|
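A hedged usage sketch for the record above: once TOKEN is obtained, OpenStack services conventionally accept it in the X-Auth-Token header. The endpoint URL below is a placeholder assumption, not something defined in the original file.

import requests

from get_token import TOKEN

# Hypothetical service endpoint; in practice it comes from the Keystone catalog.
ENDPOINT = 'http://controller:8774/v2.1/servers'

response = requests.get(ENDPOINT, headers={'X-Auth-Token': TOKEN})
response.raise_for_status()
print(response.json())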
735c3378d7cb150dcf2257f737a443fff9974b1a
|
chapter02/fahrenheitToCelsiusTable.py
|
chapter02/fahrenheitToCelsiusTable.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
def getCelsius(f):
return (f - 32) * 5 / 9
for index in range(10, 130, 10):
fahrenheit = index
print "{0}ºF are {1}ºC".format(fahrenheit, getCelsius(fahrenheit))
|
Add Fahrenheit to Celsius table exercise
|
Add Fahrenheit to Celsius table exercise
|
Python
|
apache-2.0
|
MindCookin/python-exercises
|
Add Fahrenheit to Celsius table exercise
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
def getCelsius(f):
return (f - 32) * 5 / 9
for index in range(10, 130, 10):
fahrenheit = index
print "{0}ºF are {1}ºC".format(fahrenheit, getCelsius(fahrenheit))
|
<commit_before><commit_msg>Add Fahrenheit to Celsius table exercise<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
def getCelsius(f):
return (f - 32) * 5 / 9
for index in range(10, 130, 10):
fahrenheit = index
print "{0}ºF are {1}ºC".format(fahrenheit, getCelsius(fahrenheit))
|
Add Fahrenheit to Celsius table exercise#!/usr/bin/env python
# -*- coding: utf-8 -*-
def getCelsius(f):
return (f - 32) * 5 / 9
for index in range(10, 130, 10):
fahrenheit = index
print "{0}ºF are {1}ºC".format(fahrenheit, getCelsius(fahrenheit))
|
<commit_before><commit_msg>Add Fahrenheit to Celsius table exercise<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
def getCelsius(f):
return (f - 32) * 5 / 9
for index in range(10, 130, 10):
fahrenheit = index
print "{0}ºF are {1}ºC".format(fahrenheit, getCelsius(fahrenheit))
|
|
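One caveat on the record above: under Python 2, (f - 32) * 5 / 9 truncates to an integer, so the printed Celsius values are floored. A small Python 3 sketch with true division (function name is hypothetical):

def fahrenheit_to_celsius(f):
    """Convert Fahrenheit to Celsius using true (float) division."""
    return (f - 32) * 5 / 9

for fahrenheit in range(10, 130, 10):
    print("{0}ºF are {1:.1f}ºC".format(fahrenheit, fahrenheit_to_celsius(fahrenheit)))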
8fe6c1544298128fd0487da4669c373966bdae3d
|
adaptive/genpython.py
|
adaptive/genpython.py
|
import sys
py_reserved = set("id is class def".split()) # FIXME
def py_name(name):
if name in py_reserved:
return name + "_"
return name
sdl_to_python = {
"int32": "int",
"int64": "int",
"double": "float",
"string": "basestring"
}
def py_Typename(name):
return sdl_to_python.get(name, name)
class Pythonize(object):
"Add py_name node attributes"
def visit_DEFAULT(self, node):
print "DEFAULT", repr(node)
def visit_Module(self, node):
node.py_name = py_name(node.name)
def visit_Struct(self, node):
node.py_name = py_name(node.name)
def visit_Field(self, node):
node.py_name = py_name(node.name)
def visit_Parameter(self, node):
node.py_name = py_name(node.name)
if node.default:
if node.default.name == "null":
node.default.py_name = "None"
else:
node.default.py_name = repr(node.default.name)
def visit_Operation(self, node):
node.py_name = py_name(node.name)
def visit_Type(self, node):
node.py_name = py_Typename(node.name)
class PyOutput(object):
"Generate decent-looking Python code"
def __init__(self):
self.lines = []
self.main_indent_level = 0
self.ref_indent_level = 0
def out(self, line):
self.lines.append(self.main_indent_level * " " + line.strip())
def ref(self, line):
self.lines.append("## " + self.ref_indent_level * " " + line.strip())
def indent(self):
self.main_indent_level += 1
def dedent(self):
assert self.main_indent_level > 0, (self.main_indent_level, "\n".join(self.lines))
self.main_indent_level -= 1
def ref_indent(self):
self.ref_indent_level += 1
def ref_dedent(self):
assert self.ref_indent_level > 0, (self.ref_indent_level, "\n".join(self.lines))
self.ref_indent_level -= 1
def dump(self, fd=sys.stdout):
# Fancy print...
wasPassThru = True
for line in self.lines:
isPassThru = line.startswith("## ")
if isPassThru != wasPassThru:
fd.write("\n")
fd.write(line)
fd.write("\n")
wasPassThru = isPassThru
|
Add helpers for generating Python code
|
Add helpers for generating Python code
|
Python
|
apache-2.0
|
datawire/adaptive
|
Add helpers for generating Python code
|
import sys
py_reserved = set("id is class def".split()) # FIXME
def py_name(name):
if name in py_reserved:
return name + "_"
return name
sdl_to_python = {
"int32": "int",
"int64": "int",
"double": "float",
"string": "basestring"
}
def py_Typename(name):
return sdl_to_python.get(name, name)
class Pythonize(object):
"Add py_name node attributes"
def visit_DEFAULT(self, node):
print "DEFAULT", repr(node)
def visit_Module(self, node):
node.py_name = py_name(node.name)
def visit_Struct(self, node):
node.py_name = py_name(node.name)
def visit_Field(self, node):
node.py_name = py_name(node.name)
def visit_Parameter(self, node):
node.py_name = py_name(node.name)
if node.default:
if node.default.name == "null":
node.default.py_name = "None"
else:
node.default.py_name = repr(node.default.name)
def visit_Operation(self, node):
node.py_name = py_name(node.name)
def visit_Type(self, node):
node.py_name = py_Typename(node.name)
class PyOutput(object):
"Generate decent-looking Python code"
def __init__(self):
self.lines = []
self.main_indent_level = 0
self.ref_indent_level = 0
def out(self, line):
self.lines.append(self.main_indent_level * " " + line.strip())
def ref(self, line):
self.lines.append("## " + self.ref_indent_level * " " + line.strip())
def indent(self):
self.main_indent_level += 1
def dedent(self):
assert self.main_indent_level > 0, (self.main_indent_level, "\n".join(self.lines))
self.main_indent_level -= 1
def ref_indent(self):
self.ref_indent_level += 1
def ref_dedent(self):
assert self.ref_indent_level > 0, (self.ref_indent_level, "\n".join(self.lines))
self.ref_indent_level -= 1
def dump(self, fd=sys.stdout):
# Fancy print...
wasPassThru = True
for line in self.lines:
isPassThru = line.startswith("## ")
if isPassThru != wasPassThru:
fd.write("\n")
fd.write(line)
fd.write("\n")
wasPassThru = isPassThru
|
<commit_before><commit_msg>Add helpers for generating Python code<commit_after>
|
import sys
py_reserved = set("id is class def".split()) # FIXME
def py_name(name):
if name in py_reserved:
return name + "_"
return name
sdl_to_python = {
"int32": "int",
"int64": "int",
"double": "float",
"string": "basestring"
}
def py_Typename(name):
return sdl_to_python.get(name, name)
class Pythonize(object):
"Add py_name node attributes"
def visit_DEFAULT(self, node):
print "DEFAULT", repr(node)
def visit_Module(self, node):
node.py_name = py_name(node.name)
def visit_Struct(self, node):
node.py_name = py_name(node.name)
def visit_Field(self, node):
node.py_name = py_name(node.name)
def visit_Parameter(self, node):
node.py_name = py_name(node.name)
if node.default:
if node.default.name == "null":
node.default.py_name = "None"
else:
node.default.py_name = repr(node.default.name)
def visit_Operation(self, node):
node.py_name = py_name(node.name)
def visit_Type(self, node):
node.py_name = py_Typename(node.name)
class PyOutput(object):
"Generate decent-looking Python code"
def __init__(self):
self.lines = []
self.main_indent_level = 0
self.ref_indent_level = 0
def out(self, line):
self.lines.append(self.main_indent_level * " " + line.strip())
def ref(self, line):
self.lines.append("## " + self.ref_indent_level * " " + line.strip())
def indent(self):
self.main_indent_level += 1
def dedent(self):
assert self.main_indent_level > 0, (self.main_indent_level, "\n".join(self.lines))
self.main_indent_level -= 1
def ref_indent(self):
self.ref_indent_level += 1
def ref_dedent(self):
assert self.ref_indent_level > 0, (self.ref_indent_level, "\n".join(self.lines))
self.ref_indent_level -= 1
def dump(self, fd=sys.stdout):
# Fancy print...
wasPassThru = True
for line in self.lines:
isPassThru = line.startswith("## ")
if isPassThru != wasPassThru:
fd.write("\n")
fd.write(line)
fd.write("\n")
wasPassThru = isPassThru
|
Add helpers for generating Python codeimport sys
py_reserved = set("id is class def".split()) # FIXME
def py_name(name):
if name in py_reserved:
return name + "_"
return name
sdl_to_python = {
"int32": "int",
"int64": "int",
"double": "float",
"string": "basestring"
}
def py_Typename(name):
return sdl_to_python.get(name, name)
class Pythonize(object):
"Add py_name node attributes"
def visit_DEFAULT(self, node):
print "DEFAULT", repr(node)
def visit_Module(self, node):
node.py_name = py_name(node.name)
def visit_Struct(self, node):
node.py_name = py_name(node.name)
def visit_Field(self, node):
node.py_name = py_name(node.name)
def visit_Parameter(self, node):
node.py_name = py_name(node.name)
if node.default:
if node.default.name == "null":
node.default.py_name = "None"
else:
node.default.py_name = repr(node.default.name)
def visit_Operation(self, node):
node.py_name = py_name(node.name)
def visit_Type(self, node):
node.py_name = py_Typename(node.name)
class PyOutput(object):
"Generate decent-looking Python code"
def __init__(self):
self.lines = []
self.main_indent_level = 0
self.ref_indent_level = 0
def out(self, line):
self.lines.append(self.main_indent_level * " " + line.strip())
def ref(self, line):
self.lines.append("## " + self.ref_indent_level * " " + line.strip())
def indent(self):
self.main_indent_level += 1
def dedent(self):
assert self.main_indent_level > 0, (self.main_indent_level, "\n".join(self.lines))
self.main_indent_level -= 1
def ref_indent(self):
self.ref_indent_level += 1
def ref_dedent(self):
assert self.ref_indent_level > 0, (self.ref_indent_level, "\n".join(self.lines))
self.ref_indent_level -= 1
def dump(self, fd=sys.stdout):
# Fancy print...
wasPassThru = True
for line in self.lines:
isPassThru = line.startswith("## ")
if isPassThru != wasPassThru:
fd.write("\n")
fd.write(line)
fd.write("\n")
wasPassThru = isPassThru
|
<commit_before><commit_msg>Add helpers for generating Python code<commit_after>import sys
py_reserved = set("id is class def".split()) # FIXME
def py_name(name):
if name in py_reserved:
return name + "_"
return name
sdl_to_python = {
"int32": "int",
"int64": "int",
"double": "float",
"string": "basestring"
}
def py_Typename(name):
return sdl_to_python.get(name, name)
class Pythonize(object):
"Add py_name node attributes"
def visit_DEFAULT(self, node):
print "DEFAULT", repr(node)
def visit_Module(self, node):
node.py_name = py_name(node.name)
def visit_Struct(self, node):
node.py_name = py_name(node.name)
def visit_Field(self, node):
node.py_name = py_name(node.name)
def visit_Parameter(self, node):
node.py_name = py_name(node.name)
if node.default:
if node.default.name == "null":
node.default.py_name = "None"
else:
node.default.py_name = repr(node.default.name)
def visit_Operation(self, node):
node.py_name = py_name(node.name)
def visit_Type(self, node):
node.py_name = py_Typename(node.name)
class PyOutput(object):
"Generate decent-looking Python code"
def __init__(self):
self.lines = []
self.main_indent_level = 0
self.ref_indent_level = 0
def out(self, line):
self.lines.append(self.main_indent_level * " " + line.strip())
def ref(self, line):
self.lines.append("## " + self.ref_indent_level * " " + line.strip())
def indent(self):
self.main_indent_level += 1
def dedent(self):
assert self.main_indent_level > 0, (self.main_indent_level, "\n".join(self.lines))
self.main_indent_level -= 1
def ref_indent(self):
self.ref_indent_level += 1
def ref_dedent(self):
assert self.ref_indent_level > 0, (self.ref_indent_level, "\n".join(self.lines))
self.ref_indent_level -= 1
def dump(self, fd=sys.stdout):
# Fancy print...
wasPassThru = True
for line in self.lines:
isPassThru = line.startswith("## ")
if isPassThru != wasPassThru:
fd.write("\n")
fd.write(line)
fd.write("\n")
wasPassThru = isPassThru
|
|
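A brief usage sketch (hypothetical, not part of the commit) showing how the PyOutput helper above interleaves generated code with '## '-prefixed reference lines:

out = PyOutput()
out.ref("source: module example")   # passthrough reference line
out.out("def greet(name):")
out.indent()
out.out("return 'Hello, ' + name")
out.dedent()
out.dump()                          # writes the buffered lines to stdout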
213796305b89fa0e5de001006dbd741db0e83d36
|
tests/backends/events_test.py
|
tests/backends/events_test.py
|
import threading
import unittest
from pykka.actor import ThreadingActor
from pykka.registry import ActorRegistry
from mopidy.backends.dummy import DummyBackend
from mopidy.listeners import BackendListener
from mopidy.models import Track
class BackendEventsTest(unittest.TestCase):
def setUp(self):
self.events = {
'started_playing': threading.Event(),
'stopped_playing': threading.Event(),
}
self.backend = DummyBackend.start().proxy()
self.listener = DummyBackendListener.start(self.events).proxy()
def tearDown(self):
ActorRegistry.stop_all()
def test_play_sends_started_playing_event(self):
self.backend.current_playlist.add([Track(uri='a')])
self.backend.playback.play()
self.events['started_playing'].wait(timeout=1)
self.assertTrue(self.events['started_playing'].is_set())
def test_stop_sends_stopped_playing_event(self):
self.backend.current_playlist.add([Track(uri='a')])
self.backend.playback.play()
self.backend.playback.stop()
self.events['stopped_playing'].wait(timeout=1)
self.assertTrue(self.events['stopped_playing'].is_set())
class DummyBackendListener(ThreadingActor, BackendListener):
def __init__(self, events):
self.events = events
def started_playing(self, track):
self.events['started_playing'].set()
def stopped_playing(self, track, stop_position):
self.events['stopped_playing'].set()
|
Test that backend actually sends the events
|
Test that backend actually sends the events
|
Python
|
apache-2.0
|
vrs01/mopidy,ZenithDK/mopidy,swak/mopidy,quartz55/mopidy,bencevans/mopidy,kingosticks/mopidy,ali/mopidy,rawdlite/mopidy,vrs01/mopidy,diandiankan/mopidy,jcass77/mopidy,liamw9534/mopidy,diandiankan/mopidy,ZenithDK/mopidy,kingosticks/mopidy,jcass77/mopidy,SuperStarPL/mopidy,diandiankan/mopidy,glogiotatidis/mopidy,priestd09/mopidy,dbrgn/mopidy,bacontext/mopidy,woutervanwijk/mopidy,quartz55/mopidy,vrs01/mopidy,jmarsik/mopidy,hkariti/mopidy,mopidy/mopidy,quartz55/mopidy,hkariti/mopidy,bacontext/mopidy,woutervanwijk/mopidy,bacontext/mopidy,bencevans/mopidy,ali/mopidy,jodal/mopidy,rawdlite/mopidy,abarisain/mopidy,hkariti/mopidy,tkem/mopidy,glogiotatidis/mopidy,ali/mopidy,jodal/mopidy,vrs01/mopidy,abarisain/mopidy,adamcik/mopidy,pacificIT/mopidy,bacontext/mopidy,jcass77/mopidy,jmarsik/mopidy,bencevans/mopidy,quartz55/mopidy,mokieyue/mopidy,SuperStarPL/mopidy,dbrgn/mopidy,pacificIT/mopidy,rawdlite/mopidy,hkariti/mopidy,mokieyue/mopidy,swak/mopidy,tkem/mopidy,adamcik/mopidy,rawdlite/mopidy,mopidy/mopidy,dbrgn/mopidy,pacificIT/mopidy,ZenithDK/mopidy,jmarsik/mopidy,swak/mopidy,bencevans/mopidy,dbrgn/mopidy,adamcik/mopidy,jmarsik/mopidy,mopidy/mopidy,swak/mopidy,tkem/mopidy,SuperStarPL/mopidy,ali/mopidy,priestd09/mopidy,diandiankan/mopidy,tkem/mopidy,kingosticks/mopidy,glogiotatidis/mopidy,mokieyue/mopidy,glogiotatidis/mopidy,jodal/mopidy,SuperStarPL/mopidy,priestd09/mopidy,ZenithDK/mopidy,mokieyue/mopidy,pacificIT/mopidy,liamw9534/mopidy
|
Test that backend actually sends the events
|
import threading
import unittest
from pykka.actor import ThreadingActor
from pykka.registry import ActorRegistry
from mopidy.backends.dummy import DummyBackend
from mopidy.listeners import BackendListener
from mopidy.models import Track
class BackendEventsTest(unittest.TestCase):
def setUp(self):
self.events = {
'started_playing': threading.Event(),
'stopped_playing': threading.Event(),
}
self.backend = DummyBackend.start().proxy()
self.listener = DummyBackendListener.start(self.events).proxy()
def tearDown(self):
ActorRegistry.stop_all()
def test_play_sends_started_playing_event(self):
self.backend.current_playlist.add([Track(uri='a')])
self.backend.playback.play()
self.events['started_playing'].wait(timeout=1)
self.assertTrue(self.events['started_playing'].is_set())
def test_stop_sends_stopped_playing_event(self):
self.backend.current_playlist.add([Track(uri='a')])
self.backend.playback.play()
self.backend.playback.stop()
self.events['stopped_playing'].wait(timeout=1)
self.assertTrue(self.events['stopped_playing'].is_set())
class DummyBackendListener(ThreadingActor, BackendListener):
def __init__(self, events):
self.events = events
def started_playing(self, track):
self.events['started_playing'].set()
def stopped_playing(self, track, stop_position):
self.events['stopped_playing'].set()
|
<commit_before><commit_msg>Test that backend actually sends the events<commit_after>
|
import threading
import unittest
from pykka.actor import ThreadingActor
from pykka.registry import ActorRegistry
from mopidy.backends.dummy import DummyBackend
from mopidy.listeners import BackendListener
from mopidy.models import Track
class BackendEventsTest(unittest.TestCase):
def setUp(self):
self.events = {
'started_playing': threading.Event(),
'stopped_playing': threading.Event(),
}
self.backend = DummyBackend.start().proxy()
self.listener = DummyBackendListener.start(self.events).proxy()
def tearDown(self):
ActorRegistry.stop_all()
def test_play_sends_started_playing_event(self):
self.backend.current_playlist.add([Track(uri='a')])
self.backend.playback.play()
self.events['started_playing'].wait(timeout=1)
self.assertTrue(self.events['started_playing'].is_set())
def test_stop_sends_stopped_playing_event(self):
self.backend.current_playlist.add([Track(uri='a')])
self.backend.playback.play()
self.backend.playback.stop()
self.events['stopped_playing'].wait(timeout=1)
self.assertTrue(self.events['stopped_playing'].is_set())
class DummyBackendListener(ThreadingActor, BackendListener):
def __init__(self, events):
self.events = events
def started_playing(self, track):
self.events['started_playing'].set()
def stopped_playing(self, track, stop_position):
self.events['stopped_playing'].set()
|
Test that backend actually sends the eventsimport threading
import unittest
from pykka.actor import ThreadingActor
from pykka.registry import ActorRegistry
from mopidy.backends.dummy import DummyBackend
from mopidy.listeners import BackendListener
from mopidy.models import Track
class BackendEventsTest(unittest.TestCase):
def setUp(self):
self.events = {
'started_playing': threading.Event(),
'stopped_playing': threading.Event(),
}
self.backend = DummyBackend.start().proxy()
self.listener = DummyBackendListener.start(self.events).proxy()
def tearDown(self):
ActorRegistry.stop_all()
def test_play_sends_started_playing_event(self):
self.backend.current_playlist.add([Track(uri='a')])
self.backend.playback.play()
self.events['started_playing'].wait(timeout=1)
self.assertTrue(self.events['started_playing'].is_set())
def test_stop_sends_stopped_playing_event(self):
self.backend.current_playlist.add([Track(uri='a')])
self.backend.playback.play()
self.backend.playback.stop()
self.events['stopped_playing'].wait(timeout=1)
self.assertTrue(self.events['stopped_playing'].is_set())
class DummyBackendListener(ThreadingActor, BackendListener):
def __init__(self, events):
self.events = events
def started_playing(self, track):
self.events['started_playing'].set()
def stopped_playing(self, track, stop_position):
self.events['stopped_playing'].set()
|
<commit_before><commit_msg>Test that backend actually sends the events<commit_after>import threading
import unittest
from pykka.actor import ThreadingActor
from pykka.registry import ActorRegistry
from mopidy.backends.dummy import DummyBackend
from mopidy.listeners import BackendListener
from mopidy.models import Track
class BackendEventsTest(unittest.TestCase):
def setUp(self):
self.events = {
'started_playing': threading.Event(),
'stopped_playing': threading.Event(),
}
self.backend = DummyBackend.start().proxy()
self.listener = DummyBackendListener.start(self.events).proxy()
def tearDown(self):
ActorRegistry.stop_all()
def test_play_sends_started_playing_event(self):
self.backend.current_playlist.add([Track(uri='a')])
self.backend.playback.play()
self.events['started_playing'].wait(timeout=1)
self.assertTrue(self.events['started_playing'].is_set())
def test_stop_sends_stopped_playing_event(self):
self.backend.current_playlist.add([Track(uri='a')])
self.backend.playback.play()
self.backend.playback.stop()
self.events['stopped_playing'].wait(timeout=1)
self.assertTrue(self.events['stopped_playing'].is_set())
class DummyBackendListener(ThreadingActor, BackendListener):
def __init__(self, events):
self.events = events
def started_playing(self, track):
self.events['started_playing'].set()
def stopped_playing(self, track, stop_position):
self.events['stopped_playing'].set()
|
|
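The test above relies on a small synchronization pattern: a listener sets a threading.Event when a callback fires, and the test blocks on it with a timeout. A standalone sketch of just that pattern (names are illustrative):

import threading

event = threading.Event()

def on_started_playing():
    """Callback fired from another thread when playback starts."""
    event.set()

worker = threading.Thread(target=on_started_playing)
worker.start()
event.wait(timeout=1)   # returns once set() is called or the timeout elapses
assert event.is_set()
worker.join()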
a034a90c16d20a75b74cc326629ed2a46d72e05c
|
tests/test_forgot_password.py
|
tests/test_forgot_password.py
|
from . import TheInternetTestCase
from helium.api import start_chrome, S, get_driver, set_driver, write, click, \
Text, Link, kill_browser, wait_until
class ForgotPasswordTest(TheInternetTestCase):
def get_page(self):
return "http://the-internet.herokuapp.com/forgot_password"
def test_retrieve_password(self):
email_address = self._get_temporary_email_address()
write(email_address, into="E-mail")
click("Retrieve Password")
self.assertTrue(Text("Your e-mail's been sent!").exists())
set_driver(self.emailbox_driver)
wait_until(
self._refresh_and_check_if_exists,
timeout_secs=60, interval_secs=1
)
self.assertTrue(Text("no-reply@the-internet.herokuapp.com").exists())
kill_browser()
set_driver(self.test_case_driver)
def _get_temporary_email_address(self):
self.test_case_driver = get_driver()
start_chrome("http://temp-mail.org/")
self.emailbox_driver = get_driver()
email_address = S("#email").web_element.text
set_driver(self.test_case_driver)
return email_address
def _refresh_and_check_if_exists(self):
click("Refresh")
return Link("Forgot Password from the").exists()
|
Add test case for forgot password.
|
Add test case for forgot password.
|
Python
|
mit
|
bugfree-software/the-internet-solution-python
|
Add test case for forgot password.
|
from . import TheInternetTestCase
from helium.api import start_chrome, S, get_driver, set_driver, write, click, \
Text, Link, kill_browser, wait_until
class ForgotPasswordTest(TheInternetTestCase):
def get_page(self):
return "http://the-internet.herokuapp.com/forgot_password"
def test_retrieve_password(self):
email_address = self._get_temporary_email_address()
write(email_address, into="E-mail")
click("Retrieve Password")
self.assertTrue(Text("Your e-mail's been sent!").exists())
set_driver(self.emailbox_driver)
wait_until(
self._refresh_and_check_if_exists,
timeout_secs=60, interval_secs=1
)
self.assertTrue(Text("no-reply@the-internet.herokuapp.com").exists())
kill_browser()
set_driver(self.test_case_driver)
def _get_temporary_email_address(self):
self.test_case_driver = get_driver()
start_chrome("http://temp-mail.org/")
self.emailbox_driver = get_driver()
email_address = S("#email").web_element.text
set_driver(self.test_case_driver)
return email_address
def _refresh_and_check_if_exists(self):
click("Refresh")
return Link("Forgot Password from the").exists()
|
<commit_before><commit_msg>Add test case for forgot password.<commit_after>
|
from . import TheInternetTestCase
from helium.api import start_chrome, S, get_driver, set_driver, write, click, \
Text, Link, kill_browser, wait_until
class ForgotPasswordTest(TheInternetTestCase):
def get_page(self):
return "http://the-internet.herokuapp.com/forgot_password"
def test_retrieve_password(self):
email_address = self._get_temporary_email_address()
write(email_address, into="E-mail")
click("Retrieve Password")
self.assertTrue(Text("Your e-mail's been sent!").exists())
set_driver(self.emailbox_driver)
wait_until(
self._refresh_and_check_if_exists,
timeout_secs=60, interval_secs=1
)
self.assertTrue(Text("no-reply@the-internet.herokuapp.com").exists())
kill_browser()
set_driver(self.test_case_driver)
def _get_temporary_email_address(self):
self.test_case_driver = get_driver()
start_chrome("http://temp-mail.org/")
self.emailbox_driver = get_driver()
email_address = S("#email").web_element.text
set_driver(self.test_case_driver)
return email_address
def _refresh_and_check_if_exists(self):
click("Refresh")
return Link("Forgot Password from the").exists()
|
Add test case for forgot password.from . import TheInternetTestCase
from helium.api import start_chrome, S, get_driver, set_driver, write, click, \
Text, Link, kill_browser, wait_until
class ForgotPasswordTest(TheInternetTestCase):
def get_page(self):
return "http://the-internet.herokuapp.com/forgot_password"
def test_retrieve_password(self):
email_address = self._get_temporary_email_address()
write(email_address, into="E-mail")
click("Retrieve Password")
self.assertTrue(Text("Your e-mail's been sent!").exists())
set_driver(self.emailbox_driver)
wait_until(
self._refresh_and_check_if_exists,
timeout_secs=60, interval_secs=1
)
self.assertTrue(Text("no-reply@the-internet.herokuapp.com").exists())
kill_browser()
set_driver(self.test_case_driver)
def _get_temporary_email_address(self):
self.test_case_driver = get_driver()
start_chrome("http://temp-mail.org/")
self.emailbox_driver = get_driver()
email_address = S("#email").web_element.text
set_driver(self.test_case_driver)
return email_address
def _refresh_and_check_if_exists(self):
click("Refresh")
return Link("Forgot Password from the").exists()
|
<commit_before><commit_msg>Add test case for forgot password.<commit_after>from . import TheInternetTestCase
from helium.api import start_chrome, S, get_driver, set_driver, write, click, \
Text, Link, kill_browser, wait_until
class ForgotPasswordTest(TheInternetTestCase):
def get_page(self):
return "http://the-internet.herokuapp.com/forgot_password"
def test_retrieve_password(self):
email_address = self._get_temporary_email_address()
write(email_address, into="E-mail")
click("Retrieve Password")
self.assertTrue(Text("Your e-mail's been sent!").exists())
set_driver(self.emailbox_driver)
wait_until(
self._refresh_and_check_if_exists,
timeout_secs=60, interval_secs=1
)
self.assertTrue(Text("no-reply@the-internet.herokuapp.com").exists())
kill_browser()
set_driver(self.test_case_driver)
def _get_temporary_email_address(self):
self.test_case_driver = get_driver()
start_chrome("http://temp-mail.org/")
self.emailbox_driver = get_driver()
email_address = S("#email").web_element.text
set_driver(self.test_case_driver)
return email_address
def _refresh_and_check_if_exists(self):
click("Refresh")
return Link("Forgot Password from the").exists()
|
|
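The test above switches between two browsers by hand. A hedged sketch of wrapping that juggling in a helper built only on Helium's get_driver/set_driver, so the previous driver is always restored; the helper itself is hypothetical, not part of Helium:

from helium.api import get_driver, set_driver

def with_driver(driver, action):
    """Run an action against a specific browser, then restore the previous one."""
    previous = get_driver()
    set_driver(driver)
    try:
        return action()
    finally:
        set_driver(previous)

# e.g. with_driver(emailbox_driver, lambda: Link("Forgot Password from the").exists())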
ec62de0b476f931be0fc629c4634cc76ea1acfdb
|
scripts/separate_mlsp2013_test_files.py
|
scripts/separate_mlsp2013_test_files.py
|
import os
import glob
import csv
allFiles = ["" + x for x in glob.glob("*.wav")];
trainFiles = []
with open("file2labels.csv", newline='') as csvfile:
file2labelsReader = csv.reader(csvfile);
for r in file2labelsReader:
f = r[0] + ".wav"
trainFiles.append(f)
#print(allFiles)
a = len(allFiles)
print(trainFiles)
b = len(trainFiles)
for train in trainFiles:
print("Train : ", train)
allFiles.remove(train)
print("Count : ", train, " : ", trainFiles.count(train))
c = len(allFiles)
print("All files: ", a)
print("Train files: ", b)
print("Test files: ", c)
for f in allFiles:
print("rename: ", f, " to ", os.path.join("test", f))
os.rename(f, os.path.join("test", f))
|
Add script which divides mlsp data into train/test
|
Add script which divides mlsp data into train/test
|
Python
|
mit
|
johnmartinsson/bird-species-classification,johnmartinsson/bird-species-classification
|
Add script which divides mlsp data into train/test
|
import os
import glob
import csv
allFiles = ["" + x for x in glob.glob("*.wav")];
trainFiles = []
with open("file2labels.csv", newline='') as csvfile:
file2labelsReader = csv.reader(csvfile);
for r in file2labelsReader:
f = r[0] + ".wav"
trainFiles.append(f)
#print(allFiles)
a = len(allFiles)
print(trainFiles)
b = len(trainFiles)
for train in trainFiles:
print("Train : ", train)
allFiles.remove(train)
print("Count : ", train, " : ", trainFiles.count(train))
c = len(allFiles)
print("All files: ", a)
print("Train files: ", b)
print("Test files: ", c)
for f in allFiles:
print("rename: ", f, " to ", os.path.join("test", f))
os.rename(f, os.path.join("test", f))
|
<commit_before><commit_msg>Add script which divides mlsp data into train/test<commit_after>
|
import os
import glob
import csv
allFiles = ["" + x for x in glob.glob("*.wav")];
trainFiles = []
with open("file2labels.csv", newline='') as csvfile:
file2labelsReader = csv.reader(csvfile);
for r in file2labelsReader:
f = r[0] + ".wav"
trainFiles.append(f)
#print(allFiles)
a = len(allFiles)
print(trainFiles)
b = len(trainFiles)
for train in trainFiles:
print("Train : ", train)
allFiles.remove(train)
print("Count : ", train, " : ", trainFiles.count(train))
c = len(allFiles)
print("All files: ", a)
print("Train files: ", b)
print("Test files: ", c)
for f in allFiles:
print("rename: ", f, " to ", os.path.join("test", f))
os.rename(f, os.path.join("test", f))
|
Add script which divides mlsp data into train/testimport os
import glob
import csv
allFiles = ["" + x for x in glob.glob("*.wav")];
trainFiles = []
with open("file2labels.csv", newline='') as csvfile:
file2labelsReader = csv.reader(csvfile);
for r in file2labelsReader:
f = r[0] + ".wav"
trainFiles.append(f)
#print(allFiles)
a = len(allFiles)
print(trainFiles)
b = len(trainFiles)
for train in trainFiles:
print("Train : ", train)
allFiles.remove(train)
print("Count : ", train, " : ", trainFiles.count(train))
c = len(allFiles)
print("All files: ", a)
print("Train files: ", b)
print("Test files: ", c)
for f in allFiles:
print("rename: ", f, " to ", os.path.join("test", f))
os.rename(f, os.path.join("test", f))
|
<commit_before><commit_msg>Add script which divides mlsp data into train/test<commit_after>import os
import glob
import csv
allFiles = ["" + x for x in glob.glob("*.wav")];
trainFiles = []
with open("file2labels.csv", newline='') as csvfile:
file2labelsReader = csv.reader(csvfile);
for r in file2labelsReader:
f = r[0] + ".wav"
trainFiles.append(f)
#print(allFiles)
a = len(allFiles)
print(trainFiles)
b = len(trainFiles)
for train in trainFiles:
print("Train : ", train)
allFiles.remove(train)
print("Count : ", train, " : ", trainFiles.count(train))
c = len(allFiles)
print("All files: ", a)
print("Train files: ", b)
print("Test files: ", c)
for f in allFiles:
print("rename: ", f, " to ", os.path.join("test", f))
os.rename(f, os.path.join("test", f))
|
|
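A design note on the split above: list.remove is O(n) per call and raises ValueError if a labelled file is missing on disk. A set difference expresses the same train/test split more robustly (a sketch, not the committed script):

import csv
import glob

all_files = set(glob.glob("*.wav"))
with open("file2labels.csv", newline='') as csvfile:
    train_files = {row[0] + ".wav" for row in csv.reader(csvfile)}

test_files = all_files - train_files   # everything not listed as training data
print(len(all_files), len(train_files & all_files), len(test_files))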
23ead4960cdfe5eb443ba8d624e5dfe2fb1fd9a0
|
feincms/module/medialibrary/fields.py
|
feincms/module/medialibrary/fields.py
|
from django.contrib.admin.widgets import ForeignKeyRawIdWidget
from django.db import models
from django.utils.html import escape
from django.utils.text import truncate_words
from django.utils.translation import ugettext_lazy as _
from feincms.admin.item_editor import FeinCMSInline
from feincms.module.medialibrary.models import MediaFile
from feincms.templatetags import feincms_thumbnail
__all__ = ('MediaFileForeignKey', 'ContentWithMediaFile')
class MediaFileForeignKeyRawIdWidget(ForeignKeyRawIdWidget):
def label_for_value(self, value):
key = self.rel.get_related_field().name
try:
obj = self.rel.to._default_manager.using(self.db).get(**{key: value})
label = [u' <strong>%s</strong>' % escape(truncate_words(obj, 14))]
if obj.type == 'image':
image = feincms_thumbnail.thumbnail(obj.file.name, '240x120')
label.append(u'<br /><img src="%s" alt="" style="margin:1em 0 0 10em" />' % image)
return u''.join(label)
except (ValueError, self.rel.to.DoesNotExist):
return ''
class MediaFileForeignKey(models.ForeignKey):
def formfield(self, **kwargs):
kwargs['widget'] = MediaFileForeignKeyRawIdWidget(self.rel, kwargs.get('using'))
return super(MediaFileForeignKey, self).formfield(**kwargs)
class ContentWithMediaFile(models.Model):
class feincms_item_editor_inline(FeinCMSInline):
raw_id_fields = ('file',)
file = MediaFileForeignKey(MediaFile, verbose_name=_('media file'),
related_name='+')
class Meta:
abstract = True
|
Add a MediaFileForeignKey which automatically adds thumbnail previews to the administration
|
Add a MediaFileForeignKey which automatically adds thumbnail previews to the administration
|
Python
|
bsd-3-clause
|
matthiask/django-content-editor,nickburlett/feincms,joshuajonah/feincms,feincms/feincms,mjl/feincms,joshuajonah/feincms,nickburlett/feincms,joshuajonah/feincms,mjl/feincms,matthiask/feincms2-content,pjdelport/feincms,pjdelport/feincms,nickburlett/feincms,michaelkuty/feincms,matthiask/django-content-editor,michaelkuty/feincms,pjdelport/feincms,matthiask/django-content-editor,mjl/feincms,nickburlett/feincms,matthiask/feincms2-content,matthiask/django-content-editor,joshuajonah/feincms,michaelkuty/feincms,matthiask/feincms2-content,feincms/feincms,michaelkuty/feincms,feincms/feincms
|
Add a MediaFileForeignKey which automatically adds thumbnail previews to the administration
|
from django.contrib.admin.widgets import ForeignKeyRawIdWidget
from django.db import models
from django.utils.html import escape
from django.utils.text import truncate_words
from django.utils.translation import ugettext_lazy as _
from feincms.admin.item_editor import FeinCMSInline
from feincms.module.medialibrary.models import MediaFile
from feincms.templatetags import feincms_thumbnail
__all__ = ('MediaFileForeignKey', 'ContentWithMediaFile')
class MediaFileForeignKeyRawIdWidget(ForeignKeyRawIdWidget):
def label_for_value(self, value):
key = self.rel.get_related_field().name
try:
obj = self.rel.to._default_manager.using(self.db).get(**{key: value})
label = [u' <strong>%s</strong>' % escape(truncate_words(obj, 14))]
if obj.type == 'image':
image = feincms_thumbnail.thumbnail(obj.file.name, '240x120')
label.append(u'<br /><img src="%s" alt="" style="margin:1em 0 0 10em" />' % image)
return u''.join(label)
except (ValueError, self.rel.to.DoesNotExist):
return ''
class MediaFileForeignKey(models.ForeignKey):
def formfield(self, **kwargs):
kwargs['widget'] = MediaFileForeignKeyRawIdWidget(self.rel, kwargs.get('using'))
return super(MediaFileForeignKey, self).formfield(**kwargs)
class ContentWithMediaFile(models.Model):
class feincms_item_editor_inline(FeinCMSInline):
raw_id_fields = ('file',)
file = MediaFileForeignKey(MediaFile, verbose_name=_('media file'),
related_name='+')
class Meta:
abstract = True
|
<commit_before><commit_msg>Add a MediaFileForeignKey which automatically adds thumbnail previews to the administration<commit_after>
|
from django.contrib.admin.widgets import ForeignKeyRawIdWidget
from django.db import models
from django.utils.html import escape
from django.utils.text import truncate_words
from django.utils.translation import ugettext_lazy as _
from feincms.admin.item_editor import FeinCMSInline
from feincms.module.medialibrary.models import MediaFile
from feincms.templatetags import feincms_thumbnail
__all__ = ('MediaFileForeignKey', 'ContentWithMediaFile')
class MediaFileForeignKeyRawIdWidget(ForeignKeyRawIdWidget):
def label_for_value(self, value):
key = self.rel.get_related_field().name
try:
obj = self.rel.to._default_manager.using(self.db).get(**{key: value})
label = [u' <strong>%s</strong>' % escape(truncate_words(obj, 14))]
if obj.type == 'image':
image = feincms_thumbnail.thumbnail(obj.file.name, '240x120')
label.append(u'<br /><img src="%s" alt="" style="margin:1em 0 0 10em" />' % image)
return u''.join(label)
except (ValueError, self.rel.to.DoesNotExist):
return ''
class MediaFileForeignKey(models.ForeignKey):
def formfield(self, **kwargs):
kwargs['widget'] = MediaFileForeignKeyRawIdWidget(self.rel, kwargs.get('using'))
return super(MediaFileForeignKey, self).formfield(**kwargs)
class ContentWithMediaFile(models.Model):
class feincms_item_editor_inline(FeinCMSInline):
raw_id_fields = ('file',)
file = MediaFileForeignKey(MediaFile, verbose_name=_('media file'),
related_name='+')
class Meta:
abstract = True
|
Add a MediaFileForeignKey which automatically adds thumbnail previews to the administrationfrom django.contrib.admin.widgets import ForeignKeyRawIdWidget
from django.db import models
from django.utils.html import escape
from django.utils.text import truncate_words
from django.utils.translation import ugettext_lazy as _
from feincms.admin.item_editor import FeinCMSInline
from feincms.module.medialibrary.models import MediaFile
from feincms.templatetags import feincms_thumbnail
__all__ = ('MediaFileForeignKey', 'ContentWithMediaFile')
class MediaFileForeignKeyRawIdWidget(ForeignKeyRawIdWidget):
def label_for_value(self, value):
key = self.rel.get_related_field().name
try:
obj = self.rel.to._default_manager.using(self.db).get(**{key: value})
label = [u' <strong>%s</strong>' % escape(truncate_words(obj, 14))]
if obj.type == 'image':
image = feincms_thumbnail.thumbnail(obj.file.name, '240x120')
label.append(u'<br /><img src="%s" alt="" style="margin:1em 0 0 10em" />' % image)
return u''.join(label)
except (ValueError, self.rel.to.DoesNotExist):
return ''
class MediaFileForeignKey(models.ForeignKey):
def formfield(self, **kwargs):
kwargs['widget'] = MediaFileForeignKeyRawIdWidget(self.rel, kwargs.get('using'))
return super(MediaFileForeignKey, self).formfield(**kwargs)
class ContentWithMediaFile(models.Model):
class feincms_item_editor_inline(FeinCMSInline):
raw_id_fields = ('file',)
file = MediaFileForeignKey(MediaFile, verbose_name=_('media file'),
related_name='+')
class Meta:
abstract = True
|
<commit_before><commit_msg>Add a MediaFileForeignKey which automatically adds thumbnail previews to the administration<commit_after>from django.contrib.admin.widgets import ForeignKeyRawIdWidget
from django.db import models
from django.utils.html import escape
from django.utils.text import truncate_words
from django.utils.translation import ugettext_lazy as _
from feincms.admin.item_editor import FeinCMSInline
from feincms.module.medialibrary.models import MediaFile
from feincms.templatetags import feincms_thumbnail
__all__ = ('MediaFileForeignKey', 'ContentWithMediaFile')
class MediaFileForeignKeyRawIdWidget(ForeignKeyRawIdWidget):
def label_for_value(self, value):
key = self.rel.get_related_field().name
try:
obj = self.rel.to._default_manager.using(self.db).get(**{key: value})
label = [u' <strong>%s</strong>' % escape(truncate_words(obj, 14))]
if obj.type == 'image':
image = feincms_thumbnail.thumbnail(obj.file.name, '240x120')
label.append(u'<br /><img src="%s" alt="" style="margin:1em 0 0 10em" />' % image)
return u''.join(label)
except (ValueError, self.rel.to.DoesNotExist):
return ''
class MediaFileForeignKey(models.ForeignKey):
def formfield(self, **kwargs):
kwargs['widget'] = MediaFileForeignKeyRawIdWidget(self.rel, kwargs.get('using'))
return super(MediaFileForeignKey, self).formfield(**kwargs)
class ContentWithMediaFile(models.Model):
class feincms_item_editor_inline(FeinCMSInline):
raw_id_fields = ('file',)
file = MediaFileForeignKey(MediaFile, verbose_name=_('media file'),
related_name='+')
class Meta:
abstract = True
|
|
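A hypothetical sketch of consuming the abstract base above as a FeinCMS content type; the ImageContent name and caption field are illustrative assumptions, not part of the commit:

from django.db import models
from feincms.module.page.models import Page
from feincms.module.medialibrary.fields import ContentWithMediaFile

class ImageContent(ContentWithMediaFile):
    caption = models.CharField(max_length=200, blank=True)

    class Meta:
        abstract = True

# FeinCMS content types stay abstract; they are attached to a CMS model like so:
Page.create_content_type(ImageContent)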
2b63bca267794d85544218386103d9a42530293e
|
tests/acceptance/test_dcos_agent.py
|
tests/acceptance/test_dcos_agent.py
|
import socket
from shakedown import *
def test_get_public_agents():
public_agents = get_public_agents()
assert isinstance(public_agents, list)
try:
assert socket.inet_aton(public_agents[0])
except:
assert False
def test_get_private_agents():
private_agents = get_private_agents()
assert isinstance(private_agents, list)
try:
assert socket.inet_aton(private_agents[0])
except:
assert False
def test_get_agents():
agents = get_agents()
assert isinstance(agents, list)
try:
assert socket.inet_aton(agents[0])
except:
assert False
|
Add tests for agent-based functions
|
Add tests for agent-based functions
|
Python
|
apache-2.0
|
dcos/shakedown
|
Add tests for agent-based functions
|
import socket
from shakedown import *
def test_get_public_agents():
public_agents = get_public_agents()
assert isinstance(public_agents, list)
try:
assert socket.inet_aton(public_agents[0])
except:
assert False
def test_get_private_agents():
private_agents = get_private_agents()
assert isinstance(private_agents, list)
try:
assert socket.inet_aton(private_agents[0])
except:
assert False
def test_get_agents():
agents = get_agents()
assert isinstance(agents, list)
try:
assert socket.inet_aton(agents[0])
except:
assert False
|
<commit_before><commit_msg>Add tests for agent-based functions<commit_after>
|
import socket
from shakedown import *
def test_get_public_agents():
public_agents = get_public_agents()
assert isinstance(public_agents, list)
try:
assert socket.inet_aton(public_agents[0])
except:
assert False
def test_get_private_agents():
private_agents = get_private_agents()
assert isinstance(private_agents, list)
try:
assert socket.inet_aton(private_agents[0])
except:
assert False
def test_get_agents():
agents = get_agents()
assert isinstance(agents, list)
try:
assert socket.inet_aton(agents[0])
except:
assert False
|
Add tests for agent-based functionsimport socket
from shakedown import *
def test_get_public_agents():
public_agents = get_public_agents()
assert isinstance(public_agents, list)
try:
assert socket.inet_aton(public_agents[0])
except:
assert False
def test_get_private_agents():
private_agents = get_private_agents()
assert isinstance(private_agents, list)
try:
assert socket.inet_aton(private_agents[0])
except:
assert False
def test_get_agents():
agents = get_agents()
assert isinstance(agents, list)
try:
assert socket.inet_aton(agents[0])
except:
assert False
|
<commit_before><commit_msg>Add tests for agent-based functions<commit_after>import socket
from shakedown import *
def test_get_public_agents():
public_agents = get_public_agents()
assert isinstance(public_agents, list)
try:
assert socket.inet_aton(public_agents[0])
except:
assert False
def test_get_private_agents():
private_agents = get_private_agents()
assert isinstance(private_agents, list)
try:
assert socket.inet_aton(private_agents[0])
except:
assert False
def test_get_agents():
agents = get_agents()
assert isinstance(agents, list)
try:
assert socket.inet_aton(agents[0])
except:
assert False
|
|
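The try/except/assert False blocks above can collapse into one predicate. A sketch of a small helper around socket.inet_aton, with the bare except narrowed to the errors inet_aton actually raises (helper name is hypothetical):

import socket

def is_ipv4(address):
    """Return True when the string parses as a dotted-quad IPv4 address."""
    try:
        socket.inet_aton(address)
        return True
    except (socket.error, TypeError):
        return False

assert is_ipv4("10.0.0.1")
assert not is_ipv4("not-an-address")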
bb721a008c09dc0510681cdd0faa8e98a6e60cef
|
conf_site/api/test/test_sponsor.py
|
conf_site/api/test/test_sponsor.py
|
from django.core.urlresolvers import reverse
from rest_framework import status
from symposion.sponsorship.models import Sponsor
from .base import TestBase
class TestSponsor(TestBase):
@classmethod
def setUpTestData(cls):
super(TestSponsor, cls).setUpTestData()
cls.sponsor = Sponsor.objects.first()
def test_sponsor_list_api_anonymous_user(self):
response = self.client.get(reverse('sponsor-list'))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIn(
{
'name': self.sponsor.name,
'external_url': self.sponsor.external_url,
'contact_name': self.sponsor.contact_name,
'contact_email': self.sponsor.contact_email,
'level': str(self.sponsor.level),
'absolute_url': self.sponsor.get_absolute_url(),
'annotation': self.sponsor.annotation,
},
response.data
)
def test_sponsor_detail_api_anonymous_user(self):
response = self.client.get(
reverse('sponsor-detail', args=[self.sponsor.pk])
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
{
'name': self.sponsor.name,
'external_url': self.sponsor.external_url,
'contact_name': self.sponsor.contact_name,
'contact_email': self.sponsor.contact_email,
'level': str(self.sponsor.level),
'absolute_url': self.sponsor.get_absolute_url(),
'annotation': self.sponsor.annotation,
},
response.data
)
|
Add tests against Sponsor viewset
|
Add tests against Sponsor viewset
|
Python
|
mit
|
pydata/conf_site,pydata/conf_site,pydata/conf_site
|
Add tests against Sponsor viewset
|
from django.core.urlresolvers import reverse
from rest_framework import status
from symposion.sponsorship.models import Sponsor
from .base import TestBase
class TestSponsor(TestBase):
@classmethod
def setUpTestData(cls):
super(TestSponsor, cls).setUpTestData()
cls.sponsor = Sponsor.objects.first()
def test_sponsor_list_api_anonymous_user(self):
response = self.client.get(reverse('sponsor-list'))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIn(
{
'name': self.sponsor.name,
'external_url': self.sponsor.external_url,
'contact_name': self.sponsor.contact_name,
'contact_email': self.sponsor.contact_email,
'level': str(self.sponsor.level),
'absolute_url': self.sponsor.get_absolute_url(),
'annotation': self.sponsor.annotation,
},
response.data
)
def test_sponsor_detail_api_anonymous_user(self):
response = self.client.get(
reverse('sponsor-detail', args=[self.sponsor.pk])
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
{
'name': self.sponsor.name,
'external_url': self.sponsor.external_url,
'contact_name': self.sponsor.contact_name,
'contact_email': self.sponsor.contact_email,
'level': str(self.sponsor.level),
'absolute_url': self.sponsor.get_absolute_url(),
'annotation': self.sponsor.annotation,
},
response.data
)
|
<commit_before><commit_msg>Add tests against Sponsor viewset<commit_after>
|
from django.core.urlresolvers import reverse
from rest_framework import status
from symposion.sponsorship.models import Sponsor
from .base import TestBase
class TestSponsor(TestBase):
@classmethod
def setUpTestData(cls):
super(TestSponsor, cls).setUpTestData()
cls.sponsor = Sponsor.objects.first()
def test_sponsor_list_api_anonymous_user(self):
response = self.client.get(reverse('sponsor-list'))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIn(
{
'name': self.sponsor.name,
'external_url': self.sponsor.external_url,
'contact_name': self.sponsor.contact_name,
'contact_email': self.sponsor.contact_email,
'level': str(self.sponsor.level),
'absolute_url': self.sponsor.get_absolute_url(),
'annotation': self.sponsor.annotation,
},
response.data
)
def test_sponsor_detail_api_anonymous_user(self):
response = self.client.get(
reverse('sponsor-detail', args=[self.sponsor.pk])
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
{
'name': self.sponsor.name,
'external_url': self.sponsor.external_url,
'contact_name': self.sponsor.contact_name,
'contact_email': self.sponsor.contact_email,
'level': str(self.sponsor.level),
'absolute_url': self.sponsor.get_absolute_url(),
'annotation': self.sponsor.annotation,
},
response.data
)
|
Add tests against Sponsor viewsetfrom django.core.urlresolvers import reverse
from rest_framework import status
from symposion.sponsorship.models import Sponsor
from .base import TestBase
class TestSponsor(TestBase):
@classmethod
def setUpTestData(cls):
super(TestSponsor, cls).setUpTestData()
cls.sponsor = Sponsor.objects.first()
def test_sponsor_list_api_anonymous_user(self):
response = self.client.get(reverse('sponsor-list'))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIn(
{
'name': self.sponsor.name,
'external_url': self.sponsor.external_url,
'contact_name': self.sponsor.contact_name,
'contact_email': self.sponsor.contact_email,
'level': str(self.sponsor.level),
'absolute_url': self.sponsor.get_absolute_url(),
'annotation': self.sponsor.annotation,
},
response.data
)
def test_sponsor_detail_api_anonymous_user(self):
response = self.client.get(
reverse('sponsor-detail', args=[self.sponsor.pk])
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
{
'name': self.sponsor.name,
'external_url': self.sponsor.external_url,
'contact_name': self.sponsor.contact_name,
'contact_email': self.sponsor.contact_email,
'level': str(self.sponsor.level),
'absolute_url': self.sponsor.get_absolute_url(),
'annotation': self.sponsor.annotation,
},
response.data
)
|
<commit_before><commit_msg>Add tests against Sponsor viewset<commit_after>from django.core.urlresolvers import reverse
from rest_framework import status
from symposion.sponsorship.models import Sponsor
from .base import TestBase
class TestSponsor(TestBase):
@classmethod
def setUpTestData(cls):
super(TestSponsor, cls).setUpTestData()
cls.sponsor = Sponsor.objects.first()
def test_sponsor_list_api_anonymous_user(self):
response = self.client.get(reverse('sponsor-list'))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIn(
{
'name': self.sponsor.name,
'external_url': self.sponsor.external_url,
'contact_name': self.sponsor.contact_name,
'contact_email': self.sponsor.contact_email,
'level': str(self.sponsor.level),
'absolute_url': self.sponsor.get_absolute_url(),
'annotation': self.sponsor.annotation,
},
response.data
)
def test_sponsor_detail_api_anonymous_user(self):
response = self.client.get(
reverse('sponsor-detail', args=[self.sponsor.pk])
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
{
'name': self.sponsor.name,
'external_url': self.sponsor.external_url,
'contact_name': self.sponsor.contact_name,
'contact_email': self.sponsor.contact_email,
'level': str(self.sponsor.level),
'absolute_url': self.sponsor.get_absolute_url(),
'annotation': self.sponsor.annotation,
},
response.data
)
|
|
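Both tests above spell out the same expected dict; a hypothetical helper would trim the duplication while leaving the assertions unchanged:

def expected_sponsor_payload(sponsor):
    """Build the serialized representation both API tests compare against."""
    return {
        'name': sponsor.name,
        'external_url': sponsor.external_url,
        'contact_name': sponsor.contact_name,
        'contact_email': sponsor.contact_email,
        'level': str(sponsor.level),
        'absolute_url': sponsor.get_absolute_url(),
        'annotation': sponsor.annotation,
    }

# e.g. self.assertIn(expected_sponsor_payload(self.sponsor), response.data)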
6c571c88f60a761f398054ddca3d407c6010023b
|
docker/transform-pem.py
|
docker/transform-pem.py
|
#!/usr/bin/env python
"""Script to transform letsencrypt certificate files into string that can be inserted into
environment variable for Tutum to pick up.
Put your private key in certs/privkey.pem and your certificate in certs/fullchain.pem,
then run this script in order to obtain a certificate string compatible with HAProxy and ready
for inserting into an environment variable.
"""
with open('certs/privkey.pem', 'rb') as f:
private_key = f.read().replace('\n', '\\n')
with open('certs/fullchain.pem', 'rb') as f:
certificate = f.read().replace('\n', '\\n')
print("""{}{}""".format(private_key, certificate))
|
Add script for transforming letsencrypt certificate
|
Add script for transforming letsencrypt certificate
|
Python
|
mit
|
muzhack/muzhack,muzhack/muzhack,muzhack/musitechhub,muzhack/muzhack,muzhack/musitechhub,muzhack/musitechhub,muzhack/musitechhub,muzhack/muzhack
|
Add script for transforming letsencrypt certificate
|
#!/usr/bin/env python
"""Script to transform letsencrypt certificate files into string that can be inserted into
environment variable for Tutum to pick up.
Put your private key in certs/privkey.pem and your certificate in certs/fullchain.pem,
then run this script in order to obtain a certificate string compatible with HAProxy and ready
for inserting into an environment variable.
"""
with open('certs/privkey.pem', 'rb') as f:
private_key = f.read().replace('\n', '\\n')
with open('certs/fullchain.pem', 'rb') as f:
certificate = f.read().replace('\n', '\\n')
print("""{}{}""".format(private_key, certificate))
|
<commit_before><commit_msg>Add script for transforming letsencrypt certificate<commit_after>
|
#!/usr/bin/env python
"""Script to transform letsencrypt certificate files into string that can be inserted into
environment variable for Tutum to pick up.
Put your private key in certs/privkey.pem and your certificate in certs/fullchain.pem,
then run this script in order to obtain a certificate string compatible with HAProxy and ready
for inserting into an environment variable.
"""
with open('certs/privkey.pem', 'rb') as f:
private_key = f.read().replace('\n', '\\n')
with open('certs/fullchain.pem', 'rb') as f:
certificate = f.read().replace('\n', '\\n')
print("""{}{}""".format(private_key, certificate))
|
Add script for transforming letsencrypt certificate#!/usr/bin/env python
"""Script to transform letsencrypt certificate files into string that can be inserted into
environment variable for Tutum to pick up.
Put your private key in certs/privkey.pem and your certificate in certs/fullchain.pem,
then run this script in order to obtain a certificate string compatible with HAProxy and ready
for inserting into an environment variable.
"""
with open('certs/privkey.pem', 'rb') as f:
private_key = f.read().replace('\n', '\\n')
with open('certs/fullchain.pem', 'rb') as f:
certificate = f.read().replace('\n', '\\n')
print("""{}{}""".format(private_key, certificate))
|
<commit_before><commit_msg>Add script for transforming letsencrypt certificate<commit_after>#!/usr/bin/env python
"""Script to transform letsencrypt certificate files into string that can be inserted into
environment variable for Tutum to pick up.
Put your private key in certs/privkey.pem and your certificate in certs/fullchain.pem,
then run this script in order to obtain a certificate string compatible with HAProxy and ready
for inserting into an environment variable.
"""
with open('certs/privkey.pem', 'rb') as f:
private_key = f.read().replace('\n', '\\n')
with open('certs/fullchain.pem', 'rb') as f:
certificate = f.read().replace('\n', '\\n')
print("""{}{}""".format(private_key, certificate))
|
|
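Note that the script above is Python 2 only: with 'rb' mode, f.read() returns bytes, and bytes.replace('\n', '\\n') raises TypeError on Python 3. A hedged Python 3 rewrite reading in text mode:

with open('certs/privkey.pem') as f:
    private_key = f.read().replace('\n', '\\n')
with open('certs/fullchain.pem') as f:
    certificate = f.read().replace('\n', '\\n')
print('{}{}'.format(private_key, certificate))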
fe9758c21af1870cd8c387aaf7f706bccf4be3f0
|
solutions/uri/1020/1020.py
|
solutions/uri/1020/1020.py
|
import sys
for t in sys.stdin:
t = int(t)
a = 0
m = 0
if t >= 365:
a = t // 365
t %= 365
if t >= 30:
m = t // 30
t %= 30
print(f"{a} ano(s)")
print(f"{m} mes(es)")
print(f"{t} dia(s)")
|
Solve Age in Days in python
|
Solve Age in Days in python
|
Python
|
mit
|
deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground
|
Solve Age in Days in python
|
import sys
for t in sys.stdin:
t = int(t)
a = 0
m = 0
if t >= 365:
a = t // 365
t %= 365
if t >= 30:
m = t // 30
t %= 30
print(f"{a} ano(s)")
print(f"{m} mes(es)")
print(f"{t} dia(s)")
|
<commit_before><commit_msg>Solve Age in Days in python<commit_after>
|
import sys
for t in sys.stdin:
t = int(t)
a = 0
m = 0
if t >= 365:
a = t // 365
t %= 365
if t >= 30:
m = t // 30
t %= 30
print(f"{a} ano(s)")
print(f"{m} mes(es)")
print(f"{t} dia(s)")
|
Solve Age in Days in pythonimport sys
for t in sys.stdin:
t = int(t)
a = 0
m = 0
if t >= 365:
a = t // 365
t %= 365
if t >= 30:
m = t // 30
t %= 30
print(f"{a} ano(s)")
print(f"{m} mes(es)")
print(f"{t} dia(s)")
|
<commit_before><commit_msg>Solve Age in Days in python<commit_after>import sys
for t in sys.stdin:
t = int(t)
a = 0
m = 0
if t >= 365:
a = t // 365
t %= 365
if t >= 30:
m = t // 30
t %= 30
print(f"{a} ano(s)")
print(f"{m} mes(es)")
print(f"{t} dia(s)")
|
|
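A quick worked check of the conversion in the record above (editor's sketch, not part of the commit): 400 days is 1 year of 365 days with 35 left over, which is 1 month of 30 days plus 5 days.

t = 400
a, t = divmod(t, 365)  # 1 year, 35 days remaining
m, t = divmod(t, 30)   # 1 month, 5 days remaining
assert (a, m, t) == (1, 1, 5)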
a29c10fab4e1e0006eb7334f4dd0a359ca473a23
|
test_parameters/__init__.py
|
test_parameters/__init__.py
|
# Copyright (C) 2013 by Alex Brandt <alunduil@alunduil.com>
#
# parameters is freely distributable under the terms of an MIT-style license.
# See COPYING or http://www.opensource.org/licenses/mit-license.php.
|
Add stub module for test suite.
|
Add stub module for test suite.
|
Python
|
mit
|
alunduil/crumbs
|
Add stub module for test suite.
|
# Copyright (C) 2013 by Alex Brandt <alunduil@alunduil.com>
#
# parameters is freely distributable under the terms of an MIT-style license.
# See COPYING or http://www.opensource.org/licenses/mit-license.php.
|
<commit_before><commit_msg>Add stub module for test suite.<commit_after>
|
# Copyright (C) 2013 by Alex Brandt <alunduil@alunduil.com>
#
# parameters is freely distributable under the terms of an MIT-style license.
# See COPYING or http://www.opensource.org/licenses/mit-license.php.
|
Add stub module for test suite.# Copyright (C) 2013 by Alex Brandt <alunduil@alunduil.com>
#
# parameters is freely distributable under the terms of an MIT-style license.
# See COPYING or http://www.opensource.org/licenses/mit-license.php.
|
<commit_before><commit_msg>Add stub module for test suite.<commit_after># Copyright (C) 2013 by Alex Brandt <alunduil@alunduil.com>
#
# parameters is freely distributable under the terms of an MIT-style license.
# See COPYING or http://www.opensource.org/licenses/mit-license.php.
|
|
9cce74337194e10beddf8f5f6daaa139e44e8d70
|
scripts/support/mirnas/microrna-qc.py
|
scripts/support/mirnas/microrna-qc.py
|
"""
A script for checking data integrity of the Rfam-miRBase sync.
"""
from utils import db_utils as db
def verify_thresholds():
"""
Export a file with the manually assigned thresholds for each family,
then compare with the thresholds of microRNA families in the database.
Example data:
60 RF00253
52 RF00658
57 RF00664
70 RF00666
"""
data = {}
with open('rfam-microrna-manual-thresholds.tsv') as f:
for line in f:
if 'RF' not in line:
continue
parts = line.strip().split('\t')
data[parts[1]] = float(parts[0])
print('Found {} manual thresholds'.format(len(data.keys())))
rfam_data_temp = db.fetch_mirna_families()
rfam_data = {}
for entry in rfam_data_temp:
rfam_data[entry['rfam_acc']] = entry['gathering_cutoff']
matches = 0
for rfam_acc in data.keys():
if rfam_acc == 'RF00273':
continue
if data[rfam_acc] != rfam_data[rfam_acc]:
print('{}: {} does not match {}'.format(rfam_acc, data[rfam_acc], rfam_data[rfam_acc]))
else:
matches += 1
print('Found {} matches'.format(matches))
def main():
verify_thresholds()
if __name__ == '__main__':
main()
|
Add a new script for checking data integrity
|
Add a new script for checking data integrity
|
Python
|
apache-2.0
|
Rfam/rfam-production,Rfam/rfam-production,Rfam/rfam-production
|
Add a new script for checking data integrity
|
"""
A script for checking data integrity of the Rfam-miRBase sync.
"""
from utils import db_utils as db
def verify_thresholds():
"""
Export a file with the manually assigned thresholds for each family,
then compare with the thresholds of microRNA families in the database.
Example data:
60 RF00253
52 RF00658
57 RF00664
70 RF00666
"""
data = {}
with open('rfam-microrna-manual-thresholds.tsv') as f:
for line in f:
if 'RF' not in line:
continue
parts = line.strip().split('\t')
data[parts[1]] = float(parts[0])
print('Found {} manual thresholds'.format(len(data.keys())))
rfam_data_temp = db.fetch_mirna_families()
rfam_data = {}
for entry in rfam_data_temp:
rfam_data[entry['rfam_acc']] = entry['gathering_cutoff']
matches = 0
for rfam_acc in data.keys():
if rfam_acc == 'RF00273':
continue
if data[rfam_acc] != rfam_data[rfam_acc]:
print('{}: {} does not match {}'.format(rfam_acc, data[rfam_acc], rfam_data[rfam_acc]))
else:
matches += 1
print('Found {} matches'.format(matches))
def main():
verify_thresholds()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a new script for checking data integrity<commit_after>
|
"""
A script for checking data integrity of the Rfam-miRBase sync.
"""
from utils import db_utils as db
def verify_thresholds():
"""
Export a file with the manually assigned thresholds for each family,
then compare with the thresholds of microRNA families in the database.
Example data:
60 RF00253
52 RF00658
57 RF00664
70 RF00666
"""
data = {}
with open('rfam-microrna-manual-thresholds.tsv') as f:
for line in f:
if 'RF' not in line:
continue
parts = line.strip().split('\t')
data[parts[1]] = float(parts[0])
print('Found {} manual thresholds'.format(len(data.keys())))
rfam_data_temp = db.fetch_mirna_families()
rfam_data = {}
for entry in rfam_data_temp:
rfam_data[entry['rfam_acc']] = entry['gathering_cutoff']
matches = 0
for rfam_acc in data.keys():
if rfam_acc == 'RF00273':
continue
if data[rfam_acc] != rfam_data[rfam_acc]:
print('{}: {} does not match {}'.format(rfam_acc, data[rfam_acc], rfam_data[rfam_acc]))
else:
matches += 1
print('Found {} matches'.format(matches))
def main():
verify_thresholds()
if __name__ == '__main__':
main()
|
Add a new script for checking data integrity"""
A script for checking data integrity of the Rfam-miRBase sync.
"""
from utils import db_utils as db
def verify_thresholds():
"""
Export a file with the manually assigned thresholds for each family,
then compare with the thresholds of microRNA families in the database.
Example data:
60 RF00253
52 RF00658
57 RF00664
70 RF00666
"""
data = {}
with open('rfam-microrna-manual-thresholds.tsv') as f:
for line in f:
if 'RF' not in line:
continue
parts = line.strip().split('\t')
data[parts[1]] = float(parts[0])
print('Found {} manual thresholds'.format(len(data.keys())))
rfam_data_temp = db.fetch_mirna_families()
rfam_data = {}
for entry in rfam_data_temp:
rfam_data[entry['rfam_acc']] = entry['gathering_cutoff']
matches = 0
for rfam_acc in data.keys():
if rfam_acc == 'RF00273':
continue
if data[rfam_acc] != rfam_data[rfam_acc]:
print('{}: {} does not match {}'.format(rfam_acc, data[rfam_acc], rfam_data[rfam_acc]))
else:
matches += 1
print('Found {} matches'.format(matches))
def main():
verify_thresholds()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a new script for checking data integrity<commit_after>"""
A script for checking data integrity of the Rfam-miRBase sync.
"""
from utils import db_utils as db
def verify_thresholds():
"""
Export a file with the manually assigned thresholds for each family,
then compare with the thresholds of microRNA families in the database.
Example data:
60 RF00253
52 RF00658
57 RF00664
70 RF00666
"""
data = {}
with open('rfam-microrna-manual-thresholds.tsv') as f:
for line in f:
if 'RF' not in line:
continue
parts = line.strip().split('\t')
data[parts[1]] = float(parts[0])
print('Found {} manual thresholds'.format(len(data.keys())))
rfam_data_temp = db.fetch_mirna_families()
rfam_data = {}
for entry in rfam_data_temp:
rfam_data[entry['rfam_acc']] = entry['gathering_cutoff']
matches = 0
for rfam_acc in data.keys():
if rfam_acc == 'RF00273':
continue
if data[rfam_acc] != rfam_data[rfam_acc]:
print('{}: {} does not match {}'.format(rfam_acc, data[rfam_acc], rfam_data[rfam_acc]))
else:
matches += 1
print('Found {} matches'.format(matches))
def main():
verify_thresholds()
if __name__ == '__main__':
main()
|
|
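The heart of the script above is the threshold-file parser; a standalone sketch of just that step, run on the example rows quoted in the docstring (hedged editor's illustration, not the committed code):

rows = ['60\tRF00253', '52\tRF00658']
data = {}
for line in rows:
    if 'RF' not in line:
        continue
    # Each row is "<threshold>\t<accession>"; key the dict by accession.
    threshold, rfam_acc = line.strip().split('\t')
    data[rfam_acc] = float(threshold)
assert data == {'RF00253': 60.0, 'RF00658': 52.0}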
b0925bcdf1fc50cf41764b6eb24431661d655820
|
examples/pipes-reply.py
|
examples/pipes-reply.py
|
import sys
import code
from diesel import Application, Pipe, until
def readcb():
print 'Diesel Console'
while 1:
sys.stdout.write('>>> ')
sys.stdout.flush()
input = yield until("\n")
ret = code.compile_command(input)
out = eval(ret)
if out:
print 'Out: %r' % out
a = Application()
a.add_loop(Pipe(sys.stdin, readcb))
a.run()
|
Add example of using stdin pipe and creating a very simple repl
|
Add example of using stdin pipe and creating a very simple repl
|
Python
|
bsd-3-clause
|
dieseldev/diesel
|
Add example of using stdin pipe and creating a very simple repl
|
import sys
import code
from diesel import Application, Pipe, until
def readcb():
print 'Diesel Console'
while 1:
sys.stdout.write('>>> ')
sys.stdout.flush()
input = yield until("\n")
ret = code.compile_command(input)
out = eval(ret)
if out:
print 'Out: %r' % out
a = Application()
a.add_loop(Pipe(sys.stdin, readcb))
a.run()
|
<commit_before><commit_msg>Add example of using stdin pipe and creating a very simple repl<commit_after>
|
import sys
import code
from diesel import Application, Pipe, until
def readcb():
print 'Diesel Console'
while 1:
sys.stdout.write('>>> ')
sys.stdout.flush()
input = yield until("\n")
ret = code.compile_command(input)
out = eval(ret)
if out:
print 'Out: %r' % out
a = Application()
a.add_loop(Pipe(sys.stdin, readcb))
a.run()
|
Add example of using stdin pipe and creating a very simple replimport sys
import code
from diesel import Application, Pipe, until
def readcb():
print 'Diesel Console'
while 1:
sys.stdout.write('>>> ')
sys.stdout.flush()
input = yield until("\n")
ret = code.compile_command(input)
out = eval(ret)
if out:
print 'Out: %r' % out
a = Application()
a.add_loop(Pipe(sys.stdin, readcb))
a.run()
|
<commit_before><commit_msg>Add example of using stdin pipe and creating a very simple repl<commit_after>import sys
import code
from diesel import Application, Pipe, until
def readcb():
print 'Diesel Console'
while 1:
sys.stdout.write('>>> ')
sys.stdout.flush()
input = yield until("\n")
ret = code.compile_command(input)
out = eval(ret)
if out:
print 'Out: %r' % out
a = Application()
a.add_loop(Pipe(sys.stdin, readcb))
a.run()
|
|
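A caveat for readers of the REPL example above: code.compile_command() returns None while the input is an incomplete statement, and eval(None) raises a TypeError, so the loop only survives complete single-line expressions. A hedged sketch of a more defensive callback, in the same Python 2 style as the record (the None guard is the editor's addition, not part of the commit):

import sys
import code
from diesel import until

def readcb():
    print 'Diesel Console'
    while 1:
        sys.stdout.write('>>> ')
        sys.stdout.flush()
        line = yield until("\n")
        compiled = code.compile_command(line)
        if compiled is None:
            continue  # incomplete statement; a fuller REPL would buffer it
        result = eval(compiled)
        if result is not None:
            print 'Out: %r' % result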
9989744da92fe16f2bb84e3db55282eba250ad8f
|
web/tests/test_templates.py
|
web/tests/test_templates.py
|
from django.test import TestCase
from django.template.loader import render_to_string
class TestTemplates(TestCase):
def test_comparecard(self):
# no code and no comment
rendered_template_1 = render_to_string("comparecard.html", {}).strip()
self.assertEquals(
rendered_template_1,
"<div class=\"card\">\n"
" <div class=\"card-body\">\n"
" \n"
" \n"
" </div>\n"
"</div>"
)
# code (marked as safe), no comment
rendered_template_2 = render_to_string(
"comparecard.html", {"code": "<b>I am bold!</b>"}
).strip()
self.assertEquals(
rendered_template_2,
"<div class=\"card\">\n"
" <div class=\"card-body\">\n"
" \n"
" <div class=\"syntax\"><b>I am bold!</b></div>\n"
" \n"
" \n"
" </div>\n"
"</div>"
)
# no code, comment (with markdown format)
rendered_template_3 = render_to_string(
"comparecard.html", {"comment": "I am **bold** and *italic*, `let x = 1`."}
).strip()
self.assertEquals(
rendered_template_3,
"<div class=\"card\">\n"
" <div class=\"card-body\">\n"
" \n"
" \n"
" <div>\n"
" I am <strong>bold</strong> and <em>italic</em>, <code>let x = 1</code>.\n"
" </div>\n"
" \n"
" </div>\n"
"</div>"
)
# code and comment
rendered_template_4 = render_to_string(
"comparecard.html",
{
"code": "<b>I am bold!</b>",
"comment": "I am **bold** and *italic*, `let x = 1`."
}
).strip()
self.assertEquals(
rendered_template_4,
"<div class=\"card\">\n"
" <div class=\"card-body\">\n"
" \n"
" <div class=\"syntax\"><b>I am bold!</b></div>\n"
" \n"
" \n"
" <div>\n"
" I am <strong>bold</strong> and <em>italic</em>, <code>let x = 1</code>.\n"
" </div>\n"
" \n"
" </div>\n"
"</div>"
)
|
Add test file for templates with initial test for comparecard.html
|
Add test file for templates with initial test for comparecard.html
|
Python
|
agpl-3.0
|
codethesaurus/codethesaur.us,codethesaurus/codethesaur.us
|
Add test file for templates with initial test for comparecard.html
|
from django.test import TestCase
from django.template.loader import render_to_string
class TestTemplates(TestCase):
def test_comparecard(self):
# no code and no comment
rendered_template_1 = render_to_string("comparecard.html", {}).strip()
self.assertEquals(
rendered_template_1,
"<div class=\"card\">\n"
" <div class=\"card-body\">\n"
" \n"
" \n"
" </div>\n"
"</div>"
)
# code (marked as safe), no comment
rendered_template_2 = render_to_string(
"comparecard.html", {"code": "<b>I am bold!</b>"}
).strip()
self.assertEquals(
rendered_template_2,
"<div class=\"card\">\n"
" <div class=\"card-body\">\n"
" \n"
" <div class=\"syntax\"><b>I am bold!</b></div>\n"
" \n"
" \n"
" </div>\n"
"</div>"
)
# no code, comment (with markdown format)
rendered_template_3 = render_to_string(
"comparecard.html", {"comment": "I am **bold** and *italic*, `let x = 1`."}
).strip()
self.assertEquals(
rendered_template_3,
"<div class=\"card\">\n"
" <div class=\"card-body\">\n"
" \n"
" \n"
" <div>\n"
" I am <strong>bold</strong> and <em>italic</em>, <code>let x = 1</code>.\n"
" </div>\n"
" \n"
" </div>\n"
"</div>"
)
# code and comment
rendered_template_4 = render_to_string(
"comparecard.html",
{
"code": "<b>I am bold!</b>",
"comment": "I am **bold** and *italic*, `let x = 1`."
}
).strip()
self.assertEquals(
rendered_template_4,
"<div class=\"card\">\n"
" <div class=\"card-body\">\n"
" \n"
" <div class=\"syntax\"><b>I am bold!</b></div>\n"
" \n"
" \n"
" <div>\n"
" I am <strong>bold</strong> and <em>italic</em>, <code>let x = 1</code>.\n"
" </div>\n"
" \n"
" </div>\n"
"</div>"
)
|
<commit_before><commit_msg>Add test file for templates with initial test for comparecard.html<commit_after>
|
from django.test import TestCase
from django.template.loader import render_to_string
class TestTemplates(TestCase):
def test_comparecard(self):
# no code and no comment
rendered_template_1 = render_to_string("comparecard.html", {}).strip()
self.assertEquals(
rendered_template_1,
"<div class=\"card\">\n"
" <div class=\"card-body\">\n"
" \n"
" \n"
" </div>\n"
"</div>"
)
# code (marked as safe), no comment
rendered_template_2 = render_to_string(
"comparecard.html", {"code": "<b>I am bold!</b>"}
).strip()
self.assertEquals(
rendered_template_2,
"<div class=\"card\">\n"
" <div class=\"card-body\">\n"
" \n"
" <div class=\"syntax\"><b>I am bold!</b></div>\n"
" \n"
" \n"
" </div>\n"
"</div>"
)
# no code, comment (with markdown format)
rendered_template_3 = render_to_string(
"comparecard.html", {"comment": "I am **bold** and *italic*, `let x = 1`."}
).strip()
self.assertEquals(
rendered_template_3,
"<div class=\"card\">\n"
" <div class=\"card-body\">\n"
" \n"
" \n"
" <div>\n"
" I am <strong>bold</strong> and <em>italic</em>, <code>let x = 1</code>.\n"
" </div>\n"
" \n"
" </div>\n"
"</div>"
)
# code and comment
rendered_template_4 = render_to_string(
"comparecard.html",
{
"code": "<b>I am bold!</b>",
"comment": "I am **bold** and *italic*, `let x = 1`."
}
).strip()
self.assertEquals(
rendered_template_4,
"<div class=\"card\">\n"
" <div class=\"card-body\">\n"
" \n"
" <div class=\"syntax\"><b>I am bold!</b></div>\n"
" \n"
" \n"
" <div>\n"
" I am <strong>bold</strong> and <em>italic</em>, <code>let x = 1</code>.\n"
" </div>\n"
" \n"
" </div>\n"
"</div>"
)
|
Add test file for templates with initial test for comparecard.htmlfrom django.test import TestCase
from django.template.loader import render_to_string
class TestTemplates(TestCase):
def test_comparecard(self):
# no code and no comment
rendered_template_1 = render_to_string("comparecard.html", {}).strip()
self.assertEquals(
rendered_template_1,
"<div class=\"card\">\n"
" <div class=\"card-body\">\n"
" \n"
" \n"
" </div>\n"
"</div>"
)
# code (marked as safe), no comment
rendered_template_2 = render_to_string(
"comparecard.html", {"code": "<b>I am bold!</b>"}
).strip()
self.assertEquals(
rendered_template_2,
"<div class=\"card\">\n"
" <div class=\"card-body\">\n"
" \n"
" <div class=\"syntax\"><b>I am bold!</b></div>\n"
" \n"
" \n"
" </div>\n"
"</div>"
)
# no code, comment (with markdown format)
rendered_template_3 = render_to_string(
"comparecard.html", {"comment": "I am **bold** and *italic*, `let x = 1`."}
).strip()
self.assertEquals(
rendered_template_3,
"<div class=\"card\">\n"
" <div class=\"card-body\">\n"
" \n"
" \n"
" <div>\n"
" I am <strong>bold</strong> and <em>italic</em>, <code>let x = 1</code>.\n"
" </div>\n"
" \n"
" </div>\n"
"</div>"
)
# code and comment
rendered_template_4 = render_to_string(
"comparecard.html",
{
"code": "<b>I am bold!</b>",
"comment": "I am **bold** and *italic*, `let x = 1`."
}
).strip()
self.assertEquals(
rendered_template_4,
"<div class=\"card\">\n"
" <div class=\"card-body\">\n"
" \n"
" <div class=\"syntax\"><b>I am bold!</b></div>\n"
" \n"
" \n"
" <div>\n"
" I am <strong>bold</strong> and <em>italic</em>, <code>let x = 1</code>.\n"
" </div>\n"
" \n"
" </div>\n"
"</div>"
)
|
<commit_before><commit_msg>Add test file for templates with initial test for comparecard.html<commit_after>from django.test import TestCase
from django.template.loader import render_to_string
class TestTemplates(TestCase):
def test_comparecard(self):
# no code and no comment
rendered_template_1 = render_to_string("comparecard.html", {}).strip()
self.assertEquals(
rendered_template_1,
"<div class=\"card\">\n"
" <div class=\"card-body\">\n"
" \n"
" \n"
" </div>\n"
"</div>"
)
# code (marked as safe), no comment
rendered_template_2 = render_to_string(
"comparecard.html", {"code": "<b>I am bold!</b>"}
).strip()
self.assertEquals(
rendered_template_2,
"<div class=\"card\">\n"
" <div class=\"card-body\">\n"
" \n"
" <div class=\"syntax\"><b>I am bold!</b></div>\n"
" \n"
" \n"
" </div>\n"
"</div>"
)
# no code, comment (with markdown format)
rendered_template_3 = render_to_string(
"comparecard.html", {"comment": "I am **bold** and *italic*, `let x = 1`."}
).strip()
self.assertEquals(
rendered_template_3,
"<div class=\"card\">\n"
" <div class=\"card-body\">\n"
" \n"
" \n"
" <div>\n"
" I am <strong>bold</strong> and <em>italic</em>, <code>let x = 1</code>.\n"
" </div>\n"
" \n"
" </div>\n"
"</div>"
)
# code and comment
rendered_template_4 = render_to_string(
"comparecard.html",
{
"code": "<b>I am bold!</b>",
"comment": "I am **bold** and *italic*, `let x = 1`."
}
).strip()
self.assertEquals(
rendered_template_4,
"<div class=\"card\">\n"
" <div class=\"card-body\">\n"
" \n"
" <div class=\"syntax\"><b>I am bold!</b></div>\n"
" \n"
" \n"
" <div>\n"
" I am <strong>bold</strong> and <em>italic</em>, <code>let x = 1</code>.\n"
" </div>\n"
" \n"
" </div>\n"
"</div>"
)
|
|
441396ff44ab816ef26816eb3f8e0ec61851e9ba
|
ensemble/ctf/tests/test_editor.py
|
ensemble/ctf/tests/test_editor.py
|
import unittest
from enable.testing import EnableTestAssistant
from enable.window import Window
from ensemble.ctf.editor import CtfEditor
def get_color(starting_color=None):
if starting_color:
return starting_color
return (0.0, 1.0, 0.0)
def get_filename(action='save'):
return 'temp.json'
class TestCtfEditor(EnableTestAssistant, unittest.TestCase):
def setUp(self):
tool = CtfEditor(bounds=(400, 100),
prompt_color_selection=get_color,
prompt_file_selection=get_filename)
tool.add_function_node(tool.opacities, (0.5, 0.5))
tool.add_function_node(tool.colors, (0.25, 1.0, 0.0, 0.0))
self.tool = tool
self.window = Window(None, size=(100, 100), component=tool)
self.window.control.show()
def test_mouse_drag_alpha(self):
tool = self.tool
self.press_move_release(tool, [(50, 50), (51, 50), (52, 50),
(53, 50), (54, 50), (55, 50)],
window=self.window)
def test_mouse_drag_color(self):
tool = self.tool
self.press_move_release(tool, [(25, 10), (25, 10), (40, 10)],
window=self.window)
if __name__ == '__main__':
unittest.main()
|
Add some unit tests for CtfEditor
|
Add some unit tests for CtfEditor
|
Python
|
bsd-3-clause
|
dmsurti/ensemble
|
Add some unit tests for CtfEditor
|
import unittest
from enable.testing import EnableTestAssistant
from enable.window import Window
from ensemble.ctf.editor import CtfEditor
def get_color(starting_color=None):
if starting_color:
return starting_color
return (0.0, 1.0, 0.0)
def get_filename(action='save'):
return 'temp.json'
class TestCtfEditor(EnableTestAssistant, unittest.TestCase):
def setUp(self):
tool = CtfEditor(bounds=(400, 100),
prompt_color_selection=get_color,
prompt_file_selection=get_filename)
tool.add_function_node(tool.opacities, (0.5, 0.5))
tool.add_function_node(tool.colors, (0.25, 1.0, 0.0, 0.0))
self.tool = tool
self.window = Window(None, size=(100, 100), component=tool)
self.window.control.show()
def test_mouse_drag_alpha(self):
tool = self.tool
self.press_move_release(tool, [(50, 50), (51, 50), (52, 50),
(53, 50), (54, 50), (55, 50)],
window=self.window)
def test_mouse_drag_color(self):
tool = self.tool
self.press_move_release(tool, [(25, 10), (25, 10), (40, 10)],
window=self.window)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add some unit tests for CtfEditor<commit_after>
|
import unittest
from enable.testing import EnableTestAssistant
from enable.window import Window
from ensemble.ctf.editor import CtfEditor
def get_color(starting_color=None):
if starting_color:
return starting_color
return (0.0, 1.0, 0.0)
def get_filename(action='save'):
return 'temp.json'
class TestCtfEditor(EnableTestAssistant, unittest.TestCase):
def setUp(self):
tool = CtfEditor(bounds=(400, 100),
prompt_color_selection=get_color,
prompt_file_selection=get_filename)
tool.add_function_node(tool.opacities, (0.5, 0.5))
tool.add_function_node(tool.colors, (0.25, 1.0, 0.0, 0.0))
self.tool = tool
self.window = Window(None, size=(100, 100), component=tool)
self.window.control.show()
def test_mouse_drag_alpha(self):
tool = self.tool
self.press_move_release(tool, [(50, 50), (51, 50), (52, 50),
(53, 50), (54, 50), (55, 50)],
window=self.window)
def test_mouse_drag_color(self):
tool = self.tool
self.press_move_release(tool, [(25, 10), (25, 10), (40, 10)],
window=self.window)
if __name__ == '__main__':
unittest.main()
|
Add some unit tests for CtfEditorimport unittest
from enable.testing import EnableTestAssistant
from enable.window import Window
from ensemble.ctf.editor import CtfEditor
def get_color(starting_color=None):
if starting_color:
return starting_color
return (0.0, 1.0, 0.0)
def get_filename(action='save'):
return 'temp.json'
class TestCtfEditor(EnableTestAssistant, unittest.TestCase):
def setUp(self):
tool = CtfEditor(bounds=(400, 100),
prompt_color_selection=get_color,
prompt_file_selection=get_filename)
tool.add_function_node(tool.opacities, (0.5, 0.5))
tool.add_function_node(tool.colors, (0.25, 1.0, 0.0, 0.0))
self.tool = tool
self.window = Window(None, size=(100, 100), component=tool)
self.window.control.show()
def test_mouse_drag_alpha(self):
tool = self.tool
self.press_move_release(tool, [(50, 50), (51, 50), (52, 50),
(53, 50), (54, 50), (55, 50)],
window=self.window)
def test_mouse_drag_color(self):
tool = self.tool
self.press_move_release(tool, [(25, 10), (25, 10), (40, 10)],
window=self.window)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add some unit tests for CtfEditor<commit_after>import unittest
from enable.testing import EnableTestAssistant
from enable.window import Window
from ensemble.ctf.editor import CtfEditor
def get_color(starting_color=None):
if starting_color:
return starting_color
return (0.0, 1.0, 0.0)
def get_filename(action='save'):
return 'temp.json'
class TestCtfEditor(EnableTestAssistant, unittest.TestCase):
def setUp(self):
tool = CtfEditor(bounds=(400, 100),
prompt_color_selection=get_color,
prompt_file_selection=get_filename)
tool.add_function_node(tool.opacities, (0.5, 0.5))
tool.add_function_node(tool.colors, (0.25, 1.0, 0.0, 0.0))
self.tool = tool
self.window = Window(None, size=(100, 100), component=tool)
self.window.control.show()
def test_mouse_drag_alpha(self):
tool = self.tool
self.press_move_release(tool, [(50, 50), (51, 50), (52, 50),
(53, 50), (54, 50), (55, 50)],
window=self.window)
def test_mouse_drag_color(self):
tool = self.tool
self.press_move_release(tool, [(25, 10), (25, 10), (40, 10)],
window=self.window)
if __name__ == '__main__':
unittest.main()
|
|
a7e4a476936d47435806b9a79877653e25b2a624
|
tests/test_serialize.py
|
tests/test_serialize.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Check if pickle can serialize seabird's data objects
"""
import os
from glob import glob
import pickle
import numpy as np
from seabird import cnv
def test_serialize_CNV():
""" Serialize CNV
"""
datadir = os.path.join(os.path.dirname(__file__), 'test_data')
for f in glob(os.path.join(datadir, "*.cnv.OK")):
profile = cnv.CNV(open(f).read())
profile2 = pickle.loads(pickle.dumps(profile))
assert profile.attributes == profile2.attributes
assert (profile.data == profile.data)
def test_serialize_fCNV():
""" Serialize fCNV
"""
datadir = os.path.join(os.path.dirname(__file__), 'test_data')
for f in glob(os.path.join(datadir, "*.cnv.OK")):
profile = cnv.fCNV(f)
profile2 = pickle.loads(pickle.dumps(profile))
assert profile.attributes == profile2.attributes
assert (profile.data == profile.data)
|
Test if pickle can serialize CNV/fCNV
|
Test if pickle can serialize CNV/fCNV
This is crucial for using seabird objects in multiprocessing.
|
Python
|
bsd-3-clause
|
castelao/seabird
|
Test if pickle can serialize CNV/fCNV
This is crucial for using seabird objects in multiprocessing.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Check if pickle can serialize seabird's data objects
"""
import os
from glob import glob
import pickle
import numpy as np
from seabird import cnv
def test_serialize_CNV():
""" Serialize CNV
"""
datadir = os.path.join(os.path.dirname(__file__), 'test_data')
for f in glob(os.path.join(datadir, "*.cnv.OK")):
profile = cnv.CNV(open(f).read())
profile2 = pickle.loads(pickle.dumps(profile))
assert profile.attributes == profile2.attributes
assert (profile.data == profile.data)
def test_serialize_fCNV():
""" Serialize fCNV
"""
datadir = os.path.join(os.path.dirname(__file__), 'test_data')
for f in glob(os.path.join(datadir, "*.cnv.OK")):
profile = cnv.fCNV(f)
profile2 = pickle.loads(pickle.dumps(profile))
assert profile.attributes == profile2.attributes
assert (profile.data == profile.data)
|
<commit_before><commit_msg>Test if pickle can serialize CNV/fCNV
This is crucial for using seabird objects in multiprocessing.<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Check if pickle can serialize seabird's data objects
"""
import os
from glob import glob
import pickle
import numpy as np
from seabird import cnv
def test_serialize_CNV():
""" Serialize CNV
"""
datadir = os.path.join(os.path.dirname(__file__), 'test_data')
for f in glob(os.path.join(datadir, "*.cnv.OK")):
profile = cnv.CNV(open(f).read())
profile2 = pickle.loads(pickle.dumps(profile))
assert profile.attributes == profile2.attributes
assert (profile.data == profile.data)
def test_serialize_fCNV():
""" Serialize fCNV
"""
datadir = os.path.join(os.path.dirname(__file__), 'test_data')
for f in glob(os.path.join(datadir, "*.cnv.OK")):
profile = cnv.fCNV(f)
profile2 = pickle.loads(pickle.dumps(profile))
assert profile.attributes == profile2.attributes
assert (profile.data == profile.data)
|
Test if pickle can serialize CNV/fCNV
This is crucial for using seabird objects in multiprocessing.#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Check if pickle can serialize seabird's data objects
"""
import os
from glob import glob
import pickle
import numpy as np
from seabird import cnv
def test_serialize_CNV():
""" Serialize CNV
"""
datadir = os.path.join(os.path.dirname(__file__), 'test_data')
for f in glob(os.path.join(datadir, "*.cnv.OK")):
profile = cnv.CNV(open(f).read())
profile2 = pickle.loads(pickle.dumps(profile))
assert profile.attributes == profile2.attributes
assert (profile.data == profile.data)
def test_serialize_fCNV():
""" Serialize fCNV
"""
datadir = os.path.join(os.path.dirname(__file__), 'test_data')
for f in glob(os.path.join(datadir, "*.cnv.OK")):
profile = cnv.fCNV(f)
profile2 = pickle.loads(pickle.dumps(profile))
assert profile.attributes == profile2.attributes
assert (profile.data == profile.data)
|
<commit_before><commit_msg>Test if pickle can serialize CNV/fCNV
This is crucial for using seabird objects in multiprocessing.<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Check if pickle can serialize seabird's data objects
"""
import os
from glob import glob
import pickle
import numpy as np
from seabird import cnv
def test_serialize_CNV():
""" Serialize CNV
"""
datadir = os.path.join(os.path.dirname(__file__), 'test_data')
for f in glob(os.path.join(datadir, "*.cnv.OK")):
profile = cnv.CNV(open(f).read())
profile2 = pickle.loads(pickle.dumps(profile))
assert profile.attributes == profile2.attributes
assert (profile.data == profile.data)
def test_serialize_fCNV():
""" Serialize fCNV
"""
datadir = os.path.join(os.path.dirname(__file__), 'test_data')
for f in glob(os.path.join(datadir, "*.cnv.OK")):
profile = cnv.fCNV(f)
profile2 = pickle.loads(pickle.dumps(profile))
assert profile.attributes == profile2.attributes
assert (profile.data == profile.data)
|
|
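The commit message's motivation — pickle support makes seabird objects usable across processes — can be illustrated with a short hedged sketch; the filenames and pool size below are placeholders, not taken from the source:

import multiprocessing

from seabird import cnv

def load_profile(filename):
    # Each CNV object returned here is pickled on its way
    # back from the worker to the parent process.
    return cnv.fCNV(filename)

if __name__ == '__main__':
    filenames = ['cast1.cnv', 'cast2.cnv']  # placeholder paths
    pool = multiprocessing.Pool(2)
    profiles = pool.map(load_profile, filenames)
    pool.close()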
77d253afd27503767958ddc2ef41b8732d04eace
|
apps/domain/src/main/core/manager/database_manager.py
|
apps/domain/src/main/core/manager/database_manager.py
|
from typing import Dict
from typing import Type
from typing import Union
from typing import List
from ..database import BaseModel, db
class DatabaseManager:
def register(self, **kwargs) -> BaseModel:
"""Register e new object into the database.
Args:
parameters : List of object parameters.
Returns:
object: Database Object
"""
_obj = self._schema(**kwargs)
self.db.session.add(_obj)
self.db.session.commit()
return _obj
def query(self, **kwargs) -> Union[None, BaseModel]:
"""Query db objects filtering by parameters
Args:
parameters : List of parameters used to filter.
"""
objects = self.db.session.query(self._schema).filter_by(**kwargs).all()
return objects
def all(self) -> List[BaseModel]:
return list(self.db.session.query(self._schema).all())
def delete(self, **kwargs):
"""Delete an object from the database.
Args:
parameters: Parameters used to filter the object.
"""
object_to_delete = self.query(**kwargs)
self.db.session.delete(object_to_delete)
self.db.session.commit()
def modify(self, query, values):
"""Modifies one or many records."""
self.db.session.query(self._schema).filter_by(**query).update(values)
self.db.session.commit()
def contain(self, **kwargs) -> bool:
objects = self.db.session.query(self._schema).filter_by(**kwargs).all()
return len(objects) != 0
def __len__(self) -> int:
return self.db.session.query(self._schema).count()
|
ADD Abstract Database Manager class
|
ADD Abstract Database Manager class
|
Python
|
apache-2.0
|
OpenMined/PySyft,OpenMined/PySyft,OpenMined/PySyft,OpenMined/PySyft
|
ADD Abstract Database Manager class
|
from typing import Dict
from typing import Type
from typing import Union
from typing import List
from ..database import BaseModel, db
class DatabaseManager:
def register(self, **kwargs) -> BaseModel:
"""Register e new object into the database.
Args:
parameters : List of object parameters.
Returns:
object: Database Object
"""
_obj = self._schema(**kwargs)
self.db.session.add(_obj)
self.db.session.commit()
return _obj
def query(self, **kwargs) -> Union[None, BaseModel]:
"""Query db objects filtering by parameters
Args:
parameters : List of parameters used to filter.
"""
objects = self.db.session.query(self._schema).filter_by(**kwargs).all()
return objects
def all(self) -> List[BaseModel]:
return list(self.db.session.query(self._schema).all())
def delete(self, **kwargs):
"""Delete an object from the database.
Args:
parameters: Parameters used to filter the object.
"""
object_to_delete = self.query(**kwargs)
self.db.session.delete(object_to_delete)
self.db.session.commit()
def modify(self, query, values):
"""Modifies one or many records."""
self.db.session.query(self._schema).filter_by(**query).update(values)
self.db.session.commit()
def contain(self, **kwargs) -> bool:
objects = self.db.session.query(self._schema).filter_by(**kwargs).all()
return len(objects) != 0
def __len__(self) -> int:
return self.db.session.query(self._schema).count()
|
<commit_before><commit_msg>ADD Abstract Database Manager class<commit_after>
|
from typing import Dict
from typing import Type
from typing import Union
from typing import List
from ..database import BaseModel, db
class DatabaseManager:
def register(self, **kwargs) -> BaseModel:
"""Register e new object into the database.
Args:
parameters : List of object parameters.
Returns:
object: Database Object
"""
_obj = self._schema(**kwargs)
self.db.session.add(_obj)
self.db.session.commit()
return _obj
def query(self, **kwargs) -> Union[None, BaseModel]:
"""Query db objects filtering by parameters
Args:
parameters : List of parameters used to filter.
"""
objects = self.db.session.query(self._schema).filter_by(**kwargs).all()
return objects
def all(self) -> List[BaseModel]:
return list(self.db.session.query(self._schema).all())
def delete(self, **kwargs):
"""Delete an object from the database.
Args:
parameters: Parameters used to filter the object.
"""
object_to_delete = self.query(**kwargs)
self.db.session.delete(object_to_delete)
self.db.session.commit()
def modify(self, query, values):
"""Modifies one or many records."""
self.db.session.query(self._schema).filter_by(**query).update(values)
self.db.session.commit()
def contain(self, **kwargs) -> bool:
objects = self.db.session.query(self._schema).filter_by(**kwargs).all()
return len(objects) != 0
def __len__(self) -> int:
return self.db.session.query(self._schema).count()
|
ADD Abstract Database Manager classfrom typing import Dict
from typing import Type
from typing import Union
from typing import List
from ..database import BaseModel, db
class DatabaseManager:
def register(self, **kwargs) -> BaseModel:
"""Register e new object into the database.
Args:
parameters : List of object parameters.
Returns:
object: Database Object
"""
_obj = self._schema(**kwargs)
self.db.session.add(_obj)
self.db.session.commit()
return _obj
def query(self, **kwargs) -> Union[None, BaseModel]:
"""Query db objects filtering by parameters
Args:
parameters : List of parameters used to filter.
"""
objects = self.db.session.query(self._schema).filter_by(**kwargs).all()
return objects
def all(self) -> List[BaseModel]:
return list(self.db.session.query(self._schema).all())
def delete(self, **kwargs):
"""Delete an object from the database.
Args:
parameters: Parameters used to filter the object.
"""
object_to_delete = self.query(**kwargs)
self.db.session.delete(object_to_delete)
self.db.session.commit()
def modify(self, query, values):
"""Modifies one or many records."""
self.db.session.query(self._schema).filter_by(**query).update(values)
self.db.session.commit()
def contain(self, **kwargs) -> bool:
objects = self.db.session.query(self._schema).filter_by(**kwargs).all()
return len(objects) != 0
def __len__(self) -> int:
return self.db.session.query(self._schema).count()
|
<commit_before><commit_msg>ADD Abstract Database Manager class<commit_after>from typing import Dict
from typing import Type
from typing import Union
from typing import List
from ..database import BaseModel, db
class DatabaseManager:
def register(self, **kwargs) -> BaseModel:
"""Register e new object into the database.
Args:
parameters : List of object parameters.
Returns:
object: Database Object
"""
_obj = self._schema(**kwargs)
self.db.session.add(_obj)
self.db.session.commit()
return _obj
def query(self, **kwargs) -> Union[None, BaseModel]:
"""Query db objects filtering by parameters
Args:
parameters : List of parameters used to filter.
"""
objects = self.db.session.query(self._schema).filter_by(**kwargs).all()
return objects
def all(self) -> List[BaseModel]:
return list(self.db.session.query(self._schema).all())
def delete(self, **kwargs):
"""Delete an object from the database.
Args:
parameters: Parameters used to filter the object.
"""
object_to_delete = self.query(**kwargs)
self.db.session.delete(object_to_delete)
self.db.session.commit()
def modify(self, query, values):
"""Modifies one or many records."""
self.db.session.query(self._schema).filter_by(**query).update(values)
self.db.session.commit()
def contain(self, **kwargs) -> bool:
objects = self.db.session.query(self._schema).filter_by(**kwargs).all()
return len(objects) != 0
def __len__(self) -> int:
return self.db.session.query(self._schema).count()
|
|
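One behavior worth flagging in the record above: query() ends in filter_by(**kwargs).all() and therefore returns a list, so delete() hands a list to session.delete(), which SQLAlchemy rejects. A hedged sketch of a delete() that removes each matched row individually (editor's rewrite, not the committed code):

def delete(self, **kwargs):
    """Delete every object matching the given filter parameters."""
    for obj in self.db.session.query(self._schema).filter_by(**kwargs).all():
        self.db.session.delete(obj)
    self.db.session.commit()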
0fae3ec1526e2f2c566d137389d978b566756d16
|
TWLight/resources/management/commands/migrate_tags_to_new_tags.py
|
TWLight/resources/management/commands/migrate_tags_to_new_tags.py
|
from django.core.management.base import BaseCommand
from TWLight.resources.models import Partner
class Command(BaseCommand):
help = "Migrates content from the tags column to the new_tags column"
def handle(self, *args, **options):
partners = Partner.objects.all()
for partner in partners:
new_tags_dict = {}
tag_names = []
tags = partner.tags.all()
for tag in tags:
if tag.name == "sciences" or tag.name == "social":
tag_name = "social-sciences_tag"
else:
tag_name = "{tag_name}_tag".format(tag_name=tag.name)
if tag_name not in tag_names:
tag_names.append(tag_name)
new_tags_dict["tags"] = tag_names
partner.new_tags = new_tags_dict
partner.save()
|
Add management command to migrate tags to new_tags field
|
Add management command to migrate tags to new_tags field
|
Python
|
mit
|
WikipediaLibrary/TWLight,WikipediaLibrary/TWLight,WikipediaLibrary/TWLight,WikipediaLibrary/TWLight,WikipediaLibrary/TWLight
|
Add management command to migrate tags to new_tags field
|
from django.core.management.base import BaseCommand
from TWLight.resources.models import Partner
class Command(BaseCommand):
help = "Migrates content from the tags column to the new_tags column"
def handle(self, *args, **options):
partners = Partner.objects.all()
for partner in partners:
new_tags_dict = {}
tag_names = []
tags = partner.tags.all()
for tag in tags:
if tag.name == "sciences" or tag.name == "social":
tag_name = "social-sciences_tag"
else:
tag_name = "{tag_name}_tag".format(tag_name=tag.name)
if tag_name not in tag_names:
tag_names.append(tag_name)
new_tags_dict["tags"] = tag_names
partner.new_tags = new_tags_dict
partner.save()
|
<commit_before><commit_msg>Add management command to migrate tags to new_tags field<commit_after>
|
from django.core.management.base import BaseCommand
from TWLight.resources.models import Partner
class Command(BaseCommand):
help = "Migrates content from the tags column to the new_tags column"
def handle(self, *args, **options):
partners = Partner.objects.all()
for partner in partners:
new_tags_dict = {}
tag_names = []
tags = partner.tags.all()
for tag in tags:
if tag.name == "sciences" or tag.name == "social":
tag_name = "social-sciences_tag"
else:
tag_name = "{tag_name}_tag".format(tag_name=tag.name)
if tag_name not in tag_names:
tag_names.append(tag_name)
new_tags_dict["tags"] = tag_names
partner.new_tags = new_tags_dict
partner.save()
|
Add management command to migrate tags to new_tags fieldfrom django.core.management.base import BaseCommand
from TWLight.resources.models import Partner
class Command(BaseCommand):
help = "Migrates content from the tags column to the new_tags column"
def handle(self, *args, **options):
partners = Partner.objects.all()
for partner in partners:
new_tags_dict = {}
tag_names = []
tags = partner.tags.all()
for tag in tags:
if tag.name == "sciences" or tag.name == "social":
tag_name = "social-sciences_tag"
else:
tag_name = "{tag_name}_tag".format(tag_name=tag.name)
if tag_name not in tag_names:
tag_names.append(tag_name)
new_tags_dict["tags"] = tag_names
partner.new_tags = new_tags_dict
partner.save()
|
<commit_before><commit_msg>Add management command to migrate tags to new_tags field<commit_after>from django.core.management.base import BaseCommand
from TWLight.resources.models import Partner
class Command(BaseCommand):
help = "Migrates content from the tags column to the new_tags column"
def handle(self, *args, **options):
partners = Partner.objects.all()
for partner in partners:
new_tags_dict = {}
tag_names = []
tags = partner.tags.all()
for tag in tags:
if tag.name == "sciences" or tag.name == "social":
tag_name = "social-sciences_tag"
else:
tag_name = "{tag_name}_tag".format(tag_name=tag.name)
if tag_name not in tag_names:
tag_names.append(tag_name)
new_tags_dict["tags"] = tag_names
partner.new_tags = new_tags_dict
partner.save()
|
|
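A worked example of the mapping above (tag names invented for illustration): a partner tagged ['law', 'social', 'sciences'] ends up with new_tags == {'tags': ['law_tag', 'social-sciences_tag']}, because both halves of the split 'social sciences' tag collapse to one entry and the duplicate is skipped.

names = ['law', 'social', 'sciences']  # invented example input
tag_names = []
for name in names:
    # Both fragments of the split multi-word tag map to one canonical name.
    tag_name = 'social-sciences_tag' if name in ('social', 'sciences') else '{}_tag'.format(name)
    if tag_name not in tag_names:
        tag_names.append(tag_name)
assert tag_names == ['law_tag', 'social-sciences_tag']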
2718b1accc110be6e85983c6ffc29d8aba0d72cf
|
analysis/sbx-patch-bug.py
|
analysis/sbx-patch-bug.py
|
#!/usr/bin/env python
# vim: set sw=2 ts=2 softtabstop=2 expandtab:
"""
This script is designed to patch old symbooglix
results where hitting speculative paths was incorrectly
treated as BOUND_HIT
"""
import argparse
import os
import logging
import sys
import yaml
from br_util import FinalResultType, classifyResult
try:
# Try to use libyaml which is faster
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
# fall back on python implementation
from yaml import Loader, Dumper
def main(args):
parser = argparse.ArgumentParser()
parser.add_argument("-l","--log-level",type=str, default="info", dest="log_level", choices=['debug','info','warning','error'])
parser.add_argument('input_yml', type=argparse.FileType('r'))
parser.add_argument('output_yml', type=str)
pargs = parser.parse_args(args)
logLevel = getattr(logging, pargs.log_level.upper(),None)
logging.basicConfig(level=logLevel)
if os.path.exists(pargs.output_yml):
logging.error('Refusing to overwrite "{}"'.format(pargs.output_yml))
return 1
results = yaml.load(pargs.input_yml, Loader=Loader)
assert isinstance(results, list)
if len(results) == 0:
logging.error('Result list is empty')
return 1
# Count
rewriteCount = 0
rTypeToResultMap = {}
for rType in FinalResultType:
rTypeToResultMap[rType] = []
for r in results:
rType = classifyResult(r)
logging.debug('Classified {} as {}'.format(r['program'], rType))
if rType == FinalResultType.BOUND_HIT:
logging.info('Classified {} as {}'.format(r['program'], rType))
logging.info('Doing rewrite')
rewriteCount += 1
# Sanity checks
assert r['failed'] == False
assert r['bound_hit'] == True
assert r['speculative_paths_nb'] == True
# Set new values
r['failed'] = True
r['bound_hit'] = False
assert classifyResult(r) == FinalResultType.UNKNOWN
rTypeToResultMap[classifyResult(r)].append(r)
print("Rewrite count: {}".format(rewriteCount))
print("Total: {}".format(len(results)))
for rType in FinalResultType:
name = rType.name
resultList = rTypeToResultMap[rType]
print("# of {}: {} ({:.2f}%)".format(name, len(resultList),
100*float(len(resultList))/len(results)))
# Write result out
with open(pargs.output_yml, 'w') as f:
yamlText = yaml.dump(results,
default_flow_style=False,
Dumper=Dumper)
f.write(yamlText)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
Add script to patch old symbooglix results when boogie-runner classified hitting speculative paths as BOUND_HIT. This was a bad idea so this script changes those results to UNKNOWN but note we have the ``speculative_paths_nb`` attribute so that can be used to find the results where symbooglix hit speculative paths.
|
Add script to patch old symbooglix results when boogie-runner classified
hitting speculative paths as BOUND_HIT. This was a bad idea so this
script changes those results to UNKNOWN but note we have the
``speculative_paths_nb`` attribute so that can be used to find the
results where symbooglix hit speculative paths.
|
Python
|
bsd-3-clause
|
symbooglix/boogie-runner,symbooglix/boogie-runner
|
Add script to patch old symbooglix results when boogie-runner classified
hitting speculative paths as BOUND_HIT. This was a bad idea so this
script changes those results to UNKNOWN but note we have the
``speculative_paths_nb`` attribute so that can be used to find the
results where symbooglix hit speculative paths.
|
#!/usr/bin/env python
# vim: set sw=2 ts=2 softtabstop=2 expandtab:
"""
This script is designed to patch old symbooglix
results where hitting speculative paths was incorrectly
treated as BOUND_HIT
"""
import argparse
import os
import logging
import sys
import yaml
from br_util import FinalResultType, classifyResult
try:
# Try to use libyaml which is faster
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
# fall back on python implementation
from yaml import Loader, Dumper
def main(args):
parser = argparse.ArgumentParser()
parser.add_argument("-l","--log-level",type=str, default="info", dest="log_level", choices=['debug','info','warning','error'])
parser.add_argument('input_yml', type=argparse.FileType('r'))
parser.add_argument('output_yml', type=str)
pargs = parser.parse_args(args)
logLevel = getattr(logging, pargs.log_level.upper(),None)
logging.basicConfig(level=logLevel)
if os.path.exists(pargs.output_yml):
logging.error('Refusing to overwrite "{}"'.format(pargs.output_yml))
return 1
results = yaml.load(pargs.input_yml, Loader=Loader)
assert isinstance(results, list)
if len(results) == 0:
logging.error('Result list is empty')
return 1
# Count
rewriteCount = 0
rTypeToResultMap = {}
for rType in FinalResultType:
rTypeToResultMap[rType] = []
for r in results:
rType = classifyResult(r)
logging.debug('Classified {} as {}'.format(r['program'], rType))
if rType == FinalResultType.BOUND_HIT:
logging.info('Classified {} as {}'.format(r['program'], rType))
logging.info('Doing rewrite')
rewriteCount += 1
# Sanity checks
assert r['failed'] == False
assert r['bound_hit'] == True
assert r['speculative_paths_nb'] == True
# Set new values
r['failed'] = True
r['bound_hit'] = False
assert classifyResult(r) == FinalResultType.UNKNOWN
rTypeToResultMap[classifyResult(r)].append(r)
print("Rewrite count: {}".format(rewriteCount))
print("Total: {}".format(len(results)))
for rType in FinalResultType:
name = rType.name
resultList = rTypeToResultMap[rType]
print("# of {}: {} ({:.2f}%)".format(name, len(resultList),
100*float(len(resultList))/len(results)))
# Write result out
with open(pargs.output_yml, 'w') as f:
yamlText = yaml.dump(results,
default_flow_style=False,
Dumper=Dumper)
f.write(yamlText)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
<commit_before><commit_msg>Add script to patch old symbooglix results when boogie-runner classified
hitting speculative paths as BOUND_HIT. This was a bad idea so this
script changes those results to UNKNOWN but note we have the
``speculative_paths_nb`` attribute so that can be used to find the
results where symbooglix hit speculative paths.<commit_after>
|
#!/usr/bin/env python
# vim: set sw=2 ts=2 softtabstop=2 expandtab:
"""
This script is designed to patch old symbooglix
results where hitting speculative paths was incorrectly
treated as BOUND_HIT
"""
import argparse
import os
import logging
import sys
import yaml
from br_util import FinalResultType, classifyResult
try:
# Try to use libyaml which is faster
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
# fall back on python implementation
from yaml import Loader, Dumper
def main(args):
parser = argparse.ArgumentParser()
parser.add_argument("-l","--log-level",type=str, default="info", dest="log_level", choices=['debug','info','warning','error'])
parser.add_argument('input_yml', type=argparse.FileType('r'))
parser.add_argument('output_yml', type=str)
pargs = parser.parse_args(args)
logLevel = getattr(logging, pargs.log_level.upper(),None)
logging.basicConfig(level=logLevel)
if os.path.exists(pargs.output_yml):
logging.error('Refusing to overwrite "{}"'.format(pargs.output_yml))
return 1
results = yaml.load(pargs.input_yml, Loader=Loader)
assert isinstance(results, list)
if len(results) == 0:
logging.error('Result list is empty')
return 1
# Count
rewriteCount = 0
rTypeToResultMap = {}
for rType in FinalResultType:
rTypeToResultMap[rType] = []
for r in results:
rType = classifyResult(r)
logging.debug('Classified {} as {}'.format(r['program'], rType))
if rType == FinalResultType.BOUND_HIT:
logging.info('Classified {} as {}'.format(r['program'], rType))
logging.info('Doing rewrite')
rewriteCount += 1
# Sanity checks
assert r['failed'] == False
assert r['bound_hit'] == True
assert r['speculative_paths_nb'] == True
# Set new values
r['failed'] = True
r['bound_hit'] = False
assert classifyResult(r) == FinalResultType.UNKNOWN
rTypeToResultMap[classifyResult(r)].append(r)
print("Rewrite count: {}".format(rewriteCount))
print("Total: {}".format(len(results)))
for rType in FinalResultType:
name = rType.name
resultList = rTypeToResultMap[rType]
print("# of {}: {} ({:.2f}%)".format(name, len(resultList),
100*float(len(resultList))/len(results)))
# Write result out
with open(pargs.output_yml, 'w') as f:
yamlText = yaml.dump(results,
default_flow_style=False,
Dumper=Dumper)
f.write(yamlText)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
Add script to patch old symbooglix results when boogie-runner classified
hitting speculative paths as BOUND_HIT. This was a bad idea so this
script changes those results to UNKNOWN but note we have the
``speculative_paths_nb`` attribute so that can be used to find the
results where symbooglix hit speculative paths.#!/usr/bin/env python
# vim: set sw=2 ts=2 softtabstop=2 expandtab:
"""
This script is designed to patch old symbooglix
results where hitting speculative paths was incorrectly
treated as BOUND_HIT
"""
import argparse
import os
import logging
import sys
import yaml
from br_util import FinalResultType, classifyResult
try:
# Try to use libyaml which is faster
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
# fall back on python implementation
from yaml import Loader, Dumper
def main(args):
parser = argparse.ArgumentParser()
parser.add_argument("-l","--log-level",type=str, default="info", dest="log_level", choices=['debug','info','warning','error'])
parser.add_argument('input_yml', type=argparse.FileType('r'))
parser.add_argument('output_yml', type=str)
pargs = parser.parse_args(args)
logLevel = getattr(logging, pargs.log_level.upper(),None)
logging.basicConfig(level=logLevel)
if os.path.exists(pargs.output_yml):
logging.error('Refusing to overwrite "{}"'.format(pargs.output_yml))
return 1
results = yaml.load(pargs.input_yml, Loader=Loader)
assert isinstance(results, list)
if len(results) == 0:
logging.error('Result list is empty')
return 1
# Count
rewriteCount = 0
rTypeToResultMap = {}
for rType in FinalResultType:
rTypeToResultMap[rType] = []
for r in results:
rType = classifyResult(r)
logging.debug('Classified {} as {}'.format(r['program'], rType))
if rType == FinalResultType.BOUND_HIT:
logging.info('Classified {} as {}'.format(r['program'], rType))
logging.info('Doing rewrite')
rewriteCount += 1
# Sanity checks
assert r['failed'] == False
assert r['bound_hit'] == True
assert r['speculative_paths_nb'] == True
# Set new values
r['failed'] = True
r['bound_hit'] = False
assert classifyResult(r) == FinalResultType.UNKNOWN
rTypeToResultMap[classifyResult(r)].append(r)
print("Rewrite count: {}".format(rewriteCount))
print("Total: {}".format(len(results)))
for rType in FinalResultType:
name = rType.name
resultList = rTypeToResultMap[rType]
print("# of {}: {} ({:.2f}%)".format(name, len(resultList),
100*float(len(resultList))/len(results)))
# Write result out
with open(pargs.output_yml, 'w') as f:
yamlText = yaml.dump(results,
default_flow_style=False,
Dumper=Dumper)
f.write(yamlText)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
<commit_before><commit_msg>Add script to patch old symbooglix results when boogie-runner classified
hitting speculative paths as BOUND_HIT. This was a bad idea so this
script changes those results to UNKNOWN but note we have the
``speculative_paths_nb`` attribute so that can be used to find the
results where symbooglix hit speculative paths.<commit_after>#!/usr/bin/env python
# vim: set sw=2 ts=2 softtabstop=2 expandtab:
"""
This script is designed to patch old symbooglix
results where hitting speculative paths was incorrectly
treated as BOUND_HIT
"""
import argparse
import os
import logging
import sys
import yaml
from br_util import FinalResultType, classifyResult
try:
# Try to use libyaml which is faster
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
# fall back on python implementation
from yaml import Loader, Dumper
def main(args):
parser = argparse.ArgumentParser()
parser.add_argument("-l","--log-level",type=str, default="info", dest="log_level", choices=['debug','info','warning','error'])
parser.add_argument('input_yml', type=argparse.FileType('r'))
parser.add_argument('output_yml', type=str)
pargs = parser.parse_args(args)
logLevel = getattr(logging, pargs.log_level.upper(),None)
logging.basicConfig(level=logLevel)
if os.path.exists(pargs.output_yml):
logging.error('Refusing to overwrite "{}"'.format(pargs.output_yml))
return 1
results = yaml.load(pargs.input_yml, Loader=Loader)
assert isinstance(results, list)
if len(results) == 0:
logging.error('Result list is empty')
return 1
# Count
rewriteCount = 0
rTypeToResultMap = {}
for rType in FinalResultType:
rTypeToResultMap[rType] = []
for r in results:
rType = classifyResult(r)
logging.debug('Classified {} as {}'.format(r['program'], rType))
if rType == FinalResultType.BOUND_HIT:
logging.info('Classified {} as {}'.format(r['program'], rType))
logging.info('Doing rewrite')
rewriteCount += 1
# Sanity checks
assert r['failed'] == False
assert r['bound_hit'] == True
assert r['speculative_paths_nb'] == True
# Set new values
r['failed'] = True
r['bound_hit'] = False
assert classifyResult(r) == FinalResultType.UNKNOWN
rTypeToResultMap[classifyResult(r)].append(r)
print("Rewrite count: {}".format(rewriteCount))
print("Total: {}".format(len(results)))
for rType in FinalResultType:
name = rType.name
resultList = rTypeToResultMap[rType]
print("# of {}: {} ({:.2f}%)".format(name, len(resultList),
100*float(len(resultList))/len(results)))
# Write result out
with open(pargs.output_yml, 'w') as f:
yamlText = yaml.dump(results,
default_flow_style=False,
Dumper=Dumper)
f.write(yamlText)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
|
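The rewrite performed by the script above amounts to flipping two flags on each affected record; a minimal before/after illustration, with field values invented to satisfy the script's own sanity checks:

r = {'program': 'a.bpl', 'failed': False,
     'bound_hit': True, 'speculative_paths_nb': True}
# After the patch, the result no longer counts as BOUND_HIT ...
r['failed'] = True
r['bound_hit'] = False
# ... and classifyResult(r) should now report FinalResultType.UNKNOWN, while
# speculative_paths_nb still records that speculative paths were hit.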
eed235542f5b53245e36d3e07857f57eba4ad136
|
backend/projectfiles/MavenProjectFile.py
|
backend/projectfiles/MavenProjectFile.py
|
from backend.projectfiles import GenericProjectFile
from bs4 import BeautifulSoup
class MavenProjectFile(GenericProjectFile):
""" Maven project file implementation to extract dependencies """
def extract(self):
dependencies = []
root = BeautifulSoup(self.result.text)
for dependency in root.find_all('dependency'):
dependencies.append({"g": dependency.groupid.text,
"a": dependency.artifactid.text,
"v": dependency.version.text})
return dependencies
|
Add project file class for Maven
|
Add project file class for Maven
|
Python
|
apache-2.0
|
karllindmark/IsYourProjectUpToDate,karllindmark/IsYourProjectUpToDate
|
Add project file class for Maven
|
from backend.projectfiles import GenericProjectFile
from bs4 import BeautifulSoup
class MavenProjectFile(GenericProjectFile):
""" Maven project file implementation to extract dependencies """
def extract(self):
dependencies = []
root = BeautifulSoup(self.result.text)
for dependency in root.find_all('dependency'):
dependencies.append({"g": dependency.groupid.text,
"a": dependency.artifactid.text,
"v": dependency.version.text})
return dependencies
|
<commit_before><commit_msg>Add project file class for Maven<commit_after>
|
from backend.projectfiles import GenericProjectFile
from bs4 import BeautifulSoup
class MavenProjectFile(GenericProjectFile):
""" Maven project file implementation to extract dependencies """
def extract(self):
dependencies = []
root = BeautifulSoup(self.result.text)
for dependency in root.find_all('dependency'):
dependencies.append({"g": dependency.groupid.text,
"a": dependency.artifactid.text,
"v": dependency.version.text})
return dependencies
|
Add project file class for Maven
from backend.projectfiles import GenericProjectFile
from bs4 import BeautifulSoup
class MavenProjectFile(GenericProjectFile):
""" Maven project file implementation to extract dependencies """
def extract(self):
dependencies = []
root = BeautifulSoup(self.result.text)
for dependency in root.find_all('dependency'):
dependencies.append({"g": dependency.groupid.text,
"a": dependency.artifactid.text,
"v": dependency.version.text})
return dependencies
|
<commit_before><commit_msg>Add project file class for Maven<commit_after>from backend.projectfiles import GenericProjectFile
from bs4 import BeautifulSoup
class MavenProjectFile(GenericProjectFile):
""" Maven project file implementation to extract dependencies """
def extract(self):
dependencies = []
root = BeautifulSoup(self.result.text)
for dependency in root.find_all('dependency'):
dependencies.append({"g": dependency.groupid.text,
"a": dependency.artifactid.text,
"v": dependency.version.text})
return dependencies
|
|
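For context, here is how that extraction behaves on a small pom.xml snippet, outside the GenericProjectFile plumbing. bs4's default html.parser lower-cases tag names, which is why extract() reads `groupid`/`artifactid` rather than the camelCase Maven tags (a sketch, not part of the repo):

from bs4 import BeautifulSoup

POM = """<project><dependencies>
  <dependency>
    <groupId>junit</groupId>
    <artifactId>junit</artifactId>
    <version>4.11</version>
  </dependency>
</dependencies></project>"""

root = BeautifulSoup(POM, 'html.parser')  # html.parser lower-cases tags
deps = [{"g": d.groupid.text, "a": d.artifactid.text, "v": d.version.text}
        for d in root.find_all('dependency')]
print(deps)  # [{'g': 'junit', 'a': 'junit', 'v': '4.11'}]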
4162271bf3c96188c43dab98f26c216347853ead
|
braubuddy/tests/thermometer/test_auto.py
|
braubuddy/tests/thermometer/test_auto.py
|
"""
Braubuddy Dummy thermometer unit tests.
"""
import unittest
from mock import patch, call, MagicMock
from braubuddy.thermometer import auto
from braubuddy.thermometer import dummy
from braubuddy.thermometer import ds18b20_gpio
from braubuddy.thermometer import temper_usb
from braubuddy.thermometer import DeviceError
from braubuddy.thermometer import ReadError
class TestAuto(unittest.TestCase):
@patch('braubuddy.thermometer.ds18b20_gpio.ds18b20')
@patch('braubuddy.thermometer.temper_usb.temperusb')
def test_dummy_returned_if_no_devices(self, mk_temperusb, mk_ds18b20):
"""Dummy thermometer is created if no real thermometers discovered."""
mk_ds18b20.DS18B20 = MagicMock(side_effect = Exception('Some Error'))
mk_temperusb.TemperHandler.return_value.get_devices.return_value = []
thermometer = auto.AutoThermometer()
self.assertIsInstance(thermometer, dummy.DummyThermometer)
|
Add test for dummy thermometer.
|
Add test for dummy thermometer.
|
Python
|
bsd-3-clause
|
amorphic/braubuddy,amorphic/braubuddy,amorphic/braubuddy
|
Add test for dummy thermometer.
|
"""
Braubuddy Dummy thermometer unit tests.
"""
import unittest
from mock import patch, call, MagicMock
from braubuddy.thermometer import auto
from braubuddy.thermometer import dummy
from braubuddy.thermometer import ds18b20_gpio
from braubuddy.thermometer import temper_usb
from braubuddy.thermometer import DeviceError
from braubuddy.thermometer import ReadError
class TestAuto(unittest.TestCase):
@patch('braubuddy.thermometer.ds18b20_gpio.ds18b20')
@patch('braubuddy.thermometer.temper_usb.temperusb')
def test_dummy_returned_if_no_devices(self, mk_temperusb, mk_ds18b20):
"""Dummy thermometer is created if no real thermometers discovered."""
mk_ds18b20.DS18B20 = MagicMock(side_effect = Exception('Some Error'))
mk_temperusb.TemperHandler.return_value.get_devices.return_value = []
thermometer = auto.AutoThermometer()
self.assertIsInstance(thermometer, dummy.DummyThermometer)
|
<commit_before><commit_msg>Add test from dummy thermometer.<commit_after>
|
"""
Braubuddy Dummy thermometer unit tests.
"""
import unittest
from mock import patch, call, MagicMock
from braubuddy.thermometer import auto
from braubuddy.thermometer import dummy
from braubuddy.thermometer import ds18b20_gpio
from braubuddy.thermometer import temper_usb
from braubuddy.thermometer import DeviceError
from braubuddy.thermometer import ReadError
class TestAuto(unittest.TestCase):
@patch('braubuddy.thermometer.ds18b20_gpio.ds18b20')
@patch('braubuddy.thermometer.temper_usb.temperusb')
def test_dummy_returned_if_no_devices(self, mk_temperusb, mk_ds18b20):
"""Dummy thermometer is created if no real thermometers discovered."""
mk_ds18b20.DS18B20 = MagicMock(side_effect = Exception('Some Error'))
mk_temperusb.TemperHandler.return_value.get_devices.return_value = []
thermometer = auto.AutoThermometer()
self.assertIsInstance(thermometer, dummy.DummyThermometer)
|
Add test from dummy thermometer."""
Braubuddy Dummy thermometer unit tests.
"""
import unittest
from mock import patch, call, MagicMock
from braubuddy.thermometer import auto
from braubuddy.thermometer import dummy
from braubuddy.thermometer import ds18b20_gpio
from braubuddy.thermometer import temper_usb
from braubuddy.thermometer import DeviceError
from braubuddy.thermometer import ReadError
class TestAuto(unittest.TestCase):
@patch('braubuddy.thermometer.ds18b20_gpio.ds18b20')
@patch('braubuddy.thermometer.temper_usb.temperusb')
def test_dummy_returned_if_no_devices(self, mk_temperusb, mk_ds18b20):
"""Dummy thermometer is created if no real thermometers discovered."""
mk_ds18b20.DS18B20 = MagicMock(side_effect = Exception('Some Error'))
mk_temperusb.TemperHandler.return_value.get_devices.return_value = []
thermometer = auto.AutoThermometer()
self.assertIsInstance(thermometer, dummy.DummyThermometer)
|
<commit_before><commit_msg>Add test from dummy thermometer.<commit_after>"""
Braubuddy Dummy thermometer unit tests.
"""
import unittest
from mock import patch, call, MagicMock
from braubuddy.thermometer import auto
from braubuddy.thermometer import dummy
from braubuddy.thermometer import ds18b20_gpio
from braubuddy.thermometer import temper_usb
from braubuddy.thermometer import DeviceError
from braubuddy.thermometer import ReadError
class TestAuto(unittest.TestCase):
@patch('braubuddy.thermometer.ds18b20_gpio.ds18b20')
@patch('braubuddy.thermometer.temper_usb.temperusb')
def test_dummy_returned_if_no_devices(self, mk_temperusb, mk_ds18b20):
"""Dummy thermometer is created if no real thermometers discovered."""
mk_ds18b20.DS18B20 = MagicMock(side_effect = Exception('Some Error'))
mk_temperusb.TemperHandler.return_value.get_devices.return_value = []
thermometer = auto.AutoThermometer()
self.assertIsInstance(thermometer, dummy.DummyThermometer)
|
|
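The pattern under test is probe-then-fallback: try each real thermometer backend, and construct a dummy when every probe fails or finds nothing. A dependency-free sketch of that shape (not braubuddy's actual AutoThermometer, whose internals are assumed):

class DummyThermometer(object):
    def get_temp(self):
        return 20.0  # fixed, fake reading

def discover(probes):
    # Try each real-device factory in turn; fall back to the dummy when
    # every probe raises or reports no device.
    for probe in probes:
        try:
            device = probe()
            if device is not None:
                return device
        except Exception:
            continue
    return DummyThermometer()

def broken_probe():
    raise Exception('Some Error')

thermometer = discover([broken_probe, lambda: None])
assert isinstance(thermometer, DummyThermometer)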
4cdc120fbf654a6ce43bdb455ce89f7524ef9cd4
|
images/demo/ipython_notebook_config.py
|
images/demo/ipython_notebook_config.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Configuration file for ipython-notebook.
c = get_config()
c.NotebookApp.ip = '*'
c.NotebookApp.open_browser = False
c.NotebookApp.port = 8888
# Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headers sent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
c.NotebookApp.trust_xheaders = True
# Supply overrides for the tornado.web.Application that the IPython notebook
# uses.
c.NotebookApp.tornado_settings = {
'template_path':['/srv/ga/', '/srv/ipython/IPython/html',
'/srv/ipython/IPython/html/templates']
}
|
Configure the IPython notebook user settings
|
Configure the IPython notebook user settings
|
Python
|
bsd-3-clause
|
marscher/tmpnb,willjharmer/tmpnb,marscher/tmpnb,ianabc/tmpnb,willjharmer/tmpnb,parente/tmpnb,betatim/tmpnb,iamjakob/tmpnb,jupyter/tmpnb,ianabc/tmpnb,iamjakob/tmpnb,captainsafia/tmpnb,cannin/tmpnb,zischwartz/tmpnb,malev/tmpnb,ianabc/tmpnb,ianabc/tmpnb,parente/tmpnb,parente/tmpnb,jupyter/tmpnb,betatim/tmpnb,rgbkrk/tmpnb,marscher/tmpnb,parente/tmpnb,rgbkrk/tmpnb,captainsafia/tmpnb,marscher/tmpnb,cannin/tmpnb,cannin/tmpnb,jupyter/tmpnb,iamjakob/tmpnb,rgbkrk/tmpnb,malev/tmpnb,malev/tmpnb,cannin/tmpnb,jupyter/tmpnb,willjharmer/tmpnb,captainsafia/tmpnb,zischwartz/tmpnb,zischwartz/tmpnb,betatim/tmpnb
|
Configure the IPython notebook user settings
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Configuration file for ipython-notebook.
c = get_config()
c.NotebookApp.ip = '*'
c.NotebookApp.open_browser = False
c.NotebookApp.port = 8888
# Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headers sent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
c.NotebookApp.trust_xheaders = True
# Supply overrides for the tornado.web.Application that the IPython notebook
# uses.
c.NotebookApp.tornado_settings = {
'template_path':['/srv/ga/', '/srv/ipython/IPython/html',
'/srv/ipython/IPython/html/templates']
}
|
<commit_before><commit_msg>Configure the IPython notebook user settings<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Configuration file for ipython-notebook.
c = get_config()
c.NotebookApp.ip = '*'
c.NotebookApp.open_browser = False
c.NotebookApp.port = 8888
# Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headers sent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
c.NotebookApp.trust_xheaders = True
# Supply overrides for the tornado.web.Application that the IPython notebook
# uses.
c.NotebookApp.tornado_settings = {
'template_path':['/srv/ga/', '/srv/ipython/IPython/html',
'/srv/ipython/IPython/html/templates']
}
|
Configure the IPython notebook user settings
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Configuration file for ipython-notebook.
c = get_config()
c.NotebookApp.ip = '*'
c.NotebookApp.open_browser = False
c.NotebookApp.port = 8888
# Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headers sent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
c.NotebookApp.trust_xheaders = True
# Supply overrides for the tornado.web.Application that the IPython notebook
# uses.
c.NotebookApp.tornado_settings = {
'template_path':['/srv/ga/', '/srv/ipython/IPython/html',
'/srv/ipython/IPython/html/templates']
}
|
<commit_before><commit_msg>Configure the IPython notebook user settings<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Configuration file for ipython-notebook.
c = get_config()
c.NotebookApp.ip = '*'
c.NotebookApp.open_browser = False
c.NotebookApp.port = 8888
# Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headers sent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
c.NotebookApp.trust_xheaders = True
# Supply overrides for the tornado.web.Application that the IPython notebook
# uses.
c.NotebookApp.tornado_settings = {
'template_path':['/srv/ga/', '/srv/ipython/IPython/html',
'/srv/ipython/IPython/html/templates']
}
|
|
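The `c = get_config()` object is a nested attribute bag that the notebook server later reads back. A rough stand-in showing how those dotted assignments accumulate (the real traitlets-based Config is more involved; this is only a sketch):

class Config(dict):
    # Attribute access auto-creates nested sections, loosely like
    # IPython's Config object.
    def __getattr__(self, name):
        return self.setdefault(name, Config())
    def __setattr__(self, name, value):
        self[name] = value

def get_config():
    return Config()

c = get_config()
c.NotebookApp.ip = '*'
c.NotebookApp.port = 8888
c.NotebookApp.trust_xheaders = True
assert c['NotebookApp']['port'] == 8888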
86a41fd90c6eb501d44a8bdca118f32cf5a1728c
|
ideascube/conf/kb_nic_nicarali.py
|
ideascube/conf/kb_nic_nicarali.py
|
# -*- coding: utf-8 -*-
"""KoomBook conf"""
from .kb import * # noqa
LANGUAGE_CODE = 'es'
IDEASCUBE_NAME = 'Nicarali'
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'mediacenter',
},
{
'id': 'gutenberg',
'lang': 'es',
},
{
'id': 'wikipedia',
'languages': ['es']
},
{
'id': 'khanacademy',
},
]
|
Add conf file for Nicarali project
|
Add conf file for Nicarali project
|
Python
|
agpl-3.0
|
ideascube/ideascube,ideascube/ideascube,ideascube/ideascube,ideascube/ideascube
|
Add conf file for Nicarali project
|
# -*- coding: utf-8 -*-
"""KoomBook conf"""
from .kb import * # noqa
LANGUAGE_CODE = 'es'
IDEASCUBE_NAME = 'Nicarali'
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'mediacenter',
},
{
'id': 'gutenberg',
'lang': 'es',
},
{
'id': 'wikipedia',
'languages': ['es']
},
{
'id': 'khanacademy',
},
]
|
<commit_before><commit_msg>Add conf file for Nicarali project<commit_after>
|
# -*- coding: utf-8 -*-
"""KoomBook conf"""
from .kb import * # noqa
LANGUAGE_CODE = 'es'
IDEASCUBE_NAME = 'Nicarali'
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'mediacenter',
},
{
'id': 'gutenberg',
'lang': 'es',
},
{
'id': 'wikipedia',
'languages': ['es']
},
{
'id': 'khanacademy',
},
]
|
Add conf file for Nicarali project
# -*- coding: utf-8 -*-
"""KoomBook conf"""
from .kb import * # noqa
LANGUAGE_CODE = 'es'
IDEASCUBE_NAME = 'Nicarali'
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'mediacenter',
},
{
'id': 'gutenberg',
'lang': 'es',
},
{
'id': 'wikipedia',
'languages': ['es']
},
{
'id': 'khanacademy',
},
]
|
<commit_before><commit_msg>Add conf file for Nicarali project<commit_after># -*- coding: utf-8 -*-
"""KoomBook conf"""
from .kb import * # noqa
LANGUAGE_CODE = 'es'
IDEASCUBE_NAME = 'Nicarali'
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'mediacenter',
},
{
'id': 'gutenberg',
'lang': 'es',
},
{
'id': 'wikipedia',
'languages': ['es']
},
{
'id': 'khanacademy',
},
]
|
|
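The conf builds its homepage by concatenating the staff cards pulled in via `from .kb import *` with box-specific ones. A toy version of that composition, with `STAFF_HOME_CARDS` invented here since the real values live in the base conf:

STAFF_HOME_CARDS = [{'id': 'server:settings'}]  # assumed; defined in .kb

HOME_CARDS = STAFF_HOME_CARDS + [
    {'id': 'blog'},
    {'id': 'wikipedia', 'languages': ['es']},
]

# Every card carries an id; optional keys (lang/languages) scope content.
assert all('id' in card for card in HOME_CARDS)
print([card['id'] for card in HOME_CARDS])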
2ec1007160dce53b5110f85b1529d2510f601c20
|
sympy/simplify/tests/test_function.py
|
sympy/simplify/tests/test_function.py
|
""" Unit tests for Hyper_Function"""
from sympy.core import symbols, Dummy, Tuple
from sympy.functions import hyper
from sympy.simplify.hyperexpand import Hyper_Function
def test_attrs():
a, b = symbols('a, b', cls=Dummy)
f = Hyper_Function([2, a], [b])
assert f.ap == Tuple(2, a)
assert f.bq == Tuple(b)
assert f.args == (Tuple(2, a), Tuple(b))
assert f.sizes == (2, 1)
def test_call():
a, b, x = symbols('a, b, x', cls=Dummy)
f = Hyper_Function([2, a], [b])
assert f(x) == hyper([2, a], [b], x)
def test_has():
a, b, c = symbols('a, b, c', cls=Dummy)
f = Hyper_Function([2, -a], [b])
assert f.has(a)
assert f.has(Tuple(b))
assert not f.has(c)
|
Add simple tests for Hyper_Function
|
Add simple tests for Hyper_Function
|
Python
|
bsd-3-clause
|
souravsingh/sympy,saurabhjn76/sympy,beni55/sympy,jaimahajan1997/sympy,lidavidm/sympy,Titan-C/sympy,wanglongqi/sympy,abloomston/sympy,madan96/sympy,MechCoder/sympy,Titan-C/sympy,saurabhjn76/sympy,liangjiaxing/sympy,AkademieOlympia/sympy,AkademieOlympia/sympy,cswiercz/sympy,toolforger/sympy,wyom/sympy,grevutiu-gabriel/sympy,yukoba/sympy,liangjiaxing/sympy,MridulS/sympy,sahilshekhawat/sympy,souravsingh/sympy,yashsharan/sympy,atsao72/sympy,wyom/sympy,meghana1995/sympy,moble/sympy,shipci/sympy,amitjamadagni/sympy,sahmed95/sympy,pandeyadarsh/sympy,pbrady/sympy,chaffra/sympy,jaimahajan1997/sympy,atsao72/sympy,jerli/sympy,cccfran/sympy,pbrady/sympy,VaibhavAgarwalVA/sympy,ga7g08/sympy,yukoba/sympy,pandeyadarsh/sympy,liangjiaxing/sympy,mcdaniel67/sympy,Davidjohnwilson/sympy,Shaswat27/sympy,Mitchkoens/sympy,sunny94/temp,garvitr/sympy,sampadsaha5/sympy,garvitr/sympy,hargup/sympy,hrashk/sympy,Curious72/sympy,maniteja123/sympy,VaibhavAgarwalVA/sympy,Vishluck/sympy,abloomston/sympy,postvakje/sympy,debugger22/sympy,asm666/sympy,jaimahajan1997/sympy,sampadsaha5/sympy,kmacinnis/sympy,drufat/sympy,pandeyadarsh/sympy,mafiya69/sympy,sampadsaha5/sympy,Davidjohnwilson/sympy,Vishluck/sympy,kumarkrishna/sympy,cccfran/sympy,rahuldan/sympy,emon10005/sympy,shipci/sympy,MridulS/sympy,Curious72/sympy,mcdaniel67/sympy,lindsayad/sympy,madan96/sympy,dqnykamp/sympy,meghana1995/sympy,farhaanbukhsh/sympy,vipulroxx/sympy,toolforger/sympy,farhaanbukhsh/sympy,garvitr/sympy,oliverlee/sympy,kmacinnis/sympy,Arafatk/sympy,beni55/sympy,sunny94/temp,abhiii5459/sympy,ahhda/sympy,lidavidm/sympy,vipulroxx/sympy,skidzo/sympy,jamesblunt/sympy,souravsingh/sympy,yukoba/sympy,dqnykamp/sympy,Designist/sympy,pbrady/sympy,rahuldan/sympy,skirpichev/omg,bukzor/sympy,lidavidm/sympy,ChristinaZografou/sympy,shikil/sympy,AunShiLord/sympy,mcdaniel67/sympy,shikil/sympy,rahuldan/sympy,wanglongqi/sympy,Shaswat27/sympy,kmacinnis/sympy,jerli/sympy,asm666/sympy,hargup/sympy,VaibhavAgarwalVA/sympy,Mitchkoens/sympy,skidzo/sympy,iamutkarshtiwari/sympy,Designist/sympy,hargup/sympy,chaffra/sympy,maniteja123/sympy,shipci/sympy,mafiya69/sympy,postvakje/sympy,AunShiLord/sympy,jbbskinny/sympy,emon10005/sympy,sahilshekhawat/sympy,Sumith1896/sympy,Vishluck/sympy,debugger22/sympy,atreyv/sympy,bukzor/sympy,drufat/sympy,abhiii5459/sympy,hrashk/sympy,hrashk/sympy,aktech/sympy,yashsharan/sympy,kumarkrishna/sympy,grevutiu-gabriel/sympy,Arafatk/sympy,debugger22/sympy,oliverlee/sympy,ga7g08/sympy,abloomston/sympy,ahhda/sympy,yashsharan/sympy,mafiya69/sympy,cswiercz/sympy,iamutkarshtiwari/sympy,AunShiLord/sympy,saurabhjn76/sympy,ahhda/sympy,kevalds51/sympy,jbbskinny/sympy,sahilshekhawat/sympy,kaichogami/sympy,wanglongqi/sympy,chaffra/sympy,kaushik94/sympy,Sumith1896/sympy,Curious72/sympy,kaushik94/sympy,Gadal/sympy,kumarkrishna/sympy,atsao72/sympy,iamutkarshtiwari/sympy,wyom/sympy,MridulS/sympy,kaichogami/sympy,jamesblunt/sympy,emon10005/sympy,Sumith1896/sympy,skidzo/sympy,jerli/sympy,MechCoder/sympy,MechCoder/sympy,amitjamadagni/sympy,postvakje/sympy,kaushik94/sympy,toolforger/sympy,Designist/sympy,Mitchkoens/sympy,Arafatk/sympy,dqnykamp/sympy,asm666/sympy,cswiercz/sympy,abhiii5459/sympy,sunny94/temp,oliverlee/sympy,drufat/sympy,grevutiu-gabriel/sympy,AkademieOlympia/sympy,shikil/sympy,madan96/sympy,ChristinaZografou/sympy,aktech/sympy,ChristinaZografou/sympy,meghana1995/sympy,ga7g08/sympy,moble/sympy,beni55/sympy,bukzor/sympy,lindsayad/sympy,farhaanbukhsh/sympy,atreyv/sympy,atreyv/sympy,maniteja123/sympy,kevalds51/sympy,moble/sympy,sahmed95/sympy,Gadal/sympy,cccfran/sympy,sah
med95/sympy,jbbskinny/sympy,lindsayad/sympy,diofant/diofant,vipulroxx/sympy,Shaswat27/sympy,jamesblunt/sympy,kaichogami/sympy,Titan-C/sympy,kevalds51/sympy,aktech/sympy,Gadal/sympy,Davidjohnwilson/sympy
|
Add simple tests for Hyper_Function
|
""" Unit tests for Hyper_Function"""
from sympy.core import symbols, Dummy, Tuple
from sympy.functions import hyper
from sympy.simplify.hyperexpand import Hyper_Function
def test_attrs():
a, b = symbols('a, b', cls=Dummy)
f = Hyper_Function([2, a], [b])
assert f.ap == Tuple(2, a)
assert f.bq == Tuple(b)
assert f.args == (Tuple(2, a), Tuple(b))
assert f.sizes == (2, 1)
def test_call():
a, b, x = symbols('a, b, x', cls=Dummy)
f = Hyper_Function([2, a], [b])
assert f(x) == hyper([2, a], [b], x)
def test_has():
a, b, c = symbols('a, b, c', cls=Dummy)
f = Hyper_Function([2, -a], [b])
assert f.has(a)
assert f.has(Tuple(b))
assert not f.has(c)
|
<commit_before><commit_msg>Add simple tests for Hyper_Function<commit_after>
|
""" Unit tests for Hyper_Function"""
from sympy.core import symbols, Dummy, Tuple
from sympy.functions import hyper
from sympy.simplify.hyperexpand import Hyper_Function
def test_attrs():
a, b = symbols('a, b', cls=Dummy)
f = Hyper_Function([2, a], [b])
assert f.ap == Tuple(2, a)
assert f.bq == Tuple(b)
assert f.args == (Tuple(2, a), Tuple(b))
assert f.sizes == (2, 1)
def test_call():
a, b, x = symbols('a, b, x', cls=Dummy)
f = Hyper_Function([2, a], [b])
assert f(x) == hyper([2, a], [b], x)
def test_has():
a, b, c = symbols('a, b, c', cls=Dummy)
f = Hyper_Function([2, -a], [b])
assert f.has(a)
assert f.has(Tuple(b))
assert not f.has(c)
|
Add simple tests for Hyper_Function""" Unit tests for Hyper_Function"""
from sympy.core import symbols, Dummy, Tuple
from sympy.functions import hyper
from sympy.simplify.hyperexpand import Hyper_Function
def test_attrs():
a, b = symbols('a, b', cls=Dummy)
f = Hyper_Function([2, a], [b])
assert f.ap == Tuple(2, a)
assert f.bq == Tuple(b)
assert f.args == (Tuple(2, a), Tuple(b))
assert f.sizes == (2, 1)
def test_call():
a, b, x = symbols('a, b, x', cls=Dummy)
f = Hyper_Function([2, a], [b])
assert f(x) == hyper([2, a], [b], x)
def test_has():
a, b, c = symbols('a, b, c', cls=Dummy)
f = Hyper_Function([2, -a], [b])
assert f.has(a)
assert f.has(Tuple(b))
assert not f.has(c)
|
<commit_before><commit_msg>Add simple tests for Hyper_Function<commit_after>""" Unit tests for Hyper_Function"""
from sympy.core import symbols, Dummy, Tuple
from sympy.functions import hyper
from sympy.simplify.hyperexpand import Hyper_Function
def test_attrs():
a, b = symbols('a, b', cls=Dummy)
f = Hyper_Function([2, a], [b])
assert f.ap == Tuple(2, a)
assert f.bq == Tuple(b)
assert f.args == (Tuple(2, a), Tuple(b))
assert f.sizes == (2, 1)
def test_call():
a, b, x = symbols('a, b, x', cls=Dummy)
f = Hyper_Function([2, a], [b])
assert f(x) == hyper([2, a], [b], x)
def test_has():
a, b, c = symbols('a, b, c', cls=Dummy)
f = Hyper_Function([2, -a], [b])
assert f.has(a)
assert f.has(Tuple(b))
assert not f.has(c)
|
|
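Since Hyper_Function wraps sympy's `hyper`, the attributes the tests assert on can be seen directly on a hyper instance (public sympy API; should run as-is):

from sympy import symbols, hyper

a, b, x = symbols('a b x')
h = hyper([2, a], [b], x)
# hyper keeps its numerator/denominator parameters as tuples, which is
# what Hyper_Function.ap / .bq mirror in the tests above.
print(h.ap)        # (2, a)
print(h.bq)        # (b,)
print(h.argument)  # x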
42c79ec4fb98ee0964a70fa1872b674ec74e0b4e
|
vumi/scripts/tests/test_db_backup.py
|
vumi/scripts/tests/test_db_backup.py
|
"""Tests for vumi.scripts.db_backup."""
from twisted.trial.unittest import TestCase
from vumi.tests.utils import PersistenceMixin
from vumi.scripts.db_backup import ConfigHolder, Options
class TestConfigHolder(ConfigHolder):
def __init__(self, *args, **kwargs):
self.output = []
super(TestConfigHolder, self).__init__(*args, **kwargs)
def emit(self, s):
self.output.append(s)
def make_cfg(args):
options = Options()
options.parseOptions(args)
return TestConfigHolder(options)
class DbBackupBaseTestCase(TestCase, PersistenceMixin):
sync_persistence = True
def setUp(self):
self._persist_setUp()
# Make sure we start fresh.
self.get_redis_manager()._purge_all()
def tearDown(self):
return self._persist_tearDown()
class BackupDbCmdTestCase(DbBackupBaseTestCase):
def test_backup_db(self):
cfg = make_cfg(["backup", "db_config.yaml"])
cfg.run()
self.assertEqual(cfg.output, [
'Backing up dbs ...',
])
class RestoreDbCmdTestCase(DbBackupBaseTestCase):
def test_create_pool_range_tags(self):
cfg = make_cfg(["restore", "db_backup.json"])
cfg.run()
self.assertEqual(cfg.output, [
'Restoring dbs ...',
])
|
Test skeleton for db backup scripts.
|
Test skeleton for db backup scripts.
|
Python
|
bsd-3-clause
|
harrissoerja/vumi,vishwaprakashmishra/xmatrix,harrissoerja/vumi,vishwaprakashmishra/xmatrix,TouK/vumi,vishwaprakashmishra/xmatrix,TouK/vumi,TouK/vumi,harrissoerja/vumi
|
Test skeleton for db backup scripts.
|
"""Tests for vumi.scripts.db_backup."""
from twisted.trial.unittest import TestCase
from vumi.tests.utils import PersistenceMixin
from vumi.scripts.db_backup import ConfigHolder, Options
class TestConfigHolder(ConfigHolder):
def __init__(self, *args, **kwargs):
self.output = []
super(TestConfigHolder, self).__init__(*args, **kwargs)
def emit(self, s):
self.output.append(s)
def make_cfg(args):
options = Options()
options.parseOptions(args)
return TestConfigHolder(options)
class DbBackupBaseTestCase(TestCase, PersistenceMixin):
sync_persistence = True
def setUp(self):
self._persist_setUp()
# Make sure we start fresh.
self.get_redis_manager()._purge_all()
def tearDown(self):
return self._persist_tearDown()
class BackupDbCmdTestCase(DbBackupBaseTestCase):
def test_backup_db(self):
cfg = make_cfg(["backup", "db_config.yaml"])
cfg.run()
self.assertEqual(cfg.output, [
'Backing up dbs ...',
])
class RestoreDbCmdTestCase(DbBackupBaseTestCase):
def test_create_pool_range_tags(self):
cfg = make_cfg(["restore", "db_backup.json"])
cfg.run()
self.assertEqual(cfg.output, [
'Restoring dbs ...',
])
|
<commit_before><commit_msg>Test skeleton for db backup scripts.<commit_after>
|
"""Tests for vumi.scripts.db_backup."""
from twisted.trial.unittest import TestCase
from vumi.tests.utils import PersistenceMixin
from vumi.scripts.db_backup import ConfigHolder, Options
class TestConfigHolder(ConfigHolder):
def __init__(self, *args, **kwargs):
self.output = []
super(TestConfigHolder, self).__init__(*args, **kwargs)
def emit(self, s):
self.output.append(s)
def make_cfg(args):
options = Options()
options.parseOptions(args)
return TestConfigHolder(options)
class DbBackupBaseTestCase(TestCase, PersistenceMixin):
sync_persistence = True
def setUp(self):
self._persist_setUp()
# Make sure we start fresh.
self.get_redis_manager()._purge_all()
def tearDown(self):
return self._persist_tearDown()
class BackupDbCmdTestCase(DbBackupBaseTestCase):
def test_backup_db(self):
cfg = make_cfg(["backup", "db_config.yaml"])
cfg.run()
self.assertEqual(cfg.output, [
'Backing up dbs ...',
])
class RestoreDbCmdTestCase(DbBackupBaseTestCase):
def test_create_pool_range_tags(self):
cfg = make_cfg(["restore", "db_backup.json"])
cfg.run()
self.assertEqual(cfg.output, [
'Restoring dbs ...',
])
|
Test skeleton for db backup scripts."""Tests for vumi.scripts.db_backup."""
from twisted.trial.unittest import TestCase
from vumi.tests.utils import PersistenceMixin
from vumi.scripts.db_backup import ConfigHolder, Options
class TestConfigHolder(ConfigHolder):
def __init__(self, *args, **kwargs):
self.output = []
super(TestConfigHolder, self).__init__(*args, **kwargs)
def emit(self, s):
self.output.append(s)
def make_cfg(args):
options = Options()
options.parseOptions(args)
return TestConfigHolder(options)
class DbBackupBaseTestCase(TestCase, PersistenceMixin):
sync_persistence = True
def setUp(self):
self._persist_setUp()
# Make sure we start fresh.
self.get_redis_manager()._purge_all()
def tearDown(self):
return self._persist_tearDown()
class BackupDbCmdTestCase(DbBackupBaseTestCase):
def test_backup_db(self):
cfg = make_cfg(["backup", "db_config.yaml"])
cfg.run()
self.assertEqual(cfg.output, [
'Backing up dbs ...',
])
class RestoreDbCmdTestCase(DbBackupBaseTestCase):
def test_create_pool_range_tags(self):
cfg = make_cfg(["restore", "db_backup.json"])
cfg.run()
self.assertEqual(cfg.output, [
'Restoring dbs ...',
])
|
<commit_before><commit_msg>Test skeleton for db backup scripts.<commit_after>"""Tests for vumi.scripts.db_backup."""
from twisted.trial.unittest import TestCase
from vumi.tests.utils import PersistenceMixin
from vumi.scripts.db_backup import ConfigHolder, Options
class TestConfigHolder(ConfigHolder):
def __init__(self, *args, **kwargs):
self.output = []
super(TestConfigHolder, self).__init__(*args, **kwargs)
def emit(self, s):
self.output.append(s)
def make_cfg(args):
options = Options()
options.parseOptions(args)
return TestConfigHolder(options)
class DbBackupBaseTestCase(TestCase, PersistenceMixin):
sync_persistence = True
def setUp(self):
self._persist_setUp()
# Make sure we start fresh.
self.get_redis_manager()._purge_all()
def tearDown(self):
return self._persist_tearDown()
class BackupDbCmdTestCase(DbBackupBaseTestCase):
def test_backup_db(self):
cfg = make_cfg(["backup", "db_config.yaml"])
cfg.run()
self.assertEqual(cfg.output, [
'Backing up dbs ...',
])
class RestoreDbCmdTestCase(DbBackupBaseTestCase):
def test_create_pool_range_tags(self):
cfg = make_cfg(["restore", "db_backup.json"])
cfg.run()
self.assertEqual(cfg.output, [
'Restoring dbs ...',
])
|
|
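TestConfigHolder is the classic capture-the-output seam: override `emit()` with a recorder so assertions run against a list instead of stdout. A dependency-free sketch of the same seam (the real ConfigHolder and Options come from vumi and are stubbed here). Note also that `test_create_pool_range_tags` appears misnamed for what it exercises — it drives the restore command:

class ConfigHolder(object):
    def __init__(self, options):
        self.options = options
    def emit(self, s):
        print(s)
    def run(self):
        self.emit('Backing up dbs ...')

class RecordingConfigHolder(ConfigHolder):
    def __init__(self, options):
        self.output = []  # set up before super() in case __init__ emits
        super(RecordingConfigHolder, self).__init__(options)
    def emit(self, s):
        self.output.append(s)

cfg = RecordingConfigHolder(options=None)
cfg.run()
assert cfg.output == ['Backing up dbs ...']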
1b2d39169b3c1f62c444dade33a0e89bcf63edff
|
lfs_paypal/migrations/0002_auto_20170309_0820.py
|
lfs_paypal/migrations/0002_auto_20170309_0820.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-09 08:20
from __future__ import unicode_literals
from django.db import migrations
def update_price_calculator(apps, schema_editor):
PaymentMethod = apps.get_model("payment", "PaymentMethod")
for payment_method in PaymentMethod.objects.all():
if payment_method.module == "lfs_paypal.PayPalProcessor":
payment_method.module = "lfs_paypal.processor.PayPalProcessor"
payment_method.save()
class Migration(migrations.Migration):
dependencies = [
('lfs_paypal', '0001_initial'),
]
operations = [
migrations.RunPython(update_price_calculator),
]
|
Add data migrations for new processor place
|
Add data migrations for new processor place
|
Python
|
bsd-3-clause
|
diefenbach/lfs-paypal
|
Add data migrations for new processor place
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-09 08:20
from __future__ import unicode_literals
from django.db import migrations
def update_price_calculator(apps, schema_editor):
PaymentMethod = apps.get_model("payment", "PaymentMethod")
for payment_method in PaymentMethod.objects.all():
if payment_method.module == "lfs_paypal.PayPalProcessor":
payment_method.module = "lfs_paypal.processor.PayPalProcessor"
payment_method.save()
class Migration(migrations.Migration):
dependencies = [
('lfs_paypal', '0001_initial'),
]
operations = [
migrations.RunPython(update_price_calculator),
]
|
<commit_before><commit_msg>Add data migrations for new processor place<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-09 08:20
from __future__ import unicode_literals
from django.db import migrations
def update_price_calculator(apps, schema_editor):
PaymentMethod = apps.get_model("payment", "PaymentMethod")
for payment_method in PaymentMethod.objects.all():
if payment_method.module == "lfs_paypal.PayPalProcessor":
payment_method.module = "lfs_paypal.processor.PayPalProcessor"
payment_method.save()
class Migration(migrations.Migration):
dependencies = [
('lfs_paypal', '0001_initial'),
]
operations = [
migrations.RunPython(update_price_calculator),
]
|
Add data migrations for new processor place
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-09 08:20
from __future__ import unicode_literals
from django.db import migrations
def update_price_calculator(apps, schema_editor):
PaymentMethod = apps.get_model("payment", "PaymentMethod")
for payment_method in PaymentMethod.objects.all():
if payment_method.module == "lfs_paypal.PayPalProcessor":
payment_method.module = "lfs_paypal.processor.PayPalProcessor"
payment_method.save()
class Migration(migrations.Migration):
dependencies = [
('lfs_paypal', '0001_initial'),
]
operations = [
migrations.RunPython(update_price_calculator),
]
|
<commit_before><commit_msg>Add data migrations for new processor place<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-09 08:20
from __future__ import unicode_literals
from django.db import migrations
def update_price_calculator(apps, schema_editor):
PaymentMethod = apps.get_model("payment", "PaymentMethod")
for payment_method in PaymentMethod.objects.all():
if payment_method.module == "lfs_paypal.PayPalProcessor":
payment_method.module = "lfs_paypal.processor.PayPalProcessor"
payment_method.save()
class Migration(migrations.Migration):
dependencies = [
('lfs_paypal', '0001_initial'),
]
operations = [
migrations.RunPython(update_price_calculator),
]
|
|
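Two things worth noting: the RunPython callable is named `update_price_calculator` yet rewrites payment-processor module paths (the name looks copied from another migration), and its core transform is a plain string rewrite that can be checked without Django. A sketch of that transform on its own:

OLD_PATH = "lfs_paypal.PayPalProcessor"
NEW_PATH = "lfs_paypal.processor.PayPalProcessor"

def migrate_module(module):
    # Rewrite only the old dotted path; leave every other module untouched.
    return NEW_PATH if module == OLD_PATH else module

assert migrate_module(OLD_PATH) == NEW_PATH
assert migrate_module("lfs_paymill.Processor") == "lfs_paymill.Processor"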
dcee3c876c470d5a339caf36ed888fc40245f70a
|
altair/examples/window_rank.py
|
altair/examples/window_rank.py
|
"""
Window Rank Line Chart
----------------------
This example shows the Group F rankings in the 2018 World Cup after each matchday.
A window transformation is used to rank each team after each match day, sorting by points and goal difference.
"""
# category: case studies
import altair as alt
import pandas as pd
source = pd.DataFrame(
[
{"team": "Germany", "matchday": 1, "point": 0, "diff": -1},
{"team": "Germany", "matchday": 2, "point": 3, "diff": 0},
{"team": "Germany", "matchday": 3, "point": 3, "diff": -2},
{"team": "Mexico", "matchday": 1, "point": 3, "diff": 1},
{"team": "Mexico", "matchday": 2, "point": 6, "diff": 2},
{"team": "Mexico", "matchday": 3, "point": 6, "diff": -1},
{"team": "South Korea", "matchday": 1, "point": 0, "diff": -1},
{"team": "South Korea", "matchday": 2, "point": 0, "diff": -2},
{"team": "South Korea", "matchday": 3, "point": 3, "diff": 0},
{"team": "Sweden", "matchday": 1, "point": 3, "diff": 1},
{"team": "Sweden", "matchday": 2, "point": 3, "diff": 0},
{"team": "Sweden", "matchday": 3, "point": 6, "diff": 3},
]
)
color_scale = alt.Scale(
domain=["Germany", "Mexico", "South Korea", "Sweden"],
range=["#000000", "#127153", "#C91A3C", "#0C71AB"],
)
alt.Chart(source).mark_line().encode(
x="matchday:O", y="rank:O", color=alt.Color("team:N", scale=color_scale)
).transform_window(
rank="rank()",
sort=[
alt.SortField("point", order="descending"),
alt.SortField("diff", order="descending"),
],
groupby=["matchday"],
)
|
Add example for World Cup rankings with window transformation
|
DOC: Add example for World Cup rankings with window transformation
|
Python
|
bsd-3-clause
|
jakevdp/altair,altair-viz/altair
|
DOC: Add example for World Cup rankings with window transformation
|
"""
Window Rank Line Chart
----------------------
This example shows the Group F rankings in the 2018 World Cup after each matchday.
A window transformation is used to rank each team after each match day, sorting by points and goal difference.
"""
# category: case studies
import altair as alt
import pandas as pd
source = pd.DataFrame(
[
{"team": "Germany", "matchday": 1, "point": 0, "diff": -1},
{"team": "Germany", "matchday": 2, "point": 3, "diff": 0},
{"team": "Germany", "matchday": 3, "point": 3, "diff": -2},
{"team": "Mexico", "matchday": 1, "point": 3, "diff": 1},
{"team": "Mexico", "matchday": 2, "point": 6, "diff": 2},
{"team": "Mexico", "matchday": 3, "point": 6, "diff": -1},
{"team": "South Korea", "matchday": 1, "point": 0, "diff": -1},
{"team": "South Korea", "matchday": 2, "point": 0, "diff": -2},
{"team": "South Korea", "matchday": 3, "point": 3, "diff": 0},
{"team": "Sweden", "matchday": 1, "point": 3, "diff": 1},
{"team": "Sweden", "matchday": 2, "point": 3, "diff": 0},
{"team": "Sweden", "matchday": 3, "point": 6, "diff": 3},
]
)
color_scale = alt.Scale(
domain=["Germany", "Mexico", "South Korea", "Sweden"],
range=["#000000", "#127153", "#C91A3C", "#0C71AB"],
)
alt.Chart(source).mark_line().encode(
x="matchday:O", y="rank:O", color=alt.Color("team:N", scale=color_scale)
).transform_window(
rank="rank()",
sort=[
alt.SortField("point", order="descending"),
alt.SortField("diff", order="descending"),
],
groupby=["matchday"],
)
|
<commit_before><commit_msg>DOC: Add example for World Cup rankings with window transformation<commit_after>
|
"""
Window Rank Line Chart
----------------------
This example shows the Group F rankings in the 2018 World Cup after each matchday.
A window transformation is used to rank each team after each match day, sorting by points and goal difference.
"""
# category: case studies
import altair as alt
import pandas as pd
source = pd.DataFrame(
[
{"team": "Germany", "matchday": 1, "point": 0, "diff": -1},
{"team": "Germany", "matchday": 2, "point": 3, "diff": 0},
{"team": "Germany", "matchday": 3, "point": 3, "diff": -2},
{"team": "Mexico", "matchday": 1, "point": 3, "diff": 1},
{"team": "Mexico", "matchday": 2, "point": 6, "diff": 2},
{"team": "Mexico", "matchday": 3, "point": 6, "diff": -1},
{"team": "South Korea", "matchday": 1, "point": 0, "diff": -1},
{"team": "South Korea", "matchday": 2, "point": 0, "diff": -2},
{"team": "South Korea", "matchday": 3, "point": 3, "diff": 0},
{"team": "Sweden", "matchday": 1, "point": 3, "diff": 1},
{"team": "Sweden", "matchday": 2, "point": 3, "diff": 0},
{"team": "Sweden", "matchday": 3, "point": 6, "diff": 3},
]
)
color_scale = alt.Scale(
domain=["Germany", "Mexico", "South Korea", "Sweden"],
range=["#000000", "#127153", "#C91A3C", "#0C71AB"],
)
alt.Chart(source).mark_line().encode(
x="matchday:O", y="rank:O", color=alt.Color("team:N", scale=color_scale)
).transform_window(
rank="rank()",
sort=[
alt.SortField("point", order="descending"),
alt.SortField("diff", order="descending"),
],
groupby=["matchday"],
)
|
DOC: Add example for World Cup rankings with window transformation"""
Window Rank Line Chart
----------------------
This example shows the Group F rankings in the 2018 World Cup after each matchday.
A window transformation is used to rank each team after each match day, sorting by points and goal difference.
"""
# category: case studies
import altair as alt
import pandas as pd
source = pd.DataFrame(
[
{"team": "Germany", "matchday": 1, "point": 0, "diff": -1},
{"team": "Germany", "matchday": 2, "point": 3, "diff": 0},
{"team": "Germany", "matchday": 3, "point": 3, "diff": -2},
{"team": "Mexico", "matchday": 1, "point": 3, "diff": 1},
{"team": "Mexico", "matchday": 2, "point": 6, "diff": 2},
{"team": "Mexico", "matchday": 3, "point": 6, "diff": -1},
{"team": "South Korea", "matchday": 1, "point": 0, "diff": -1},
{"team": "South Korea", "matchday": 2, "point": 0, "diff": -2},
{"team": "South Korea", "matchday": 3, "point": 3, "diff": 0},
{"team": "Sweden", "matchday": 1, "point": 3, "diff": 1},
{"team": "Sweden", "matchday": 2, "point": 3, "diff": 0},
{"team": "Sweden", "matchday": 3, "point": 6, "diff": 3},
]
)
color_scale = alt.Scale(
domain=["Germany", "Mexico", "South Korea", "Sweden"],
range=["#000000", "#127153", "#C91A3C", "#0C71AB"],
)
alt.Chart(source).mark_line().encode(
x="matchday:O", y="rank:O", color=alt.Color("team:N", scale=color_scale)
).transform_window(
rank="rank()",
sort=[
alt.SortField("point", order="descending"),
alt.SortField("diff", order="descending"),
],
groupby=["matchday"],
)
|
<commit_before><commit_msg>DOC: Add example for World Cup rankings with window transformation<commit_after>"""
Window Rank Line Chart
----------------------
This example shows the Group F rankings in the 2018 World Cup after each matchday.
A window transformation is used to rank each team after each match day, sorting by points and goal difference.
"""
# category: case studies
import altair as alt
import pandas as pd
source = pd.DataFrame(
[
{"team": "Germany", "matchday": 1, "point": 0, "diff": -1},
{"team": "Germany", "matchday": 2, "point": 3, "diff": 0},
{"team": "Germany", "matchday": 3, "point": 3, "diff": -2},
{"team": "Mexico", "matchday": 1, "point": 3, "diff": 1},
{"team": "Mexico", "matchday": 2, "point": 6, "diff": 2},
{"team": "Mexico", "matchday": 3, "point": 6, "diff": -1},
{"team": "South Korea", "matchday": 1, "point": 0, "diff": -1},
{"team": "South Korea", "matchday": 2, "point": 0, "diff": -2},
{"team": "South Korea", "matchday": 3, "point": 3, "diff": 0},
{"team": "Sweden", "matchday": 1, "point": 3, "diff": 1},
{"team": "Sweden", "matchday": 2, "point": 3, "diff": 0},
{"team": "Sweden", "matchday": 3, "point": 6, "diff": 3},
]
)
color_scale = alt.Scale(
domain=["Germany", "Mexico", "South Korea", "Sweden"],
range=["#000000", "#127153", "#C91A3C", "#0C71AB"],
)
alt.Chart(source).mark_line().encode(
x="matchday:O", y="rank:O", color=alt.Color("team:N", scale=color_scale)
).transform_window(
rank="rank()",
sort=[
alt.SortField("point", order="descending"),
alt.SortField("diff", order="descending"),
],
groupby=["matchday"],
)
|
|
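The window transform can be sanity-checked in pandas: sort within each matchday by points then goal difference (both descending) and number the rows. Unlike Vega-Lite's rank(), this sketch gives tied teams distinct ranks rather than equal ones:

import pandas as pd

df = pd.DataFrame([
    {"team": "Germany", "matchday": 1, "point": 0, "diff": -1},
    {"team": "Mexico", "matchday": 1, "point": 3, "diff": 1},
    {"team": "South Korea", "matchday": 1, "point": 0, "diff": -1},
    {"team": "Sweden", "matchday": 1, "point": 3, "diff": 1},
])

ordered = df.sort_values(["point", "diff"], ascending=[False, False])
df["rank"] = ordered.groupby("matchday").cumcount() + 1  # aligns by index
print(df[["team", "rank"]])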
d722f788c231a8dedc6ed2a48efc97195c82d7b7
|
tests/test_elasticsearch_processor.py
|
tests/test_elasticsearch_processor.py
|
import utils
from scrapi.linter.document import NormalizedDocument, RawDocument
from scrapi.processing.elastic_search import es, ElasticsearchProcessor
test_db = ElasticsearchProcessor()
RAW = RawDocument(utils.RAW_DOC)
NORMALIZED = NormalizedDocument(utils.RECORD)
def test_process_normalized():
NORMALIZED['source'] = 'test'
NORMALIZED['_id'] = NORMALIZED['id']['serviceID']
test_db.process_normalized(RAW, NORMALIZED)
results = es.search(index='share', doc_type='test')
assert (len(results['hits']['hits']) == 1)
def test_versions():
NORMALIZED['source'] = 'test'
NORMALIZED['_id'] = NORMALIZED['id']['serviceID']
test_db.process_normalized(RAW, NORMALIZED)
old_title = NORMALIZED['title']
result = es.search(index='share', doc_type='test')['hits']['hits'][0]
assert (result['_source']['title'] == old_title)
NORMALIZED['title'] = 'a new title'
test_db.process_normalized(RAW, NORMALIZED)
results = es.search(index='share', doc_type='test')
assert (len(results['hits']['hits']) == 1)
assert (results['hits']['hits'][0]['_source']['title'] == 'a new title')
|
Add tests for new elasticsearch processor
|
Add tests for new elasticsearch processor
|
Python
|
apache-2.0
|
felliott/scrapi,jeffreyliu3230/scrapi,felliott/scrapi,CenterForOpenScience/scrapi,mehanig/scrapi,mehanig/scrapi,erinspace/scrapi,CenterForOpenScience/scrapi,icereval/scrapi,fabianvf/scrapi,alexgarciac/scrapi,fabianvf/scrapi,erinspace/scrapi,ostwald/scrapi
|
Add tests for new elasticsearch processor
|
import utils
from scrapi.linter.document import NormalizedDocument, RawDocument
from scrapi.processing.elastic_search import es, ElasticsearchProcessor
test_db = ElasticsearchProcessor()
RAW = RawDocument(utils.RAW_DOC)
NORMALIZED = NormalizedDocument(utils.RECORD)
def test_process_normalized():
NORMALIZED['source'] = 'test'
NORMALIZED['_id'] = NORMALIZED['id']['serviceID']
test_db.process_normalized(RAW, NORMALIZED)
results = es.search(index='share', doc_type='test')
assert (len(results['hits']['hits']) == 1)
def test_versions():
NORMALIZED['source'] = 'test'
NORMALIZED['_id'] = NORMALIZED['id']['serviceID']
test_db.process_normalized(RAW, NORMALIZED)
old_title = NORMALIZED['title']
result = es.search(index='share', doc_type='test')['hits']['hits'][0]
assert (result['_source']['title'] == old_title)
NORMALIZED['title'] = 'a new title'
test_db.process_normalized(RAW, NORMALIZED)
results = es.search(index='share', doc_type='test')
assert (len(results['hits']['hits']) == 1)
assert (results['hits']['hits'][0]['_source']['title'] == 'a new title')
|
<commit_before><commit_msg>Add tests for new elasticsearch processor<commit_after>
|
import utils
from scrapi.linter.document import NormalizedDocument, RawDocument
from scrapi.processing.elastic_search import es, ElasticsearchProcessor
test_db = ElasticsearchProcessor()
RAW = RawDocument(utils.RAW_DOC)
NORMALIZED = NormalizedDocument(utils.RECORD)
def test_process_normalized():
NORMALIZED['source'] = 'test'
NORMALIZED['_id'] = NORMALIZED['id']['serviceID']
test_db.process_normalized(RAW, NORMALIZED)
results = es.search(index='share', doc_type='test')
assert (len(results['hits']['hits']) == 1)
def test_versions():
NORMALIZED['source'] = 'test'
NORMALIZED['_id'] = NORMALIZED['id']['serviceID']
test_db.process_normalized(RAW, NORMALIZED)
old_title = NORMALIZED['title']
result = es.search(index='share', doc_type='test')['hits']['hits'][0]
assert (result['_source']['title'] == old_title)
NORMALIZED['title'] = 'a new title'
test_db.process_normalized(RAW, NORMALIZED)
results = es.search(index='share', doc_type='test')
assert (len(results['hits']['hits']) == 1)
assert (results['hits']['hits'][0]['_source']['title'] == 'a new title')
|
Add tests for new elasticsearch processor
import utils
from scrapi.linter.document import NormalizedDocument, RawDocument
from scrapi.processing.elastic_search import es, ElasticsearchProcessor
test_db = ElasticsearchProcessor()
RAW = RawDocument(utils.RAW_DOC)
NORMALIZED = NormalizedDocument(utils.RECORD)
def test_process_normalized():
NORMALIZED['source'] = 'test'
NORMALIZED['_id'] = NORMALIZED['id']['serviceID']
test_db.process_normalized(RAW, NORMALIZED)
results = es.search(index='share', doc_type='test')
assert (len(results['hits']['hits']) == 1)
def test_versions():
NORMALIZED['source'] = 'test'
NORMALIZED['_id'] = NORMALIZED['id']['serviceID']
test_db.process_normalized(RAW, NORMALIZED)
old_title = NORMALIZED['title']
result = es.search(index='share', doc_type='test')['hits']['hits'][0]
assert (result['_source']['title'] == old_title)
NORMALIZED['title'] = 'a new title'
test_db.process_normalized(RAW, NORMALIZED)
results = es.search(index='share', doc_type='test')
assert (len(results['hits']['hits']) == 1)
assert (results['hits']['hits'][0]['_source']['title'] == 'a new title')
|
<commit_before><commit_msg>Add tests for new elasticsearch processor<commit_after>import utils
from scrapi.linter.document import NormalizedDocument, RawDocument
from scrapi.processing.elastic_search import es, ElasticsearchProcessor
test_db = ElasticsearchProcessor()
RAW = RawDocument(utils.RAW_DOC)
NORMALIZED = NormalizedDocument(utils.RECORD)
def test_process_normalized():
NORMALIZED['source'] = 'test'
NORMALIZED['_id'] = NORMALIZED['id']['serviceID']
test_db.process_normalized(RAW, NORMALIZED)
results = es.search(index='share', doc_type='test')
assert (len(results['hits']['hits']) == 1)
def test_versions():
NORMALIZED['source'] = 'test'
NORMALIZED['_id'] = NORMALIZED['id']['serviceID']
test_db.process_normalized(RAW, NORMALIZED)
old_title = NORMALIZED['title']
result = es.search(index='share', doc_type='test')['hits']['hits'][0]
assert (result['_source']['title'] == old_title)
NORMALIZED['title'] = 'a new title'
test_db.process_normalized(RAW, NORMALIZED)
results = es.search(index='share', doc_type='test')
assert (len(results['hits']['hits']) == 1)
assert (results['hits']['hits'][0]['_source']['title'] == 'a new title')
|
|
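The versions test is really asserting upsert semantics: indexing a second document under the same `_id` must overwrite the first, never add a hit. The same contract, modelled on a plain dict (a sketch, not scrapi code):

index = {}

def process_normalized(doc):
    # Keyed writes overwrite in place, mirroring ES indexing by _id.
    index[doc['_id']] = dict(doc)

doc = {'_id': 'service:1', 'title': 'old title'}
process_normalized(doc)
doc['title'] = 'a new title'
process_normalized(doc)

assert len(index) == 1
assert index['service:1']['title'] == 'a new title'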
00c45c62b8b58309dd4599c5ba9ccee6062c09a0
|
libcloud/test/storage/test_scaleway.py
|
libcloud/test/storage/test_scaleway.py
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from libcloud.common.types import LibcloudError
from libcloud.storage.drivers.scaleway import ScalewayStorageDriver
from libcloud.storage.drivers.scaleway import ScalewayConnectionAWS4
from libcloud.test.secrets import STORAGE_S3_PARAMS
class ScalewayStorageDriverTestCase(unittest.TestCase):
driver_type = ScalewayStorageDriver
driver_args = STORAGE_S3_PARAMS
default_host = "libcloud-storage-test.s3.fr-par.scw.cloud"
@classmethod
def create_driver(self):
return self.driver_type(*self.driver_args, host=self.default_host)
def setUp(self):
self.driver = self.create_driver()
def test_connection_class_type(self):
self.assertEqual(self.driver.connectionCls, ScalewayConnectionAWS4)
def test_connection_class_default_host(self):
self.assertEqual(self.driver.connectionCls.host, self.default_host)
self.assertEqual(self.driver.connectionCls.port, 443)
self.assertEqual(self.driver.connectionCls.secure, True)
def test_empty_host_error(self):
self.assertRaisesRegex(
LibcloudError,
"host argument is required",
self.driver_type,
*self.driver_args,
)
if __name__ == "__main__":
sys.exit(unittest.main())
|
Add simple test for Scaleway storage driver
|
Add simple test for Scaleway storage driver
|
Python
|
apache-2.0
|
apache/libcloud,apache/libcloud,apache/libcloud,mistio/libcloud,mistio/libcloud,mistio/libcloud
|
Add simple test for Scaleway storage driver
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from libcloud.common.types import LibcloudError
from libcloud.storage.drivers.scaleway import ScalewayStorageDriver
from libcloud.storage.drivers.scaleway import ScalewayConnectionAWS4
from libcloud.test.secrets import STORAGE_S3_PARAMS
class ScalewayStorageDriverTestCase(unittest.TestCase):
driver_type = ScalewayStorageDriver
driver_args = STORAGE_S3_PARAMS
default_host = "libcloud-storage-test.s3.fr-par.scw.cloud"
@classmethod
def create_driver(self):
return self.driver_type(*self.driver_args, host=self.default_host)
def setUp(self):
self.driver = self.create_driver()
def test_connection_class_type(self):
self.assertEqual(self.driver.connectionCls, ScalewayConnectionAWS4)
def test_connection_class_default_host(self):
self.assertEqual(self.driver.connectionCls.host, self.default_host)
self.assertEqual(self.driver.connectionCls.port, 443)
self.assertEqual(self.driver.connectionCls.secure, True)
def test_empty_host_error(self):
self.assertRaisesRegex(
LibcloudError,
"host argument is required",
self.driver_type,
*self.driver_args,
)
if __name__ == "__main__":
sys.exit(unittest.main())
|
<commit_before><commit_msg>Add simple test for scalewa storage driver<commit_after>
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from libcloud.common.types import LibcloudError
from libcloud.storage.drivers.scaleway import ScalewayStorageDriver
from libcloud.storage.drivers.scaleway import ScalewayConnectionAWS4
from libcloud.test.secrets import STORAGE_S3_PARAMS
class ScalewayStorageDriverTestCase(unittest.TestCase):
driver_type = ScalewayStorageDriver
driver_args = STORAGE_S3_PARAMS
default_host = "libcloud-storage-test.s3.fr-par.scw.cloud"
@classmethod
def create_driver(self):
return self.driver_type(*self.driver_args, host=self.default_host)
def setUp(self):
self.driver = self.create_driver()
def test_connection_class_type(self):
self.assertEqual(self.driver.connectionCls, ScalewayConnectionAWS4)
def test_connection_class_default_host(self):
self.assertEqual(self.driver.connectionCls.host, self.default_host)
self.assertEqual(self.driver.connectionCls.port, 443)
self.assertEqual(self.driver.connectionCls.secure, True)
def test_empty_host_error(self):
self.assertRaisesRegex(
LibcloudError,
"host argument is required",
self.driver_type,
*self.driver_args,
)
if __name__ == "__main__":
sys.exit(unittest.main())
|
Add simple test for Scaleway storage driver
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from libcloud.common.types import LibcloudError
from libcloud.storage.drivers.scaleway import ScalewayStorageDriver
from libcloud.storage.drivers.scaleway import ScalewayConnectionAWS4
from libcloud.test.secrets import STORAGE_S3_PARAMS
class ScalewayStorageDriverTestCase(unittest.TestCase):
driver_type = ScalewayStorageDriver
driver_args = STORAGE_S3_PARAMS
default_host = "libcloud-storage-test.s3.fr-par.scw.cloud"
@classmethod
def create_driver(self):
return self.driver_type(*self.driver_args, host=self.default_host)
def setUp(self):
self.driver = self.create_driver()
def test_connection_class_type(self):
self.assertEqual(self.driver.connectionCls, ScalewayConnectionAWS4)
def test_connection_class_default_host(self):
self.assertEqual(self.driver.connectionCls.host, self.default_host)
self.assertEqual(self.driver.connectionCls.port, 443)
self.assertEqual(self.driver.connectionCls.secure, True)
def test_empty_host_error(self):
self.assertRaisesRegex(
LibcloudError,
"host argument is required",
self.driver_type,
*self.driver_args,
)
if __name__ == "__main__":
sys.exit(unittest.main())
|
<commit_before><commit_msg>Add simple test for scalewa storage driver<commit_after># Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from libcloud.common.types import LibcloudError
from libcloud.storage.drivers.scaleway import ScalewayStorageDriver
from libcloud.storage.drivers.scaleway import ScalewayConnectionAWS4
from libcloud.test.secrets import STORAGE_S3_PARAMS
class ScalewayStorageDriverTestCase(unittest.TestCase):
driver_type = ScalewayStorageDriver
driver_args = STORAGE_S3_PARAMS
default_host = "libcloud-storage-test.s3.fr-par.scw.cloud"
@classmethod
def create_driver(self):
return self.driver_type(*self.driver_args, host=self.default_host)
def setUp(self):
self.driver = self.create_driver()
def test_connection_class_type(self):
self.assertEqual(self.driver.connectionCls, ScalewayConnectionAWS4)
def test_connection_class_default_host(self):
self.assertEqual(self.driver.connectionCls.host, self.default_host)
self.assertEqual(self.driver.connectionCls.port, 443)
self.assertEqual(self.driver.connectionCls.secure, True)
def test_empty_host_error(self):
self.assertRaisesRegex(
LibcloudError,
"host argument is required",
self.driver_type,
*self.driver_args,
)
if __name__ == "__main__":
sys.exit(unittest.main())
|
|
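The last test pins the driver's refusal to guess an endpoint. The shape of that contract, reduced to a stub (the real ScalewayStorageDriver constructor lives in libcloud; this stand-in only mimics its host check):

class LibcloudError(Exception):
    pass

class StubStorageDriver(object):
    def __init__(self, key, secret, host=None, port=443, secure=True):
        if not host:
            raise LibcloudError('host argument is required')
        self.host, self.port, self.secure = host, port, secure

d = StubStorageDriver('key', 'secret',
                      host='libcloud-storage-test.s3.fr-par.scw.cloud')
assert (d.port, d.secure) == (443, True)

try:
    StubStorageDriver('key', 'secret')
except LibcloudError as exc:
    assert 'host argument is required' in str(exc)
else:
    raise AssertionError('expected LibcloudError')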
9b26f0f6a08a289882bd8f51159c265e9e499a95
|
test/test_notification_integration.py
|
test/test_notification_integration.py
|
import os
import select
import groundstation.fs_watcher as fs_watcher
from groundstation.peer_socket import PeerSocket
from integration_fixture import StationIntegrationFixture, \
TestListener, \
TestClient
class StationFSWatcherIntegration(StationIntegrationFixture):
def test_notifies_peer(self):
read_sockets = []
write_sockets = []
def tick():
return select.select(read_sockets, write_sockets, [], 1)
addr = os.path.join(self.dir, "listener")
listener = TestListener(addr)
client = TestClient(addr)
peer = listener.accept(PeerSocket)
watcher = fs_watcher.FSWatcher(self.station.store.object_root)
read_sockets.append(client)
read_sockets.append(watcher)
self.station.write("trolololol")
(sread, _, _) = tick()
self.assertIn(watcher, sread)
obj_name = watcher.read()
client.notify_new_object(self.station, obj_name)
client.send()
peer.recv()
data = peer.packet_queue.pop()
gizmo = self.station.gizmo_factory.hydrate(data, peer)
assert gizmo is not None, "gizmo_factory returned None"
gizmo.process()
watcher.kill()
|
Test that watchers send newobject requests
|
Test that watchers send newobject requests
|
Python
|
mit
|
richo/groundstation,richo/groundstation,richo/groundstation,richo/groundstation,richo/groundstation
|
Test that watchers send newobject requests
|
import os
import select
import groundstation.fs_watcher as fs_watcher
from groundstation.peer_socket import PeerSocket
from integration_fixture import StationIntegrationFixture, \
TestListener, \
TestClient
class StationFSWatcherIntegration(StationIntegrationFixture):
def test_notifies_peer(self):
read_sockets = []
write_sockets = []
def tick():
return select.select(read_sockets, write_sockets, [], 1)
addr = os.path.join(self.dir, "listener")
listener = TestListener(addr)
client = TestClient(addr)
peer = listener.accept(PeerSocket)
watcher = fs_watcher.FSWatcher(self.station.store.object_root)
read_sockets.append(client)
read_sockets.append(watcher)
self.station.write("trolololol")
(sread, _, _) = tick()
self.assertIn(watcher, sread)
obj_name = watcher.read()
client.notify_new_object(self.station, obj_name)
client.send()
peer.recv()
data = peer.packet_queue.pop()
gizmo = self.station.gizmo_factory.hydrate(data, peer)
assert gizmo is not None, "gizmo_factory returned None"
gizmo.process()
watcher.kill()
|
<commit_before><commit_msg>Test that watchers send newobject requests<commit_after>
|
import os
import select
import groundstation.fs_watcher as fs_watcher
from groundstation.peer_socket import PeerSocket
from integration_fixture import StationIntegrationFixture, \
TestListener, \
TestClient
class StationFSWatcherIntegration(StationIntegrationFixture):
def test_notifies_peer(self):
read_sockets = []
write_sockets = []
def tick():
return select.select(read_sockets, write_sockets, [], 1)
addr = os.path.join(self.dir, "listener")
listener = TestListener(addr)
client = TestClient(addr)
peer = listener.accept(PeerSocket)
watcher = fs_watcher.FSWatcher(self.station.store.object_root)
read_sockets.append(client)
read_sockets.append(watcher)
self.station.write("trolololol")
(sread, _, _) = tick()
self.assertIn(watcher, sread)
obj_name = watcher.read()
client.notify_new_object(self.station, obj_name)
client.send()
peer.recv()
data = peer.packet_queue.pop()
gizmo = self.station.gizmo_factory.hydrate(data, peer)
assert gizmo is not None, "gizmo_factory returned None"
gizmo.process()
watcher.kill()
|
Test that watchers send newobject requestsimport os
import select
import groundstation.fs_watcher as fs_watcher
from groundstation.peer_socket import PeerSocket
from integration_fixture import StationIntegrationFixture, \
TestListener, \
TestClient
class StationFSWatcherIntegration(StationIntegrationFixture):
def test_notifies_peer(self):
read_sockets = []
write_sockets = []
def tick():
return select.select(read_sockets, write_sockets, [], 1)
addr = os.path.join(self.dir, "listener")
listener = TestListener(addr)
client = TestClient(addr)
peer = listener.accept(PeerSocket)
watcher = fs_watcher.FSWatcher(self.station.store.object_root)
read_sockets.append(client)
read_sockets.append(watcher)
self.station.write("trolololol")
(sread, _, _) = tick()
self.assertIn(watcher, sread)
obj_name = watcher.read()
client.notify_new_object(self.station, obj_name)
client.send()
peer.recv()
data = peer.packet_queue.pop()
gizmo = self.station.gizmo_factory.hydrate(data, peer)
assert gizmo is not None, "gizmo_factory returned None"
gizmo.process()
watcher.kill()
|
<commit_before><commit_msg>Test that watchers send newobject requests<commit_after>import os
import select
import groundstation.fs_watcher as fs_watcher
from groundstation.peer_socket import PeerSocket
from integration_fixture import StationIntegrationFixture, \
TestListener, \
TestClient
class StationFSWatcherIntegration(StationIntegrationFixture):
def test_notifies_peer(self):
read_sockets = []
write_sockets = []
def tick():
return select.select(read_sockets, write_sockets, [], 1)
addr = os.path.join(self.dir, "listener")
listener = TestListener(addr)
client = TestClient(addr)
peer = listener.accept(PeerSocket)
watcher = fs_watcher.FSWatcher(self.station.store.object_root)
read_sockets.append(client)
read_sockets.append(watcher)
self.station.write("trolololol")
(sread, _, _) = tick()
self.assertIn(watcher, sread)
obj_name = watcher.read()
client.notify_new_object(self.station, obj_name)
client.send()
peer.recv()
data = peer.packet_queue.pop()
gizmo = self.station.gizmo_factory.hydrate(data, peer)
assert gizmo is not None, "gizmo_factory returned None"
gizmo.process()
watcher.kill()
|
|
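A minimal stand-alone sketch of the select() readiness pattern the test above depends on. FSWatcher and PeerSocket internals are not shown in this record, so the pipe-backed PipeWatcher below is an invented stand-in; the only assumption carried over is that a watcher exposes a fileno() that select.select() can poll.
import os
import select
class PipeWatcher(object):
    """Toy watcher: becomes readable once notify() is called."""
    def __init__(self):
        self._r, self._w = os.pipe()
    def fileno(self):
        # select() only needs a pollable file descriptor
        return self._r
    def notify(self, name):
        os.write(self._w, name.encode() + b"\n")
    def read(self):
        return os.read(self._r, 4096).strip()
watcher = PipeWatcher()
watcher.notify("new-object-sha")
sread, _, _ = select.select([watcher], [], [], 1)
assert watcher in sread  # mirrors self.assertIn(watcher, sread)
print(watcher.read())    # b'new-object-sha'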
2937aeda6e30f3bddbd9bee660ee5390ebfebf42
|
app/soc/views/helper/url_patterns.py
|
app/soc/views/helper/url_patterns.py
|
#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for constructing core URL patterns
"""
__authors__ = [
'"Madhusudan.C.S" <madhusudancs@gmail.com>',
]
from soc.models import linkable
def captureLinkId(name):
"""Returns a capture group for a link id with the specified name.
"""
return r'(?P<%s>%s)' % (name, linkable.LINK_ID_PATTERN_CORE)
def namedLinkIdPattern(names):
"""Returns a link ID pattern consisting of named parts.
    The returned pattern does not start or end with a /; the parts are,
    however, concatenated with a /.
Args:
names: The names that should be given to the different parts.
"""
named_patterns = []
for name in names:
named_patterns.append(captureLinkId(name))
return r'/'.join(named_patterns)
USER = namedLinkIdPattern(['link_id'])
|
Add a url_pattern module for soc core views.
|
Add a url_pattern module for soc core views.
This module contains the URL patterns for site-wide views like Host
related views.
--HG--
extra : rebase_source : 3b1398c5149664d63f0d84df2040791995461152
|
Python
|
apache-2.0
|
rhyolight/nupic.son,rhyolight/nupic.son,rhyolight/nupic.son
|
Add a url_pattern module for soc core views.
This module contains the URL patterns for site-wide views like Host
related views.
--HG--
extra : rebase_source : 3b1398c5149664d63f0d84df2040791995461152
|
#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for constructing core URL patterns
"""
__authors__ = [
'"Madhusudan.C.S" <madhusudancs@gmail.com>',
]
from soc.models import linkable
def captureLinkId(name):
"""Returns a capture group for a link id with the specified name.
"""
return r'(?P<%s>%s)' % (name, linkable.LINK_ID_PATTERN_CORE)
def namedLinkIdPattern(names):
"""Returns a link ID pattern consisting of named parts.
    The returned pattern does not start or end with a /; the parts are,
    however, concatenated with a /.
Args:
names: The names that should be given to the different parts.
"""
named_patterns = []
for name in names:
named_patterns.append(captureLinkId(name))
return r'/'.join(named_patterns)
USER = namedLinkIdPattern(['link_id'])
|
<commit_before><commit_msg>Add a url_pattern module for soc core views.
This module contains the URL patterns for site-wide views like Host
related views.
--HG--
extra : rebase_source : 3b1398c5149664d63f0d84df2040791995461152<commit_after>
|
#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for constructing core URL patterns
"""
__authors__ = [
'"Madhusudan.C.S" <madhusudancs@gmail.com>',
]
from soc.models import linkable
def captureLinkId(name):
"""Returns a capture group for a link id with the specified name.
"""
return r'(?P<%s>%s)' % (name, linkable.LINK_ID_PATTERN_CORE)
def namedLinkIdPattern(names):
"""Returns a link ID pattern consisting of named parts.
    The returned pattern does not start or end with a /; the parts are,
    however, concatenated with a /.
Args:
names: The names that should be given to the different parts.
"""
named_patterns = []
for name in names:
named_patterns.append(captureLinkId(name))
return r'/'.join(named_patterns)
USER = namedLinkIdPattern(['link_id'])
|
Add a url_pattern module for soc core views.
This module contains the URL patterns for site-wide views like Host
related views.
--HG--
extra : rebase_source : 3b1398c5149664d63f0d84df2040791995461152#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for constructing core URL patterns
"""
__authors__ = [
'"Madhusudan.C.S" <madhusudancs@gmail.com>',
]
from soc.models import linkable
def captureLinkId(name):
"""Returns a capture group for a link id with the specified name.
"""
return r'(?P<%s>%s)' % (name, linkable.LINK_ID_PATTERN_CORE)
def namedLinkIdPattern(names):
"""Returns a link ID pattern consisting of named parts.
    The returned pattern does not start or end with a /; the parts are,
    however, concatenated with a /.
Args:
names: The names that should be given to the different parts.
"""
named_patterns = []
for name in names:
named_patterns.append(captureLinkId(name))
return r'/'.join(named_patterns)
USER = namedLinkIdPattern(['link_id'])
|
<commit_before><commit_msg>Add a url_pattern module for soc core views.
This module contains the URL patterns for site-wide views like Host
related views.
--HG--
extra : rebase_source : 3b1398c5149664d63f0d84df2040791995461152<commit_after>#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for constructing core URL patterns
"""
__authors__ = [
'"Madhusudan.C.S" <madhusudancs@gmail.com>',
]
from soc.models import linkable
def captureLinkId(name):
"""Returns a capture group for a link id with the specified name.
"""
return r'(?P<%s>%s)' % (name, linkable.LINK_ID_PATTERN_CORE)
def namedLinkIdPattern(names):
"""Returns a link ID pattern consisting of named parts.
    The returned pattern does not start or end with a /; the parts are,
    however, concatenated with a /.
Args:
names: The names that should be given to the different parts.
"""
named_patterns = []
for name in names:
named_patterns.append(captureLinkId(name))
return r'/'.join(named_patterns)
USER = namedLinkIdPattern(['link_id'])
|
|
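For illustration, a hedged sketch of what namedLinkIdPattern() yields when matched against a URL path. LINK_ID_PATTERN_CORE lives in soc.models.linkable and is not shown in this record, so the pattern below is an assumed stand-in, not the real definition.
import re
LINK_ID_PATTERN_CORE = r'[a-z0-9](?:[a-z0-9_]*[a-z0-9])?'  # assumed value
def captureLinkId(name):
    return r'(?P<%s>%s)' % (name, LINK_ID_PATTERN_CORE)
def namedLinkIdPattern(names):
    return r'/'.join(captureLinkId(name) for name in names)
pattern = namedLinkIdPattern(['sponsor', 'program'])
match = re.match(pattern, 'google/gsoc2011')
print(match.groupdict())  # {'sponsor': 'google', 'program': 'gsoc2011'}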
9f9c40a98ea058eec7b586a0e0b87acd1adf50f7
|
scoring/import_bucs.py
|
scoring/import_bucs.py
|
import csv
from django.template.defaultfilters import slugify
from core.models import Bowstyle, Club, Archer
from entries.models import CompetitionEntry, SessionEntry, SessionRound, Competition, TargetAllocation
competition = Competition.objects.get(slug='bucs-indoors-2012')
session_rounds = SessionRound.objects.filter(session__competition=competition)
clubs = set()
total = 0
for session_name, session_round in zip(['A', 'B', 'C'], session_rounds):
file_name = '/home/marc/Desktop/BUSA%s.csv' % session_name
f = open(file_name, 'r')
reader = csv.reader(f)
rows = filter(lambda r: r[1], reader)
rows = [row[:5] for row in rows]
for target, archer, bow, club, exp in rows:
bowstyle = Bowstyle.objects.get(name__iexact=bow)
#if exp[0] not in ['E', 'N']:
# print exp
exp = exp[0]
club = Club.objects.get(short_name__iexact=club.strip() + ' Uni')
# get or create archer
try:
archer = Archer.objects.get(name__iexact=archer, club=club)
except Archer.DoesNotExist:
archer = Archer(name=archer, club=club, novice=exp, age='S', bowstyle=bowstyle, gender='G')
archer.save()
# get or create competition entry
competition_entry, created = CompetitionEntry.objects.get_or_create(competition=competition, archer=archer, bowstyle=bowstyle, novice=exp, age='S', club=club)
session_entry, created = SessionEntry.objects.get_or_create(competition_entry=competition_entry, session_round=session_round)
target_allocation, created = TargetAllocation.objects.get_or_create(session_entry=session_entry, boss=target[:-1], target=target[-1])
print target[:-1], target[-1], archer
#clubs = sorted(list(clubs))
#for club in clubs:
# club = club + ' Uni'
# try:
# club = Club.objects.get(short_name__iexact=club)
# print club.pk, club.name
# continue
# except Club.DoesNotExist:
# full_name = club + 'versity Archery Club'
# abbreviation = ''.join([n[0] for n in full_name.split(' ')])
# club = Club(name=full_name, short_name=club, abbreviation=abbreviation)
# club.clean()
# club.save()
# print club.pk, club.name
#
|
Add a csv import script (specific to BUCS)
|
Add a csv import script (specific to BUCS)
|
Python
|
bsd-3-clause
|
mjtamlyn/archery-scoring,mjtamlyn/archery-scoring,mjtamlyn/archery-scoring,mjtamlyn/archery-scoring,mjtamlyn/archery-scoring
|
Add a csv import script (specific to BUCS)
|
import csv
from django.template.defaultfilters import slugify
from core.models import Bowstyle, Club, Archer
from entries.models import CompetitionEntry, SessionEntry, SessionRound, Competition, TargetAllocation
competition = Competition.objects.get(slug='bucs-indoors-2012')
session_rounds = SessionRound.objects.filter(session__competition=competition)
clubs = set()
total = 0
for session_name, session_round in zip(['A', 'B', 'C'], session_rounds):
file_name = '/home/marc/Desktop/BUSA%s.csv' % session_name
f = open(file_name, 'r')
reader = csv.reader(f)
rows = filter(lambda r: r[1], reader)
rows = [row[:5] for row in rows]
for target, archer, bow, club, exp in rows:
bowstyle = Bowstyle.objects.get(name__iexact=bow)
#if exp[0] not in ['E', 'N']:
# print exp
exp = exp[0]
club = Club.objects.get(short_name__iexact=club.strip() + ' Uni')
# get or create archer
try:
archer = Archer.objects.get(name__iexact=archer, club=club)
except Archer.DoesNotExist:
archer = Archer(name=archer, club=club, novice=exp, age='S', bowstyle=bowstyle, gender='G')
archer.save()
# get or create competition entry
competition_entry, created = CompetitionEntry.objects.get_or_create(competition=competition, archer=archer, bowstyle=bowstyle, novice=exp, age='S', club=club)
session_entry, created = SessionEntry.objects.get_or_create(competition_entry=competition_entry, session_round=session_round)
target_allocation, created = TargetAllocation.objects.get_or_create(session_entry=session_entry, boss=target[:-1], target=target[-1])
print target[:-1], target[-1], archer
#clubs = sorted(list(clubs))
#for club in clubs:
# club = club + ' Uni'
# try:
# club = Club.objects.get(short_name__iexact=club)
# print club.pk, club.name
# continue
# except Club.DoesNotExist:
# full_name = club + 'versity Archery Club'
# abbreviation = ''.join([n[0] for n in full_name.split(' ')])
# club = Club(name=full_name, short_name=club, abbreviation=abbreviation)
# club.clean()
# club.save()
# print club.pk, club.name
#
|
<commit_before><commit_msg>Add a csv import script (specific to BUCS)<commit_after>
|
import csv
from django.template.defaultfilters import slugify
from core.models import Bowstyle, Club, Archer
from entries.models import CompetitionEntry, SessionEntry, SessionRound, Competition, TargetAllocation
competition = Competition.objects.get(slug='bucs-indoors-2012')
session_rounds = SessionRound.objects.filter(session__competition=competition)
clubs = set()
total = 0
for session_name, session_round in zip(['A', 'B', 'C'], session_rounds):
file_name = '/home/marc/Desktop/BUSA%s.csv' % session_name
f = open(file_name, 'r')
reader = csv.reader(f)
rows = filter(lambda r: r[1], reader)
rows = [row[:5] for row in rows]
for target, archer, bow, club, exp in rows:
bowstyle = Bowstyle.objects.get(name__iexact=bow)
#if exp[0] not in ['E', 'N']:
# print exp
exp = exp[0]
club = Club.objects.get(short_name__iexact=club.strip() + ' Uni')
# get or create archer
try:
archer = Archer.objects.get(name__iexact=archer, club=club)
except Archer.DoesNotExist:
archer = Archer(name=archer, club=club, novice=exp, age='S', bowstyle=bowstyle, gender='G')
archer.save()
# get or create competition entry
competition_entry, created = CompetitionEntry.objects.get_or_create(competition=competition, archer=archer, bowstyle=bowstyle, novice=exp, age='S', club=club)
session_entry, created = SessionEntry.objects.get_or_create(competition_entry=competition_entry, session_round=session_round)
target_allocation, created = TargetAllocation.objects.get_or_create(session_entry=session_entry, boss=target[:-1], target=target[-1])
print target[:-1], target[-1], archer
#clubs = sorted(list(clubs))
#for club in clubs:
# club = club + ' Uni'
# try:
# club = Club.objects.get(short_name__iexact=club)
# print club.pk, club.name
# continue
# except Club.DoesNotExist:
# full_name = club + 'versity Archery Club'
# abbreviation = ''.join([n[0] for n in full_name.split(' ')])
# club = Club(name=full_name, short_name=club, abbreviation=abbreviation)
# club.clean()
# club.save()
# print club.pk, club.name
#
|
Add a csv import script (specific to BUCS)import csv
from django.template.defaultfilters import slugify
from core.models import Bowstyle, Club, Archer
from entries.models import CompetitionEntry, SessionEntry, SessionRound, Competition, TargetAllocation
competition = Competition.objects.get(slug='bucs-indoors-2012')
session_rounds = SessionRound.objects.filter(session__competition=competition)
clubs = set()
total = 0
for session_name, session_round in zip(['A', 'B', 'C'], session_rounds):
file_name = '/home/marc/Desktop/BUSA%s.csv' % session_name
f = open(file_name, 'r')
reader = csv.reader(f)
rows = filter(lambda r: r[1], reader)
rows = [row[:5] for row in rows]
for target, archer, bow, club, exp in rows:
bowstyle = Bowstyle.objects.get(name__iexact=bow)
#if exp[0] not in ['E', 'N']:
# print exp
exp = exp[0]
club = Club.objects.get(short_name__iexact=club.strip() + ' Uni')
# get or create archer
try:
archer = Archer.objects.get(name__iexact=archer, club=club)
except Archer.DoesNotExist:
archer = Archer(name=archer, club=club, novice=exp, age='S', bowstyle=bowstyle, gender='G')
archer.save()
# get or create competition entry
competition_entry, created = CompetitionEntry.objects.get_or_create(competition=competition, archer=archer, bowstyle=bowstyle, novice=exp, age='S', club=club)
session_entry, created = SessionEntry.objects.get_or_create(competition_entry=competition_entry, session_round=session_round)
target_allocation, created = TargetAllocation.objects.get_or_create(session_entry=session_entry, boss=target[:-1], target=target[-1])
print target[:-1], target[-1], archer
#clubs = sorted(list(clubs))
#for club in clubs:
# club = club + ' Uni'
# try:
# club = Club.objects.get(short_name__iexact=club)
# print club.pk, club.name
# continue
# except Club.DoesNotExist:
# full_name = club + 'versity Archery Club'
# abbreviation = ''.join([n[0] for n in full_name.split(' ')])
# club = Club(name=full_name, short_name=club, abbreviation=abbreviation)
# club.clean()
# club.save()
# print club.pk, club.name
#
|
<commit_before><commit_msg>Add a csv import script (specific to BUCS)<commit_after>import csv
from django.template.defaultfilters import slugify
from core.models import Bowstyle, Club, Archer
from entries.models import CompetitionEntry, SessionEntry, SessionRound, Competition, TargetAllocation
competition = Competition.objects.get(slug='bucs-indoors-2012')
session_rounds = SessionRound.objects.filter(session__competition=competition)
clubs = set()
total = 0
for session_name, session_round in zip(['A', 'B', 'C'], session_rounds):
file_name = '/home/marc/Desktop/BUSA%s.csv' % session_name
f = open(file_name, 'r')
reader = csv.reader(f)
rows = filter(lambda r: r[1], reader)
rows = [row[:5] for row in rows]
for target, archer, bow, club, exp in rows:
bowstyle = Bowstyle.objects.get(name__iexact=bow)
#if exp[0] not in ['E', 'N']:
# print exp
exp = exp[0]
club = Club.objects.get(short_name__iexact=club.strip() + ' Uni')
# get or create archer
try:
archer = Archer.objects.get(name__iexact=archer, club=club)
except Archer.DoesNotExist:
archer = Archer(name=archer, club=club, novice=exp, age='S', bowstyle=bowstyle, gender='G')
archer.save()
# get or create competition entry
competition_entry, created = CompetitionEntry.objects.get_or_create(competition=competition, archer=archer, bowstyle=bowstyle, novice=exp, age='S', club=club)
session_entry, created = SessionEntry.objects.get_or_create(competition_entry=competition_entry, session_round=session_round)
target_allocation, created = TargetAllocation.objects.get_or_create(session_entry=session_entry, boss=target[:-1], target=target[-1])
print target[:-1], target[-1], archer
#clubs = sorted(list(clubs))
#for club in clubs:
# club = club + ' Uni'
# try:
# club = Club.objects.get(short_name__iexact=club)
# print club.pk, club.name
# continue
# except Club.DoesNotExist:
# full_name = club + 'versity Archery Club'
# abbreviation = ''.join([n[0] for n in full_name.split(' ')])
# club = Club(name=full_name, short_name=club, abbreviation=abbreviation)
# club.clean()
# club.save()
# print club.pk, club.name
#
|
|
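A hedged, stand-alone sketch of the row handling above with the Django ORM calls stripped out. The column layout (target, archer, bow, club, experience) is read off the script; the sample rows are invented for illustration.
import csv
import io
sample = io.StringIO(
    "1A,Marc Tamlyn,Recurve,Oxford,Experienced\n"
    ",,,,\n"  # blank row, dropped by the same r[1] filter the script uses
    "1B,Jo Bloggs,Compound,Cambridge,Novice\n"
)
rows = [row[:5] for row in csv.reader(sample) if row[1]]
for target, archer, bow, club, exp in rows:
    exp = exp[0]                            # 'E' or 'N'
    boss, detail = target[:-1], target[-1]  # '1A' -> ('1', 'A')
    print(boss, detail, archer, bow, club.strip() + ' Uni', exp)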
704d92ae4a371681254704757a01ab3c57b6b92a
|
oscar/templatetags/currency_filters.py
|
oscar/templatetags/currency_filters.py
|
import locale
from django import template
from django.conf import settings
register = template.Library()
@register.filter(name='currency')
def currency(value):
"""
Return value converted to a locale currency
"""
try:
locale.setlocale(locale.LC_ALL, settings.LOCALE)
except AttributeError:
locale.setlocale(locale.LC_ALL, '')
# We allow the currency symbol to be overridden
symbol = getattr(settings, 'CURRENCY_SYMBOL', None)
try:
if symbol:
return u"%s%s" % (symbol, locale.format("%.2f",
value, grouping=True))
else:
c = locale.currency(value, symbol=True, grouping=True)
return unicode(c, 'utf8')
except TypeError:
return ''
|
import locale
from django import template
from django.conf import settings
register = template.Library()
@register.filter(name='currency')
def currency(value):
"""
Format decimal value as currency
"""
try:
locale.setlocale(locale.LC_ALL, settings.LOCALE)
except AttributeError:
locale.setlocale(locale.LC_ALL, '')
# We allow the currency symbol to be overridden as the version in system
    # locales is often not the desired one.
try:
symbol = getattr(settings, 'CURRENCY_SYMBOL', None)
if symbol:
# A custom currency symbol is specified. Check to see if a
# custom format is specified too - this allows the position of the
# currency symbol to be controlled.
format = getattr(
settings, 'CURRENCY_FORMAT', u"%(symbol)s%(value)s")
return format % {
'symbol': symbol,
'value': locale.format("%.2f", value, grouping=True)}
else:
# Use locale's currency format
c = locale.currency(value, symbol=True, grouping=True)
return unicode(c, 'utf8')
except TypeError:
return ''
|
Enhance currency filter to allow format string to be specified
|
Enhance currency filter to allow format string to be specified
This allows the positioning of the currency symbol to be controlled.
Fixes #311
|
Python
|
bsd-3-clause
|
nfletton/django-oscar,Idematica/django-oscar,manevant/django-oscar,nickpack/django-oscar,WillisXChen/django-oscar,ahmetdaglarbas/e-commerce,jinnykoo/christmas,bnprk/django-oscar,nfletton/django-oscar,jinnykoo/wuyisj.com,WadeYuChen/django-oscar,anentropic/django-oscar,john-parton/django-oscar,nfletton/django-oscar,jmt4/django-oscar,kapt/django-oscar,mexeniz/django-oscar,eddiep1101/django-oscar,WillisXChen/django-oscar,josesanch/django-oscar,manevant/django-oscar,john-parton/django-oscar,rocopartners/django-oscar,anentropic/django-oscar,WadeYuChen/django-oscar,faratro/django-oscar,pasqualguerrero/django-oscar,faratro/django-oscar,binarydud/django-oscar,faratro/django-oscar,QLGu/django-oscar,okfish/django-oscar,anentropic/django-oscar,WillisXChen/django-oscar,QLGu/django-oscar,michaelkuty/django-oscar,jlmadurga/django-oscar,vovanbo/django-oscar,jinnykoo/wuyisj.com,amirrpp/django-oscar,bnprk/django-oscar,john-parton/django-oscar,amirrpp/django-oscar,sonofatailor/django-oscar,amirrpp/django-oscar,thechampanurag/django-oscar,marcoantoniooliveira/labweb,dongguangming/django-oscar,lijoantony/django-oscar,itbabu/django-oscar,MatthewWilkes/django-oscar,vovanbo/django-oscar,okfish/django-oscar,monikasulik/django-oscar,Bogh/django-oscar,MatthewWilkes/django-oscar,saadatqadri/django-oscar,sonofatailor/django-oscar,anentropic/django-oscar,WillisXChen/django-oscar,pdonadeo/django-oscar,pasqualguerrero/django-oscar,MatthewWilkes/django-oscar,solarissmoke/django-oscar,lijoantony/django-oscar,elliotthill/django-oscar,django-oscar/django-oscar,kapari/django-oscar,spartonia/django-oscar,amirrpp/django-oscar
|
import locale
from django import template
from django.conf import settings
register = template.Library()
@register.filter(name='currency')
def currency(value):
"""
Return value converted to a locale currency
"""
try:
locale.setlocale(locale.LC_ALL, settings.LOCALE)
except AttributeError:
locale.setlocale(locale.LC_ALL, '')
# We allow the currency symbol to be overridden
symbol = getattr(settings, 'CURRENCY_SYMBOL', None)
try:
if symbol:
return u"%s%s" % (symbol, locale.format("%.2f",
value, grouping=True))
else:
c = locale.currency(value, symbol=True, grouping=True)
return unicode(c, 'utf8')
except TypeError:
return ''
Enhance currency filter to allow format string to be specified
This allows the positioning of the currency symbol to be controlled.
Fixes #311
|
import locale
from django import template
from django.conf import settings
register = template.Library()
@register.filter(name='currency')
def currency(value):
"""
Format decimal value as currency
"""
try:
locale.setlocale(locale.LC_ALL, settings.LOCALE)
except AttributeError:
locale.setlocale(locale.LC_ALL, '')
# We allow the currency symbol to be overridden as the version in system
    # locales is often not the desired one.
try:
symbol = getattr(settings, 'CURRENCY_SYMBOL', None)
if symbol:
# A custom currency symbol is specified. Check to see if a
# custom format is specified too - this allows the position of the
# currency symbol to be controlled.
format = getattr(
settings, 'CURRENCY_FORMAT', u"%(symbol)s%(value)s")
return format % {
'symbol': symbol,
'value': locale.format("%.2f", value, grouping=True)}
else:
# Use locale's currency format
c = locale.currency(value, symbol=True, grouping=True)
return unicode(c, 'utf8')
except TypeError:
return ''
|
<commit_before>import locale
from django import template
from django.conf import settings
register = template.Library()
@register.filter(name='currency')
def currency(value):
"""
Return value converted to a locale currency
"""
try:
locale.setlocale(locale.LC_ALL, settings.LOCALE)
except AttributeError:
locale.setlocale(locale.LC_ALL, '')
# We allow the currency symbol to be overridden
symbol = getattr(settings, 'CURRENCY_SYMBOL', None)
try:
if symbol:
return u"%s%s" % (symbol, locale.format("%.2f",
value, grouping=True))
else:
c = locale.currency(value, symbol=True, grouping=True)
return unicode(c, 'utf8')
except TypeError:
return ''
<commit_msg>Enhance currency filter to allow format string to be specified
This allows the positioning of the currency symbol to be controlled.
Fixes #311<commit_after>
|
import locale
from django import template
from django.conf import settings
register = template.Library()
@register.filter(name='currency')
def currency(value):
"""
Format decimal value as currency
"""
try:
locale.setlocale(locale.LC_ALL, settings.LOCALE)
except AttributeError:
locale.setlocale(locale.LC_ALL, '')
# We allow the currency symbol to be overridden as the version in system
    # locales is often not the desired one.
try:
symbol = getattr(settings, 'CURRENCY_SYMBOL', None)
if symbol:
# A custom currency symbol is specified. Check to see if a
# custom format is specified too - this allows the position of the
# currency symbol to be controlled.
format = getattr(
settings, 'CURRENCY_FORMAT', u"%(symbol)s%(value)s")
return format % {
'symbol': symbol,
'value': locale.format("%.2f", value, grouping=True)}
else:
# Use locale's currency format
c = locale.currency(value, symbol=True, grouping=True)
return unicode(c, 'utf8')
except TypeError:
return ''
|
import locale
from django import template
from django.conf import settings
register = template.Library()
@register.filter(name='currency')
def currency(value):
"""
Return value converted to a locale currency
"""
try:
locale.setlocale(locale.LC_ALL, settings.LOCALE)
except AttributeError:
locale.setlocale(locale.LC_ALL, '')
# We allow the currency symbol to be overridden
symbol = getattr(settings, 'CURRENCY_SYMBOL', None)
try:
if symbol:
return u"%s%s" % (symbol, locale.format("%.2f",
value, grouping=True))
else:
c = locale.currency(value, symbol=True, grouping=True)
return unicode(c, 'utf8')
except TypeError:
return ''
Enhance currency filter to allow format string to be specified
This allows the positioning of the currency symbol to be controlled.
Fixes #311import locale
from django import template
from django.conf import settings
register = template.Library()
@register.filter(name='currency')
def currency(value):
"""
Format decimal value as currency
"""
try:
locale.setlocale(locale.LC_ALL, settings.LOCALE)
except AttributeError:
locale.setlocale(locale.LC_ALL, '')
# We allow the currency symbol to be overridden as the version in system
    # locales is often not the desired one.
try:
symbol = getattr(settings, 'CURRENCY_SYMBOL', None)
if symbol:
# A custom currency symbol is specified. Check to see if a
# custom format is specified too - this allows the position of the
# currency symbol to be controlled.
format = getattr(
settings, 'CURRENCY_FORMAT', u"%(symbol)s%(value)s")
return format % {
'symbol': symbol,
'value': locale.format("%.2f", value, grouping=True)}
else:
# Use locale's currency format
c = locale.currency(value, symbol=True, grouping=True)
return unicode(c, 'utf8')
except TypeError:
return ''
|
<commit_before>import locale
from django import template
from django.conf import settings
register = template.Library()
@register.filter(name='currency')
def currency(value):
"""
Return value converted to a locale currency
"""
try:
locale.setlocale(locale.LC_ALL, settings.LOCALE)
except AttributeError:
locale.setlocale(locale.LC_ALL, '')
# We allow the currency symbol to be overridden
symbol = getattr(settings, 'CURRENCY_SYMBOL', None)
try:
if symbol:
return u"%s%s" % (symbol, locale.format("%.2f",
value, grouping=True))
else:
c = locale.currency(value, symbol=True, grouping=True)
return unicode(c, 'utf8')
except TypeError:
return ''
<commit_msg>Enhance currency filter to allow format string to be specified
This allows the positioning of the currency symbol to be controlled.
Fixes #311<commit_after>import locale
from django import template
from django.conf import settings
register = template.Library()
@register.filter(name='currency')
def currency(value):
"""
Format decimal value as currency
"""
try:
locale.setlocale(locale.LC_ALL, settings.LOCALE)
except AttributeError:
locale.setlocale(locale.LC_ALL, '')
# We allow the currency symbol to be overridden as the version in system
    # locales is often not the desired one.
try:
symbol = getattr(settings, 'CURRENCY_SYMBOL', None)
if symbol:
# A custom currency symbol is specified. Check to see if a
# custom format is specified too - this allows the position of the
# currency symbol to be controlled.
format = getattr(
settings, 'CURRENCY_FORMAT', u"%(symbol)s%(value)s")
return format % {
'symbol': symbol,
'value': locale.format("%.2f", value, grouping=True)}
else:
# Use locale's currency format
c = locale.currency(value, symbol=True, grouping=True)
return unicode(c, 'utf8')
except TypeError:
return ''
|
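A hedged sketch of the new CURRENCY_FORMAT hook, with Django settings replaced by a plain dict so it runs stand-alone. The 'symbol' and 'value' keys come straight from the filter above; the euro symbol and trailing-symbol format are example values, not project defaults.
import locale
locale.setlocale(locale.LC_ALL, '')
settings = {
    'CURRENCY_SYMBOL': u'\u20ac',                # euro sign
    'CURRENCY_FORMAT': u'%(value)s %(symbol)s',  # symbol after the amount
}
formatted = settings['CURRENCY_FORMAT'] % {
    'symbol': settings['CURRENCY_SYMBOL'],
    'value': locale.format_string('%.2f', 1234.5, grouping=True),
}
print(formatted)  # e.g. '1,234.50 EUR-sign', depending on the active locale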
4395f0b6e708e7a18f1d25b0d897782e2df86d1c
|
migrations/versions/2439eea5b23c_.py
|
migrations/versions/2439eea5b23c_.py
|
"""empty message
Revision ID: 2439eea5b23c
Revises: 577ad345788e
Create Date: 2015-11-23 14:04:45.572638
"""
# revision identifiers, used by Alembic.
revision = '2439eea5b23c'
down_revision = '577ad345788e'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('questionnaire_responses',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('document', postgresql.JSONB(), nullable=True),
sa.Column('status', postgresql.ENUM('in-progress', 'completed', name='questionnaire_response_statuses'), nullable=True),
sa.Column('authored', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('questionnaire_responses')
### end Alembic commands ###
|
Add database migration for QuestionnaireResponse model
|
Add database migration for QuestionnaireResponse model
|
Python
|
bsd-3-clause
|
uwcirg/true_nth_usa_portal,uwcirg/true_nth_usa_portal,uwcirg/true_nth_usa_portal,uwcirg/true_nth_usa_portal
|
Add database migration for QuestionnaireResponse model
|
"""empty message
Revision ID: 2439eea5b23c
Revises: 577ad345788e
Create Date: 2015-11-23 14:04:45.572638
"""
# revision identifiers, used by Alembic.
revision = '2439eea5b23c'
down_revision = '577ad345788e'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('questionnaire_responses',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('document', postgresql.JSONB(), nullable=True),
sa.Column('status', postgresql.ENUM('in-progress', 'completed', name='questionnaire_response_statuses'), nullable=True),
sa.Column('authored', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('questionnaire_responses')
### end Alembic commands ###
|
<commit_before><commit_msg>Add database migration for QuestionnaireResponse model<commit_after>
|
"""empty message
Revision ID: 2439eea5b23c
Revises: 577ad345788e
Create Date: 2015-11-23 14:04:45.572638
"""
# revision identifiers, used by Alembic.
revision = '2439eea5b23c'
down_revision = '577ad345788e'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('questionnaire_responses',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('document', postgresql.JSONB(), nullable=True),
sa.Column('status', postgresql.ENUM('in-progress', 'completed', name='questionnaire_response_statuses'), nullable=True),
sa.Column('authored', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('questionnaire_responses')
### end Alembic commands ###
|
Add database migration for QuestionnaireResponse model"""empty message
Revision ID: 2439eea5b23c
Revises: 577ad345788e
Create Date: 2015-11-23 14:04:45.572638
"""
# revision identifiers, used by Alembic.
revision = '2439eea5b23c'
down_revision = '577ad345788e'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('questionnaire_responses',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('document', postgresql.JSONB(), nullable=True),
sa.Column('status', postgresql.ENUM('in-progress', 'completed', name='questionnaire_response_statuses'), nullable=True),
sa.Column('authored', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('questionnaire_responses')
### end Alembic commands ###
|
<commit_before><commit_msg>Add database migration for QuestionnaireResponse model<commit_after>"""empty message
Revision ID: 2439eea5b23c
Revises: 577ad345788e
Create Date: 2015-11-23 14:04:45.572638
"""
# revision identifiers, used by Alembic.
revision = '2439eea5b23c'
down_revision = '577ad345788e'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('questionnaire_responses',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('document', postgresql.JSONB(), nullable=True),
sa.Column('status', postgresql.ENUM('in-progress', 'completed', name='questionnaire_response_statuses'), nullable=True),
sa.Column('authored', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('questionnaire_responses')
### end Alembic commands ###
|
|
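For reference, a hedged sketch of the SQLAlchemy model that would line up with the op.create_table() call above. Column names and types are read off the migration; the class name is an assumption, and the users table is assumed to be defined elsewhere in the schema.
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from sqlalchemy.orm import declarative_base
Base = declarative_base()
class QuestionnaireResponse(Base):  # assumed class name
    __tablename__ = 'questionnaire_responses'
    id = sa.Column(sa.Integer, primary_key=True)
    user_id = sa.Column(sa.Integer, sa.ForeignKey('users.id'))
    document = sa.Column(postgresql.JSONB)
    status = sa.Column(postgresql.ENUM('in-progress', 'completed',
                                       name='questionnaire_response_statuses'))
    authored = sa.Column(sa.DateTime)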
b7f3b2411085bf507d52888bc7f99f07c170a462
|
core/migrations/0048_helplink.py
|
core/migrations/0048_helplink.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0047_add_email_template'),
]
operations = [
migrations.CreateModel(
name='HelpLink',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('link_key', models.CharField(max_length=256)),
('topic', models.CharField(max_length=256)),
('context', models.TextField(default=b'', null=True, blank=True)),
('href', models.TextField()),
('created_date', models.DateTimeField(auto_now_add=True)),
('modified_date', models.DateTimeField(auto_now=True)),
],
),
]
|
Add 0048 - HelpLink migration
|
Add 0048 - HelpLink migration
|
Python
|
apache-2.0
|
CCI-MOC/GUI-Backend,CCI-MOC/GUI-Backend,CCI-MOC/GUI-Backend,CCI-MOC/GUI-Backend
|
Add 0048 - HelpLink migration
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0047_add_email_template'),
]
operations = [
migrations.CreateModel(
name='HelpLink',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('link_key', models.CharField(max_length=256)),
('topic', models.CharField(max_length=256)),
('context', models.TextField(default=b'', null=True, blank=True)),
('href', models.TextField()),
('created_date', models.DateTimeField(auto_now_add=True)),
('modified_date', models.DateTimeField(auto_now=True)),
],
),
]
|
<commit_before><commit_msg>Add 0048 - HelpLink migration<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0047_add_email_template'),
]
operations = [
migrations.CreateModel(
name='HelpLink',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('link_key', models.CharField(max_length=256)),
('topic', models.CharField(max_length=256)),
('context', models.TextField(default=b'', null=True, blank=True)),
('href', models.TextField()),
('created_date', models.DateTimeField(auto_now_add=True)),
('modified_date', models.DateTimeField(auto_now=True)),
],
),
]
|
Add 0048 - HelpLink migration# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0047_add_email_template'),
]
operations = [
migrations.CreateModel(
name='HelpLink',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('link_key', models.CharField(max_length=256)),
('topic', models.CharField(max_length=256)),
('context', models.TextField(default=b'', null=True, blank=True)),
('href', models.TextField()),
('created_date', models.DateTimeField(auto_now_add=True)),
('modified_date', models.DateTimeField(auto_now=True)),
],
),
]
|
<commit_before><commit_msg>Add 0048 - HelpLink migration<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0047_add_email_template'),
]
operations = [
migrations.CreateModel(
name='HelpLink',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('link_key', models.CharField(max_length=256)),
('topic', models.CharField(max_length=256)),
('context', models.TextField(default=b'', null=True, blank=True)),
('href', models.TextField()),
('created_date', models.DateTimeField(auto_now_add=True)),
('modified_date', models.DateTimeField(auto_now=True)),
],
),
]
|
|
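A hedged sketch of the Django model implied by the CreateModel operation above. Field names and options are read off the migration; the bytes default b'' is written as '' here, and __str__ is an invented convenience. It belongs in an installed app's models.py rather than a stand-alone script.
from django.db import models
class HelpLink(models.Model):
    link_key = models.CharField(max_length=256)
    topic = models.CharField(max_length=256)
    context = models.TextField(default='', null=True, blank=True)
    href = models.TextField()
    created_date = models.DateTimeField(auto_now_add=True)
    modified_date = models.DateTimeField(auto_now=True)
    def __str__(self):
        return '%s -> %s' % (self.link_key, self.href)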
3fedc491c93f96759ee18fc8a6b212b3f788a307
|
python/gaming-laptop-battery-life.py
|
python/gaming-laptop-battery-life.py
|
# Problem Solving (Basic) Skill Verification Test
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'getBattery' function below.
#
# The function is expected to return an INTEGER.
# The function accepts INTEGER_ARRAY events as parameter.
#
def getBattery(events):
battery_level = 50
while events != []:
battery_level = perform_event(events.pop(0), battery_level)
return battery_level
def perform_event(event, battery_level):
potential_battery = battery_level + event
if potential_battery >= 100:
return 100
elif potential_battery <= 0:
return 0
else:
return potential_battery
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
events_count = int(input().strip())
events = []
for _ in range(events_count):
events_item = int(input().strip())
events.append(events_item)
result = getBattery(events)
fptr.write(str(result) + '\n')
fptr.close()
|
Solve gaming laptop battery life
|
Solve gaming laptop battery life
|
Python
|
mit
|
rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank
|
Solve gaming laptop battery life
|
# Problem Solving (Basic) Skill Verification Test
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'getBattery' function below.
#
# The function is expected to return an INTEGER.
# The function accepts INTEGER_ARRAY events as parameter.
#
def getBattery(events):
battery_level = 50
while events != []:
battery_level = perform_event(events.pop(0), battery_level)
return battery_level
def perform_event(event, battery_level):
potential_battery = battery_level + event
if potential_battery >= 100:
return 100
elif potential_battery <= 0:
return 0
else:
return potential_battery
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
events_count = int(input().strip())
events = []
for _ in range(events_count):
events_item = int(input().strip())
events.append(events_item)
result = getBattery(events)
fptr.write(str(result) + '\n')
fptr.close()
|
<commit_before><commit_msg>Solve gaming laptop battery life<commit_after>
|
# Problem Solving (Basic) Skill Verification Test
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'getBattery' function below.
#
# The function is expected to return an INTEGER.
# The function accepts INTEGER_ARRAY events as parameter.
#
def getBattery(events):
battery_level = 50
while events != []:
battery_level = perform_event(events.pop(0), battery_level)
return battery_level
def perform_event(event, battery_level):
potential_battery = battery_level + event
if potential_battery >= 100:
return 100
elif potential_battery <= 0:
return 0
else:
return potential_battery
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
events_count = int(input().strip())
events = []
for _ in range(events_count):
events_item = int(input().strip())
events.append(events_item)
result = getBattery(events)
fptr.write(str(result) + '\n')
fptr.close()
|
Solve gaming laptop battery life# Problem Solving (Basic) Skill Verification Test
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'getBattery' function below.
#
# The function is expected to return an INTEGER.
# The function accepts INTEGER_ARRAY events as parameter.
#
def getBattery(events):
battery_level = 50
while events != []:
battery_level = perform_event(events.pop(0), battery_level)
return battery_level
def perform_event(event, battery_level):
potential_battery = battery_level + event
if potential_battery >= 100:
return 100
elif potential_battery <= 0:
return 0
else:
return potential_battery
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
events_count = int(input().strip())
events = []
for _ in range(events_count):
events_item = int(input().strip())
events.append(events_item)
result = getBattery(events)
fptr.write(str(result) + '\n')
fptr.close()
|
<commit_before><commit_msg>Solve gaming laptop battery life<commit_after># Problem Solving (Basic) Skill Verification Test
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'getBattery' function below.
#
# The function is expected to return an INTEGER.
# The function accepts INTEGER_ARRAY events as parameter.
#
def getBattery(events):
battery_level = 50
while events != []:
battery_level = perform_event(events.pop(0), battery_level)
return battery_level
def perform_event(event, battery_level):
potential_battery = battery_level + event
if potential_battery >= 100:
return 100
elif potential_battery <= 0:
return 0
else:
return potential_battery
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
events_count = int(input().strip())
events = []
for _ in range(events_count):
events_item = int(input().strip())
events.append(events_item)
result = getBattery(events)
fptr.write(str(result) + '\n')
fptr.close()
|
|
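A quick, hedged check of the clamping logic above, runnable without the HackerRank I/O harness. get_battery folds getBattery and perform_event into one loop; the event sequences are invented.
def get_battery(events, level=50):
    for event in events:
        level = min(100, max(0, level + event))  # same clamp as perform_event
    return level
assert get_battery([25, -30, 70, 5]) == 100  # charge caps at 100
assert get_battery([-60, -10]) == 0          # drain floors at 0
print(get_battery([10, -80, 30]))            # 30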
7b2386f844abf8b2dcad6b540e88e065a5c091c8
|
scripts/consistency/fix_is_claimed.py
|
scripts/consistency/fix_is_claimed.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to migrate nodes with invalid categories."""
import sys
import logging
from modularodm import Q
from website.app import init_app
from website.models import User
from scripts import utils as script_utils
logger = logging.getLogger('fix_is_claimed')
def main(dry=True):
init_app(set_backends=True, routes=False)
count = 0
for user in User.find(Q('is_claimed', 'eq', None)):
is_claimed = bool(user.date_confirmed)
logger.info('User {}: setting is_claimed to {}'.format(user._id, is_claimed))
user.is_claimed = is_claimed
count += 1
if not dry:
user.save()
logger.info('Migrated {} users.'.format(count))
if __name__ == '__main__':
dry = 'dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
main(dry=dry)
|
Add migration script to migrate users whose is_claimed field is None
|
Add migration script to migrate users whose is_claimed field is None
h/t @icereval
[skip ci]
|
Python
|
apache-2.0
|
cwisecarver/osf.io,RomanZWang/osf.io,kch8qx/osf.io,samchrisinger/osf.io,DanielSBrown/osf.io,billyhunt/osf.io,samchrisinger/osf.io,acshi/osf.io,felliott/osf.io,Nesiehr/osf.io,brianjgeiger/osf.io,CenterForOpenScience/osf.io,njantrania/osf.io,fabianvf/osf.io,cldershem/osf.io,crcresearch/osf.io,samanehsan/osf.io,lyndsysimon/osf.io,aaxelb/osf.io,brianjgeiger/osf.io,monikagrabowska/osf.io,lyndsysimon/osf.io,doublebits/osf.io,DanielSBrown/osf.io,binoculars/osf.io,rdhyee/osf.io,mfraezz/osf.io,icereval/osf.io,sbt9uc/osf.io,samanehsan/osf.io,doublebits/osf.io,njantrania/osf.io,alexschiller/osf.io,Ghalko/osf.io,Ghalko/osf.io,emetsger/osf.io,arpitar/osf.io,Nesiehr/osf.io,Johnetordoff/osf.io,sloria/osf.io,cslzchen/osf.io,zachjanicki/osf.io,mfraezz/osf.io,mluo613/osf.io,jnayak1/osf.io,asanfilippo7/osf.io,CenterForOpenScience/osf.io,TomBaxter/osf.io,cldershem/osf.io,kwierman/osf.io,adlius/osf.io,fabianvf/osf.io,abought/osf.io,kch8qx/osf.io,cosenal/osf.io,zamattiac/osf.io,ticklemepierce/osf.io,abought/osf.io,SSJohns/osf.io,jolene-esposito/osf.io,doublebits/osf.io,wearpants/osf.io,sbt9uc/osf.io,ticklemepierce/osf.io,adlius/osf.io,baylee-d/osf.io,cwisecarver/osf.io,sbt9uc/osf.io,KAsante95/osf.io,RomanZWang/osf.io,chrisseto/osf.io,caneruguz/osf.io,zamattiac/osf.io,kch8qx/osf.io,kch8qx/osf.io,felliott/osf.io,danielneis/osf.io,wearpants/osf.io,rdhyee/osf.io,ckc6cz/osf.io,arpitar/osf.io,felliott/osf.io,mluo613/osf.io,jeffreyliu3230/osf.io,hmoco/osf.io,billyhunt/osf.io,kwierman/osf.io,kwierman/osf.io,dplorimer/osf,KAsante95/osf.io,chennan47/osf.io,chrisseto/osf.io,laurenrevere/osf.io,haoyuchen1992/osf.io,Ghalko/osf.io,KAsante95/osf.io,caneruguz/osf.io,MerlinZhang/osf.io,SSJohns/osf.io,acshi/osf.io,caseyrygt/osf.io,Johnetordoff/osf.io,jeffreyliu3230/osf.io,GageGaskins/osf.io,kch8qx/osf.io,cslzchen/osf.io,MerlinZhang/osf.io,alexschiller/osf.io,reinaH/osf.io,HalcyonChimera/osf.io,monikagrabowska/osf.io,cldershem/osf.io,mattclark/osf.io,ZobairAlijan/osf.io,DanielSBrown/osf.io,amyshi188/osf.io,sbt9uc/osf.io,chrisseto/osf.io,GageGaskins/osf.io,cwisecarver/osf.io,brandonPurvis/osf.io,caseyrollins/osf.io,caseyrygt/osf.io,dplorimer/osf,sloria/osf.io,ckc6cz/osf.io,kwierman/osf.io,abought/osf.io
|
Add migration script to migrate users whose is_claimed field is None
h/t @icereval
[skip ci]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to migrate nodes with invalid categories."""
import sys
import logging
from modularodm import Q
from website.app import init_app
from website.models import User
from scripts import utils as script_utils
logger = logging.getLogger('fix_is_claimed')
def main(dry=True):
init_app(set_backends=True, routes=False)
count = 0
for user in User.find(Q('is_claimed', 'eq', None)):
is_claimed = bool(user.date_confirmed)
logger.info('User {}: setting is_claimed to {}'.format(user._id, is_claimed))
user.is_claimed = is_claimed
count += 1
if not dry:
user.save()
logger.info('Migrated {} users.'.format(count))
if __name__ == '__main__':
dry = 'dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
main(dry=dry)
|
<commit_before><commit_msg>Add migration script to migrate users whose is_claimed field is None
h/t @icereval
[skip ci]<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to migrate nodes with invalid categories."""
import sys
import logging
from modularodm import Q
from website.app import init_app
from website.models import User
from scripts import utils as script_utils
logger = logging.getLogger('fix_is_claimed')
def main(dry=True):
init_app(set_backends=True, routes=False)
count = 0
for user in User.find(Q('is_claimed', 'eq', None)):
is_claimed = bool(user.date_confirmed)
logger.info('User {}: setting is_claimed to {}'.format(user._id, is_claimed))
user.is_claimed = is_claimed
count += 1
if not dry:
user.save()
logger.info('Migrated {} users.'.format(count))
if __name__ == '__main__':
dry = 'dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
main(dry=dry)
|
Add migration script to migrate users whose is_claimed field is None
h/t @icereval
[skip ci]#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to migrate nodes with invalid categories."""
import sys
import logging
from modularodm import Q
from website.app import init_app
from website.models import User
from scripts import utils as script_utils
logger = logging.getLogger('fix_is_claimed')
def main(dry=True):
init_app(set_backends=True, routes=False)
count = 0
for user in User.find(Q('is_claimed', 'eq', None)):
is_claimed = bool(user.date_confirmed)
logger.info('User {}: setting is_claimed to {}'.format(user._id, is_claimed))
user.is_claimed = is_claimed
count += 1
if not dry:
user.save()
logger.info('Migrated {} users.'.format(count))
if __name__ == '__main__':
dry = 'dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
main(dry=dry)
|
<commit_before><commit_msg>Add migration script to migrate users whose is_claimed field is None
h/t @icereval
[skip ci]<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to migrate nodes with invalid categories."""
import sys
import logging
from modularodm import Q
from website.app import init_app
from website.models import User
from scripts import utils as script_utils
logger = logging.getLogger('fix_is_claimed')
def main(dry=True):
init_app(set_backends=True, routes=False)
count = 0
for user in User.find(Q('is_claimed', 'eq', None)):
is_claimed = bool(user.date_confirmed)
logger.info('User {}: setting is_claimed to {}'.format(user._id, is_claimed))
user.is_claimed = is_claimed
count += 1
if not dry:
user.save()
logger.info('Migrated {} users.'.format(count))
if __name__ == '__main__':
dry = 'dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
main(dry=dry)
|
|
b1482368b19e75f1ac679ba398af31319f8beb6d
|
demos/benchmark/gen_benchmark.py
|
demos/benchmark/gen_benchmark.py
|
#!/usr/bin/env python
#
# A simple benchmark of the tornado.gen module.
# Runs in two modes, testing new-style (@coroutine and Futures)
# and old-style (@engine and Tasks) coroutines.
from timeit import Timer
from tornado import gen
from tornado.options import options, define, parse_command_line
define('num', default=10000, help='number of iterations')
# These benchmarks are delicate. They hit various fast-paths in the gen
# machinery in order to stay synchronous so we don't need an IOLoop.
# This removes noise from the results, but it's easy to change things
# in a way that completely invalidates the results.
@gen.engine
def e2(callback):
callback()
@gen.engine
def e1():
for i in range(10):
yield gen.Task(e2)
@gen.coroutine
def c2():
pass
@gen.coroutine
def c1():
for i in range(10):
yield c2()
def main():
parse_command_line()
t = Timer(e1)
results = t.timeit(options.num) / options.num
print 'engine: %0.3f ms per iteration' % (results * 1000)
t = Timer(c1)
results = t.timeit(options.num) / options.num
print 'coroutine: %0.3f ms per iteration' % (results * 1000)
if __name__ == '__main__':
main()
|
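Note that the print statements in the benchmark are Python 2 syntax. A minimal Python 3 sketch of the same timing loop, reusing the e1/c1 coroutines defined above (an adaptation, not part of the original file):
# Python 3 variant of main(); assumes the same e1/c1 definitions and options.
def main():
    parse_command_line()
    for name, func in (('engine', e1), ('coroutine', c1)):
        per_call = Timer(func).timeit(options.num) / options.num
        print('%s: %0.3f ms per iteration' % (name, per_call * 1000))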
Add a benchmark for coroutine processing.
|
Add a benchmark for coroutine processing.
|
Python
|
apache-2.0
|
ZhuPeng/tornado,gwillem/tornado,wechasing/tornado,andyaguiar/tornado,Polyconseil/tornado,kevinge314gh/tornado,insflow/tornado,ZhuPeng/tornado,icejoywoo/tornado,Polyconseil/tornado,pombredanne/tornado,hzruandd/tornado,ajdavis/tornado,kaushik94/tornado,sunjeammy/tornado,jarrahwu/tornado,kevinge314gh/tornado,leekchan/tornado_test,QuanZag/tornado,ZhuPeng/tornado,ifduyue/tornado,0xkag/tornado,304471720/tornado,ydaniv/tornado,legnaleurc/tornado,drewmiller/tornado,nordaux/tornado,lsanotes/tornado,ZhuPeng/tornado,whip112/tornado,mehmetkose/tornado,futurechallenger/tornado,mlyundin/tornado,Callwoola/tornado,Windsooon/tornado,jsjohnst/tornado,takeshineshiro/tornado,kangbiao/tornado,304471720/tornado,felixonmars/tornado,dsseter/tornado,ColorFuzzy/tornado,0x73/tornado,anandology/tornado,bywbilly/tornado,ListFranz/tornado,ms7s/tornado,dongpinglai/my_tornado,icejoywoo/tornado,sevenguin/tornado,codeb2cc/tornado,dsseter/tornado,codecov/tornado,Acidburn0zzz/tornado,anandology/tornado,felixonmars/tornado,leekchan/tornado_test,Drooids/tornado,arthurdarcet/tornado,VShangxiao/tornado,z-fork/tornado,dongpinglai/my_tornado,NoyaInRain/tornado,futurechallenger/tornado,Batterfii/tornado,VShangxiao/tornado,xinyu7/tornado,anjan-srivastava/tornado,Aaron1992/tornado,ifduyue/tornado,takeshineshiro/tornado,sxfmol/tornado,kaushik94/tornado,mehmetkose/tornado,akalipetis/tornado,elelianghh/tornado,fengshao0907/tornado,Snamint/tornado,Windsooon/tornado,InverseLina/tornado,InverseLina/tornado,Acidburn0zzz/tornado,AlphaStaxLLC/tornado,cyrusin/tornado,jampp/tornado,mr-ping/tornado,Drooids/tornado,kippandrew/tornado,elijah513/tornado,noxiouz/tornado,yuyangit/tornado,Aaron1992/tornado,gitchs/tornado,coderhaoxin/tornado,ifduyue/tornado,whip112/tornado,sunjeammy/tornado,nordaux/tornado,futurechallenger/tornado,hhru/tornado,felixonmars/tornado,lujinda/tornado,ColorFuzzy/tornado,shashankbassi92/tornado,jonashagstedt/tornado,ColorFuzzy/tornado,eklitzke/tornado,SuminAndrew/tornado,jonashagstedt/tornado,wujuguang/tornado,andyaguiar/tornado,eklitzke/tornado,elijah513/tornado,codecov/tornado,ubear/tornado,pombredanne/tornado,nbargnesi/tornado,chenxiaba/tornado,djt5019/tornado,Geoion/tornado,eXcomm/tornado,zhuochenKIDD/tornado,LTD-Beget/tornado,allenl203/tornado,Callwoola/tornado,frtmelody/tornado,jarrahwu/tornado,nbargnesi/tornado,eXcomm/tornado,Polyconseil/tornado,hzruandd/tornado,whip112/tornado,fengsp/tornado,zhuochenKIDD/tornado,BencoLee/tornado,MjAbuz/tornado,mr-ping/tornado,coderhaoxin/tornado,kippandrew/tornado,ymero/tornado,elijah513/tornado,InverseLina/tornado,AlphaStaxLLC/tornado,johan--/tornado,BencoLee/tornado,InverseLina/tornado,hhru/tornado,lujinda/tornado,wsyzxcn/tornado,drewmiller/tornado,andyaguiar/tornado,jehiah/tornado,sxfmol/tornado,jarrahwu/tornado,AlphaStaxLLC/tornado,lujinda/tornado,lilydjwg/tornado,Aaron1992/tornado,obsh/tornado,Lancher/tornado,0xkag/tornado,fengshao0907/tornado,tianyk/tornado-research,frtmelody/tornado,tianyk/tornado-research,Aaron1992/tornado,lilydjwg/tornado,304471720/tornado,importcjj/tornado,wxhzk/tornado-1,codeb2cc/tornado,dsseter/tornado,Acidburn0zzz/tornado,takeshineshiro/tornado,whip112/tornado,zhuochenKIDD/tornado,icejoywoo/tornado,jparise/tornado,nbargnesi/tornado,leekchan/tornado_test,kevinge314gh/tornado,jehiah/tornado,obsh/tornado,yangkf1985/tornado,kevinge314gh/tornado,bdarnell/tornado,ajdavis/tornado,djt5019/tornado,kevinge314gh/tornado,Aaron1992/tornado,dongpinglai/my_tornado,NoyaInRain/tornado,frtmelody/tornado,z-fork/tornado,ListFranz/tornado,jampp/tornado,cyrilMargaria/tornado,Ge
oion/tornado,Lancher/tornado,wechasing/tornado,Fydot/tornado,erichuang1994/tornado,gitchs/tornado,obsh/tornado,sevenguin/tornado,yuezhonghua/tornado,anjan-srivastava/tornado,dsseter/tornado,gitchs/tornado,kippandrew/tornado,Geoion/tornado,Callwoola/tornado,insflow/tornado,johan--/tornado,yuezhonghua/tornado,liqueur/tornado,jehiah/tornado,tornadoweb/tornado,ms7s/tornado,mlyundin/tornado,yuyangit/tornado,coderhaoxin/tornado,ajdavis/tornado,lilydjwg/tornado,nbargnesi/tornado,legnaleurc/tornado,insflow/tornado,futurechallenger/tornado,zhuochenKIDD/tornado,InverseLina/tornado,whip112/tornado,shaohung001/tornado,NoyaInRain/tornado,kippandrew/tornado,gitchs/tornado,xinyu7/tornado,jsjohnst/tornado,anjan-srivastava/tornado,z-fork/tornado,ColorFuzzy/tornado,wechasing/tornado,fengsp/tornado,zguangyu/tornado,cyrusin/tornado,bdarnell/tornado,LTD-Beget/tornado,Drooids/tornado,futurechallenger/tornado,fengsp/tornado,liqueur/tornado,jsjohnst/tornado,elijah513/tornado,erichuang1994/tornado,yuezhonghua/tornado,wsyzxcn/tornado,noxiouz/tornado,yuezhonghua/tornado,jehiah/tornado,codecov/tornado,ymero/tornado,hzruandd/tornado,anjan-srivastava/tornado,lujinda/tornado,kangbiao/tornado,felixonmars/tornado,pombredanne/tornado,hzruandd/tornado,QuanZag/tornado,Windsooon/tornado,Polyconseil/tornado,lsanotes/tornado,tornadoweb/tornado,Drooids/tornado,ubear/tornado,ubear/tornado,elijah513/tornado,arthurdarcet/tornado,jehiah/tornado,Acidburn0zzz/tornado,0x73/tornado,allenl203/tornado,bdarnell/tornado,wsyzxcn/tornado,ymero/tornado,pombredanne/tornado,erichuang1994/tornado,bdarnell/tornado,ListFranz/tornado,mlyundin/tornado,Polyconseil/tornado,xinyu7/tornado,lsanotes/tornado,kippandrew/tornado,andyaguiar/tornado,yuezhonghua/tornado,sevenguin/tornado,kangbiao/tornado,akalipetis/tornado,lsanotes/tornado,frtmelody/tornado,ms7s/tornado,bywbilly/tornado,nbargnesi/tornado,hzruandd/tornado,bdarnell/tornado,obsh/tornado,zguangyu/tornado,Fydot/tornado,cyrusin/tornado,dsseter/tornado,chenxiaba/tornado,SuminAndrew/tornado,gwillem/tornado,lujinda/tornado,wxhzk/tornado-1,anandology/tornado,Geoion/tornado,cyrusin/tornado,QuanZag/tornado,mlyundin/tornado,ymero/tornado,Lancher/tornado,arthurdarcet/tornado,mr-ping/tornado,hzruandd/tornado,xinyu7/tornado,Windsooon/tornado,cyrusin/tornado,jparise/tornado,VShangxiao/tornado,kangbiao/tornado,erichuang1994/tornado,Fydot/tornado,bywbilly/tornado,sxfmol/tornado,tianyk/tornado-research,yangkf1985/tornado,mehmetkose/tornado,ovidiucp/tornado,erichuang1994/tornado,yangkf1985/tornado,obsh/tornado,Lancher/tornado,ovidiucp/tornado,Geoion/tornado,kevinge314gh/tornado,ydaniv/tornado,pombredanne/tornado,cyrilMargaria/tornado,shaohung001/tornado,akalipetis/tornado,elelianghh/tornado,hhru/tornado,Acidburn0zzz/tornado,ubear/tornado,eklitzke/tornado,importcjj/tornado,0xkag/tornado,sunjeammy/tornado,ColorFuzzy/tornado,elelianghh/tornado,johan--/tornado,yuyangit/tornado,coderhaoxin/tornado,Windsooon/tornado,wsyzxcn/tornado,allenl203/tornado,importcjj/tornado,shashankbassi92/tornado,ListFranz/tornado,yangkf1985/tornado,Batterfii/tornado,whip112/tornado,fengshao0907/tornado,0xkag/tornado,wujuguang/tornado,futurechallenger/tornado,jsjohnst/tornado,wxhzk/tornado-1,dongpinglai/my_tornado,Acidburn0zzz/tornado,AlphaStaxLLC/tornado,jparise/tornado,coderhaoxin/tornado,felixonmars/tornado,jarrahwu/tornado,zguangyu/tornado,ms7s/tornado,wujuguang/tornado,wsyzxcn/tornado,NoyaInRain/tornado,ydaniv/tornado,ColorFuzzy/tornado,mivade/tornado,obsh/tornado,Snamint/tornado,fengsp/tornado,LTD-Beget/tornado,mlyundin/tornado,jarrahwu/t
ornado,dongpinglai/my_tornado,anjan-srivastava/tornado,drewmiller/tornado,Batterfii/tornado,Snamint/tornado,nordaux/tornado,gwillem/tornado,MjAbuz/tornado,0x73/tornado,mr-ping/tornado,fengshao0907/tornado,QuanZag/tornado,legnaleurc/tornado,noxiouz/tornado,jsjohnst/tornado,304471720/tornado,mlyundin/tornado,eklitzke/tornado,eXcomm/tornado,insflow/tornado,BencoLee/tornado,elijah513/tornado,nordaux/tornado,jarrahwu/tornado,shaohung001/tornado,sevenguin/tornado,elelianghh/tornado,lujinda/tornado,kangbiao/tornado,dongpinglai/my_tornado,mr-ping/tornado,yangkf1985/tornado,codeb2cc/tornado,ms7s/tornado,fengsp/tornado,MjAbuz/tornado,z-fork/tornado,kaushik94/tornado,djt5019/tornado,xinyu7/tornado,andyaguiar/tornado,tianyk/tornado-research,ms7s/tornado,arthurdarcet/tornado,drewmiller/tornado,SuminAndrew/tornado,wsyzxcn/tornado,eXcomm/tornado,shashankbassi92/tornado,mivade/tornado,dsseter/tornado,jonashagstedt/tornado,304471720/tornado,importcjj/tornado,leekchan/tornado_test,zguangyu/tornado,ovidiucp/tornado,jparise/tornado,lilydjwg/tornado,ajdavis/tornado,ovidiucp/tornado,lsanotes/tornado,LTD-Beget/tornado,NoyaInRain/tornado,ajdavis/tornado,MjAbuz/tornado,VShangxiao/tornado,Callwoola/tornado,fengshao0907/tornado,yuyangit/tornado,codeb2cc/tornado,InverseLina/tornado,noxiouz/tornado,chenxiaba/tornado,takeshineshiro/tornado,elelianghh/tornado,wxhzk/tornado-1,johan--/tornado,eXcomm/tornado,Snamint/tornado,0xkag/tornado,mr-ping/tornado,ZhuPeng/tornado,MjAbuz/tornado,noxiouz/tornado,importcjj/tornado,LTD-Beget/tornado,noxiouz/tornado,tornadoweb/tornado,anjan-srivastava/tornado,zhuochenKIDD/tornado,sunjeammy/tornado,chenxiaba/tornado,cyrilMargaria/tornado,codeb2cc/tornado,sxfmol/tornado,erichuang1994/tornado,codeb2cc/tornado,sevenguin/tornado,Fydot/tornado,leekchan/tornado_test,eklitzke/tornado,Polyconseil/tornado,wujuguang/tornado,liqueur/tornado,akalipetis/tornado,ovidiucp/tornado,liqueur/tornado,johan--/tornado,jparise/tornado,wujuguang/tornado,sxfmol/tornado,elelianghh/tornado,wsyzxcn/tornado,legnaleurc/tornado,z-fork/tornado,kangbiao/tornado,jampp/tornado,insflow/tornado,LTD-Beget/tornado,fengshao0907/tornado,tianyk/tornado-research,shaohung001/tornado,ZhuPeng/tornado,allenl203/tornado,zguangyu/tornado,mehmetkose/tornado,allenl203/tornado,gwillem/tornado,yangkf1985/tornado,nordaux/tornado,hhru/tornado,frtmelody/tornado,shaohung001/tornado,ifduyue/tornado,MjAbuz/tornado,QuanZag/tornado,anandology/tornado,kaushik94/tornado,shashankbassi92/tornado,kaushik94/tornado,icejoywoo/tornado,djt5019/tornado,sxfmol/tornado,VShangxiao/tornado,gitchs/tornado,SuminAndrew/tornado,Batterfii/tornado,johan--/tornado,Drooids/tornado,wechasing/tornado,shashankbassi92/tornado,wxhzk/tornado-1,Lancher/tornado,mivade/tornado,hhru/tornado,ymero/tornado,eXcomm/tornado,wxhzk/tornado-1,drewmiller/tornado,AlphaStaxLLC/tornado,takeshineshiro/tornado,Snamint/tornado,ydaniv/tornado,zhuochenKIDD/tornado,liqueur/tornado,SuminAndrew/tornado,cyrilMargaria/tornado,icejoywoo/tornado,QuanZag/tornado,Batterfii/tornado,codecov/tornado,BencoLee/tornado,kippandrew/tornado,icejoywoo/tornado,Geoion/tornado,ifduyue/tornado,chenxiaba/tornado,bywbilly/tornado,0x73/tornado,mivade/tornado,xinyu7/tornado,zguangyu/tornado,drewmiller/tornado,gwillem/tornado,Fydot/tornado,fengsp/tornado,ListFranz/tornado,andyaguiar/tornado,ubear/tornado,cyrilMargaria/tornado,cyrusin/tornado,arthurdarcet/tornado,tornadoweb/tornado,akalipetis/tornado,nbargnesi/tornado,importcjj/tornado,AlphaStaxLLC/tornado,jampp/tornado,ydaniv/tornado,ListFranz/tornado,bywbilly/tornado,BencoLe
e/tornado,frtmelody/tornado,sevenguin/tornado,djt5019/tornado,shashankbassi92/tornado,z-fork/tornado,wechasing/tornado,gwillem/tornado,ovidiucp/tornado,jonashagstedt/tornado,legnaleurc/tornado,BencoLee/tornado,Callwoola/tornado,yuezhonghua/tornado,gitchs/tornado,Callwoola/tornado,bywbilly/tornado,Batterfii/tornado,anandology/tornado,0x73/tornado,lsanotes/tornado,wechasing/tornado,takeshineshiro/tornado,insflow/tornado,NoyaInRain/tornado,ymero/tornado,pombredanne/tornado,jampp/tornado,mehmetkose/tornado,Windsooon/tornado,akalipetis/tornado,jonashagstedt/tornado,Drooids/tornado,mivade/tornado,djt5019/tornado,Fydot/tornado,304471720/tornado,shaohung001/tornado,ydaniv/tornado,yuyangit/tornado,arthurdarcet/tornado,ubear/tornado,anandology/tornado,VShangxiao/tornado,cyrilMargaria/tornado,jparise/tornado,liqueur/tornado,mehmetkose/tornado,jampp/tornado,jsjohnst/tornado,chenxiaba/tornado,Snamint/tornado,coderhaoxin/tornado,sunjeammy/tornado
|
Add a benchmark for coroutine processing.
|
#!/usr/bin/env python
#
# A simple benchmark of the tornado.gen module.
# Runs in two modes, testing new-style (@coroutine and Futures)
# and old-style (@engine and Tasks) coroutines.
from timeit import Timer
from tornado import gen
from tornado.options import options, define, parse_command_line
define('num', default=10000, help='number of iterations')
# These benchmarks are delicate. They hit various fast-paths in the gen
# machinery in order to stay synchronous so we don't need an IOLoop.
# This removes noise from the results, but it's easy to change things
# in a way that completely invalidates the results.
@gen.engine
def e2(callback):
callback()
@gen.engine
def e1():
for i in range(10):
yield gen.Task(e2)
@gen.coroutine
def c2():
pass
@gen.coroutine
def c1():
for i in range(10):
yield c2()
def main():
parse_command_line()
t = Timer(e1)
results = t.timeit(options.num) / options.num
print 'engine: %0.3f ms per iteration' % (results * 1000)
t = Timer(c1)
results = t.timeit(options.num) / options.num
print 'coroutine: %0.3f ms per iteration' % (results * 1000)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a benchmark for coroutine processing.<commit_after>
|
#!/usr/bin/env python
#
# A simple benchmark of the tornado.gen module.
# Runs in two modes, testing new-style (@coroutine and Futures)
# and old-style (@engine and Tasks) coroutines.
from timeit import Timer
from tornado import gen
from tornado.options import options, define, parse_command_line
define('num', default=10000, help='number of iterations')
# These benchmarks are delicate. They hit various fast-paths in the gen
# machinery in order to stay synchronous so we don't need an IOLoop.
# This removes noise from the results, but it's easy to change things
# in a way that completely invalidates the results.
@gen.engine
def e2(callback):
callback()
@gen.engine
def e1():
for i in range(10):
yield gen.Task(e2)
@gen.coroutine
def c2():
pass
@gen.coroutine
def c1():
for i in range(10):
yield c2()
def main():
parse_command_line()
t = Timer(e1)
results = t.timeit(options.num) / options.num
print 'engine: %0.3f ms per iteration' % (results * 1000)
t = Timer(c1)
results = t.timeit(options.num) / options.num
print 'coroutine: %0.3f ms per iteration' % (results * 1000)
if __name__ == '__main__':
main()
|
Add a benchmark for coroutine processing.#!/usr/bin/env python
#
# A simple benchmark of the tornado.gen module.
# Runs in two modes, testing new-style (@coroutine and Futures)
# and old-style (@engine and Tasks) coroutines.
from timeit import Timer
from tornado import gen
from tornado.options import options, define, parse_command_line
define('num', default=10000, help='number of iterations')
# These benchmarks are delicate. They hit various fast-paths in the gen
# machinery in order to stay synchronous so we don't need an IOLoop.
# This removes noise from the results, but it's easy to change things
# in a way that completely invalidates the results.
@gen.engine
def e2(callback):
callback()
@gen.engine
def e1():
for i in range(10):
yield gen.Task(e2)
@gen.coroutine
def c2():
pass
@gen.coroutine
def c1():
for i in range(10):
yield c2()
def main():
parse_command_line()
t = Timer(e1)
results = t.timeit(options.num) / options.num
print 'engine: %0.3f ms per iteration' % (results * 1000)
t = Timer(c1)
results = t.timeit(options.num) / options.num
print 'coroutine: %0.3f ms per iteration' % (results * 1000)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a benchmark for coroutine processing.<commit_after>#!/usr/bin/env python
#
# A simple benchmark of the tornado.gen module.
# Runs in two modes, testing new-style (@coroutine and Futures)
# and old-style (@engine and Tasks) coroutines.
from timeit import Timer
from tornado import gen
from tornado.options import options, define, parse_command_line
define('num', default=10000, help='number of iterations')
# These benchmarks are delicate. They hit various fast-paths in the gen
# machinery in order to stay synchronous so we don't need an IOLoop.
# This removes noise from the results, but it's easy to change things
# in a way that completely invalidates the results.
@gen.engine
def e2(callback):
callback()
@gen.engine
def e1():
for i in range(10):
yield gen.Task(e2)
@gen.coroutine
def c2():
pass
@gen.coroutine
def c1():
for i in range(10):
yield c2()
def main():
parse_command_line()
t = Timer(e1)
results = t.timeit(options.num) / options.num
print 'engine: %0.3f ms per iteration' % (results * 1000)
t = Timer(c1)
results = t.timeit(options.num) / options.num
print 'coroutine: %0.3f ms per iteration' % (results * 1000)
if __name__ == '__main__':
main()
|
|
09d267808e69217bca37089fbd8907392eaa96ab
|
pox/forwarding/l2_learning_tutorial.py
|
pox/forwarding/l2_learning_tutorial.py
|
# Copyright 2011 James McCauley
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
"""
This is the world's simplest OpenFlow learning switch.
"""
from pox.core import core
from pox.lib.addresses import EthAddr
import pox.openflow.libopenflow_01 as of
log = core.getLogger()
def _handle_PacketIn (event):
packet = event.parsed
# If we get a packet FROM some src address on some input port, we
# know that if we want to send TO that address, we should send it
# out that port. Install a rule for this.
msg = of.ofp_flow_mod()
msg.match.dl_dst = packet.src
msg.actions.append(of.ofp_action_output(port = event.port))
event.connection.send(msg)
# Now since we got a packet at the controller, that must mean
# that we hadn't installed a rule for the destination address
# yet -- we don't know where it is. So, we'll just send the
# packet out all ports (except the one it came in on!) and
# hope the destination is out there somewhere. :)
msg = of.ofp_packet_out()
msg.actions.append(of.ofp_action_output(port = of.OFPP_FLOOD))
msg.buffer_id = event.ofp.buffer_id # Resend the incoming packet
msg.in_port = event.port # Don't flood out the incoming port
event.connection.send(msg)
def _handle_ConnectionUp (event):
# Install rule so that broadcast packets just get flooded
msg = of.ofp_flow_mod()
msg.match.dl_dst = EthAddr("ff:ff:ff:ff:ff:ff") # Broadcast
msg.actions.append(of.ofp_action_output(port = of.OFPP_FLOOD))
# FLOOD means all ports
event.connection.send(msg)
log.info("Switch connected.")
def launch ():
core.openflow.addListenerByName("ConnectionUp", _handle_ConnectionUp)
core.openflow.addListenerByName("PacketIn", _handle_PacketIn)
log.info("Learning switch running.")
|
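For context, POX components are launched by dotted module name, so running this switch would presumably look like the following (the component path is inferred from the file location, not stated in the source):
# Hypothetical launch command, derived from pox/forwarding/l2_learning_tutorial.py:
#   ./pox.py forwarding.l2_learning_tutorial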
Add simplest learning switch ever
|
forwarding: Add simplest learning switch ever
|
Python
|
apache-2.0
|
diogommartins/pox,pthien92/sdn,adusia/pox,diogommartins/pox,diogommartins/pox,chenyuntc/pox,adusia/pox,adusia/pox,chenyuntc/pox,denovogroup/pox,diogommartins/pox,carlye566/IoT-POX,pthien92/sdn,carlye566/IoT-POX,xAKLx/pox,jacobq/csci5221-viro-project,PrincetonUniversity/pox,adusia/pox,kavitshah8/SDNDeveloper,VamsikrishnaNallabothu/pox,waltznetworks/pox,VamsikrishnaNallabothu/pox,noxrepo/pox,carlye566/IoT-POX,pthien92/sdn,kpengboy/pox-exercise,waltznetworks/pox,denovogroup/pox,xAKLx/pox,VamsikrishnaNallabothu/pox,pthien92/sdn,kavitshah8/SDNDeveloper,MurphyMc/pox,andiwundsam/_of_normalize,andiwundsam/_of_normalize,chenyuntc/pox,jacobq/csci5221-viro-project,kpengboy/pox-exercise,kulawczukmarcin/mypox,jacobq/csci5221-viro-project,chenyuntc/pox,kulawczukmarcin/mypox,jacobq/csci5221-viro-project,kulawczukmarcin/mypox,MurphyMc/pox,PrincetonUniversity/pox,MurphyMc/pox,waltznetworks/pox,kavitshah8/SDNDeveloper,waltznetworks/pox,denovogroup/pox,kpengboy/pox-exercise,xAKLx/pox,kulawczukmarcin/mypox,VamsikrishnaNallabothu/pox,diogommartins/pox,MurphyMc/pox,noxrepo/pox,waltznetworks/pox,jacobq/csci5221-viro-project,pthien92/sdn,andiwundsam/_of_normalize,VamsikrishnaNallabothu/pox,adusia/pox,kavitshah8/SDNDeveloper,kpengboy/pox-exercise,denovogroup/pox,PrincetonUniversity/pox,PrincetonUniversity/pox,carlye566/IoT-POX,denovogroup/pox,carlye566/IoT-POX,xAKLx/pox,kulawczukmarcin/mypox,MurphyMc/pox,noxrepo/pox,kpengboy/pox-exercise,noxrepo/pox,andiwundsam/_of_normalize,chenyuntc/pox,PrincetonUniversity/pox,xAKLx/pox
|
forwarding: Add simplest learning switch ever
|
# Copyright 2011 James McCauley
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
"""
This is the world's simplest OpenFlow learning switch.
"""
from pox.core import core
from pox.lib.addresses import EthAddr
import pox.openflow.libopenflow_01 as of
log = core.getLogger()
def _handle_PacketIn (event):
packet = event.parsed
# If we get a packet FROM some src address on some input port, we
# know that if we want to send TO that address, we should send it
# out that port. Install a rule for this.
msg = of.ofp_flow_mod()
msg.match.dl_dst = packet.src
msg.actions.append(of.ofp_action_output(port = event.port))
event.connection.send(msg)
# Now since we got a packet at the controller, that must mean
# that we hadn't installed a rule for the destination address
# yet -- we don't know where it is. So, we'll just send the
# packet out all ports (except the one it came in on!) and
# hope the destination is out there somewhere. :)
msg = of.ofp_packet_out()
msg.actions.append(of.ofp_action_output(port = of.OFPP_FLOOD))
msg.buffer_id = event.ofp.buffer_id # Resend the incoming packet
msg.in_port = event.port # Don't flood out the incoming port
event.connection.send(msg)
def _handle_ConnectionUp (event):
# Install rule so that broadcast packets just get flooded
msg = of.ofp_flow_mod()
msg.match.dl_dst = EthAddr("ff:ff:ff:ff:ff:ff") # Broadcast
msg.actions.append(of.ofp_action_output(port = of.OFPP_FLOOD))
# FLOOD means all ports
event.connection.send(msg)
log.info("Switch connected.")
def launch ():
core.openflow.addListenerByName("ConnectionUp", _handle_ConnectionUp)
core.openflow.addListenerByName("PacketIn", _handle_PacketIn)
log.info("Learning switch running.")
|
<commit_before><commit_msg>forwarding: Add simplest learning switch ever<commit_after>
|
# Copyright 2011 James McCauley
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
"""
This is the world's simplest OpenFlow learning switch.
"""
from pox.core import core
from pox.lib.addresses import EthAddr
import pox.openflow.libopenflow_01 as of
log = core.getLogger()
def _handle_PacketIn (event):
packet = event.parsed
# If we get a packet FROM some src address on some input port, we
# know that if we want to send TO that address, we should send it
# out that port. Install a rule for this.
msg = of.ofp_flow_mod()
msg.match.dl_dst = packet.src
msg.actions.append(of.ofp_action_output(port = event.port))
event.connection.send(msg)
# Now since we got a packet at the controller, that must mean
# that we hadn't installed a rule for the destination address
# yet -- we don't know where it is. So, we'll just send the
# packet out all ports (except the one it came in on!) and
# hope the destination is out there somewhere. :)
msg = of.ofp_packet_out()
msg.actions.append(of.ofp_action_output(port = of.OFPP_FLOOD))
msg.buffer_id = event.ofp.buffer_id # Resend the incoming packet
msg.in_port = event.port # Don't flood out the incoming port
event.connection.send(msg)
def _handle_ConnectionUp (event):
# Install rule so that broadcast packets just get flooded
msg = of.ofp_flow_mod()
msg.match.dl_dst = EthAddr("ff:ff:ff:ff:ff:ff") # Broadcast
msg.actions.append(of.ofp_action_output(port = of.OFPP_FLOOD))
# FLOOD means all ports
event.connection.send(msg)
log.info("Switch connected.")
def launch ():
core.openflow.addListenerByName("ConnectionUp", _handle_ConnectionUp)
core.openflow.addListenerByName("PacketIn", _handle_PacketIn)
log.info("Learning switch running.")
|
forwarding: Add simplest learning switch ever# Copyright 2011 James McCauley
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
"""
This is the world's simplest OpenFlow learning switch.
"""
from pox.core import core
from pox.lib.addresses import EthAddr
import pox.openflow.libopenflow_01 as of
log = core.getLogger()
def _handle_PacketIn (event):
packet = event.parsed
# If we get a packet FROM some src address on some input port, we
# know that if we want to send TO that address, we should send it
# out that port. Install a rule for this.
msg = of.ofp_flow_mod()
msg.match.dl_dst = packet.src
msg.actions.append(of.ofp_action_output(port = event.port))
event.connection.send(msg)
# Now since we got a packet at the controller, that must mean
# that we hadn't installed a rule for the destination address
# yet -- we don't know where it is. So, we'll just send the
# packet out all ports (except the one it came in on!) and
# hope the destination is out there somewhere. :)
msg = of.ofp_packet_out()
msg.actions.append(of.ofp_action_output(port = of.OFPP_FLOOD))
msg.buffer_id = event.ofp.buffer_id # Resend the incoming packet
msg.in_port = event.port # Don't flood out the incoming port
event.connection.send(msg)
def _handle_ConnectionUp (event):
# Install rule so that broadcast packets just get flooded
msg = of.ofp_flow_mod()
msg.match.dl_dst = EthAddr("ff:ff:ff:ff:ff:ff") # Broadcast
msg.actions.append(of.ofp_action_output(port = of.OFPP_FLOOD))
# FLOOD means all ports
event.connection.send(msg)
log.info("Switch connected.")
def launch ():
core.openflow.addListenerByName("ConnectionUp", _handle_ConnectionUp)
core.openflow.addListenerByName("PacketIn", _handle_PacketIn)
log.info("Learning switch running.")
|
<commit_before><commit_msg>forwarding: Add simplest learning switch ever<commit_after># Copyright 2011 James McCauley
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
"""
This is the world's simplest OpenFlow learning switch.
"""
from pox.core import core
from pox.lib.addresses import EthAddr
import pox.openflow.libopenflow_01 as of
log = core.getLogger()
def _handle_PacketIn (event):
packet = event.parsed
# If we get a packet FROM some src address on some input port, we
# know that if we want to send TO that address, we should send it
# out that port. Install a rule for this.
msg = of.ofp_flow_mod()
msg.match.dl_dst = packet.src
msg.actions.append(of.ofp_action_output(port = event.port))
event.connection.send(msg)
# Now since we got a packet at the controller, that must mean
# that we hadn't installed a rule for the destination address
# yet -- we don't know where it is. So, we'll just send the
# packet out all ports (except the one it came in on!) and
# hope the destination is out there somewhere. :)
msg = of.ofp_packet_out()
msg.actions.append(of.ofp_action_output(port = of.OFPP_FLOOD))
msg.buffer_id = event.ofp.buffer_id # Resend the incoming packet
msg.in_port = event.port # Don't flood out the incoming port
event.connection.send(msg)
def _handle_ConnectionUp (event):
# Install rule so that broadcast packets just get flooded
msg = of.ofp_flow_mod()
msg.match.dl_dst = EthAddr("ff:ff:ff:ff:ff:ff") # Broadcast
msg.actions.append(of.ofp_action_output(port = of.OFPP_FLOOD))
# FLOOD means all ports
event.connection.send(msg)
log.info("Switch connected.")
def launch ():
core.openflow.addListenerByName("ConnectionUp", _handle_ConnectionUp)
core.openflow.addListenerByName("PacketIn", _handle_PacketIn)
log.info("Learning switch running.")
|
|
cf657b6f9744d4b53a32db51752677eaaf2ec998
|
migrations/versions/7791f2c862d2_.py
|
migrations/versions/7791f2c862d2_.py
|
"""Adds TimeExercisesHistory and TimeExercisesTaxonomy Tables
Revision ID: 7791f2c862d2
Revises: 56630a78dca0
Create Date: 2017-07-30 20:40:17.174425
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '7791f2c862d2'
down_revision = '56630a78dca0'
branch_labels = None
depends_on = None
def upgrade():
op.create_table('time_exercises_taxonomy',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('time_exercises_history',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('exercise_id', sa.Integer(), nullable=True),
sa.Column('distance', sa.Float(), nullable=True),
sa.Column('duration', sa.Float(), nullable=True),
sa.Column('exercise_date', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['exercise_id'], ['time_exercises_history.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
def downgrade():
op.drop_table('time_exercises_history')
op.drop_table('time_exercises_taxonomy')
|
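One detail worth flagging: the exercise_id foreign key above references time_exercises_history.id, the table's own primary key, rather than the taxonomy table created alongside it. If that self-reference is unintentional, a corrected constraint (an assumption, not what the migration commits) would read:
# Hypothetical fix if exercise_id is meant to point at the taxonomy table:
sa.ForeignKeyConstraint(['exercise_id'], ['time_exercises_taxonomy.id'], ),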
Upgrade to add Time based exercises tables
|
Upgrade to add Time based exercises tables
|
Python
|
mit
|
pbraunstein/trackercise,pbraunstein/trackercise,pbraunstein/trackercise,pbraunstein/trackercise,pbraunstein/trackercise
|
Upgrade to add Time based exercises tables
|
"""Adds TimeExercisesHistory and TimeExercisesTaxonomy Tables
Revision ID: 7791f2c862d2
Revises: 56630a78dca0
Create Date: 2017-07-30 20:40:17.174425
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '7791f2c862d2'
down_revision = '56630a78dca0'
branch_labels = None
depends_on = None
def upgrade():
op.create_table('time_exercises_taxonomy',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('time_exercises_history',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('exercise_id', sa.Integer(), nullable=True),
sa.Column('distance', sa.Float(), nullable=True),
sa.Column('duration', sa.Float(), nullable=True),
sa.Column('exercise_date', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['exercise_id'], ['time_exercises_history.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
def downgrade():
op.drop_table('time_exercises_history')
op.drop_table('time_exercises_taxonomy')
|
<commit_before><commit_msg>Upgrade to add Time based exercises tables<commit_after>
|
"""Adds TimeExercisesHistory and TimeExercisesTaxonomy Tables
Revision ID: 7791f2c862d2
Revises: 56630a78dca0
Create Date: 2017-07-30 20:40:17.174425
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '7791f2c862d2'
down_revision = '56630a78dca0'
branch_labels = None
depends_on = None
def upgrade():
op.create_table('time_exercises_taxonomy',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('time_exercises_history',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('exercise_id', sa.Integer(), nullable=True),
sa.Column('distance', sa.Float(), nullable=True),
sa.Column('duration', sa.Float(), nullable=True),
sa.Column('exercise_date', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['exercise_id'], ['time_exercises_history.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
def downgrade():
op.drop_table('time_exercises_history')
op.drop_table('time_exercises_taxonomy')
|
Upgrade to add Time based exercises tables"""Adds TimeExercisesHistory and TimeExercisesTaxonomy Tables
Revision ID: 7791f2c862d2
Revises: 56630a78dca0
Create Date: 2017-07-30 20:40:17.174425
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '7791f2c862d2'
down_revision = '56630a78dca0'
branch_labels = None
depends_on = None
def upgrade():
op.create_table('time_exercises_taxonomy',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('time_exercises_history',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('exercise_id', sa.Integer(), nullable=True),
sa.Column('distance', sa.Float(), nullable=True),
sa.Column('duration', sa.Float(), nullable=True),
sa.Column('exercise_date', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['exercise_id'], ['time_exercises_history.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
def downgrade():
op.drop_table('time_exercises_history')
op.drop_table('time_exercises_taxonomy')
|
<commit_before><commit_msg>Upgrade to add Time based exercises tables<commit_after>"""Adds TimeExercisesHistory and TimeExercisesTaxonomy Tables
Revision ID: 7791f2c862d2
Revises: 56630a78dca0
Create Date: 2017-07-30 20:40:17.174425
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '7791f2c862d2'
down_revision = '56630a78dca0'
branch_labels = None
depends_on = None
def upgrade():
op.create_table('time_exercises_taxonomy',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('time_exercises_history',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('exercise_id', sa.Integer(), nullable=True),
sa.Column('distance', sa.Float(), nullable=True),
sa.Column('duration', sa.Float(), nullable=True),
sa.Column('exercise_date', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['exercise_id'], ['time_exercises_history.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
def downgrade():
op.drop_table('time_exercises_history')
op.drop_table('time_exercises_taxonomy')
|
|
1f61ac74ed7a00d642fa94944cbce3ebc4690e9c
|
scripts/index-g6-in-elasticsearch.py
|
scripts/index-g6-in-elasticsearch.py
|
#!/usr/bin/python
'''Process G6 JSON files into elasticsearch
This version reads G6 JSON from disk or DM API.
Usage:
process-g6-into-elastic-search.py <es_endpoint> <dir_or_endpoint> [<token>]
Arguments:
es_endpoint Full ES index URL
dir_or_endpoint Directory path to import or an API URL if token is given
token Digital Marketplace API token
'''
import os
import sys
import json
import urllib2
def post_to_es(es_endpoint, json_data):
handler = urllib2.HTTPHandler()
opener = urllib2.build_opener(handler)
if not es_endpoint.endswith('/'):
es_endpoint += '/'
request = urllib2.Request(es_endpoint + str(json_data['id']),
data=json.dumps(json_data))
request.add_header("Content-Type", 'application/json')
print request.get_full_url()
# print request.get_data()
try:
connection = opener.open(request)
except urllib2.HTTPError, e:
connection = e
print connection
# Check the response status; substitute a more appropriate HTTP code here if needed.
if connection.code == 200:
data = connection.read()
print str(connection.code) + " " + data
else:
print "connection.code = " + str(connection.code)
def request_services(endpoint, token):
handler = urllib2.HTTPBasicAuthHandler()
opener = urllib2.build_opener(handler)
page_url = endpoint
while page_url:
print "requesting {}".format(page_url)
request = urllib2.Request(page_url)
request.add_header("Authorization", "Bearer {}".format(token))
response = opener.open(request).read()
data = json.loads(response)
for service in data["services"]:
yield service
page_url = filter(lambda l: l['rel'] == 'next', data['links'])
if page_url:
page_url = page_url[0]['href']
def process_json_files_in_directory(dirname):
for filename in os.listdir(dirname):
with open(os.path.join(dirname, filename)) as f:
data = json.loads(f.read())
print "doing " + filename
yield data
def main():
if len(sys.argv) == 4:
es_endpoint, endpoint, token = sys.argv[1:]
for data in request_services(endpoint, token):
post_to_es(es_endpoint, data)
elif len(sys.argv) == 3:
es_endpoint, listing_dir = sys.argv[1:]
for data in process_json_files_in_directory(listing_dir):
post_to_es(es_endpoint, data)
else:
print __doc__
if __name__ == '__main__':
main()
|
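Two notes on the script above. First, the usage string still carries an older name (process-g6-into-elastic-search.py) while the file is index-g6-in-elasticsearch.py. Second, the code is Python 2 only (urllib2, print statements, the 'except urllib2.HTTPError, e' form); a minimal Python 3 sketch of the same POST using the requests library follows (a substitution for illustration, since the original sticks to the standard library):
# Python 3 sketch of post_to_es with requests; a hypothetical rewrite, not the original.
import json
import requests

def post_to_es(es_endpoint, json_data):
    if not es_endpoint.endswith('/'):
        es_endpoint += '/'
    url = es_endpoint + str(json_data['id'])
    resp = requests.post(url, data=json.dumps(json_data),
                         headers={'Content-Type': 'application/json'})
    print(resp.status_code, resp.text)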
Move ElasticSearch import script from buyer-frontend into search-api
|
Move ElasticSearch import script from buyer-frontend into search-api
This script should have moved when the search-api was split off from the buyer-frontend app.
|
Python
|
mit
|
alphagov/digitalmarketplace-search-api,RichardKnop/digitalmarketplace-search-api,alphagov/digitalmarketplace-search-api,RichardKnop/digitalmarketplace-search-api,RichardKnop/digitalmarketplace-search-api,RichardKnop/digitalmarketplace-search-api
|
Move ElasticSearch import script from buyer-frontend into search-api
This script should have moved when the search-api was split off from the buyer-frontend app.
|
#!/usr/bin/python
'''Process G6 JSON files into elasticsearch
This version reads G6 JSON from disk or DM API.
Usage:
process-g6-into-elastic-search.py <es_endpoint> <dir_or_endpoint> [<token>]
Arguments:
es_endpoint Full ES index URL
dir_or_endpoint Directory path to import or an API URL if token is given
token Digital Marketplace API token
'''
import os
import sys
import json
import urllib2
def post_to_es(es_endpoint, json_data):
handler = urllib2.HTTPHandler()
opener = urllib2.build_opener(handler)
if not es_endpoint.endswith('/'):
es_endpoint += '/'
request = urllib2.Request(es_endpoint + str(json_data['id']),
data=json.dumps(json_data))
request.add_header("Content-Type", 'application/json')
print request.get_full_url()
# print request.get_data()
try:
connection = opener.open(request)
except urllib2.HTTPError, e:
connection = e
print connection
# Check the response status; substitute a more appropriate HTTP code here if needed.
if connection.code == 200:
data = connection.read()
print str(connection.code) + " " + data
else:
print "connection.code = " + str(connection.code)
def request_services(endpoint, token):
handler = urllib2.HTTPBasicAuthHandler()
opener = urllib2.build_opener(handler)
page_url = endpoint
while page_url:
print "requesting {}".format(page_url)
request = urllib2.Request(page_url)
request.add_header("Authorization", "Bearer {}".format(token))
response = opener.open(request).read()
data = json.loads(response)
for service in data["services"]:
yield service
page_url = filter(lambda l: l['rel'] == 'next', data['links'])
if page_url:
page_url = page_url[0]['href']
def process_json_files_in_directory(dirname):
for filename in os.listdir(dirname):
with open(os.path.join(dirname, filename)) as f:
data = json.loads(f.read())
print "doing " + filename
yield data
def main():
if len(sys.argv) == 4:
es_endpoint, endpoint, token = sys.argv[1:]
for data in request_services(endpoint, token):
post_to_es(es_endpoint, data)
elif len(sys.argv) == 3:
es_endpoint, listing_dir = sys.argv[1:]
for data in process_json_files_in_directory(listing_dir):
post_to_es(es_endpoint, data)
else:
print __doc__
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Move ElasticSearch import script from buyer-frontend into search-api
This script should have moved when the search-api was split off from the buyer-frontend app.<commit_after>
|
#!/usr/bin/python
'''Process G6 JSON files into elasticsearch
This version reads G6 JSON from disk or DM API.
Usage:
process-g6-into-elastic-search.py <es_endpoint> <dir_or_endpoint> [<token>]
Arguments:
es_endpoint Full ES index URL
dir_or_endpoint Directory path to import or an API URL if token is given
token Digital Marketplace API token
'''
import os
import sys
import json
import urllib2
def post_to_es(es_endpoint, json_data):
handler = urllib2.HTTPHandler()
opener = urllib2.build_opener(handler)
if not es_endpoint.endswith('/'):
es_endpoint += '/'
request = urllib2.Request(es_endpoint + str(json_data['id']),
data=json.dumps(json_data))
request.add_header("Content-Type", 'application/json')
print request.get_full_url()
# print request.get_data()
try:
connection = opener.open(request)
except urllib2.HTTPError, e:
connection = e
print connection
# Check the response status; substitute a more appropriate HTTP code here if needed.
if connection.code == 200:
data = connection.read()
print str(connection.code) + " " + data
else:
print "connection.code = " + str(connection.code)
def request_services(endpoint, token):
handler = urllib2.HTTPBasicAuthHandler()
opener = urllib2.build_opener(handler)
page_url = endpoint
while page_url:
print "requesting {}".format(page_url)
request = urllib2.Request(page_url)
request.add_header("Authorization", "Bearer {}".format(token))
response = opener.open(request).read()
data = json.loads(response)
for service in data["services"]:
yield service
page_url = filter(lambda l: l['rel'] == 'next', data['links'])
if page_url:
page_url = page_url[0]['href']
def process_json_files_in_directory(dirname):
for filename in os.listdir(dirname):
with open(os.path.join(dirname, filename)) as f:
data = json.loads(f.read())
print "doing " + filename
yield data
def main():
if len(sys.argv) == 4:
es_endpoint, endpoint, token = sys.argv[1:]
for data in request_services(endpoint, token):
post_to_es(es_endpoint, data)
elif len(sys.argv) == 3:
es_endpoint, listing_dir = sys.argv[1:]
for data in process_json_files_in_directory(listing_dir):
post_to_es(es_endpoint, data)
else:
print __doc__
if __name__ == '__main__':
main()
|
Move ElasticSearch import script from buyer-frontend into search-api
This script should have moved when the search-api was split off from the buyer-frontend app.#!/usr/bin/python
'''Process G6 JSON files into elasticsearch
This version reads G6 JSON from disk or DM API.
Usage:
process-g6-into-elastic-search.py <es_endpoint> <dir_or_endpoint> [<token>]
Arguments:
es_endpoint Full ES index URL
dir_or_endpoint Directory path to import or an API URL if token is given
token Digital Marketplace API token
'''
import os
import sys
import json
import urllib2
def post_to_es(es_endpoint, json_data):
handler = urllib2.HTTPHandler()
opener = urllib2.build_opener(handler)
if not es_endpoint.endswith('/'):
es_endpoint += '/'
request = urllib2.Request(es_endpoint + str(json_data['id']),
data=json.dumps(json_data))
request.add_header("Content-Type", 'application/json')
print request.get_full_url()
# print request.get_data()
try:
connection = opener.open(request)
except urllib2.HTTPError, e:
connection = e
print connection
# Check the response status; substitute a more appropriate HTTP code here if needed.
if connection.code == 200:
data = connection.read()
print str(connection.code) + " " + data
else:
print "connection.code = " + str(connection.code)
def request_services(endpoint, token):
handler = urllib2.HTTPBasicAuthHandler()
opener = urllib2.build_opener(handler)
page_url = endpoint
while page_url:
print "requesting {}".format(page_url)
request = urllib2.Request(page_url)
request.add_header("Authorization", "Bearer {}".format(token))
response = opener.open(request).read()
data = json.loads(response)
for service in data["services"]:
yield service
page_url = filter(lambda l: l['rel'] == 'next', data['links'])
if page_url:
page_url = page_url[0]['href']
def process_json_files_in_directory(dirname):
for filename in os.listdir(dirname):
with open(os.path.join(dirname, filename)) as f:
data = json.loads(f.read())
print "doing " + filename
yield data
def main():
if len(sys.argv) == 4:
es_endpoint, endpoint, token = sys.argv[1:]
for data in request_services(endpoint, token):
post_to_es(es_endpoint, data)
elif len(sys.argv) == 3:
es_endpoint, listing_dir = sys.argv[1:]
for data in process_json_files_in_directory(listing_dir):
post_to_es(es_endpoint, data)
else:
print __doc__
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Move ElasticSearch import script from buyer-frontend into search-api
This script should have moved when the search-api was split off from the buyer-frontend app.<commit_after>#!/usr/bin/python
'''Process G6 JSON files into elasticsearch
This version reads G6 JSON from disk or DM API.
Usage:
process-g6-into-elastic-search.py <es_endpoint> <dir_or_endpoint> [<token>]
Arguments:
es_endpoint Full ES index URL
dir_or_endpoint Directory path to import or an API URL if token is given
token Digital Marketplace API token
'''
import os
import sys
import json
import urllib2
def post_to_es(es_endpoint, json_data):
handler = urllib2.HTTPHandler()
opener = urllib2.build_opener(handler)
if not es_endpoint.endswith('/'):
es_endpoint += '/'
request = urllib2.Request(es_endpoint + str(json_data['id']),
data=json.dumps(json_data))
request.add_header("Content-Type", 'application/json')
print request.get_full_url()
# print request.get_data()
try:
connection = opener.open(request)
except urllib2.HTTPError, e:
connection = e
print connection
# Check the response status; substitute a more appropriate HTTP code here if needed.
if connection.code == 200:
data = connection.read()
print str(connection.code) + " " + data
else:
print "connection.code = " + str(connection.code)
def request_services(endpoint, token):
handler = urllib2.HTTPBasicAuthHandler()
opener = urllib2.build_opener(handler)
page_url = endpoint
while page_url:
print "requesting {}".format(page_url)
request = urllib2.Request(page_url)
request.add_header("Authorization", "Bearer {}".format(token))
response = opener.open(request).read()
data = json.loads(response)
for service in data["services"]:
yield service
page_url = filter(lambda l: l['rel'] == 'next', data['links'])
if page_url:
page_url = page_url[0]['href']
def process_json_files_in_directory(dirname):
for filename in os.listdir(dirname):
with open(os.path.join(dirname, filename)) as f:
data = json.loads(f.read())
print "doing " + filename
yield data
def main():
if len(sys.argv) == 4:
es_endpoint, endpoint, token = sys.argv[1:]
for data in request_services(endpoint, token):
post_to_es(es_endpoint, data)
elif len(sys.argv) == 3:
es_endpoint, listing_dir = sys.argv[1:]
for data in process_json_files_in_directory(listing_dir):
post_to_es(es_endpoint, data)
else:
print __doc__
if __name__ == '__main__':
main()
|
|
c4013140720f3fd9d128c1cbaa3554436ecf360f
|
migrations/versions/0185_add_is_active_to_reply_tos.py
|
migrations/versions/0185_add_is_active_to_reply_tos.py
|
"""
Revision ID: 0185_add_is_active_to_reply_tos
Revises: 0184_alter_primary_key_1
Create Date: 2018-04-10 16:35:41.824981
"""
from alembic import op
import sqlalchemy as sa
revision = '0185_add_is_active_to_reply_tos'
down_revision = '0184_alter_primary_key_1'
def upgrade():
op.add_column('service_email_reply_to', sa.Column('is_active', sa.Boolean(), nullable=False, server_default=sa.true()))
op.add_column('service_letter_contacts', sa.Column('is_active', sa.Boolean(), nullable=False, server_default=sa.true()))
op.add_column('service_sms_senders', sa.Column('is_active', sa.Boolean(), nullable=False, server_default=sa.true()))
def downgrade():
op.drop_column('service_sms_senders', 'is_active')
op.drop_column('service_letter_contacts', 'is_active')
op.drop_column('service_email_reply_to', 'is_active')
|
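A usage sketch, assuming a plain Alembic setup (the exact wrapper command depends on how the repo invokes migrations):
# Hypothetical commands to apply and roll back this revision:
#   alembic upgrade 0185_add_is_active_to_reply_tos
#   alembic downgrade 0184_alter_primary_key_1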
Add is_active column for the 3 reply_to tables
|
Add is_active column for the 3 reply_to tables
Added a new boolean column, `is_active` to these tables
* service_email_reply_to
* service_sms_senders
* service_letter_contacts
This has a database default of True in order to backfill the data, but
this default will be replaced with a model default later.
|
Python
|
mit
|
alphagov/notifications-api,alphagov/notifications-api
|
Add is_active column for the 3 reply_to tables
Added a new boolean column, `is_active` to these tables
* service_email_reply_to
* service_sms_senders
* service_letter_contacts
This has a database default of True in order to backfill the data, but
this default will be replaced with a model default later.
|
"""
Revision ID: 0185_add_is_active_to_reply_tos
Revises: 0184_alter_primary_key_1
Create Date: 2018-04-10 16:35:41.824981
"""
from alembic import op
import sqlalchemy as sa
revision = '0185_add_is_active_to_reply_tos'
down_revision = '0184_alter_primary_key_1'
def upgrade():
op.add_column('service_email_reply_to', sa.Column('is_active', sa.Boolean(), nullable=False, server_default=sa.true()))
op.add_column('service_letter_contacts', sa.Column('is_active', sa.Boolean(), nullable=False, server_default=sa.true()))
op.add_column('service_sms_senders', sa.Column('is_active', sa.Boolean(), nullable=False, server_default=sa.true()))
def downgrade():
op.drop_column('service_sms_senders', 'is_active')
op.drop_column('service_letter_contacts', 'is_active')
op.drop_column('service_email_reply_to', 'is_active')
|
<commit_before><commit_msg>Add is_active column for the 3 reply_to tables
Added a new boolean column, `is_active` to these tables
* service_email_reply_to
* service_sms_senders
* service_letter_contacts
This has a database default of True in order to backfill the data, but
this default will be replaced with a model default later.<commit_after>
|
"""
Revision ID: 0185_add_is_active_to_reply_tos
Revises: 0184_alter_primary_key_1
Create Date: 2018-04-10 16:35:41.824981
"""
from alembic import op
import sqlalchemy as sa
revision = '0185_add_is_active_to_reply_tos'
down_revision = '0184_alter_primary_key_1'
def upgrade():
op.add_column('service_email_reply_to', sa.Column('is_active', sa.Boolean(), nullable=False, server_default=sa.true()))
op.add_column('service_letter_contacts', sa.Column('is_active', sa.Boolean(), nullable=False, server_default=sa.true()))
op.add_column('service_sms_senders', sa.Column('is_active', sa.Boolean(), nullable=False, server_default=sa.true()))
def downgrade():
op.drop_column('service_sms_senders', 'is_active')
op.drop_column('service_letter_contacts', 'is_active')
op.drop_column('service_email_reply_to', 'is_active')
|
Add is_active column for the 3 reply_to tables
Added a new boolean column, `is_active` to these tables
* service_email_reply_to
* service_sms_senders
* service_letter_contacts
This has a database default of True in order to backfill the data, but
this default will be replaced with a model default later."""
Revision ID: 0185_add_is_active_to_reply_tos
Revises: 0184_alter_primary_key_1
Create Date: 2018-04-10 16:35:41.824981
"""
from alembic import op
import sqlalchemy as sa
revision = '0185_add_is_active_to_reply_tos'
down_revision = '0184_alter_primary_key_1'
def upgrade():
op.add_column('service_email_reply_to', sa.Column('is_active', sa.Boolean(), nullable=False, server_default=sa.true()))
op.add_column('service_letter_contacts', sa.Column('is_active', sa.Boolean(), nullable=False, server_default=sa.true()))
op.add_column('service_sms_senders', sa.Column('is_active', sa.Boolean(), nullable=False, server_default=sa.true()))
def downgrade():
op.drop_column('service_sms_senders', 'is_active')
op.drop_column('service_letter_contacts', 'is_active')
op.drop_column('service_email_reply_to', 'is_active')
|
<commit_before><commit_msg>Add is_active column for the 3 reply_to tables
Added a new boolean column, `is_active` to these tables
* service_email_reply_to
* service_sms_senders
* service_letter_contacts
This has a database default of True in order to backfill the data, but
this default will be replaced with a model default later.<commit_after>"""
Revision ID: 0185_add_is_active_to_reply_tos
Revises: 0184_alter_primary_key_1
Create Date: 2018-04-10 16:35:41.824981
"""
from alembic import op
import sqlalchemy as sa
revision = '0185_add_is_active_to_reply_tos'
down_revision = '0184_alter_primary_key_1'
def upgrade():
op.add_column('service_email_reply_to', sa.Column('is_active', sa.Boolean(), nullable=False, server_default=sa.true()))
op.add_column('service_letter_contacts', sa.Column('is_active', sa.Boolean(), nullable=False, server_default=sa.true()))
op.add_column('service_sms_senders', sa.Column('is_active', sa.Boolean(), nullable=False, server_default=sa.true()))
def downgrade():
op.drop_column('service_sms_senders', 'is_active')
op.drop_column('service_letter_contacts', 'is_active')
op.drop_column('service_email_reply_to', 'is_active')
|
|
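A hedged sketch of the follow-up step the message promises — dropping the database-level default once the model default exists. The revision id and the use of alter_column are assumptions for illustration, not taken from the repository:
from alembic import op
import sqlalchemy as sa

revision = '0186_drop_is_active_server_default'   # hypothetical id
down_revision = '0185_add_is_active_to_reply_tos'

TABLES = ('service_email_reply_to', 'service_letter_contacts', 'service_sms_senders')

def upgrade():
    # server_default=None removes the DB default; the model-level default takes over
    for table in TABLES:
        op.alter_column(table, 'is_active', existing_type=sa.Boolean(), server_default=None)

def downgrade():
    for table in TABLES:
        op.alter_column(table, 'is_active', existing_type=sa.Boolean(), server_default=sa.true())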
51256756897c1e504dab670abbe71fbb5971f9d5
|
src/gcf/geni/am/api_error_exception.py
|
src/gcf/geni/am/api_error_exception.py
|
#----------------------------------------------------------------------
# Copyright (c) 2012-2015 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
"""
When AggregateManager delegates raise this type of exception, the result of
the current call will be a well-formatted result, with a message as in
{ 'output': 'exception output',
'code': {'am_type': 'gcf',
'geni_code': <exception code>},
'value': ''}
"""
class ApiErrorException(Exception):
def __init__(self, code, output):
self.code = code
self.output = output
def __str__(self):
return "ApiError(%r, %r)" % (self.code, self.output)
|
Rework the patch to handle ApiErrorException in a generic way in AMMethodContext. Create a specific file for ApiErrorException so as to be able to import it from am3.py and am_method_context.py.
|
Rework the patch to handle ApiErrorException in a generic way in AMMethodContext. Create a specific file for ApiErrorException so as to be able to import it from am3.py and am_method_context.py.
|
Python
|
mit
|
tcmitchell/geni-tools,ahelsing/geni-tools,plantigrade/geni-tools,plantigrade/geni-tools,ahelsing/geni-tools,tcmitchell/geni-tools
|
Rework the patch to handle ApiErrorException in a generic way in AMMethodContext. Create a specific file for ApiErrorException so as to be able to import it from am3.py and am_method_context.py.
|
#----------------------------------------------------------------------
# Copyright (c) 2012-2015 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
"""
When AggregateManager delegates raise this type of exception, the result of
the current call will be a well-formatted result, with a message as in
{ 'output': 'exception output',
'code': {'am_type': 'gcf',
'geni_code': <exception code>},
'value': ''}
"""
class ApiErrorException(Exception):
def __init__(self, code, output):
self.code = code
self.output = output
def __str__(self):
return "ApiError(%r, %r)" % (self.code, self.output)
|
<commit_before><commit_msg>Rework the patch to handle ApiErrorException in a generic way in AMMethodContext. Create a specific file for ApiErrorException so as to be able to import it from am3.py and am_method_context.py.<commit_after>
|
#----------------------------------------------------------------------
# Copyright (c) 2012-2015 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
"""
When AggregateManager delegates raise this type of exception, the result of
the current call will be a well-formatted result, with a message as in
{ 'output': 'exception output',
'code': {'am_type': 'gcf',
'geni_code': <exception code>},
'value': ''}
"""
class ApiErrorException(Exception):
def __init__(self, code, output):
self.code = code
self.output = output
def __str__(self):
return "ApiError(%r, %r)" % (self.code, self.output)
|
Rework the patch to handle ApiErrorException in a generic way in AMMethodContext. Create a specific file for ApiErrorException so as to be able to import it from am3.py and am_method_context.py.#----------------------------------------------------------------------
# Copyright (c) 2012-2015 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
"""
When AggregateManager delegates raise this type of exception, the result of
the current call will be a well-formatted result, with a message as in
{ 'output': 'exception output',
'code': {'am_type': 'gcf',
'geni_code': <exception code>},
'value': ''}
"""
class ApiErrorException(Exception):
def __init__(self, code, output):
self.code = code
self.output = output
def __str__(self):
return "ApiError(%r, %r)" % (self.code, self.output)
|
<commit_before><commit_msg>Rework the patch to handle ApiErrorException in a generic way in AMMethodContext. Create a specific file for ApiErrorException so as to be able to import it from am3.py and am_method_context.py.<commit_after>#----------------------------------------------------------------------
# Copyright (c) 2012-2015 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
"""
When AggregateManager delegates raise this type of exception, the result of
the current call will be a well-formatted result, with a message as in
{ 'output': 'exception output',
'code': {'am_type': 'gcf',
'geni_code': <exception code>},
'value': ''}
"""
class ApiErrorException(Exception):
def __init__(self, code, output):
self.code = code
self.output = output
def __str__(self):
return "ApiError(%r, %r)" % (self.code, self.output)
|
|
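A minimal sketch of the generic handling the message describes; the wrapper name is hypothetical and only illustrates how a raised ApiErrorException maps onto the documented result triple:
from gcf.geni.am.api_error_exception import ApiErrorException

def call_delegate(method, *args, **kwargs):
    # Hypothetical AMMethodContext-style wrapper: any ApiErrorException from a
    # delegate becomes a well-formatted AM API result instead of propagating.
    try:
        return method(*args, **kwargs)
    except ApiErrorException as e:
        return {'output': e.output,
                'code': {'am_type': 'gcf', 'geni_code': e.code},
                'value': ''}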
00ec33f227feabbfc97230048d56ef0966f2ace5
|
test/python/tests/test_conversion.py
|
test/python/tests/test_conversion.py
|
import util
import sys
class test_conversion:
def init(self):
yield "a = M.arange(1);"
def test_float(self, cmd):
cmd += "res = float(a);"
return cmd
def test_int(self, cmd):
cmd += "res = int(a);"
return cmd
if sys.version_info[0] < 3:
def test_oct(self, cmd):
cmd += "res = oct(a);"
return cmd
def test_hex(self, cmd):
cmd += "res = hex(a);"
return cmd
def test_long(self, cmd):
cmd += "res = long(a);"
return cmd
|
Test conversion (e.g. float()) works
|
Test conversion (e.g. float()) works
|
Python
|
apache-2.0
|
madsbk/bohrium,bh107/bohrium,madsbk/bohrium,madsbk/bohrium,bh107/bohrium,madsbk/bohrium,bh107/bohrium,bh107/bohrium
|
Test conversion (e.g. float()) works
|
import util
import sys
class test_conversion:
def init(self):
yield "a = M.arange(1);"
def test_float(self, cmd):
cmd += "res = float(a);"
return cmd
def test_int(self, cmd):
cmd += "res = int(a);"
return cmd
if sys.version_info[0] < 3:
def test_oct(self, cmd):
cmd += "res = oct(a);"
return cmd
def test_hex(self, cmd):
cmd += "res = hex(a);"
return cmd
def test_long(self, cmd):
cmd += "res = long(a);"
return cmd
|
<commit_before><commit_msg>Test conversion (e.g. float()) works<commit_after>
|
import util
import sys
class test_conversion:
def init(self):
yield "a = M.arange(1);"
def test_float(self, cmd):
cmd += "res = float(a);"
return cmd
def test_int(self, cmd):
cmd += "res = int(a);"
return cmd
if sys.version_info[0] < 3:
def test_oct(self, cmd):
cmd += "res = oct(a);"
return cmd
def test_hex(self, cmd):
cmd += "res = hex(a);"
return cmd
def test_long(self, cmd):
cmd += "res = long(a);"
return cmd
|
Test conversion (e.g. float()) worksimport util
import sys
class test_conversion:
def init(self):
yield "a = M.arange(1);"
def test_float(self, cmd):
cmd += "res = float(a);"
return cmd
def test_int(self, cmd):
cmd += "res = int(a);"
return cmd
if sys.version_info[0] < 3:
def test_oct(self, cmd):
cmd += "res = oct(a);"
return cmd
def test_hex(self, cmd):
cmd += "res = hex(a);"
return cmd
def test_long(self, cmd):
cmd += "res = long(a);"
return cmd
|
<commit_before><commit_msg>Test conversion (e.g. float()) works<commit_after>import util
import sys
class test_conversion:
def init(self):
yield "a = M.arange(1);"
def test_float(self, cmd):
cmd += "res = float(a);"
return cmd
def test_int(self, cmd):
cmd += "res = int(a);"
return cmd
if sys.version_info[0] < 3:
def test_oct(self, cmd):
cmd += "res = oct(a);"
return cmd
def test_hex(self, cmd):
cmd += "res = hex(a);"
return cmd
def test_long(self, cmd):
cmd += "res = long(a);"
return cmd
|
|
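The behaviour the tests rely on, shown against plain NumPy for reference (M is bohrium's NumPy-compatible module; this snippet is an illustration, not part of the suite):
import numpy as np

a = np.arange(1)          # size-1 array, analogous to M.arange(1)
print(float(a), int(a))   # size-1 arrays support scalar conversion
                          # (NumPy >= 1.25 deprecates this for ndim > 0)
# oct(), hex() and long() only accept such arrays on Python 2,
# which is why the last three tests sit behind the version guard.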
b6b57d4d693b1194b3606eeb9146a89dae435b6e
|
indra/databases/pubmed_client.py
|
indra/databases/pubmed_client.py
|
import urllib, urllib2
import xml.etree.ElementTree as ET
pubmed_search = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi'
pubmed_fetch = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi'
def send_request(url, data):
try:
req = urllib2.Request(url, data)
res = urllib2.urlopen(req)
xml_str = res.read()
tree = ET.fromstring(xml_str)
except:
return None
return tree
def get_ids(search_term, retmax=1000):
params = {'db': 'pubmed',
'term': search_term,
'retstart': 0,
'retmax': retmax}
tree = send_request(pubmed_search, urllib.urlencode(params))
if tree is None:
return []
count = int(tree.find('Count').text)
id_terms = tree.findall('IdList/Id')
if id_terms is None:
return []
ids = [idt.text for idt in id_terms]
if count != len(ids):
print 'Not all ids were retreived, limited at %d.' % retmax
return ids
def get_abstract(pubmed_id):
params = {'db': 'pubmed',
'retmode': 'xml',
'rettype': 'abstract',
'id': pubmed_id}
tree = send_request(pubmed_fetch, urllib.urlencode(params))
if tree is None:
return None
article = tree.find('PubmedArticle/MedlineCitation/Article')
if article is None:
return None
abstract = article.findall('Abstract/AbstractText')
if abstract is None:
return None
else:
abstract_text = ' '.join([' ' if abst.text is None
else abst.text for abst in abstract])
return abstract_text
|
Add API for searching all PubMed abstracts and downloading the abstract text.
|
Add API for searching all PubMed abstracts and downloading
the abstract text.
|
Python
|
bsd-2-clause
|
johnbachman/indra,sorgerlab/indra,jmuhlich/indra,pvtodorov/indra,johnbachman/belpy,johnbachman/indra,bgyori/indra,johnbachman/indra,sorgerlab/belpy,jmuhlich/indra,sorgerlab/belpy,sorgerlab/indra,pvtodorov/indra,jmuhlich/indra,pvtodorov/indra,pvtodorov/indra,bgyori/indra,sorgerlab/indra,johnbachman/belpy,sorgerlab/belpy,bgyori/indra,johnbachman/belpy
|
Add API for searching all PubMed abstracts and downloading
the abstract text.
|
import urllib, urllib2
import xml.etree.ElementTree as ET
pubmed_search = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi'
pubmed_fetch = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi'
def send_request(url, data):
try:
req = urllib2.Request(url, data)
res = urllib2.urlopen(req)
xml_str = res.read()
tree = ET.fromstring(xml_str)
except:
return None
return tree
def get_ids(search_term, retmax=1000):
params = {'db': 'pubmed',
'term': search_term,
'retstart': 0,
'retmax': retmax}
tree = send_request(pubmed_search, urllib.urlencode(params))
if tree is None:
return []
count = int(tree.find('Count').text)
id_terms = tree.findall('IdList/Id')
if id_terms is None:
return []
ids = [idt.text for idt in id_terms]
if count != len(ids):
        print 'Not all ids were retrieved, limited at %d.' % retmax
return ids
def get_abstract(pubmed_id):
params = {'db': 'pubmed',
'retmode': 'xml',
'rettype': 'abstract',
'id': pubmed_id}
tree = send_request(pubmed_fetch, urllib.urlencode(params))
if tree is None:
return None
article = tree.find('PubmedArticle/MedlineCitation/Article')
if article is None:
return None
abstract = article.findall('Abstract/AbstractText')
if abstract is None:
return None
else:
abstract_text = ' '.join([' ' if abst.text is None
else abst.text for abst in abstract])
return abstract_text
|
<commit_before><commit_msg>Add API for searching all PubMed abstracts and downloading
the abstract text.<commit_after>
|
import urllib, urllib2
import xml.etree.ElementTree as ET
pubmed_search = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi'
pubmed_fetch = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi'
def send_request(url, data):
try:
req = urllib2.Request(url, data)
res = urllib2.urlopen(req)
xml_str = res.read()
tree = ET.fromstring(xml_str)
except:
return None
return tree
def get_ids(search_term, retmax=1000):
params = {'db': 'pubmed',
'term': search_term,
'retstart': 0,
'retmax': retmax}
tree = send_request(pubmed_search, urllib.urlencode(params))
if tree is None:
return []
count = int(tree.find('Count').text)
id_terms = tree.findall('IdList/Id')
if id_terms is None:
return []
ids = [idt.text for idt in id_terms]
if count != len(ids):
        print 'Not all ids were retrieved, limited at %d.' % retmax
return ids
def get_abstract(pubmed_id):
params = {'db': 'pubmed',
'retmode': 'xml',
'rettype': 'abstract',
'id': pubmed_id}
tree = send_request(pubmed_fetch, urllib.urlencode(params))
if tree is None:
return None
article = tree.find('PubmedArticle/MedlineCitation/Article')
if article is None:
return None
abstract = article.findall('Abstract/AbstractText')
if abstract is None:
return None
else:
abstract_text = ' '.join([' ' if abst.text is None
else abst.text for abst in abstract])
return abstract_text
|
Add API for searching all PubMed abstracts and downloading
the abstract text.import urllib, urllib2
import xml.etree.ElementTree as ET
pubmed_search = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi'
pubmed_fetch = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi'
def send_request(url, data):
try:
req = urllib2.Request(url, data)
res = urllib2.urlopen(req)
xml_str = res.read()
tree = ET.fromstring(xml_str)
except:
return None
return tree
def get_ids(search_term, retmax=1000):
params = {'db': 'pubmed',
'term': search_term,
'retstart': 0,
'retmax': retmax}
tree = send_request(pubmed_search, urllib.urlencode(params))
if tree is None:
return []
count = int(tree.find('Count').text)
id_terms = tree.findall('IdList/Id')
if id_terms is None:
return []
ids = [idt.text for idt in id_terms]
if count != len(ids):
        print 'Not all ids were retrieved, limited at %d.' % retmax
return ids
def get_abstract(pubmed_id):
params = {'db': 'pubmed',
'retmode': 'xml',
'rettype': 'abstract',
'id': pubmed_id}
tree = send_request(pubmed_fetch, urllib.urlencode(params))
if tree is None:
return None
article = tree.find('PubmedArticle/MedlineCitation/Article')
if article is None:
return None
abstract = article.findall('Abstract/AbstractText')
if abstract is None:
return None
else:
abstract_text = ' '.join([' ' if abst.text is None
else abst.text for abst in abstract])
return abstract_text
|
<commit_before><commit_msg>Add API for searching all PubMed abstracts and downloading
the abstract text.<commit_after>import urllib, urllib2
import xml.etree.ElementTree as ET
pubmed_search = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi'
pubmed_fetch = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi'
def send_request(url, data):
try:
req = urllib2.Request(url, data)
res = urllib2.urlopen(req)
xml_str = res.read()
tree = ET.fromstring(xml_str)
except:
return None
return tree
def get_ids(search_term, retmax=1000):
params = {'db': 'pubmed',
'term': search_term,
'retstart': 0,
'retmax': retmax}
tree = send_request(pubmed_search, urllib.urlencode(params))
if tree is None:
return []
count = int(tree.find('Count').text)
id_terms = tree.findall('IdList/Id')
if id_terms is None:
return []
ids = [idt.text for idt in id_terms]
if count != len(ids):
        print 'Not all ids were retrieved, limited at %d.' % retmax
return ids
def get_abstract(pubmed_id):
params = {'db': 'pubmed',
'retmode': 'xml',
'rettype': 'abstract',
'id': pubmed_id}
tree = send_request(pubmed_fetch, urllib.urlencode(params))
if tree is None:
return None
article = tree.find('PubmedArticle/MedlineCitation/Article')
if article is None:
return None
abstract = article.findall('Abstract/AbstractText')
if abstract is None:
return None
else:
abstract_text = ' '.join([' ' if abst.text is None
else abst.text for abst in abstract])
return abstract_text
|
|
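An illustrative Python 2 session inside this module; the search term and slice are invented for demonstration, and a live NCBI connection is required:
ids = get_ids('apoptosis[Title]', retmax=100)
print 'Found %d ids' % len(ids)
for pubmed_id in ids[:3]:
    abstract = get_abstract(pubmed_id)
    if abstract is not None:
        print abstract[:80]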
52e9c35d245184c254e088a52ba6c12f69313137
|
app/timetables/migrations/0008_auto_20160913_2203.py
|
app/timetables/migrations/0008_auto_20160913_2203.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-09-13 22:03
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('timetables', '0007_auto_20160908_1221'),
]
operations = [
migrations.CreateModel(
name='Admin',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_super', models.BooleanField()),
('timetable', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='timetables.Timetable')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='timetable',
name='admins',
field=models.ManyToManyField(through='timetables.Admin', to=settings.AUTH_USER_MODEL),
),
]
|
Make migration file for Timetables administratorship
|
Make migration file for Timetables administratorship
|
Python
|
mit
|
teamtaverna/core
|
Make migration file for Timetables administratorship
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-09-13 22:03
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('timetables', '0007_auto_20160908_1221'),
]
operations = [
migrations.CreateModel(
name='Admin',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_super', models.BooleanField()),
('timetable', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='timetables.Timetable')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='timetable',
name='admins',
field=models.ManyToManyField(through='timetables.Admin', to=settings.AUTH_USER_MODEL),
),
]
|
<commit_before><commit_msg>Make migration file for Timetables administratorship<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-09-13 22:03
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('timetables', '0007_auto_20160908_1221'),
]
operations = [
migrations.CreateModel(
name='Admin',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_super', models.BooleanField()),
('timetable', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='timetables.Timetable')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='timetable',
name='admins',
field=models.ManyToManyField(through='timetables.Admin', to=settings.AUTH_USER_MODEL),
),
]
|
Make migration file for Timetables administratorship# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-09-13 22:03
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('timetables', '0007_auto_20160908_1221'),
]
operations = [
migrations.CreateModel(
name='Admin',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_super', models.BooleanField()),
('timetable', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='timetables.Timetable')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='timetable',
name='admins',
field=models.ManyToManyField(through='timetables.Admin', to=settings.AUTH_USER_MODEL),
),
]
|
<commit_before><commit_msg>Make migration file for Timetables administratorship<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-09-13 22:03
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('timetables', '0007_auto_20160908_1221'),
]
operations = [
migrations.CreateModel(
name='Admin',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_super', models.BooleanField()),
('timetable', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='timetables.Timetable')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='timetable',
name='admins',
field=models.ManyToManyField(through='timetables.Admin', to=settings.AUTH_USER_MODEL),
),
]
|
|
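The models this migration corresponds to, reconstructed from the operations alone (field layout inferred from the migration, not copied from the app's source):
from django.conf import settings
from django.db import models

class Timetable(models.Model):
    # through='Admin' lets each administratorship carry extra state (is_super)
    admins = models.ManyToManyField(settings.AUTH_USER_MODEL, through='Admin')

class Admin(models.Model):
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    timetable = models.ForeignKey(Timetable, on_delete=models.CASCADE)
    is_super = models.BooleanField()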
5702b080695818273463667045d9622ad9b02694
|
apps/challenge/migrations/0051_auto_20181019_1411.py
|
apps/challenge/migrations/0051_auto_20181019_1411.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-10-19 12:11
from __future__ import unicode_literals
from django.db import migrations
import multiselectfield.db.fields
class Migration(migrations.Migration):
dependencies = [
('challenge', '0050_historicalhelperseasonworkwish_historicalhelpersessionavailability_historicalqualification_historica'),
]
operations = [
migrations.AlterField(
model_name='historicalseason',
name='cantons',
field=multiselectfield.db.fields.MultiSelectField(choices=[('AR', 'Appenzell Ausserrhoden'), ('BE', 'Berne'), ('BS', 'Basel-Stadt'), ('FR', 'Fribourg'), ('GE', 'Geneva'), ('JU', 'Jura'), ('LU', 'Lucerne'), ('NE', 'Neuchatel'), ('SG', 'St. Gallen'), ('VD', 'Vaud'), ('VS', 'Valais'), ('WS', 'Haut-Valais'), ('ZH', 'Zurich')], max_length=38, verbose_name='Cantons'),
),
]
|
Add missing migration for some reason
|
Add missing migration for some reason
|
Python
|
agpl-3.0
|
defivelo/db,defivelo/db,defivelo/db
|
Add missing migration for some reason
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-10-19 12:11
from __future__ import unicode_literals
from django.db import migrations
import multiselectfield.db.fields
class Migration(migrations.Migration):
dependencies = [
('challenge', '0050_historicalhelperseasonworkwish_historicalhelpersessionavailability_historicalqualification_historica'),
]
operations = [
migrations.AlterField(
model_name='historicalseason',
name='cantons',
field=multiselectfield.db.fields.MultiSelectField(choices=[('AR', 'Appenzell Ausserrhoden'), ('BE', 'Berne'), ('BS', 'Basel-Stadt'), ('FR', 'Fribourg'), ('GE', 'Geneva'), ('JU', 'Jura'), ('LU', 'Lucerne'), ('NE', 'Neuchatel'), ('SG', 'St. Gallen'), ('VD', 'Vaud'), ('VS', 'Valais'), ('WS', 'Haut-Valais'), ('ZH', 'Zurich')], max_length=38, verbose_name='Cantons'),
),
]
|
<commit_before><commit_msg>Add missing migration for some reason<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-10-19 12:11
from __future__ import unicode_literals
from django.db import migrations
import multiselectfield.db.fields
class Migration(migrations.Migration):
dependencies = [
('challenge', '0050_historicalhelperseasonworkwish_historicalhelpersessionavailability_historicalqualification_historica'),
]
operations = [
migrations.AlterField(
model_name='historicalseason',
name='cantons',
field=multiselectfield.db.fields.MultiSelectField(choices=[('AR', 'Appenzell Ausserrhoden'), ('BE', 'Berne'), ('BS', 'Basel-Stadt'), ('FR', 'Fribourg'), ('GE', 'Geneva'), ('JU', 'Jura'), ('LU', 'Lucerne'), ('NE', 'Neuchatel'), ('SG', 'St. Gallen'), ('VD', 'Vaud'), ('VS', 'Valais'), ('WS', 'Haut-Valais'), ('ZH', 'Zurich')], max_length=38, verbose_name='Cantons'),
),
]
|
Add missing migration for some reason# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-10-19 12:11
from __future__ import unicode_literals
from django.db import migrations
import multiselectfield.db.fields
class Migration(migrations.Migration):
dependencies = [
('challenge', '0050_historicalhelperseasonworkwish_historicalhelpersessionavailability_historicalqualification_historica'),
]
operations = [
migrations.AlterField(
model_name='historicalseason',
name='cantons',
field=multiselectfield.db.fields.MultiSelectField(choices=[('AR', 'Appenzell Ausserrhoden'), ('BE', 'Berne'), ('BS', 'Basel-Stadt'), ('FR', 'Fribourg'), ('GE', 'Geneva'), ('JU', 'Jura'), ('LU', 'Lucerne'), ('NE', 'Neuchatel'), ('SG', 'St. Gallen'), ('VD', 'Vaud'), ('VS', 'Valais'), ('WS', 'Haut-Valais'), ('ZH', 'Zurich')], max_length=38, verbose_name='Cantons'),
),
]
|
<commit_before><commit_msg>Add missing migration for some reason<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-10-19 12:11
from __future__ import unicode_literals
from django.db import migrations
import multiselectfield.db.fields
class Migration(migrations.Migration):
dependencies = [
('challenge', '0050_historicalhelperseasonworkwish_historicalhelpersessionavailability_historicalqualification_historica'),
]
operations = [
migrations.AlterField(
model_name='historicalseason',
name='cantons',
field=multiselectfield.db.fields.MultiSelectField(choices=[('AR', 'Appenzell Ausserrhoden'), ('BE', 'Berne'), ('BS', 'Basel-Stadt'), ('FR', 'Fribourg'), ('GE', 'Geneva'), ('JU', 'Jura'), ('LU', 'Lucerne'), ('NE', 'Neuchatel'), ('SG', 'St. Gallen'), ('VD', 'Vaud'), ('VS', 'Valais'), ('WS', 'Haut-Valais'), ('ZH', 'Zurich')], max_length=38, verbose_name='Cantons'),
),
]
|
|
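The likely "reason": the historical models in the dependency suggest django-simple-history mirrors every Season field onto HistoricalSeason, so any edit to the canton choices (including translated labels) re-registers as a field change there. A minimal reproduction under that assumption:
from django.db import models
from multiselectfield import MultiSelectField

# Editing this list (or its labels) makes makemigrations emit an AlterField
# for both Season and the mirrored HistoricalSeason.
CANTONS = [('BE', 'Berne'), ('GE', 'Geneva'), ('VD', 'Vaud')]

class Season(models.Model):
    cantons = MultiSelectField(choices=CANTONS, max_length=38, verbose_name='Cantons')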
ff9d6bc72673843fcdf6f7e0d866beec5bdb45f0
|
mezzanine/accounts/models.py
|
mezzanine/accounts/models.py
|
from django.db.models.signals import post_save
from django.dispatch import receiver
from mezzanine.utils.models import get_user_model
from mezzanine.accounts import get_profile_model, get_profile_user_fieldname
# Signal for ensuring users have a profile instance.
Profile = get_profile_model()
User = get_user_model()
if Profile:
user_field = get_profile_user_fieldname()
@receiver(post_save, sender=User)
def user_saved(sender=None, instance=None, **kwargs):
Profile.objects.get_or_create(**{str(user_field): instance})
|
from django.db import connection
from django.db.models.signals import post_save
from django.db.utils import DatabaseError
from django.dispatch import receiver
from mezzanine.utils.models import get_user_model
from mezzanine.accounts import get_profile_model, get_profile_user_fieldname
# Signal for ensuring users have a profile instance.
Profile = get_profile_model()
User = get_user_model()
if Profile:
user_field = get_profile_user_fieldname()
@receiver(post_save, sender=User)
def user_saved(sender=None, instance=None, **kwargs):
try:
Profile.objects.get_or_create(**{str(user_field): instance})
except DatabaseError:
# User creation in initial syncdb may have been triggered,
# while profile model is under migration management and
# doesn't exist yet. We close the connection so that it
# gets re-opened, allowing syncdb to continue and complete.
connection.close()
|
Allow initial user creation in syncdb when a profile model is managed by migrations and doesn't yet exist.
|
Allow initial user creation in syncdb when a profile model is managed by migrations and doesn't yet exist.
|
Python
|
bsd-2-clause
|
jjz/mezzanine,cccs-web/mezzanine,stephenmcd/mezzanine,Kniyl/mezzanine,gradel/mezzanine,mush42/mezzanine,dekomote/mezzanine-modeltranslation-backport,eino-makitalo/mezzanine,dsanders11/mezzanine,cccs-web/mezzanine,christianwgd/mezzanine,scarcry/snm-mezzanine,theclanks/mezzanine,webounty/mezzanine,dovydas/mezzanine,nikolas/mezzanine,douglaskastle/mezzanine,readevalprint/mezzanine,dovydas/mezzanine,emile2016/mezzanine,ZeroXn/mezzanine,vladir/mezzanine,damnfine/mezzanine,vladir/mezzanine,damnfine/mezzanine,stbarnabas/mezzanine,frankchin/mezzanine,stephenmcd/mezzanine,saintbird/mezzanine,Cajoline/mezzanine,Kniyl/mezzanine,wyzex/mezzanine,jjz/mezzanine,readevalprint/mezzanine,viaregio/mezzanine,jerivas/mezzanine,promil23/mezzanine,jerivas/mezzanine,spookylukey/mezzanine,viaregio/mezzanine,nikolas/mezzanine,douglaskastle/mezzanine,Kniyl/mezzanine,sjuxax/mezzanine,webounty/mezzanine,promil23/mezzanine,PegasusWang/mezzanine,biomassives/mezzanine,scarcry/snm-mezzanine,PegasusWang/mezzanine,SoLoHiC/mezzanine,nikolas/mezzanine,agepoly/mezzanine,fusionbox/mezzanine,frankier/mezzanine,PegasusWang/mezzanine,fusionbox/mezzanine,geodesign/mezzanine,sjdines/mezzanine,batpad/mezzanine,AlexHill/mezzanine,scarcry/snm-mezzanine,molokov/mezzanine,viaregio/mezzanine,dekomote/mezzanine-modeltranslation-backport,wbtuomela/mezzanine,promil23/mezzanine,wbtuomela/mezzanine,eino-makitalo/mezzanine,wyzex/mezzanine,saintbird/mezzanine,stephenmcd/mezzanine,joshcartme/mezzanine,gradel/mezzanine,wyzex/mezzanine,adrian-the-git/mezzanine,emile2016/mezzanine,geodesign/mezzanine,frankier/mezzanine,dovydas/mezzanine,industrydive/mezzanine,emile2016/mezzanine,stbarnabas/mezzanine,agepoly/mezzanine,SoLoHiC/mezzanine,readevalprint/mezzanine,SoLoHiC/mezzanine,dustinrb/mezzanine,biomassives/mezzanine,christianwgd/mezzanine,tuxinhang1989/mezzanine,theclanks/mezzanine,sjdines/mezzanine,tuxinhang1989/mezzanine,webounty/mezzanine,Cajoline/mezzanine,dsanders11/mezzanine,jjz/mezzanine,Skytorn86/mezzanine,ryneeverett/mezzanine,joshcartme/mezzanine,Skytorn86/mezzanine,biomassives/mezzanine,agepoly/mezzanine,spookylukey/mezzanine,ZeroXn/mezzanine,spookylukey/mezzanine,industrydive/mezzanine,joshcartme/mezzanine,eino-makitalo/mezzanine,mush42/mezzanine,christianwgd/mezzanine,ryneeverett/mezzanine,jerivas/mezzanine,dustinrb/mezzanine,theclanks/mezzanine,sjdines/mezzanine,vladir/mezzanine,Cicero-Zhao/mezzanine,adrian-the-git/mezzanine,Cicero-Zhao/mezzanine,industrydive/mezzanine,gradel/mezzanine,dekomote/mezzanine-modeltranslation-backport,sjuxax/mezzanine,douglaskastle/mezzanine,adrian-the-git/mezzanine,AlexHill/mezzanine,damnfine/mezzanine,Cajoline/mezzanine,frankchin/mezzanine,molokov/mezzanine,dsanders11/mezzanine,ryneeverett/mezzanine,tuxinhang1989/mezzanine,wbtuomela/mezzanine,ZeroXn/mezzanine,Skytorn86/mezzanine,batpad/mezzanine,mush42/mezzanine,frankchin/mezzanine,saintbird/mezzanine,sjuxax/mezzanine,frankier/mezzanine,molokov/mezzanine,geodesign/mezzanine,dustinrb/mezzanine
|
from django.db.models.signals import post_save
from django.dispatch import receiver
from mezzanine.utils.models import get_user_model
from mezzanine.accounts import get_profile_model, get_profile_user_fieldname
# Signal for ensuring users have a profile instance.
Profile = get_profile_model()
User = get_user_model()
if Profile:
user_field = get_profile_user_fieldname()
@receiver(post_save, sender=User)
def user_saved(sender=None, instance=None, **kwargs):
Profile.objects.get_or_create(**{str(user_field): instance})
Allow initial user creation in syncdb when a profile model is managed by migrations and doesn't yet exist.
|
from django.db import connection
from django.db.models.signals import post_save
from django.db.utils import DatabaseError
from django.dispatch import receiver
from mezzanine.utils.models import get_user_model
from mezzanine.accounts import get_profile_model, get_profile_user_fieldname
# Signal for ensuring users have a profile instance.
Profile = get_profile_model()
User = get_user_model()
if Profile:
user_field = get_profile_user_fieldname()
@receiver(post_save, sender=User)
def user_saved(sender=None, instance=None, **kwargs):
try:
Profile.objects.get_or_create(**{str(user_field): instance})
except DatabaseError:
# User creation in initial syncdb may have been triggered,
# while profile model is under migration management and
# doesn't exist yet. We close the connection so that it
# gets re-opened, allowing syncdb to continue and complete.
connection.close()
|
<commit_before>from django.db.models.signals import post_save
from django.dispatch import receiver
from mezzanine.utils.models import get_user_model
from mezzanine.accounts import get_profile_model, get_profile_user_fieldname
# Signal for ensuring users have a profile instance.
Profile = get_profile_model()
User = get_user_model()
if Profile:
user_field = get_profile_user_fieldname()
@receiver(post_save, sender=User)
def user_saved(sender=None, instance=None, **kwargs):
Profile.objects.get_or_create(**{str(user_field): instance})
<commit_msg>Allow initial user creation in syncdb when a profile model is managed by migrations and doesn't yet exist.<commit_after>
|
from django.db import connection
from django.db.models.signals import post_save
from django.db.utils import DatabaseError
from django.dispatch import receiver
from mezzanine.utils.models import get_user_model
from mezzanine.accounts import get_profile_model, get_profile_user_fieldname
# Signal for ensuring users have a profile instance.
Profile = get_profile_model()
User = get_user_model()
if Profile:
user_field = get_profile_user_fieldname()
@receiver(post_save, sender=User)
def user_saved(sender=None, instance=None, **kwargs):
try:
Profile.objects.get_or_create(**{str(user_field): instance})
except DatabaseError:
# User creation in initial syncdb may have been triggered,
# while profile model is under migration management and
# doesn't exist yet. We close the connection so that it
# gets re-opened, allowing syncdb to continue and complete.
connection.close()
|
from django.db.models.signals import post_save
from django.dispatch import receiver
from mezzanine.utils.models import get_user_model
from mezzanine.accounts import get_profile_model, get_profile_user_fieldname
# Signal for ensuring users have a profile instance.
Profile = get_profile_model()
User = get_user_model()
if Profile:
user_field = get_profile_user_fieldname()
@receiver(post_save, sender=User)
def user_saved(sender=None, instance=None, **kwargs):
Profile.objects.get_or_create(**{str(user_field): instance})
Allow initial user creation in syncdb when a profile model is managed by migrations and doesn't yet exist.
from django.db import connection
from django.db.models.signals import post_save
from django.db.utils import DatabaseError
from django.dispatch import receiver
from mezzanine.utils.models import get_user_model
from mezzanine.accounts import get_profile_model, get_profile_user_fieldname
# Signal for ensuring users have a profile instance.
Profile = get_profile_model()
User = get_user_model()
if Profile:
user_field = get_profile_user_fieldname()
@receiver(post_save, sender=User)
def user_saved(sender=None, instance=None, **kwargs):
try:
Profile.objects.get_or_create(**{str(user_field): instance})
except DatabaseError:
# User creation in initial syncdb may have been triggered,
# while profile model is under migration management and
# doesn't exist yet. We close the connection so that it
# gets re-opened, allowing syncdb to continue and complete.
connection.close()
|
<commit_before>from django.db.models.signals import post_save
from django.dispatch import receiver
from mezzanine.utils.models import get_user_model
from mezzanine.accounts import get_profile_model, get_profile_user_fieldname
# Signal for ensuring users have a profile instance.
Profile = get_profile_model()
User = get_user_model()
if Profile:
user_field = get_profile_user_fieldname()
@receiver(post_save, sender=User)
def user_saved(sender=None, instance=None, **kwargs):
Profile.objects.get_or_create(**{str(user_field): instance})
<commit_msg>Allow initial user creation in syncdb when a profile model is managed by migrations and doesn't yet exist.<commit_after>
from django.db import connection
from django.db.models.signals import post_save
from django.db.utils import DatabaseError
from django.dispatch import receiver
from mezzanine.utils.models import get_user_model
from mezzanine.accounts import get_profile_model, get_profile_user_fieldname
# Signal for ensuring users have a profile instance.
Profile = get_profile_model()
User = get_user_model()
if Profile:
user_field = get_profile_user_fieldname()
@receiver(post_save, sender=User)
def user_saved(sender=None, instance=None, **kwargs):
try:
Profile.objects.get_or_create(**{str(user_field): instance})
except DatabaseError:
# User creation in initial syncdb may have been triggered,
# while profile model is under migration management and
# doesn't exist yet. We close the connection so that it
# gets re-opened, allowing syncdb to continue and complete.
connection.close()
|
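A sketch of the path being guarded, assuming a configured profile model. During the initial syncdb the profile table may not exist yet, so get_or_create raises DatabaseError, the exception is swallowed, and the connection is recycled:
from django.contrib.auth import get_user_model

# Saving any user fires post_save -> user_saved() above.
user = get_user_model().objects.create_user('demo', 'demo@example.com', 'secret')
# With the profile table present: a Profile row now exists for `user`.
# Mid-syncdb (table missing): the DatabaseError is caught and syncdb continues.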
f0f9df5020bfec920911ae1fecb2d480dbf8e3d5
|
prismriver/test/test_preprocessor.py
|
prismriver/test/test_preprocessor.py
|
from prismriver import preprocessor
test_cases = [('Artist', 'Title', 'Artist', 'Title'),
(' Artist', ' Title', 'Artist', 'Title'),
('Artist ', 'Title ', 'Artist', 'Title'),
(' Artist ', ' Title ', 'Artist', 'Title')]
def test():
for params in test_cases:
yield check_em, params[0], params[1], params[2], params[3]
def check_em(artist: str, title: str, clean_artist: str, clean_title: str):
(processed_artist, processed_title) = preprocessor.apply(artist, title, ['trim'])
assert processed_artist == clean_artist
assert processed_title == clean_title
|
Add test case for preprocessor
|
Add test case for preprocessor
|
Python
|
mit
|
anlar/prismriver-lyrics,anlar/prismriver-lyrics,anlar/prismriver,anlar/prismriver
|
Add test case for preprocessor
|
from prismriver import preprocessor
test_cases = [('Artist', 'Title', 'Artist', 'Title'),
(' Artist', ' Title', 'Artist', 'Title'),
('Artist ', 'Title ', 'Artist', 'Title'),
(' Artist ', ' Title ', 'Artist', 'Title')]
def test():
for params in test_cases:
yield check_em, params[0], params[1], params[2], params[3]
def check_em(artist: str, title: str, clean_artist: str, clean_title: str):
(processed_artist, processed_title) = preprocessor.apply(artist, title, ['trim'])
assert processed_artist == clean_artist
assert processed_title == clean_title
|
<commit_before><commit_msg>Add test case for preprocessor<commit_after>
|
from prismriver import preprocessor
test_cases = [('Artist', 'Title', 'Artist', 'Title'),
(' Artist', ' Title', 'Artist', 'Title'),
('Artist ', 'Title ', 'Artist', 'Title'),
(' Artist ', ' Title ', 'Artist', 'Title')]
def test():
for params in test_cases:
yield check_em, params[0], params[1], params[2], params[3]
def check_em(artist: str, title: str, clean_artist: str, clean_title: str):
(processed_artist, processed_title) = preprocessor.apply(artist, title, ['trim'])
assert processed_artist == clean_artist
assert processed_title == clean_title
|
Add test case for preprocessorfrom prismriver import preprocessor
test_cases = [('Artist', 'Title', 'Artist', 'Title'),
(' Artist', ' Title', 'Artist', 'Title'),
('Artist ', 'Title ', 'Artist', 'Title'),
(' Artist ', ' Title ', 'Artist', 'Title')]
def test():
for params in test_cases:
yield check_em, params[0], params[1], params[2], params[3]
def check_em(artist: str, title: str, clean_artist: str, clean_title: str):
(processed_artist, processed_title) = preprocessor.apply(artist, title, ['trim'])
assert processed_artist == clean_artist
assert processed_title == clean_title
|
<commit_before><commit_msg>Add test case for preprocessor<commit_after>from prismriver import preprocessor
test_cases = [('Artist', 'Title', 'Artist', 'Title'),
(' Artist', ' Title', 'Artist', 'Title'),
('Artist ', 'Title ', 'Artist', 'Title'),
(' Artist ', ' Title ', 'Artist', 'Title')]
def test():
for params in test_cases:
yield check_em, params[0], params[1], params[2], params[3]
def check_em(artist: str, title: str, clean_artist: str, clean_title: str):
(processed_artist, processed_title) = preprocessor.apply(artist, title, ['trim'])
assert processed_artist == clean_artist
assert processed_title == clean_title
|
|
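The same cases expressed with pytest.mark.parametrize — the modern equivalent of the nose-style yield tests above; a sketch, not part of the commit:
import pytest
from prismriver import preprocessor

@pytest.mark.parametrize("artist, title, clean_artist, clean_title", [
    ('Artist', 'Title', 'Artist', 'Title'),
    (' Artist ', ' Title ', 'Artist', 'Title'),
])
def test_trim(artist, title, clean_artist, clean_title):
    assert preprocessor.apply(artist, title, ['trim']) == (clean_artist, clean_title)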
3500d3fc7aa8135dec16f3fbd63834a5e5e6958f
|
nuts/test/test_Evaluation.py
|
nuts/test/test_Evaluation.py
|
import pytest
from src.data.Evaluation import Evaluation
from src.data.EvaluationResult import EvaluationResult
@pytest.fixture
def example_evaluation():
evaluation = Evaluation("expected_result", "=")
evaluation.evaluation_results.append(EvaluationResult("minion", "actual_result", True))
evaluation.evaluation_results.append(EvaluationResult("minion2", "actual_result", True))
evaluation.evaluation_results.append(EvaluationResult("minion3", "actual_result", True))
return evaluation
class TestEvaluation:
def test_result_true(self, example_evaluation):
assert example_evaluation.result() is True
def test_first_result_false(self, example_evaluation):
example_evaluation.evaluation_results[0].passed = False
assert example_evaluation.result() is False
def test_middle_result_false(self, example_evaluation):
example_evaluation.evaluation_results[1].passed = False
assert example_evaluation.result() is False
def test_last_result_false(self, example_evaluation):
example_evaluation.evaluation_results[-1].passed = False
assert example_evaluation.result() is False
def test_result_empty(self):
evaluation = Evaluation("expected_result", "=")
assert evaluation.result() is True
|
Add basic tests for Evaluation class
|
Add basic tests for Evaluation class
|
Python
|
mit
|
HSRNetwork/Nuts
|
Add basic tests for Evaluation class
|
import pytest
from src.data.Evaluation import Evaluation
from src.data.EvaluationResult import EvaluationResult
@pytest.fixture
def example_evaluation():
evaluation = Evaluation("expected_result", "=")
evaluation.evaluation_results.append(EvaluationResult("minion", "actual_result", True))
evaluation.evaluation_results.append(EvaluationResult("minion2", "actual_result", True))
evaluation.evaluation_results.append(EvaluationResult("minion3", "actual_result", True))
return evaluation
class TestEvaluation:
def test_result_true(self, example_evaluation):
assert example_evaluation.result() is True
def test_first_result_false(self, example_evaluation):
example_evaluation.evaluation_results[0].passed = False
assert example_evaluation.result() is False
def test_middle_result_false(self, example_evaluation):
example_evaluation.evaluation_results[1].passed = False
assert example_evaluation.result() is False
def test_last_result_false(self, example_evaluation):
example_evaluation.evaluation_results[-1].passed = False
assert example_evaluation.result() is False
def test_result_empty(self):
evaluation = Evaluation("expected_result", "=")
assert evaluation.result() is True
|
<commit_before><commit_msg>Add basic tests for Evaluation class<commit_after>
|
import pytest
from src.data.Evaluation import Evaluation
from src.data.EvaluationResult import EvaluationResult
@pytest.fixture
def example_evaluation():
evaluation = Evaluation("expected_result", "=")
evaluation.evaluation_results.append(EvaluationResult("minion", "actual_result", True))
evaluation.evaluation_results.append(EvaluationResult("minion2", "actual_result", True))
evaluation.evaluation_results.append(EvaluationResult("minion3", "actual_result", True))
return evaluation
class TestEvaluation:
def test_result_true(self, example_evaluation):
assert example_evaluation.result() is True
def test_first_result_false(self, example_evaluation):
example_evaluation.evaluation_results[0].passed = False
assert example_evaluation.result() is False
def test_middle_result_false(self, example_evaluation):
example_evaluation.evaluation_results[1].passed = False
assert example_evaluation.result() is False
def test_last_result_false(self, example_evaluation):
example_evaluation.evaluation_results[-1].passed = False
assert example_evaluation.result() is False
def test_result_empty(self):
evaluation = Evaluation("expected_result", "=")
assert evaluation.result() is True
|
Add basic tests for Evaluation classimport pytest
from src.data.Evaluation import Evaluation
from src.data.EvaluationResult import EvaluationResult
@pytest.fixture
def example_evaluation():
evaluation = Evaluation("expected_result", "=")
evaluation.evaluation_results.append(EvaluationResult("minion", "actual_result", True))
evaluation.evaluation_results.append(EvaluationResult("minion2", "actual_result", True))
evaluation.evaluation_results.append(EvaluationResult("minion3", "actual_result", True))
return evaluation
class TestEvaluation:
def test_result_true(self, example_evaluation):
assert example_evaluation.result() is True
def test_first_result_false(self, example_evaluation):
example_evaluation.evaluation_results[0].passed = False
assert example_evaluation.result() is False
def test_middle_result_false(self, example_evaluation):
example_evaluation.evaluation_results[1].passed = False
assert example_evaluation.result() is False
def test_last_result_false(self, example_evaluation):
example_evaluation.evaluation_results[-1].passed = False
assert example_evaluation.result() is False
def test_result_empty(self):
evaluation = Evaluation("expected_result", "=")
assert evaluation.result() is True
|
<commit_before><commit_msg>Add basic tests for Evaluation class<commit_after>import pytest
from src.data.Evaluation import Evaluation
from src.data.EvaluationResult import EvaluationResult
@pytest.fixture
def example_evaluation():
evaluation = Evaluation("expected_result", "=")
evaluation.evaluation_results.append(EvaluationResult("minion", "actual_result", True))
evaluation.evaluation_results.append(EvaluationResult("minion2", "actual_result", True))
evaluation.evaluation_results.append(EvaluationResult("minion3", "actual_result", True))
return evaluation
class TestEvaluation:
def test_result_true(self, example_evaluation):
assert example_evaluation.result() is True
def test_first_result_false(self, example_evaluation):
example_evaluation.evaluation_results[0].passed = False
assert example_evaluation.result() is False
def test_middle_result_false(self, example_evaluation):
example_evaluation.evaluation_results[1].passed = False
assert example_evaluation.result() is False
def test_last_result_false(self, example_evaluation):
example_evaluation.evaluation_results[-1].passed = False
assert example_evaluation.result() is False
def test_result_empty(self):
evaluation = Evaluation("expected_result", "=")
assert evaluation.result() is True
|
|
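An implementation of Evaluation.result() consistent with all five tests (all-of semantics, vacuously True when empty); inferred from the assertions, not copied from src.data.Evaluation:
class Evaluation(object):
    def __init__(self, expected_result, operator):
        self.expected_result = expected_result
        self.operator = operator
        self.evaluation_results = []

    def result(self):
        # True only if every minion passed; all() over an empty list is True,
        # which matches test_result_empty.
        return all(r.passed for r in self.evaluation_results)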
cff1286cfca16b75fc3b646c32277976b1c53169
|
data_analysis/get_model_statistics.py
|
data_analysis/get_model_statistics.py
|
# -*- coding: utf-8 -*-
"""Script which can be used to compare the features obtained of two different influenza models
Usage:
get_model_statistics.py <model> [--country=<country_name>] [--no-future] [--basedir=<directory>] [--start-year=<start_year>] [--end-year=<end_year>] [--save] [--no-graph]
<baseline> Data file of the first model
<other_method> Data file of the second model
-h, --help Print this help message
"""
import pandas as pd
from scipy import stats
from docopt import docopt
import os
import glob
def get_results_filename(basepath):
files = [f for f in glob.glob(basepath + "/*-prediction.csv", recursive=True)]
y = os.path.basename(files[0]).split("-")[0]
y2 = os.path.basename(files[0]).split("-")[1]
return "{}-{}".format(y, y2)
if __name__ == "__main__":
args = docopt(__doc__)
model = args["<model>"]
base_dir = args["--basedir"] if args["--basedir"] else "../complete_results"
country = args["--country"] if args["--country"] else "italy"
future = "no-future" if args["--no-future"] else "future"
# Read the baseline results and merge them
model_path = os.path.join(base_dir, args["<model>"], future, country)
season_years = get_results_filename(model_path)
model_file = os.path.join(model_path, "{}-prediction.csv".format(season_years))
# Load the data
data = pd.read_csv(model_file)
# Describe the data
print("")
print("[*] Describe the given dataset {}".format(model_file))
print(data.describe())
# Generate residuals
print("")
print("[*] Describe the residuals")
residuals = data["incidence"]-data["prediction"]
print(residuals.describe())
# Get some statistics
print("")
print("Pearson Correlation (value/p): ", stats.pearsonr(data["prediction"], data["incidence"]))
print("")
|
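For reference, stats.pearsonr returns an (r, p-value) pair, which is what the script's last print emits; a tiny synthetic check with invented numbers:
import numpy as np
from scipy import stats

incidence = np.array([1.0, 2.0, 3.0, 4.0])
prediction = np.array([1.1, 1.9, 3.2, 3.8])
r, p = stats.pearsonr(prediction, incidence)
print("Pearson Correlation (value/p): ", (r, p))   # r close to 1, small p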
Add file to get some basic statistics from the prediction.
|
Add file to get some basic statistics from the prediction.
|
Python
|
mit
|
geektoni/Influenza-Like-Illness-Predictor,geektoni/Influenza-Like-Illness-Predictor
|
Add file to get some basic statistics from the prediction.
|
# -*- coding: utf-8 -*-
"""Script which can be used to compare the features obtained of two different influenza models
Usage:
get_model_statistics.py <model> [--country=<country_name>] [--no-future] [--basedir=<directory>] [--start-year=<start_year>] [--end-year=<end_year>] [--save] [--no-graph]
<baseline> Data file of the first model
<other_method> Data file of the second model
-h, --help Print this help message
"""
import pandas as pd
from scipy import stats
from docopt import docopt
import os
import glob
def get_results_filename(basepath):
files = [f for f in glob.glob(basepath + "/*-prediction.csv", recursive=True)]
y = os.path.basename(files[0]).split("-")[0]
y2 = os.path.basename(files[0]).split("-")[1]
return "{}-{}".format(y, y2)
if __name__ == "__main__":
args = docopt(__doc__)
model = args["<model>"]
base_dir = args["--basedir"] if args["--basedir"] else "../complete_results"
country = args["--country"] if args["--country"] else "italy"
future = "no-future" if args["--no-future"] else "future"
# Read the baseline results and merge them
model_path = os.path.join(base_dir, args["<model>"], future, country)
season_years = get_results_filename(model_path)
model_file = os.path.join(model_path, "{}-prediction.csv".format(season_years))
# Load the data
data = pd.read_csv(model_file)
# Describe the data
print("")
print("[*] Describe the given dataset {}".format(model_file))
print(data.describe())
# Generate residuals
print("")
print("[*] Describe the residuals")
residuals = data["incidence"]-data["prediction"]
print(residuals.describe())
# Get some statistics
print("")
print("Pearson Correlation (value/p): ", stats.pearsonr(data["prediction"], data["incidence"]))
print("")
|
<commit_before><commit_msg>Add file to get some basic statistics from the prediction.<commit_after>
|
# -*- coding: utf-8 -*-
"""Script which can be used to compare the features obtained of two different influenza models
Usage:
get_model_statistics.py <model> [--country=<country_name>] [--no-future] [--basedir=<directory>] [--start-year=<start_year>] [--end-year=<end_year>] [--save] [--no-graph]
<baseline> Data file of the first model
<other_method> Data file of the second model
-h, --help Print this help message
"""
import pandas as pd
from scipy import stats
from docopt import docopt
import os
import glob
def get_results_filename(basepath):
files = [f for f in glob.glob(basepath + "/*-prediction.csv", recursive=True)]
y = os.path.basename(files[0]).split("-")[0]
y2 = os.path.basename(files[0]).split("-")[1]
return "{}-{}".format(y, y2)
if __name__ == "__main__":
args = docopt(__doc__)
model = args["<model>"]
base_dir = args["--basedir"] if args["--basedir"] else "../complete_results"
country = args["--country"] if args["--country"] else "italy"
future = "no-future" if args["--no-future"] else "future"
# Read the baseline results and merge them
model_path = os.path.join(base_dir, args["<model>"], future, country)
season_years = get_results_filename(model_path)
model_file = os.path.join(model_path, "{}-prediction.csv".format(season_years))
# Load the data
data = pd.read_csv(model_file)
# Describe the data
print("")
print("[*] Describe the given dataset {}".format(model_file))
print(data.describe())
# Generate residuals
print("")
print("[*] Describe the residuals")
residuals = data["incidence"]-data["prediction"]
print(residuals.describe())
# Get some statistics
print("")
print("Pearson Correlation (value/p): ", stats.pearsonr(data["prediction"], data["incidence"]))
print("")
|
Add file to get some basic statistics from the prediction.# -*- coding: utf-8 -*-
"""Script which can be used to compare the features obtained of two different influenza models
Usage:
get_model_statistics.py <model> [--country=<country_name>] [--no-future] [--basedir=<directory>] [--start-year=<start_year>] [--end-year=<end_year>] [--save] [--no-graph]
<baseline> Data file of the first model
<other_method> Data file of the second model
-h, --help Print this help message
"""
import pandas as pd
from scipy import stats
from docopt import docopt
import os
import glob
def get_results_filename(basepath):
files = [f for f in glob.glob(basepath + "/*-prediction.csv", recursive=True)]
y = os.path.basename(files[0]).split("-")[0]
y2 = os.path.basename(files[0]).split("-")[1]
return "{}-{}".format(y, y2)
if __name__ == "__main__":
args = docopt(__doc__)
model = args["<model>"]
base_dir = args["--basedir"] if args["--basedir"] else "../complete_results"
country = args["--country"] if args["--country"] else "italy"
future = "no-future" if args["--no-future"] else "future"
# Read the baseline results and merge them
model_path = os.path.join(base_dir, args["<model>"], future, country)
season_years = get_results_filename(model_path)
model_file = os.path.join(model_path, "{}-prediction.csv".format(season_years))
# Load the data
data = pd.read_csv(model_file)
# Describe the data
print("")
print("[*] Describe the given dataset {}".format(model_file))
print(data.describe())
# Generate residuals
print("")
print("[*] Describe the residuals")
residuals = data["incidence"]-data["prediction"]
print(residuals.describe())
# Get some statistics
print("")
print("Pearson Correlation (value/p): ", stats.pearsonr(data["prediction"], data["incidence"]))
print("")
|
<commit_before><commit_msg>Add file to get some basic statistics from the prediction.<commit_after># -*- coding: utf-8 -*-
"""Script which can be used to compare the features obtained of two different influenza models
Usage:
get_model_statistics.py <model> [--country=<country_name>] [--no-future] [--basedir=<directory>] [--start-year=<start_year>] [--end-year=<end_year>] [--save] [--no-graph]
<baseline> Data file of the first model
<other_method> Data file of the second model
-h, --help Print this help message
"""
import pandas as pd
from scipy import stats
from docopt import docopt
import os
import glob
def get_results_filename(basepath):
files = [f for f in glob.glob(basepath + "/*-prediction.csv", recursive=True)]
y = os.path.basename(files[0]).split("-")[0]
y2 = os.path.basename(files[0]).split("-")[1]
return "{}-{}".format(y, y2)
if __name__ == "__main__":
args = docopt(__doc__)
model = args["<model>"]
base_dir = args["--basedir"] if args["--basedir"] else "../complete_results"
country = args["--country"] if args["--country"] else "italy"
future = "no-future" if args["--no-future"] else "future"
# Read the baseline results and merge them
model_path = os.path.join(base_dir, args["<model>"], future, country)
season_years = get_results_filename(model_path)
model_file = os.path.join(model_path, "{}-prediction.csv".format(season_years))
# Load the data
data = pd.read_csv(model_file)
# Describe the data
print("")
print("[*] Describe the given dataset {}".format(model_file))
print(data.describe())
# Generate residuals
print("")
print("[*] Describe the residuals")
residuals = data["incidence"]-data["prediction"]
print(residuals.describe())
# Get some statistics
print("")
print("Pearson Correlation (value/p): ", stats.pearsonr(data["prediction"], data["incidence"]))
print("")
|
|
3dc718866905dbcf920c2c2cb5227c52e6069782
|
examples/timestamp-op-ret.py
|
examples/timestamp-op-ret.py
|
#!/usr/bin/python3
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Example of timestamping a file via OP_RETURN"""
import hashlib
import bitcoin.rpc
import sys
from bitcoin.core import *
from bitcoin.core.script import *
proxy = bitcoin.rpc.Proxy()
assert len(sys.argv) > 1
digests = []
for f in sys.argv[1:]:
try:
with open(f, 'rb') as fd:
digests.append(Hash(fd.read()))
except FileNotFoundError as exp:
if len(f)/2 in (20, 32):
digests.append(x(f))
else:
raise exp
except IOError as exp:
print(exp,file=sys.stderr)
continue
for digest in digests:
txouts = []
unspent = sorted(proxy.listunspent(0), key=lambda x: hash(x['amount']))
txins = [CTxIn(unspent[-1]['outpoint'])]
value_in = unspent[-1]['amount']
change_addr = proxy.getnewaddress()
change_pubkey = proxy.validateaddress(change_addr)['pubkey']
change_out = CTxOut(MAX_MONEY, CScript([change_pubkey, OP_CHECKSIG]))
    digest_outs = [CTxOut(0, CScript([OP_RETURN, digest]))]
txouts = [change_out] + digest_outs
tx = CTransaction(txins, txouts)
FEE_PER_BYTE = 0.00025*COIN/1000
while True:
tx.vout[0].nValue = int(value_in - max(len(tx.serialize())*FEE_PER_BYTE, 0.00011*COIN))
r = proxy.signrawtransaction(tx)
assert r['complete']
tx = r['tx']
if value_in - tx.vout[0].nValue >= len(tx.serialize())*FEE_PER_BYTE:
print(b2x(tx.serialize()))
print(len(tx.serialize()),'bytes',file=sys.stderr)
print(b2lx(proxy.sendrawtransaction(tx)))
break
|
Add timestamp via op-return example
|
Add timestamp via op-return example
Quick-n-dirty
|
Python
|
mit
|
petertodd/dust-b-gone,petertodd/stealth-addresses-ref-implementation
|
Add timestamp via op-return example
Quick-n-dirty
|
#!/usr/bin/python3
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Example of timestamping a file via OP_RETURN"""
import hashlib
import bitcoin.rpc
import sys
from bitcoin.core import *
from bitcoin.core.script import *
proxy = bitcoin.rpc.Proxy()
assert len(sys.argv) > 1
digests = []
for f in sys.argv[1:]:
try:
with open(f, 'rb') as fd:
digests.append(Hash(fd.read()))
except FileNotFoundError as exp:
if len(f)/2 in (20, 32):
digests.append(x(f))
else:
raise exp
except IOError as exp:
print(exp,file=sys.stderr)
continue
for digest in digests:
txouts = []
unspent = sorted(proxy.listunspent(0), key=lambda x: hash(x['amount']))
txins = [CTxIn(unspent[-1]['outpoint'])]
value_in = unspent[-1]['amount']
change_addr = proxy.getnewaddress()
change_pubkey = proxy.validateaddress(change_addr)['pubkey']
change_out = CTxOut(MAX_MONEY, CScript([change_pubkey, OP_CHECKSIG]))
    digest_outs = [CTxOut(0, CScript([OP_RETURN, digest]))]
txouts = [change_out] + digest_outs
tx = CTransaction(txins, txouts)
FEE_PER_BYTE = 0.00025*COIN/1000
while True:
tx.vout[0].nValue = int(value_in - max(len(tx.serialize())*FEE_PER_BYTE, 0.00011*COIN))
r = proxy.signrawtransaction(tx)
assert r['complete']
tx = r['tx']
if value_in - tx.vout[0].nValue >= len(tx.serialize())*FEE_PER_BYTE:
print(b2x(tx.serialize()))
print(len(tx.serialize()),'bytes',file=sys.stderr)
print(b2lx(proxy.sendrawtransaction(tx)))
break
|
<commit_before><commit_msg>Add timestamp via op-return example
Quick-n-dirty<commit_after>
|
#!/usr/bin/python3
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Example of timestamping a file via OP_RETURN"""
import hashlib
import bitcoin.rpc
import sys
from bitcoin.core import *
from bitcoin.core.script import *
proxy = bitcoin.rpc.Proxy()
assert len(sys.argv) > 1
digests = []
for f in sys.argv[1:]:
try:
with open(f, 'rb') as fd:
digests.append(Hash(fd.read()))
except FileNotFoundError as exp:
if len(f)/2 in (20, 32):
digests.append(x(f))
else:
raise exp
except IOError as exp:
print(exp,file=sys.stderr)
continue
for digest in digests:
txouts = []
unspent = sorted(proxy.listunspent(0), key=lambda x: hash(x['amount']))
txins = [CTxIn(unspent[-1]['outpoint'])]
value_in = unspent[-1]['amount']
change_addr = proxy.getnewaddress()
change_pubkey = proxy.validateaddress(change_addr)['pubkey']
change_out = CTxOut(MAX_MONEY, CScript([change_pubkey, OP_CHECKSIG]))
    digest_outs = [CTxOut(0, CScript([OP_RETURN, digest]))]
txouts = [change_out] + digest_outs
tx = CTransaction(txins, txouts)
FEE_PER_BYTE = 0.00025*COIN/1000
while True:
tx.vout[0].nValue = int(value_in - max(len(tx.serialize())*FEE_PER_BYTE, 0.00011*COIN))
r = proxy.signrawtransaction(tx)
assert r['complete']
tx = r['tx']
if value_in - tx.vout[0].nValue >= len(tx.serialize())*FEE_PER_BYTE:
print(b2x(tx.serialize()))
print(len(tx.serialize()),'bytes',file=sys.stderr)
print(b2lx(proxy.sendrawtransaction(tx)))
break
|
Add timestamp via op-return example
Quick-n-dirty#!/usr/bin/python3
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Example of timestamping a file via OP_RETURN"""
import hashlib
import bitcoin.rpc
import sys
from bitcoin.core import *
from bitcoin.core.script import *
proxy = bitcoin.rpc.Proxy()
assert len(sys.argv) > 1
digests = []
for f in sys.argv[1:]:
try:
with open(f, 'rb') as fd:
digests.append(Hash(fd.read()))
except FileNotFoundError as exp:
if len(f)/2 in (20, 32):
digests.append(x(f))
else:
raise exp
except IOError as exp:
print(exp,file=sys.stderr)
continue
for digest in digests:
txouts = []
unspent = sorted(proxy.listunspent(0), key=lambda x: hash(x['amount']))
txins = [CTxIn(unspent[-1]['outpoint'])]
value_in = unspent[-1]['amount']
change_addr = proxy.getnewaddress()
change_pubkey = proxy.validateaddress(change_addr)['pubkey']
change_out = CTxOut(MAX_MONEY, CScript([change_pubkey, OP_CHECKSIG]))
    digest_outs = [CTxOut(0, CScript([OP_RETURN, digest]))]
txouts = [change_out] + digest_outs
tx = CTransaction(txins, txouts)
FEE_PER_BYTE = 0.00025*COIN/1000
while True:
tx.vout[0].nValue = int(value_in - max(len(tx.serialize())*FEE_PER_BYTE, 0.00011*COIN))
r = proxy.signrawtransaction(tx)
assert r['complete']
tx = r['tx']
if value_in - tx.vout[0].nValue >= len(tx.serialize())*FEE_PER_BYTE:
print(b2x(tx.serialize()))
print(len(tx.serialize()),'bytes',file=sys.stderr)
print(b2lx(proxy.sendrawtransaction(tx)))
break
|
<commit_before><commit_msg>Add timestamp via op-return example
Quick-n-dirty<commit_after>#!/usr/bin/python3
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Example of timestamping a file via OP_RETURN"""
import hashlib
import bitcoin.rpc
import sys
from bitcoin.core import *
from bitcoin.core.script import *
proxy = bitcoin.rpc.Proxy()
assert len(sys.argv) > 1
digests = []
for f in sys.argv[1:]:
try:
with open(f, 'rb') as fd:
digests.append(Hash(fd.read()))
except FileNotFoundError as exp:
if len(f)/2 in (20, 32):
digests.append(x(f))
else:
raise exp
except IOError as exp:
print(exp,file=sys.stderr)
continue
for digest in digests:
txouts = []
unspent = sorted(proxy.listunspent(0), key=lambda x: hash(x['amount']))
txins = [CTxIn(unspent[-1]['outpoint'])]
value_in = unspent[-1]['amount']
change_addr = proxy.getnewaddress()
change_pubkey = proxy.validateaddress(change_addr)['pubkey']
change_out = CTxOut(MAX_MONEY, CScript([change_pubkey, OP_CHECKSIG]))
    digest_outs = [CTxOut(0, CScript([OP_RETURN, digest]))]
txouts = [change_out] + digest_outs
tx = CTransaction(txins, txouts)
FEE_PER_BYTE = 0.00025*COIN/1000
while True:
tx.vout[0].nValue = int(value_in - max(len(tx.serialize())*FEE_PER_BYTE, 0.00011*COIN))
r = proxy.signrawtransaction(tx)
assert r['complete']
tx = r['tx']
if value_in - tx.vout[0].nValue >= len(tx.serialize())*FEE_PER_BYTE:
print(b2x(tx.serialize()))
print(len(tx.serialize()),'bytes',file=sys.stderr)
print(b2lx(proxy.sendrawtransaction(tx)))
break
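To check such a timestamp later, it is enough to recompute the file digest and look for it inside the transaction's OP_RETURN output. A rough sketch against the same python-bitcoinlib API used above; the transaction id and file name are placeholders:

import bitcoin.rpc
from bitcoin.core import Hash, lx

proxy = bitcoin.rpc.Proxy()
tx = proxy.getrawtransaction(lx('<txid>'))  # placeholder txid
with open('document.pdf', 'rb') as fd:      # placeholder file
    digest = Hash(fd.read())
# CScript is a bytes subclass, so a substring test finds the embedded digest.
found = any(digest in vout.scriptPubKey for vout in tx.vout)
print('timestamp present:', found)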
|
|
69783b41c719940081e08071c34a037ba0ea5b90
|
random_selection/random_selection.py
|
random_selection/random_selection.py
|
from random import randint
import sys
def select(arr, order):
length = len(arr)
if length == 1:
return arr[0]
pivot_index = randint(0, length-1)
pivot = arr[pivot_index]
swap(arr, 0, pivot_index)
i = j = 1
while j < length:
if arr[j] < pivot:
swap(arr, j, i)
i += 1
j += 1
swap(arr, 0, i-1)
pivot_index = i - 1
if pivot_index == order:
return pivot
if pivot_index > order:
return select(arr[:pivot_index], order)
if pivot_index < order:
return select(arr[pivot_index+1:], order - pivot_index -1)
def swap(arr, x, y):
temp = arr[x]
arr[x] = arr[y]
arr[y] = temp
def main(arr_len, order):
unsorted = [randint(0, arr_len*10) for n in range(arr_len)]
length = len(unsorted)
print (unsorted, select(unsorted, order))
if __name__ == '__main__':
try:
arr_len = int(sys.argv[1])
order_stat = int(sys.argv[2]) - 1
except (IndexError, ValueError):
        print('Format: python random_selection.py <array-length> <order-stat>')
        sys.exit(1)
    main(arr_len, order_stat)
|
Add linear randomized select algorithm
|
Add linear randomized select algorithm
Using the partitioning principle from quicksort, linear select
finds the i-th smallest element in an array. It operates by
partitioning around a pivot, checking whether the pivot index
matches the desired order statistic, and either returning or
recursing on the appropriate part of the array.
|
Python
|
mit
|
timpel/stanford-algs,timpel/stanford-algs
|
Add linear randomized select algorithm
Using the partitioning principle from quicksort, linear select
finds the i-th smallest element in an array. It operates by
partitioning around a pivot, checking whether the pivot index
matches the desired order statistic, and either returning or
recursing on the appropriate part of the array.
|
from random import randint
import sys
def select(arr, order):
length = len(arr)
if length == 1:
return arr[0]
pivot_index = randint(0, length-1)
pivot = arr[pivot_index]
swap(arr, 0, pivot_index)
i = j = 1
while j < length:
if arr[j] < pivot:
swap(arr, j, i)
i += 1
j += 1
swap(arr, 0, i-1)
pivot_index = i - 1
if pivot_index == order:
return pivot
if pivot_index > order:
return select(arr[:pivot_index], order)
if pivot_index < order:
return select(arr[pivot_index+1:], order - pivot_index -1)
def swap(arr, x, y):
temp = arr[x]
arr[x] = arr[y]
arr[y] = temp
def main(arr_len, order):
unsorted = [randint(0, arr_len*10) for n in range(arr_len)]
length = len(unsorted)
print (unsorted, select(unsorted, order))
if __name__ == '__main__':
try:
arr_len = int(sys.argv[1])
order_stat = int(sys.argv[2]) - 1
except (IndexError, ValueError):
        print('Format: python random_selection.py <array-length> <order-stat>')
        sys.exit(1)
    main(arr_len, order_stat)
|
<commit_before><commit_msg>Add linear randomized select algorithm
Using the partitioning principle from quicksort, linear select
finds the i-th smallest element in an array. It operates by
partitioning around a pivot, checking whether the pivot index
matches the desired order statistic, and either returning or
recursing on the appropriate part of the array.<commit_after>
|
from random import randint
import sys
def select(arr, order):
length = len(arr)
if length == 1:
return arr[0]
pivot_index = randint(0, length-1)
pivot = arr[pivot_index]
swap(arr, 0, pivot_index)
i = j = 1
while j < length:
if arr[j] < pivot:
swap(arr, j, i)
i += 1
j += 1
swap(arr, 0, i-1)
pivot_index = i - 1
if pivot_index == order:
return pivot
if pivot_index > order:
return select(arr[:pivot_index], order)
if pivot_index < order:
return select(arr[pivot_index+1:], order - pivot_index -1)
def swap(arr, x, y):
temp = arr[x]
arr[x] = arr[y]
arr[y] = temp
def main(arr_len, order):
unsorted = [randint(0, arr_len*10) for n in range(arr_len)]
length = len(unsorted)
print (unsorted, select(unsorted, order))
if __name__ == '__main__':
try:
arr_len = int(sys.argv[1])
order_stat = int(sys.argv[2]) - 1
except (IndexError, ValueError):
        print('Format: python random_selection.py <array-length> <order-stat>')
        sys.exit(1)
    main(arr_len, order_stat)
|
Add linear randomized select algorithm
Using the partitioning principle from quicksort, linear select
finds the i-th smallest element in an array. It operates by
partitioning around a pivot, checking whether the pivot index
matches the desired order statistic, and either returning or
recursing on the appropriate part of the array.from random import randint
import sys
def select(arr, order):
length = len(arr)
if length == 1:
return arr[0]
pivot_index = randint(0, length-1)
pivot = arr[pivot_index]
swap(arr, 0, pivot_index)
i = j = 1
while j < length:
if arr[j] < pivot:
swap(arr, j, i)
i += 1
j += 1
swap(arr, 0, i-1)
pivot_index = i - 1
if pivot_index == order:
return pivot
if pivot_index > order:
return select(arr[:pivot_index], order)
if pivot_index < order:
return select(arr[pivot_index+1:], order - pivot_index -1)
def swap(arr, x, y):
temp = arr[x]
arr[x] = arr[y]
arr[y] = temp
def main(arr_len, order):
unsorted = [randint(0, arr_len*10) for n in range(arr_len)]
length = len(unsorted)
print (unsorted, select(unsorted, order))
if __name__ == '__main__':
try:
arr_len = int(sys.argv[1])
order_stat = int(sys.argv[2]) - 1
except (IndexError, ValueError):
        print('Format: python random_selection.py <array-length> <order-stat>')
        sys.exit(1)
    main(arr_len, order_stat)
|
<commit_before><commit_msg>Add linear randomized select algorithm
Using the partitioning principle from quicksort, linear select
finds the i-th smallest element in an array. It operates by
partitioning around a pivot, checking whether the pivot index
matches the desired order statistic, and either returning or
recursing on the appropriate part of the array.<commit_after>from random import randint
import sys
def select(arr, order):
length = len(arr)
if length == 1:
return arr[0]
pivot_index = randint(0, length-1)
pivot = arr[pivot_index]
swap(arr, 0, pivot_index)
i = j = 1
while j < length:
if arr[j] < pivot:
swap(arr, j, i)
i += 1
j += 1
swap(arr, 0, i-1)
pivot_index = i - 1
if pivot_index == order:
return pivot
if pivot_index > order:
return select(arr[:pivot_index], order)
if pivot_index < order:
return select(arr[pivot_index+1:], order - pivot_index -1)
def swap(arr, x, y):
temp = arr[x]
arr[x] = arr[y]
arr[y] = temp
def main(arr_len, order):
unsorted = [randint(0, arr_len*10) for n in range(arr_len)]
length = len(unsorted)
print (unsorted, select(unsorted, order))
if __name__ == '__main__':
try:
arr_len = int(sys.argv[1])
order_stat = int(sys.argv[2]) - 1
except (IndexError, ValueError):
        print('Format: python random_selection.py <array-length> <order-stat>')
        sys.exit(1)
    main(arr_len, order_stat)
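A quick illustration of the selection routine defined above; order statistics are zero-based, so order=2 asks for the median of five elements. The asserts are illustrative only, and copies are passed because select() reorders its input:

data = [9, 1, 7, 3, 5]
assert select(list(data), 0) == 1   # minimum
assert select(list(data), 2) == 5   # median
assert select(list(data), 4) == 9   # maximum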
|
|
d476fbb92bf30becdbbd6b5bc8458fae09ff88a5
|
comrade/core/managers.py
|
comrade/core/managers.py
|
from django.db import models
class QuerySetManager(models.Manager):
def __getattr__(self, name, *args):
if name.startswith('_'):
raise AttributeError
return getattr(self.get_query_set(), name, *args)
def get_query_set(self):
return self.model.QuerySet(self.model)
|
Add QuerySet manager, for implementing chainable filters.
|
Add QuerySet manager, for implementing chainable filters.
|
Python
|
mit
|
bueda/django-comrade
|
Add QuerySet manager, for implementing chainable filters.
|
from django.db import models
class QuerySetManager(models.Manager):
def __getattr__(self, name, *args):
if name.startswith('_'):
raise AttributeError
return getattr(self.get_query_set(), name, *args)
def get_query_set(self):
return self.model.QuerySet(self.model)
|
<commit_before><commit_msg>Add QuerySet manager, for implementing chainable filters.<commit_after>
|
from django.db import models
class QuerySetManager(models.Manager):
def __getattr__(self, name, *args):
if name.startswith('_'):
raise AttributeError
return getattr(self.get_query_set(), name, *args)
def get_query_set(self):
return self.model.QuerySet(self.model)
|
Add QuerySet manager, for implementing chainable filters.from django.db import models
class QuerySetManager(models.Manager):
def __getattr__(self, name, *args):
if name.startswith('_'):
raise AttributeError
return getattr(self.get_query_set(), name, *args)
def get_query_set(self):
return self.model.QuerySet(self.model)
|
<commit_before><commit_msg>Add QuerySet manager, for implementing chainable filters.<commit_after>from django.db import models
class QuerySetManager(models.Manager):
def __getattr__(self, name, *args):
if name.startswith('_'):
raise AttributeError
return getattr(self.get_query_set(), name, *args)
def get_query_set(self):
return self.model.QuerySet(self.model)
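A hypothetical model showing the chaining this manager enables: filter methods defined once on an inner QuerySet become reachable from the manager and from any intermediate queryset. The Article model and its fields are illustrative, not part of the commit:

from django.db import models

class Article(models.Model):
    published = models.BooleanField(default=False)
    author = models.ForeignKey('auth.User')

    objects = QuerySetManager()

    class QuerySet(models.query.QuerySet):
        def live(self):
            return self.filter(published=True)

        def by(self, user):
            return self.filter(author=user)

# Both hops resolve through __getattr__ on the manager:
# Article.objects.live().by(user)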
|
|
5afae8a39345ef3d334817234177d656cf12cff3
|
scripts/bioinfo_project_status_update.py
|
scripts/bioinfo_project_status_update.py
|
#!/usr/bin/env python
import argparse
import os
import yaml
from genologics.lims import Lims
from genologics.entities import Project
from genologics.config import BASEURI, USERNAME, PASSWORD
import LIMS2DB.utils as lutils
from requests.exceptions import HTTPError
def main(args):
log = lutils.setupLog('bioinfologger', args.logfile)
lims = Lims(BASEURI, USERNAME, PASSWORD)
with open(args.conf) as conf_file:
conf = yaml.safe_load(conf_file)
bioinfodb = lutils.setupServer(conf)['bioinfo_analysis']
open_projects = bioinfodb.view('latest_data/sample_id_open')
for row in open_projects.rows:
project_id = row.key[0]
sample_id = row.key[3]
close_date = None
try:
close_date = Project(lims=lims, id=project_id).close_date
except HTTPError as e:
            if '404: Project not found' in str(e):
log.error('Project '+project_id+' not found in LIMS')
continue
if close_date is not None:
try:
doc = bioinfodb.get(row.id)
except Exception as e:
                log.error(str(e) + ' in Project ' + project_id + ' Sample ' + sample_id + ' while accessing doc from statusdb')
                continue
doc['project_closed'] = True
try:
bioinfodb.save(doc)
log.info('Updated Project '+project_id+ ' Sample '+sample_id)
except Exception as e:
                log.error(str(e) + ' in Project ' + project_id + ' Sample ' + sample_id + ' while saving to statusdb')
if __name__ == '__main__':
usage = "Usage: python bioinfo_project_status_update.py [options]"
parser = argparse.ArgumentParser(description=usage)
parser.add_argument("-c", "--conf", dest="conf",
default=os.path.join(os.environ['HOME'],'opt/config/post_process.yaml'),
help = "Config file. Default: ~/opt/config/post_process.yaml")
parser.add_argument("-l", "--log", dest="logfile",
default=os.path.join(os.environ['HOME'],'statusdb_bioinfo_closed.log'),
help = "log file. Default: ~/statusdb_bioinfo_closed.log")
args = parser.parse_args()
main(args)
|
Add script to update project closure in bioinfo_analysis db
|
Add script to update project closure in bioinfo_analysis db
|
Python
|
mit
|
SciLifeLab/LIMS2DB
|
Add script to update project closure in bioinfo_analysis db
|
#!/usr/bin/env python
import argparse
import os
import yaml
from genologics.lims import Lims
from genologics.entities import Project
from genologics.config import BASEURI, USERNAME, PASSWORD
import LIMS2DB.utils as lutils
from requests.exceptions import HTTPError
def main(args):
log = lutils.setupLog('bioinfologger', args.logfile)
lims = Lims(BASEURI, USERNAME, PASSWORD)
with open(args.conf) as conf_file:
conf = yaml.safe_load(conf_file)
bioinfodb = lutils.setupServer(conf)['bioinfo_analysis']
open_projects = bioinfodb.view('latest_data/sample_id_open')
for row in open_projects.rows:
project_id = row.key[0]
sample_id = row.key[3]
close_date = None
try:
close_date = Project(lims=lims, id=project_id).close_date
except HTTPError as e:
            if '404: Project not found' in str(e):
log.error('Project '+project_id+' not found in LIMS')
continue
if close_date is not None:
try:
doc = bioinfodb.get(row.id)
except Exception as e:
                log.error(str(e) + ' in Project ' + project_id + ' Sample ' + sample_id + ' while accessing doc from statusdb')
                continue
doc['project_closed'] = True
try:
bioinfodb.save(doc)
log.info('Updated Project '+project_id+ ' Sample '+sample_id)
except Exception as e:
                log.error(str(e) + ' in Project ' + project_id + ' Sample ' + sample_id + ' while saving to statusdb')
if __name__ == '__main__':
usage = "Usage: python bioinfo_project_status_update.py [options]"
parser = argparse.ArgumentParser(description=usage)
parser.add_argument("-c", "--conf", dest="conf",
default=os.path.join(os.environ['HOME'],'opt/config/post_process.yaml'),
help = "Config file. Default: ~/opt/config/post_process.yaml")
parser.add_argument("-l", "--log", dest="logfile",
default=os.path.join(os.environ['HOME'],'statusdb_bioinfo_closed.log'),
help = "log file. Default: ~/statusdb_bioinfo_closed.log")
args = parser.parse_args()
main(args)
|
<commit_before><commit_msg>Add script to update project closure in bioinfo_analysis db<commit_after>
|
#!/usr/bin/env python
import argparse
import os
import yaml
from genologics.lims import Lims
from genologics.entities import Project
from genologics.config import BASEURI, USERNAME, PASSWORD
import LIMS2DB.utils as lutils
from requests.exceptions import HTTPError
def main(args):
log = lutils.setupLog('bioinfologger', args.logfile)
lims = Lims(BASEURI, USERNAME, PASSWORD)
with open(args.conf) as conf_file:
conf = yaml.safe_load(conf_file)
bioinfodb = lutils.setupServer(conf)['bioinfo_analysis']
open_projects = bioinfodb.view('latest_data/sample_id_open')
for row in open_projects.rows:
project_id = row.key[0]
sample_id = row.key[3]
close_date = None
try:
close_date = Project(lims=lims, id=project_id).close_date
except HTTPError as e:
            if '404: Project not found' in str(e):
log.error('Project '+project_id+' not found in LIMS')
continue
if close_date is not None:
try:
doc = bioinfodb.get(row.id)
except Exception as e:
                log.error(str(e) + ' in Project ' + project_id + ' Sample ' + sample_id + ' while accessing doc from statusdb')
                continue
doc['project_closed'] = True
try:
bioinfodb.save(doc)
log.info('Updated Project '+project_id+ ' Sample '+sample_id)
except Exception as e:
                log.error(str(e) + ' in Project ' + project_id + ' Sample ' + sample_id + ' while saving to statusdb')
if __name__ == '__main__':
usage = "Usage: python bioinfo_project_status_update.py [options]"
parser = argparse.ArgumentParser(description=usage)
parser.add_argument("-c", "--conf", dest="conf",
default=os.path.join(os.environ['HOME'],'opt/config/post_process.yaml'),
help = "Config file. Default: ~/opt/config/post_process.yaml")
parser.add_argument("-l", "--log", dest="logfile",
default=os.path.join(os.environ['HOME'],'statusdb_bioinfo_closed.log'),
help = "log file. Default: ~/statusdb_bioinfo_closed.log")
args = parser.parse_args()
main(args)
|
Add script to update project closure in bioinfo_analysis db#!/usr/bin/env python
import argparse
import os
import yaml
from genologics.lims import Lims
from genologics.entities import Project
from genologics.config import BASEURI, USERNAME, PASSWORD
import LIMS2DB.utils as lutils
from requests.exceptions import HTTPError
def main(args):
log = lutils.setupLog('bioinfologger', args.logfile)
lims = Lims(BASEURI, USERNAME, PASSWORD)
with open(args.conf) as conf_file:
conf = yaml.safe_load(conf_file)
bioinfodb = lutils.setupServer(conf)['bioinfo_analysis']
open_projects = bioinfodb.view('latest_data/sample_id_open')
for row in open_projects.rows:
project_id = row.key[0]
sample_id = row.key[3]
close_date = None
try:
close_date = Project(lims=lims, id=project_id).close_date
except HTTPError as e:
            if '404: Project not found' in str(e):
log.error('Project '+project_id+' not found in LIMS')
continue
if close_date is not None:
try:
doc = bioinfodb.get(row.id)
except Exception as e:
                log.error(str(e) + ' in Project ' + project_id + ' Sample ' + sample_id + ' while accessing doc from statusdb')
                continue
doc['project_closed'] = True
try:
bioinfodb.save(doc)
log.info('Updated Project '+project_id+ ' Sample '+sample_id)
except Exception as e:
                log.error(str(e) + ' in Project ' + project_id + ' Sample ' + sample_id + ' while saving to statusdb')
if __name__ == '__main__':
usage = "Usage: python bioinfo_project_status_update.py [options]"
parser = argparse.ArgumentParser(description=usage)
parser.add_argument("-c", "--conf", dest="conf",
default=os.path.join(os.environ['HOME'],'opt/config/post_process.yaml'),
help = "Config file. Default: ~/opt/config/post_process.yaml")
parser.add_argument("-l", "--log", dest="logfile",
default=os.path.join(os.environ['HOME'],'statusdb_bioinfo_closed.log'),
help = "log file. Default: ~/statusdb_bioinfo_closed.log")
args = parser.parse_args()
main(args)
|
<commit_before><commit_msg>Add script to update project closure in bioinfo_analysis db<commit_after>#!/usr/bin/env python
import argparse
import os
import yaml
from genologics.lims import Lims
from genologics.entities import Project
from genologics.config import BASEURI, USERNAME, PASSWORD
import LIMS2DB.utils as lutils
from requests.exceptions import HTTPError
def main(args):
log = lutils.setupLog('bioinfologger', args.logfile)
lims = Lims(BASEURI, USERNAME, PASSWORD)
with open(args.conf) as conf_file:
conf = yaml.safe_load(conf_file)
bioinfodb = lutils.setupServer(conf)['bioinfo_analysis']
open_projects = bioinfodb.view('latest_data/sample_id_open')
for row in open_projects.rows:
project_id = row.key[0]
sample_id = row.key[3]
close_date = None
try:
close_date = Project(lims=lims, id=project_id).close_date
except HTTPError as e:
            if '404: Project not found' in str(e):
log.error('Project '+project_id+' not found in LIMS')
continue
if close_date is not None:
try:
doc = bioinfodb.get(row.id)
except Exception as e:
                log.error(str(e) + ' in Project ' + project_id + ' Sample ' + sample_id + ' while accessing doc from statusdb')
                continue
doc['project_closed'] = True
try:
bioinfodb.save(doc)
log.info('Updated Project '+project_id+ ' Sample '+sample_id)
except Exception as e:
                log.error(str(e) + ' in Project ' + project_id + ' Sample ' + sample_id + ' while saving to statusdb')
if __name__ == '__main__':
usage = "Usage: python bioinfo_project_status_update.py [options]"
parser = argparse.ArgumentParser(description=usage)
parser.add_argument("-c", "--conf", dest="conf",
default=os.path.join(os.environ['HOME'],'opt/config/post_process.yaml'),
help = "Config file. Default: ~/opt/config/post_process.yaml")
parser.add_argument("-l", "--log", dest="logfile",
default=os.path.join(os.environ['HOME'],'statusdb_bioinfo_closed.log'),
help = "log file. Default: ~/statusdb_bioinfo_closed.log")
args = parser.parse_args()
main(args)
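The loop above relies on the view rows carrying the project id at key[0] and the sample id at key[3]. A sketch of that assumption with made-up values, since the view definition itself is not part of the commit:

# Illustrative shape of one row from latest_data/sample_id_open:
row_key = ['P1234', 'J.Doe', '2016-06-01', 'P1234_101']
project_id, sample_id = row_key[0], row_key[3]  # indices used by the loop
print(project_id, sample_id)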
|
|
1cb0382eea48539618e1c36d7ae4ec22a0274aa5
|
usingnamespace/api/traversal/v1/__init__.py
|
usingnamespace/api/traversal/v1/__init__.py
|
# Package
from .... import models as m
class Root(object):
"""Root
The main root object for v1 API traversal
"""
__name__ = None
__parent__ = None
def __init__(self, request):
"""Create the default root object
:request: The Pyramid request object
"""
self._request = request
def __getitem__(self, key):
"""Check to see if we can traverse this ..."""
next_ctx = None
if key == 'v1':
next_ctx = None
if next_ctx is None:
raise KeyError
else:
next_ctx.__parent__ = self
return next_ctx
def finalise(self, **kw):
"""Gets the list of sites
:returns: None
"""
self.sites = m.DBSession.query(m.Sites)
|
Add the new version 1 API root
|
Add the new version 1 API root
|
Python
|
isc
|
usingnamespace/usingnamespace
|
Add the new version 1 API root
|
# Package
from .... import models as m
class Root(object):
"""Root
The main root object for v1 API traversal
"""
__name__ = None
__parent__ = None
def __init__(self, request):
"""Create the default root object
:request: The Pyramid request object
"""
self._request = request
def __getitem__(self, key):
"""Check to see if we can traverse this ..."""
next_ctx = None
if key == 'v1':
next_ctx = None
if next_ctx is None:
raise KeyError
else:
next_ctx.__parent__ = self
return next_ctx
def finalise(self, **kw):
"""Gets the list of sites
:returns: None
"""
self.sites = m.DBSession.query(m.Sites)
|
<commit_before><commit_msg>Add the new version 1 API root<commit_after>
|
# Package
from .... import models as m
class Root(object):
"""Root
The main root object for v1 API traversal
"""
__name__ = None
__parent__ = None
def __init__(self, request):
"""Create the default root object
:request: The Pyramid request object
"""
self._request = request
def __getitem__(self, key):
"""Check to see if we can traverse this ..."""
next_ctx = None
if key == 'v1':
next_ctx = None
if next_ctx is None:
raise KeyError
else:
next_ctx.__parent__ = self
return next_ctx
def finalise(self, **kw):
"""Gets the list of sites
:returns: None
"""
self.sites = m.DBSession.query(m.Sites)
|
Add the new version 1 API root# Package
from .... import models as m
class Root(object):
"""Root
The main root object for v1 API traversal
"""
__name__ = None
__parent__ = None
def __init__(self, request):
"""Create the default root object
:request: The Pyramid request object
"""
self._request = request
def __getitem__(self, key):
"""Check to see if we can traverse this ..."""
next_ctx = None
if key == 'v1':
next_ctx = None
if next_ctx is None:
raise KeyError
else:
next_ctx.__parent__ = self
return next_ctx
def finalise(self, **kw):
"""Gets the list of sites
:returns: None
"""
self.sites = m.DBSession.query(m.Sites)
|
<commit_before><commit_msg>Add the new version 1 API root<commit_after># Package
from .... import models as m
class Root(object):
"""Root
The main root object for v1 API traversal
"""
__name__ = None
__parent__ = None
def __init__(self, request):
"""Create the default root object
:request: The Pyramid request object
"""
self._request = request
def __getitem__(self, key):
"""Check to see if we can traverse this ..."""
next_ctx = None
if key == 'v1':
next_ctx = None
if next_ctx is None:
raise KeyError
else:
next_ctx.__parent__ = self
return next_ctx
def finalise(self, **kw):
"""Gets the list of sites
:returns: None
"""
self.sites = m.DBSession.query(m.Sites)
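Once real child resources exist, __getitem__ would hand back a context object instead of the placeholder None. A sketch with a hypothetical Sites child context, which is not part of this commit:

class Sites(object):
    """Hypothetical child context listing the sites under the v1 root."""
    __name__ = 'sites'
    __parent__ = None

    def __init__(self, request):
        self._request = request

# Inside Root.__getitem__ the placeholder branch would then become:
#     if key == 'sites':
#         next_ctx = Sites(self._request)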
|
|
8a5e29bf62e701b755767cfb7a31aeda434a0eac
|
lib/pyfrc/robotpy/boot.py
|
lib/pyfrc/robotpy/boot.py
|
import sys
class RollbackImporter:
def __init__(self):
"Creates an instance and installs as the global importer"
self.previousModules = sys.modules.copy()
def uninstall(self):
newmodules = sys.modules.copy()
for modname in newmodules.keys():
if modname not in self.previousModules and modname != "boot":
# Force reload when modname next imported
#print("Unloading '%s'" % modname)
# force delete module globals
for v in sys.modules[modname].__dict__.copy():
if v.startswith("__"):
continue
del sys.modules[modname].__dict__[v]
del sys.modules[modname]
def main():
#print(sys.path)
if "/c/py" not in sys.path:
sys.path.insert(0, "/c/py")
if "." not in sys.path:
sys.path.insert(0, ".")
import traceback
import gc
import time
#import runpy
while True:
rollback = RollbackImporter()
robot = None
try:
print("Importing user code.")
robot = __import__("robot")
print("Running user code.")
robot.run()
#runpy.run_module("robot", run_name="__main__")
except SystemExit:
pass
except:
print("Exception in user code, type 'reboot' to restart:")
print("-"*60)
traceback.print_exc(file=sys.stdout)
print("-"*60)
return
print("User code raised SystemExit; waiting 5 seconds before restart")
time.sleep(5)
sys.exc_traceback = None
sys.last_traceback = None
rollback.uninstall()
if robot is not None:
del robot
robot = None
gc.collect()
if __name__ == "__main__":
main()
|
Add robotpy files for uploading
|
Add robotpy files for uploading
|
Python
|
mit
|
robotpy/pyfrc
|
Add robotpy files for uploading
|
import sys
class RollbackImporter:
def __init__(self):
"Creates an instance and installs as the global importer"
self.previousModules = sys.modules.copy()
def uninstall(self):
newmodules = sys.modules.copy()
for modname in newmodules.keys():
if modname not in self.previousModules and modname != "boot":
# Force reload when modname next imported
#print("Unloading '%s'" % modname)
# force delete module globals
for v in sys.modules[modname].__dict__.copy():
if v.startswith("__"):
continue
del sys.modules[modname].__dict__[v]
del sys.modules[modname]
def main():
#print(sys.path)
if "/c/py" not in sys.path:
sys.path.insert(0, "/c/py")
if "." not in sys.path:
sys.path.insert(0, ".")
import traceback
import gc
import time
#import runpy
while True:
rollback = RollbackImporter()
robot = None
try:
print("Importing user code.")
robot = __import__("robot")
print("Running user code.")
robot.run()
#runpy.run_module("robot", run_name="__main__")
except SystemExit:
pass
except:
print("Exception in user code, type 'reboot' to restart:")
print("-"*60)
traceback.print_exc(file=sys.stdout)
print("-"*60)
return
print("User code raised SystemExit; waiting 5 seconds before restart")
time.sleep(5)
sys.exc_traceback = None
sys.last_traceback = None
rollback.uninstall()
if robot is not None:
del robot
robot = None
gc.collect()
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add robotpy files for uploading<commit_after>
|
import sys
class RollbackImporter:
def __init__(self):
"Creates an instance and installs as the global importer"
self.previousModules = sys.modules.copy()
def uninstall(self):
newmodules = sys.modules.copy()
for modname in newmodules.keys():
if modname not in self.previousModules and modname != "boot":
# Force reload when modname next imported
#print("Unloading '%s'" % modname)
# force delete module globals
for v in sys.modules[modname].__dict__.copy():
if v.startswith("__"):
continue
del sys.modules[modname].__dict__[v]
del sys.modules[modname]
def main():
#print(sys.path)
if "/c/py" not in sys.path:
sys.path.insert(0, "/c/py")
if "." not in sys.path:
sys.path.insert(0, ".")
import traceback
import gc
import time
#import runpy
while True:
rollback = RollbackImporter()
robot = None
try:
print("Importing user code.")
robot = __import__("robot")
print("Running user code.")
robot.run()
#runpy.run_module("robot", run_name="__main__")
except SystemExit:
pass
except:
print("Exception in user code, type 'reboot' to restart:")
print("-"*60)
traceback.print_exc(file=sys.stdout)
print("-"*60)
return
print("User code raised SystemExit; waiting 5 seconds before restart")
time.sleep(5)
sys.exc_traceback = None
sys.last_traceback = None
rollback.uninstall()
if robot is not None:
del robot
robot = None
gc.collect()
if __name__ == "__main__":
main()
|
Add robotpy files for uploadingimport sys
class RollbackImporter:
def __init__(self):
"Creates an instance and installs as the global importer"
self.previousModules = sys.modules.copy()
def uninstall(self):
newmodules = sys.modules.copy()
for modname in newmodules.keys():
if modname not in self.previousModules and modname != "boot":
# Force reload when modname next imported
#print("Unloading '%s'" % modname)
# force delete module globals
for v in sys.modules[modname].__dict__.copy():
if v.startswith("__"):
continue
del sys.modules[modname].__dict__[v]
del sys.modules[modname]
def main():
#print(sys.path)
if "/c/py" not in sys.path:
sys.path.insert(0, "/c/py")
if "." not in sys.path:
sys.path.insert(0, ".")
import traceback
import gc
import time
#import runpy
while True:
rollback = RollbackImporter()
robot = None
try:
print("Importing user code.")
robot = __import__("robot")
print("Running user code.")
robot.run()
#runpy.run_module("robot", run_name="__main__")
except SystemExit:
pass
except:
print("Exception in user code, type 'reboot' to restart:")
print("-"*60)
traceback.print_exc(file=sys.stdout)
print("-"*60)
return
print("User code raised SystemExit; waiting 5 seconds before restart")
time.sleep(5)
sys.exc_traceback = None
sys.last_traceback = None
rollback.uninstall()
if robot is not None:
del robot
robot = None
gc.collect()
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add robotpy files for uploading<commit_after>import sys
class RollbackImporter:
def __init__(self):
"Creates an instance and installs as the global importer"
self.previousModules = sys.modules.copy()
def uninstall(self):
newmodules = sys.modules.copy()
for modname in newmodules.keys():
if modname not in self.previousModules and modname != "boot":
# Force reload when modname next imported
#print("Unloading '%s'" % modname)
# force delete module globals
for v in sys.modules[modname].__dict__.copy():
if v.startswith("__"):
continue
del sys.modules[modname].__dict__[v]
del sys.modules[modname]
def main():
#print(sys.path)
if "/c/py" not in sys.path:
sys.path.insert(0, "/c/py")
if "." not in sys.path:
sys.path.insert(0, ".")
import traceback
import gc
import time
#import runpy
while True:
rollback = RollbackImporter()
robot = None
try:
print("Importing user code.")
robot = __import__("robot")
print("Running user code.")
robot.run()
#runpy.run_module("robot", run_name="__main__")
except SystemExit:
pass
except:
print("Exception in user code, type 'reboot' to restart:")
print("-"*60)
traceback.print_exc(file=sys.stdout)
print("-"*60)
return
print("User code raised SystemExit; waiting 5 seconds before restart")
time.sleep(5)
sys.exc_traceback = None
sys.last_traceback = None
rollback.uninstall()
if robot is not None:
del robot
robot = None
gc.collect()
if __name__ == "__main__":
main()
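The loop only requires a module named robot with a run() callable somewhere on the path it sets up. A minimal example of such a module; the body is illustrative:

# robot.py -- smallest module the boot loop will accept
def run():
    print("robot code running")
    # Returning normally (or raising SystemExit) sends the boot loop into
    # its 5-second wait, after which the module is unloaded and re-imported.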
|
|
857a1abd601ad2f50d30dcaf3bc8fc5bafc59165
|
txircd/modules/extra/stats_uptime.py
|
txircd/modules/extra/stats_uptime.py
|
from twisted.plugin import IPlugin
from txircd.module_interface import IModuleData, ModuleData
from txircd.utils import now
from zope.interface import implements
class StatsUptime(ModuleData):
implements(IPlugin, IModuleData)
name = "StatsUptime"
def actions(self):
return [ ("statsruntype-uptime", 10, self.displayUptime) ]
def displayUptime(self):
uptime = now() - self.ircd.startupTime
return {
self.ircd.name: "Server up {}".format(uptime if uptime.days > 0 else "0 days, {}".format(uptime))
}
statsUptime = StatsUptime()
|
Add STATS type to display server uptime
|
Add STATS type to display server uptime
|
Python
|
bsd-3-clause
|
Heufneutje/txircd
|
Add STATS type to display server uptime
|
from twisted.plugin import IPlugin
from txircd.module_interface import IModuleData, ModuleData
from txircd.utils import now
from zope.interface import implements
class StatsUptime(ModuleData):
implements(IPlugin, IModuleData)
name = "StatsUptime"
def actions(self):
return [ ("statsruntype-uptime", 10, self.displayUptime) ]
def displayUptime(self):
uptime = now() - self.ircd.startupTime
return {
self.ircd.name: "Server up {}".format(uptime if uptime.days > 0 else "0 days, {}".format(uptime))
}
statsUptime = StatsUptime()
|
<commit_before><commit_msg>Add STATS type to display server uptime<commit_after>
|
from twisted.plugin import IPlugin
from txircd.module_interface import IModuleData, ModuleData
from txircd.utils import now
from zope.interface import implements
class StatsUptime(ModuleData):
implements(IPlugin, IModuleData)
name = "StatsUptime"
def actions(self):
return [ ("statsruntype-uptime", 10, self.displayUptime) ]
def displayUptime(self):
uptime = now() - self.ircd.startupTime
return {
self.ircd.name: "Server up {}".format(uptime if uptime.days > 0 else "0 days, {}".format(uptime))
}
statsUptime = StatsUptime()
|
Add STATS type to display server uptimefrom twisted.plugin import IPlugin
from txircd.module_interface import IModuleData, ModuleData
from txircd.utils import now
from zope.interface import implements
class StatsUptime(ModuleData):
implements(IPlugin, IModuleData)
name = "StatsUptime"
def actions(self):
return [ ("statsruntype-uptime", 10, self.displayUptime) ]
def displayUptime(self):
uptime = now() - self.ircd.startupTime
return {
self.ircd.name: "Server up {}".format(uptime if uptime.days > 0 else "0 days, {}".format(uptime))
}
statsUptime = StatsUptime()
|
<commit_before><commit_msg>Add STATS type to display server uptime<commit_after>from twisted.plugin import IPlugin
from txircd.module_interface import IModuleData, ModuleData
from txircd.utils import now
from zope.interface import implements
class StatsUptime(ModuleData):
implements(IPlugin, IModuleData)
name = "StatsUptime"
def actions(self):
return [ ("statsruntype-uptime", 10, self.displayUptime) ]
def displayUptime(self):
uptime = now() - self.ircd.startupTime
return {
self.ircd.name: "Server up {}".format(uptime if uptime.days > 0 else "0 days, {}".format(uptime))
}
statsUptime = StatsUptime()
|
|
038362a9965977f027ea4be6ce5bfd5ac496e04c
|
qtpy/tests/test_qtx11extras.py
|
qtpy/tests/test_qtx11extras.py
|
import sys
import pytest
from qtpy import PYQT6, PYSIDE2, PYSIDE6
from qtpy.tests.utils import using_conda
# @pytest.mark.skipif(
#     PYQT6 or PYSIDE6, reason="Not available on Qt6-based bindings")
# @pytest.mark.skipif(
#     sys.platform != "win32" or using_conda(),
#     reason="Only available in Qt5 bindings > 5.9 with pip on Windows in CIs")
def test_qtx11extras():
QtX11Extras = pytest.importorskip("qtpy.QtX11Extras")
# TODO: this is just a placeholder file
# assert QtX11Extras.QSomething is not None
|
Add placeholder test file for QtX11Extras
|
Add placeholder test file for QtX11Extras
|
Python
|
mit
|
spyder-ide/qtpy
|
Add placeholder test file for QtX11Extras
|
import sys
import pytest
from qtpy import PYQT6, PYSIDE2, PYSIDE6
from qtpy.tests.utils import using_conda
# @pytest.mark.skipif(
#     PYQT6 or PYSIDE6, reason="Not available on Qt6-based bindings")
# @pytest.mark.skipif(
#     sys.platform != "win32" or using_conda(),
#     reason="Only available in Qt5 bindings > 5.9 with pip on Windows in CIs")
def test_qtx11extras():
QtX11Extras = pytest.importorskip("qtpy.QtX11Extras")
# TODO: this is just a placeholder file
# assert QtX11Extras.QSomething is not None
|
<commit_before><commit_msg>Add placeholder test file for QtX11Extras<commit_after>
|
import sys
import pytest
from qtpy import PYQT6, PYSIDE2, PYSIDE6
from qtpy.tests.utils import using_conda
# @pytest.mark.skipif(
#     PYQT6 or PYSIDE6, reason="Not available on Qt6-based bindings")
# @pytest.mark.skipif(
#     sys.platform != "win32" or using_conda(),
#     reason="Only available in Qt5 bindings > 5.9 with pip on Windows in CIs")
def test_qtx11extras():
QtX11Extras = pytest.importorskip("qtpy.QtX11Extras")
# TODO: this is just a placeholder file
# assert QtX11Extras.QSomething is not None
|
Add placeholder test file for QtX11Extrasimport sys
import pytest
from qtpy import PYQT6, PYSIDE2, PYSIDE6
from qtpy.tests.utils import using_conda
# @pytest.mark.skipif(
#     PYQT6 or PYSIDE6, reason="Not available on Qt6-based bindings")
# @pytest.mark.skipif(
#     sys.platform != "win32" or using_conda(),
#     reason="Only available in Qt5 bindings > 5.9 with pip on Windows in CIs")
def test_qtx11extras():
QtX11Extras = pytest.importorskip("qtpy.QtX11Extras")
# TODO: this is just a placeholder file
# assert QtX11Extras.QSomething is not None
|
<commit_before><commit_msg>Add placeholder test file for QtX11Extras<commit_after>import sys
import pytest
from qtpy import PYQT6, PYSIDE2, PYSIDE6
from qtpy.tests.utils import using_conda
# @pytest.mark.skipif(
#     PYQT6 or PYSIDE6, reason="Not available on Qt6-based bindings")
# @pytest.mark.skipif(
#     sys.platform != "win32" or using_conda(),
#     reason="Only available in Qt5 bindings > 5.9 with pip on Windows in CIs")
def test_qtx11extras():
QtX11Extras = pytest.importorskip("qtpy.QtX11Extras")
# TODO: this is just a placeholder file
# assert QtX11Extras.QSomething is not None
|
|
5c2782ef0d6dfc292875d9a8577b6f104093fb78
|
peer_grading/peer_grading_service.py
|
peer_grading/peer_grading_service.py
|
def get_next_submission(student_id, location):
"""
Request the next submission to be peer graded.
@param student_id: The student requesting to grade a peer. Must check to determine if the requesting student has
submitted an answer of their own.
@param location: The associated location for the submission to be graded.
@return: The submission to grade, if one is available.
"""
pass
def get_last_submission(student_id, location):
"""
Used to give visibility to scoring and workflow for a submission in peer grading. If the student has submitted a
submission and has graded enough peers, this function will return the submission as is, with all available scoring
data. If the student has not finished grading peers, scoring information on their submission is withheld.
@param student_id: The student.
@param location: The associated location.
    @return: The student's latest submission, with scoring withheld until the grading workflow is complete.
"""
pass
def submit(submission):
"""
Submit a submission for peer grading.
@param submission: The submission to add to the peer grading queue. Should contain the student_id,
associated location, and all answer related fields prepopulated. Submission date,
preferred grader, and other attributes can be determined internally.
@return: The saved submission.
"""
pass
|
Create the service stub for peer grading. This stub just has some doc and empty functions so we can discuss if the contract is reasonable, then potentially build on this frame.
|
Create the service stub for peer grading
This stub just has some doc and empty functions so we can discuss if the contract is reasonable, then potentially build on this frame.
|
Python
|
agpl-3.0
|
nttks/edx-ora2,eestay/edx-ora2,Edraak/edx-ora2,nttks/edx-ora2,Lektorium-LLC/edx-ora2,ubc/edx-ora2,ubc/edx-ora2,Stanford-Online/edx-ora2,eestay/edx-ora2,EDUlib/edx-ora2,Edraak/edx-ora2,singingwolfboy/edx-ora2,edx/edx-ora2,Stanford-Online/edx-ora2,Lektorium-LLC/edx-ora2,EDUlib/edx-ora2,Stanford-Online/edx-ora2,singingwolfboy/edx-ora2,Edraak/edx-ora2,ubc/edx-ora2,singingwolfboy/edx-ora2,Lektorium-LLC/edx-ora2,EDUlib/edx-ora2,kursitet/edx-ora2,eestay/edx-ora2,edx/edx-ora2,eestay/edx-ora2,EDUlib/edx-ora2,nttks/edx-ora2,kursitet/edx-ora2,Stanford-Online/edx-ora2,kursitet/edx-ora2,singingwolfboy/edx-ora2,miptliot/edx-ora2,vasyarv/edx-ora2,kursitet/edx-ora2,vasyarv/edx-ora2,nttks/edx-ora2,vasyarv/edx-ora2,miptliot/edx-ora2,vasyarv/edx-ora2,edx/edx-ora2,miptliot/edx-ora2,ubc/edx-ora2,Edraak/edx-ora2,edx/edx-ora2,miptliot/edx-ora2,Lektorium-LLC/edx-ora2
|
Create the service stub for peer grading
This stub just has some doc and empty functions so we can discuss if the contract is reasonable, then potentially build on this frame.
|
def get_next_submission(student_id, location):
"""
Request the next submission to be peer graded.
@param student_id: The student requesting to grade a peer. Must check to determine if the requesting student has
submitted an answer of their own.
@param location: The associated location for the submission to be graded.
@return: The submission to grade, if one is available.
"""
pass
def get_last_submission(student_id, location):
"""
Used to give visibility to scoring and workflow for a submission in peer grading. If the student has submitted a
submission and has graded enough peers, this function will return the submission as is, with all available scoring
data. If the student has not finished grading peers, scoring information on their submission is withheld.
@param student_id: The student.
@param location: The associated location.
    @return: The student's latest submission, with scoring withheld until the grading workflow is complete.
"""
pass
def submit(submission):
"""
Submit a submission for peer grading.
@param submission: The submission to add to the peer grading queue. Should contain the student_id,
associated location, and all answer related fields prepopulated. Submission date,
preferred grader, and other attributes can be determined internally.
@return: The saved submission.
"""
pass
|
<commit_before><commit_msg>Create the service stub for peer grading
This stub just has some doc and empty functions so we can discuss if the contract is reasonable, then potentially build on this frame.<commit_after>
|
def get_next_submission(student_id, location):
"""
Request the next submission to be peer graded.
@param student_id: The student requesting to grade a peer. Must check to determine if the requesting student has
submitted an answer of their own.
@param location: The associated location for the submission to be graded.
@return: The submission to grade, if one is available.
"""
pass
def get_last_submission(student_id, location):
"""
Used to give visibility to scoring and workflow for a submission in peer grading. If the student has submitted a
submission and has graded enough peers, this function will return the submission as is, with all available scoring
data. If the student has not finished grading peers, scoring information on their submission is withheld.
@param student_id: The student.
@param location: The associated location.
    @return: The student's latest submission, with scoring withheld until the grading workflow is complete.
"""
pass
def submit(submission):
"""
Submit a submission for peer grading.
@param submission: The submission to add to the peer grading queue. Should contain the student_id,
associated location, and all answer related fields prepopulated. Submission date,
preferred grader, and other attributes can be determined internally.
@return: The saved submission.
"""
pass
|
Create the service stub for peer grading
This stub just has some doc and empty functions so we can discuss if the contract is reasonable, then potentially build on this frame.
def get_next_submission(student_id, location):
"""
Request the next submission to be peer graded.
@param student_id: The student requesting to grade a peer. Must check to determine if the requesting student has
submitted an answer of their own.
@param location: The associated location for the submission to be graded.
@return: The submission to grade, if one is available.
"""
pass
def get_last_submission(student_id, location):
"""
Used to give visibility to scoring and workflow for a submission in peer grading. If the student has submitted a
submission and has graded enough peers, this function will return the submission as is, with all available scoring
data. If the student has not finished grading peers, scoring information on their submission is withheld.
@param student_id: The student.
@param location: The associated location.
    @return: The student's latest submission, with scoring withheld until the grading workflow is complete.
"""
pass
def submit(submission):
"""
Submit a submission for peer grading.
@param submission: The submission to add to the peer grading queue. Should contain the student_id,
associated location, and all answer related fields prepopulated. Submission date,
preferred grader, and other attributes can be determined internally.
@return: The saved submission.
"""
pass
|
<commit_before><commit_msg>Create the service stub for peer grading
This stub just has some doc and empty functions so we can discuss if the contract is reasonable, then potentially build on this frame.<commit_after>
def get_next_submission(student_id, location):
"""
Request the next submission to be peer graded.
@param student_id: The student requesting to grade a peer. Must check to determine if the requesting student has
submitted an answer of their own.
@param location: The associated location for the submission to be graded.
@return: The submission to grade, if one is available.
"""
pass
def get_last_submission(student_id, location):
"""
Used to give visibility to scoring and workflow for a submission in peer grading. If the student has submitted a
submission and has graded enough peers, this function will return the submission as is, with all available scoring
data. If the student has not finished grading peers, scoring information on their submission is withheld.
@param student_id: The student.
@param location: The associated location.
    @return: The student's latest submission, with scoring withheld until the grading workflow is complete.
"""
pass
def submit(submission):
"""
Submit a submission for peer grading.
@param submission: The submission to add to the peer grading queue. Should contain the student_id,
associated location, and all answer related fields prepopulated. Submission date,
preferred grader, and other attributes can be determined internally.
@return: The saved submission.
"""
pass
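A sketch of the workflow the contract implies once the functions are implemented; the submission dict layout is an assumption for illustration:

submission = {
    'student_id': 'student_1',
    'location': 'i4x://course/problem/peer_1',
    'answer': 'free-form response text',
}
saved = submit(submission)                 # enqueue one's own answer first
peer = get_next_submission('student_1', submission['location'])
# ... grade peer, repeat until enough peers have been graded ...
mine = get_last_submission('student_1', submission['location'])
# mine carries scoring data only after the grading quota is met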
|
|
b763ddb93c273f8641344866aa004c1798cf7234
|
py/minesweeper.py
|
py/minesweeper.py
|
class Solution(object):
def updateBoard(self, board, click):
"""
:type board: List[List[str]]
:type click: List[int]
:rtype: List[List[str]]
"""
r = len(board)
if r == 0:
return board
c = len(board[0])
if board[click[0]][click[1]] == 'M':
board[click[0]][click[1]] = 'X'
return board
else:
cnt = [[0] * len(board[0]) for _ in xrange(len(board))]
for x, row in enumerate(board):
for y, v in enumerate(row):
if v == 'M':
for i in xrange(-1, 2):
for j in xrange(-1, 2):
if i or j:
if 0 <= x + i < r and 0 <= y + j < c:
cnt[x + i][y + j] += 1
visited = set()
q = [tuple(click)]
for v in q:
if v not in visited:
visited.add(v)
if cnt[v[0]][v[1]] > 0:
board[v[0]][v[1]] = str(cnt[v[0]][v[1]])
else:
board[v[0]][v[1]] = 'B'
for i in xrange(-1, 2):
for j in xrange(-1, 2):
if i or j:
nx, ny = v[0] + i, v[1] + j
if 0 <= nx < r and 0 <= ny < c:
q.append((nx, ny))
return board
|
Add py solution for 529. Minesweeper
|
Add py solution for 529. Minesweeper
529. Minesweeper: https://leetcode.com/problems/minesweeper/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 529. Minesweeper
529. Minesweeper: https://leetcode.com/problems/minesweeper/
|
class Solution(object):
def updateBoard(self, board, click):
"""
:type board: List[List[str]]
:type click: List[int]
:rtype: List[List[str]]
"""
r = len(board)
if r == 0:
return board
c = len(board[0])
if board[click[0]][click[1]] == 'M':
board[click[0]][click[1]] = 'X'
return board
else:
cnt = [[0] * len(board[0]) for _ in xrange(len(board))]
for x, row in enumerate(board):
for y, v in enumerate(row):
if v == 'M':
for i in xrange(-1, 2):
for j in xrange(-1, 2):
if i or j:
if 0 <= x + i < r and 0 <= y + j < c:
cnt[x + i][y + j] += 1
visited = set()
q = [tuple(click)]
for v in q:
if v not in visited:
visited.add(v)
if cnt[v[0]][v[1]] > 0:
board[v[0]][v[1]] = str(cnt[v[0]][v[1]])
else:
board[v[0]][v[1]] = 'B'
for i in xrange(-1, 2):
for j in xrange(-1, 2):
if i or j:
nx, ny = v[0] + i, v[1] + j
if 0 <= nx < r and 0 <= ny < c:
q.append((nx, ny))
return board
|
<commit_before><commit_msg>Add py solution for 529. Minesweeper
529. Minesweeper: https://leetcode.com/problems/minesweeper/<commit_after>
|
class Solution(object):
def updateBoard(self, board, click):
"""
:type board: List[List[str]]
:type click: List[int]
:rtype: List[List[str]]
"""
r = len(board)
if r == 0:
return board
c = len(board[0])
if board[click[0]][click[1]] == 'M':
board[click[0]][click[1]] = 'X'
return board
else:
cnt = [[0] * len(board[0]) for _ in xrange(len(board))]
for x, row in enumerate(board):
for y, v in enumerate(row):
if v == 'M':
for i in xrange(-1, 2):
for j in xrange(-1, 2):
if i or j:
if 0 <= x + i < r and 0 <= y + j < c:
cnt[x + i][y + j] += 1
visited = set()
q = [tuple(click)]
for v in q:
if v not in visited:
visited.add(v)
if cnt[v[0]][v[1]] > 0:
board[v[0]][v[1]] = str(cnt[v[0]][v[1]])
else:
board[v[0]][v[1]] = 'B'
for i in xrange(-1, 2):
for j in xrange(-1, 2):
if i or j:
nx, ny = v[0] + i, v[1] + j
if 0 <= nx < r and 0 <= ny < c:
q.append((nx, ny))
return board
|
Add py solution for 529. Minesweeper
529. Minesweeper: https://leetcode.com/problems/minesweeper/class Solution(object):
def updateBoard(self, board, click):
"""
:type board: List[List[str]]
:type click: List[int]
:rtype: List[List[str]]
"""
r = len(board)
if r == 0:
return board
c = len(board[0])
if board[click[0]][click[1]] == 'M':
board[click[0]][click[1]] = 'X'
return board
else:
cnt = [[0] * len(board[0]) for _ in xrange(len(board))]
for x, row in enumerate(board):
for y, v in enumerate(row):
if v == 'M':
for i in xrange(-1, 2):
for j in xrange(-1, 2):
if i or j:
if 0 <= x + i < r and 0 <= y + j < c:
cnt[x + i][y + j] += 1
visited = set()
q = [tuple(click)]
for v in q:
if v not in visited:
visited.add(v)
if cnt[v[0]][v[1]] > 0:
board[v[0]][v[1]] = str(cnt[v[0]][v[1]])
else:
board[v[0]][v[1]] = 'B'
for i in xrange(-1, 2):
for j in xrange(-1, 2):
if i or j:
nx, ny = v[0] + i, v[1] + j
if 0 <= nx < r and 0 <= ny < c:
q.append((nx, ny))
return board
|
<commit_before><commit_msg>Add py solution for 529. Minesweeper
529. Minesweeper: https://leetcode.com/problems/minesweeper/<commit_after>class Solution(object):
def updateBoard(self, board, click):
"""
:type board: List[List[str]]
:type click: List[int]
:rtype: List[List[str]]
"""
r = len(board)
if r == 0:
return board
c = len(board[0])
if board[click[0]][click[1]] == 'M':
board[click[0]][click[1]] = 'X'
return board
else:
cnt = [[0] * len(board[0]) for _ in xrange(len(board))]
for x, row in enumerate(board):
for y, v in enumerate(row):
if v == 'M':
for i in xrange(-1, 2):
for j in xrange(-1, 2):
if i or j:
if 0 <= x + i < r and 0 <= y + j < c:
cnt[x + i][y + j] += 1
visited = set()
q = [tuple(click)]
for v in q:
if v not in visited:
visited.add(v)
if cnt[v[0]][v[1]] > 0:
board[v[0]][v[1]] = str(cnt[v[0]][v[1]])
else:
board[v[0]][v[1]] = 'B'
for i in xrange(-1, 2):
for j in xrange(-1, 2):
if i or j:
nx, ny = v[0] + i, v[1] + j
if 0 <= nx < r and 0 <= ny < c:
q.append((nx, ny))
return board
|
|
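A minimal driver for the Minesweeper solution above, assuming Python 2 to match its xrange usage; the 3x3 board and click position are illustrative only:
# 'E' = unrevealed empty square, 'M' = unrevealed mine.
board = [['E', 'E', 'E'],
         ['E', 'E', 'M'],
         ['E', 'E', 'E']]
result = Solution().updateBoard(board, [0, 0])
for row in result:
    print ' '.join(row)
# The blank region floods out from the click ('B'); squares bordering
# the mine are replaced by their adjacent-mine counts.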
3e2a52e27e702532ca198a815adda9a83e4b96f2
|
examples/test_checkboxes.py
|
examples/test_checkboxes.py
|
from seleniumbase import BaseCase
class MyTestClass(BaseCase):
def test_checkboxes_and_radio_buttons(self):
self.open("https://www.w3schools.com/tags/tryit.asp"
"?filename=tryhtml5_input_type_checkbox")
self.switch_to_frame("iframeResult")
checkbox = "input#vehicle2"
self.assert_false(self.is_selected(checkbox))
self.click(checkbox)
self.assert_true(self.is_selected(checkbox))
self.open("https://www.w3schools.com/tags/tryit.asp"
"?filename=tryhtml5_input_type_radio")
self.switch_to_frame("iframeResult")
option_button = "input#male"
self.assert_false(self.is_selected(option_button))
self.click(option_button)
self.assert_true(self.is_selected(option_button))
|
Add a test for checkboxes and radio buttons
|
Add a test for checkboxes and radio buttons
|
Python
|
mit
|
seleniumbase/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase
|
Add a test for checkboxes and radio buttons
|
from seleniumbase import BaseCase
class MyTestClass(BaseCase):
def test_checkboxes_and_radio_buttons(self):
self.open("https://www.w3schools.com/tags/tryit.asp"
"?filename=tryhtml5_input_type_checkbox")
self.switch_to_frame("iframeResult")
checkbox = "input#vehicle2"
self.assert_false(self.is_selected(checkbox))
self.click(checkbox)
self.assert_true(self.is_selected(checkbox))
self.open("https://www.w3schools.com/tags/tryit.asp"
"?filename=tryhtml5_input_type_radio")
self.switch_to_frame("iframeResult")
option_button = "input#male"
self.assert_false(self.is_selected(option_button))
self.click(option_button)
self.assert_true(self.is_selected(option_button))
|
<commit_before><commit_msg>Add a test for checkboxes and radio buttons<commit_after>
|
from seleniumbase import BaseCase
class MyTestClass(BaseCase):
def test_checkboxes_and_radio_buttons(self):
self.open("https://www.w3schools.com/tags/tryit.asp"
"?filename=tryhtml5_input_type_checkbox")
self.switch_to_frame("iframeResult")
checkbox = "input#vehicle2"
self.assert_false(self.is_selected(checkbox))
self.click(checkbox)
self.assert_true(self.is_selected(checkbox))
self.open("https://www.w3schools.com/tags/tryit.asp"
"?filename=tryhtml5_input_type_radio")
self.switch_to_frame("iframeResult")
option_button = "input#male"
self.assert_false(self.is_selected(option_button))
self.click(option_button)
self.assert_true(self.is_selected(option_button))
|
Add a test for checkboxes and radio buttonsfrom seleniumbase import BaseCase
class MyTestClass(BaseCase):
def test_checkboxes_and_radio_buttons(self):
self.open("https://www.w3schools.com/tags/tryit.asp"
"?filename=tryhtml5_input_type_checkbox")
self.switch_to_frame("iframeResult")
checkbox = "input#vehicle2"
self.assert_false(self.is_selected(checkbox))
self.click(checkbox)
self.assert_true(self.is_selected(checkbox))
self.open("https://www.w3schools.com/tags/tryit.asp"
"?filename=tryhtml5_input_type_radio")
self.switch_to_frame("iframeResult")
option_button = "input#male"
self.assert_false(self.is_selected(option_button))
self.click(option_button)
self.assert_true(self.is_selected(option_button))
|
<commit_before><commit_msg>Add a test for checkboxes and radio buttons<commit_after>from seleniumbase import BaseCase
class MyTestClass(BaseCase):
def test_checkboxes_and_radio_buttons(self):
self.open("https://www.w3schools.com/tags/tryit.asp"
"?filename=tryhtml5_input_type_checkbox")
self.switch_to_frame("iframeResult")
checkbox = "input#vehicle2"
self.assert_false(self.is_selected(checkbox))
self.click(checkbox)
self.assert_true(self.is_selected(checkbox))
self.open("https://www.w3schools.com/tags/tryit.asp"
"?filename=tryhtml5_input_type_radio")
self.switch_to_frame("iframeResult")
option_button = "input#male"
self.assert_false(self.is_selected(option_button))
self.click(option_button)
self.assert_true(self.is_selected(option_button))
|
|
704fa9ba20a0a05298eaf9146eafc158e1a5953a
|
temba/chatbase/tasks.py
|
temba/chatbase/tasks.py
|
from __future__ import print_function, unicode_literals
import logging
from celery.task import task
from temba.orgs.models import Org
from .models import Chatbase
logger = logging.getLogger(__name__)
@task(track_started=True, name='send_chatbase_event')
def send_chatbase_event(org, channel, msg, contact):
try:
org = Org.objects.get(id=org)
if org.is_connected_to_chatbase():
chatbase_args = dict(org=org.id,
channel=channel,
msg=msg,
contact=contact)
chatbase = Chatbase.create(**chatbase_args)
chatbase.trigger_chatbase_event()
except Exception as e:
logger.error("Error for chatbase event: %s" % e.args, exc_info=True)
|
Add task to chatbase call
|
Add task to chatbase call
|
Python
|
agpl-3.0
|
pulilab/rapidpro,pulilab/rapidpro,pulilab/rapidpro,pulilab/rapidpro,pulilab/rapidpro
|
Add task to chatbase call
|
from __future__ import print_function, unicode_literals
import logging
from celery.task import task
from temba.orgs.models import Org
from .models import Chatbase
logger = logging.getLogger(__name__)
@task(track_started=True, name='send_chatbase_event')
def send_chatbase_event(org, channel, msg, contact):
try:
org = Org.objects.get(id=org)
if org.is_connected_to_chatbase():
chatbase_args = dict(org=org.id,
channel=channel,
msg=msg,
contact=contact)
chatbase = Chatbase.create(**chatbase_args)
chatbase.trigger_chatbase_event()
except Exception as e:
logger.error("Error for chatbase event: %s" % e.args, exc_info=True)
|
<commit_before><commit_msg>Add task to chatbase call<commit_after>
|
from __future__ import print_function, unicode_literals
import logging
from celery.task import task
from temba.orgs.models import Org
from .models import Chatbase
logger = logging.getLogger(__name__)
@task(track_started=True, name='send_chatbase_event')
def send_chatbase_event(org, channel, msg, contact):
try:
org = Org.objects.get(id=org)
if org.is_connected_to_chatbase():
chatbase_args = dict(org=org.id,
channel=channel,
msg=msg,
contact=contact)
chatbase = Chatbase.create(**chatbase_args)
chatbase.trigger_chatbase_event()
except Exception as e:
logger.error("Error for chatbase event: %s" % e.args, exc_info=True)
|
Add task to chatbase callfrom __future__ import print_function, unicode_literals
import logging
from celery.task import task
from temba.orgs.models import Org
from .models import Chatbase
logger = logging.getLogger(__name__)
@task(track_started=True, name='send_chatbase_event')
def send_chatbase_event(org, channel, msg, contact):
try:
org = Org.objects.get(id=org)
if org.is_connected_to_chatbase():
chatbase_args = dict(org=org.id,
channel=channel,
msg=msg,
contact=contact)
chatbase = Chatbase.create(**chatbase_args)
chatbase.trigger_chatbase_event()
except Exception as e:
logger.error("Error for chatbase event: %s" % e.args, exc_info=True)
|
<commit_before><commit_msg>Add task to chatbase call<commit_after>from __future__ import print_function, unicode_literals
import logging
from celery.task import task
from temba.orgs.models import Org
from .models import Chatbase
logger = logging.getLogger(__name__)
@task(track_started=True, name='send_chatbase_event')
def send_chatbase_event(org, channel, msg, contact):
try:
org = Org.objects.get(id=org)
if org.is_connected_to_chatbase():
chatbase_args = dict(org=org.id,
channel=channel,
msg=msg,
contact=contact)
chatbase = Chatbase.create(**chatbase_args)
chatbase.trigger_chatbase_event()
except Exception as e:
logger.error("Error for chatbase event: %s" % e.args, exc_info=True)
|
|
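A hypothetical call site for the task above, assuming the standard Celery delay() API and that callers pass ids matching the signature (org as an id, since the task does Org.objects.get(id=org)):
from temba.chatbase.tasks import send_chatbase_event

def on_message_handled(msg):
    # Attribute names on msg are illustrative, not from this commit.
    send_chatbase_event.delay(msg.org_id, msg.channel_id, msg.id, msg.contact_id)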
83b290b8d3da89d371ae88057472b838c5433471
|
cura/Settings/MaterialSettingsVisibilityHandler.py
|
cura/Settings/MaterialSettingsVisibilityHandler.py
|
# Copyright (c) 2017 Ultimaker B.V.
# Uranium is released under the terms of the AGPLv3 or higher.
from UM.Settings.Models.SettingVisibilityHandler import SettingVisibilityHandler
class MaterialSettingsVisibilityHandler(SettingVisibilityHandler):
def __init__(self, parent = None, *args, **kwargs):
super().__init__(parent = parent, *args, **kwargs)
material_settings = {
"default_material_print_temperature",
"material_bed_temperature",
"material_standby_temperature",
"cool_fan_speed",
"retraction_amount",
"retraction_speed",
}
self.setVisible(material_settings)
|
# Copyright (c) 2017 Ultimaker B.V.
# Uranium is released under the terms of the AGPLv3 or higher.
import UM.Settings.Models.SettingVisibilityHandler
class MaterialSettingsVisibilityHandler(UM.Settings.Models.SettingVisibilityHandler.SettingVisibilityHandler):
def __init__(self, parent = None, *args, **kwargs):
super().__init__(parent = parent, *args, **kwargs)
material_settings = {
"default_material_print_temperature",
"material_bed_temperature",
"material_standby_temperature",
"cool_fan_speed",
"retraction_amount",
"retraction_speed",
}
self.setVisible(material_settings)
|
Use full import path for parent class
|
Use full import path for parent class
Something seems off with the build for some reason. I'm trying to fix it this way.
|
Python
|
agpl-3.0
|
hmflash/Cura,ynotstartups/Wanhao,ynotstartups/Wanhao,hmflash/Cura,Curahelper/Cura,fieldOfView/Cura,fieldOfView/Cura,Curahelper/Cura
|
# Copyright (c) 2017 Ultimaker B.V.
# Uranium is released under the terms of the AGPLv3 or higher.
from UM.Settings.Models.SettingVisibilityHandler import SettingVisibilityHandler
class MaterialSettingsVisibilityHandler(SettingVisibilityHandler):
def __init__(self, parent = None, *args, **kwargs):
super().__init__(parent = parent, *args, **kwargs)
material_settings = {
"default_material_print_temperature",
"material_bed_temperature",
"material_standby_temperature",
"cool_fan_speed",
"retraction_amount",
"retraction_speed",
}
self.setVisible(material_settings)
Use full import path for parent class
Something seems off with the build for some reason. I'm trying to fix it this way.
|
# Copyright (c) 2017 Ultimaker B.V.
# Uranium is released under the terms of the AGPLv3 or higher.
import UM.Settings.Models.SettingVisibilityHandler
class MaterialSettingsVisibilityHandler(UM.Settings.Models.SettingVisibilityHandler.SettingVisibilityHandler):
def __init__(self, parent = None, *args, **kwargs):
super().__init__(parent = parent, *args, **kwargs)
material_settings = {
"default_material_print_temperature",
"material_bed_temperature",
"material_standby_temperature",
"cool_fan_speed",
"retraction_amount",
"retraction_speed",
}
self.setVisible(material_settings)
|
<commit_before># Copyright (c) 2017 Ultimaker B.V.
# Uranium is released under the terms of the AGPLv3 or higher.
from UM.Settings.Models.SettingVisibilityHandler import SettingVisibilityHandler
class MaterialSettingsVisibilityHandler(SettingVisibilityHandler):
def __init__(self, parent = None, *args, **kwargs):
super().__init__(parent = parent, *args, **kwargs)
material_settings = {
"default_material_print_temperature",
"material_bed_temperature",
"material_standby_temperature",
"cool_fan_speed",
"retraction_amount",
"retraction_speed",
}
self.setVisible(material_settings)
<commit_msg>Use full import path for parent class
Something seems off with the build for some reason. I'm trying to fix it this way.<commit_after>
|
# Copyright (c) 2017 Ultimaker B.V.
# Uranium is released under the terms of the AGPLv3 or higher.
import UM.Settings.Models.SettingVisibilityHandler
class MaterialSettingsVisibilityHandler(UM.Settings.Models.SettingVisibilityHandler.SettingVisibilityHandler):
def __init__(self, parent = None, *args, **kwargs):
super().__init__(parent = parent, *args, **kwargs)
material_settings = {
"default_material_print_temperature",
"material_bed_temperature",
"material_standby_temperature",
"cool_fan_speed",
"retraction_amount",
"retraction_speed",
}
self.setVisible(material_settings)
|
# Copyright (c) 2017 Ultimaker B.V.
# Uranium is released under the terms of the AGPLv3 or higher.
from UM.Settings.Models.SettingVisibilityHandler import SettingVisibilityHandler
class MaterialSettingsVisibilityHandler(SettingVisibilityHandler):
def __init__(self, parent = None, *args, **kwargs):
super().__init__(parent = parent, *args, **kwargs)
material_settings = {
"default_material_print_temperature",
"material_bed_temperature",
"material_standby_temperature",
"cool_fan_speed",
"retraction_amount",
"retraction_speed",
}
self.setVisible(material_settings)
Use full import path for parent class
Something seems off with the build for some reason. I'm trying to fix it this way.# Copyright (c) 2017 Ultimaker B.V.
# Uranium is released under the terms of the AGPLv3 or higher.
import UM.Settings.Models.SettingVisibilityHandler
class MaterialSettingsVisibilityHandler(UM.Settings.Models.SettingVisibilityHandler.SettingVisibilityHandler):
def __init__(self, parent = None, *args, **kwargs):
super().__init__(parent = parent, *args, **kwargs)
material_settings = {
"default_material_print_temperature",
"material_bed_temperature",
"material_standby_temperature",
"cool_fan_speed",
"retraction_amount",
"retraction_speed",
}
self.setVisible(material_settings)
|
<commit_before># Copyright (c) 2017 Ultimaker B.V.
# Uranium is released under the terms of the AGPLv3 or higher.
from UM.Settings.Models.SettingVisibilityHandler import SettingVisibilityHandler
class MaterialSettingsVisibilityHandler(SettingVisibilityHandler):
def __init__(self, parent = None, *args, **kwargs):
super().__init__(parent = parent, *args, **kwargs)
material_settings = {
"default_material_print_temperature",
"material_bed_temperature",
"material_standby_temperature",
"cool_fan_speed",
"retraction_amount",
"retraction_speed",
}
self.setVisible(material_settings)
<commit_msg>Use full import path for parent class
Something seems off with the build for some reason. I'm trying to fix it this way.<commit_after># Copyright (c) 2017 Ultimaker B.V.
# Uranium is released under the terms of the AGPLv3 or higher.
import UM.Settings.Models.SettingVisibilityHandler
class MaterialSettingsVisibilityHandler(UM.Settings.Models.SettingVisibilityHandler.SettingVisibilityHandler):
def __init__(self, parent = None, *args, **kwargs):
super().__init__(parent = parent, *args, **kwargs)
material_settings = {
"default_material_print_temperature",
"material_bed_temperature",
"material_standby_temperature",
"cool_fan_speed",
"retraction_amount",
"retraction_speed",
}
self.setVisible(material_settings)
|
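The diff above only changes import style; both forms bind the same class, as this equivalence sketch shows. The build motivation is inferred from the commit message, not confirmed:
from UM.Settings.Models.SettingVisibilityHandler import SettingVisibilityHandler
import UM.Settings.Models.SettingVisibilityHandler
# Full-path references resolve through the module object at attribute
# lookup time, which some frozen-build tools handle more reliably than
# names copied out by a from-import.
assert (UM.Settings.Models.SettingVisibilityHandler.SettingVisibilityHandler
        is SettingVisibilityHandler)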
878aa1aa78b7895d2211d5cef392675512f19d6a
|
fullcalendar/migrations/0003_auto_20150430_2235.py
|
fullcalendar/migrations/0003_auto_20150430_2235.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('events', '0002_auto_20150321_1234'),
]
operations = [
migrations.AlterField(
model_name='eventcategory',
name='name',
field=models.CharField(max_length=50, verbose_name='name'),
preserve_default=True,
),
]
|
Add migration to remove the unique constraint.
|
Add migration to remove the unique constraint.
|
Python
|
mit
|
jonge-democraten/mezzanine-fullcalendar
|
Add migration to remove the unique constraint.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('events', '0002_auto_20150321_1234'),
]
operations = [
migrations.AlterField(
model_name='eventcategory',
name='name',
field=models.CharField(max_length=50, verbose_name='name'),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add migration to remove the unique constraint.<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('events', '0002_auto_20150321_1234'),
]
operations = [
migrations.AlterField(
model_name='eventcategory',
name='name',
field=models.CharField(max_length=50, verbose_name='name'),
preserve_default=True,
),
]
|
Add migration to remove the unique constraint.# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('events', '0002_auto_20150321_1234'),
]
operations = [
migrations.AlterField(
model_name='eventcategory',
name='name',
field=models.CharField(max_length=50, verbose_name='name'),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add migration to remove the unique constraint.<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('events', '0002_auto_20150321_1234'),
]
operations = [
migrations.AlterField(
model_name='eventcategory',
name='name',
field=models.CharField(max_length=50, verbose_name='name'),
preserve_default=True,
),
]
|
|
4ec2992737a256ad89ebee64885a0c6fd2bccc7b
|
tests/UselessSymbolsRemove/SimpleTest.py
|
tests/UselessSymbolsRemove/SimpleTest.py
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 17.08.2017 14:39
:Licence GNUv3
Part of grammpy-transforms
"""
from unittest import TestCase, main
from grammpy import *
from grammpy_transforms import ContextFree
class S(Nonterminal): pass
class A(Nonterminal): pass
class B(Nonterminal): pass
class RuleSto0(Rule): rule = ([S], [0])
class RuleStoA(Rule): rule = ([S], [A])
class RuleAtoAB(Rule): rule = ([A], [A, B])
class RuleBto1(Rule): rule = ([B], [1])
class SimpleTest(TestCase):
def test_simpleTest(self):
g = Grammar(terminals=[0, 1],
nonterminals=[S, A, B],
rules=[RuleSto0, RuleStoA, RuleAtoAB, RuleBto1],
start_symbol=S)
com = ContextFree.remove_useless_symbols(g)
self.assertTrue(com.have_term(0))
self.assertFalse(com.have_term(1))
self.assertTrue(com.have_nonterm(S))
self.assertFalse(com.have_nonterm(A))
self.assertFalse(com.have_nonterm(B))
def test_simpleTestShouldNotChange(self):
g = Grammar(terminals=[0, 1],
nonterminals=[S, A, B],
rules=[RuleSto0, RuleStoA, RuleAtoAB, RuleBto1],
start_symbol=S)
ContextFree.remove_useless_symbols(g)
self.assertTrue(g.have_term([0, 1]))
self.assertTrue(g.have_nonterm([S, A, B]))
def test_simpleTestShouldChange(self):
g = Grammar(terminals=[0, 1],
nonterminals=[S, A, B],
rules=[RuleSto0, RuleStoA, RuleAtoAB, RuleBto1],
start_symbol=S)
ContextFree.remove_useless_symbols(g)
self.assertTrue(g.have_term(0))
self.assertFalse(g.have_term(1))
self.assertTrue(g.have_nonterm(S))
self.assertFalse(g.have_nonterm(A))
self.assertFalse(g.have_nonterm(B))
if __name__ == '__main__':
main()
|
Create simple test for simple removing of useless symbols
|
Create simple test for simple removing of useless symbols
|
Python
|
mit
|
PatrikValkovic/grammpy
|
Create simple test for simple removing of useless symbols
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 17.08.2017 14:39
:Licence GNUv3
Part of grammpy-transforms
"""
from unittest import TestCase, main
from grammpy import *
from grammpy_transforms import ContextFree
class S(Nonterminal): pass
class A(Nonterminal): pass
class B(Nonterminal): pass
class RuleSto0(Rule): rule = ([S], [0])
class RuleStoA(Rule): rule = ([S], [A])
class RuleAtoAB(Rule): rule = ([A], [A, B])
class RuleBto1(Rule): rule = ([B], [1])
class SimpleTest(TestCase):
def test_simpleTest(self):
g = Grammar(terminals=[0, 1],
nonterminals=[S, A, B],
rules=[RuleSto0, RuleStoA, RuleAtoAB, RuleBto1],
start_symbol=S)
com = ContextFree.remove_useless_symbols(g)
self.assertTrue(com.have_term(0))
self.assertFalse(com.have_term(1))
self.assertTrue(com.have_nonterm(S))
self.assertFalse(com.have_nonterm(A))
self.assertFalse(com.have_nonterm(B))
def test_simpleTestShouldNotChange(self):
g = Grammar(terminals=[0, 1],
nonterminals=[S, A, B],
rules=[RuleSto0, RuleStoA, RuleAtoAB, RuleBto1],
start_symbol=S)
ContextFree.remove_useless_symbols(g)
self.assertTrue(g.have_term([0, 1]))
self.assertTrue(g.have_nonterm([S, A, B]))
def test_simpleTestShouldChange(self):
g = Grammar(terminals=[0, 1],
nonterminals=[S, A, B],
rules=[RuleSto0, RuleStoA, RuleAtoAB, RuleBto1],
start_symbol=S)
ContextFree.remove_useless_symbols(g)
self.assertTrue(g.have_term(0))
self.assertFalse(g.have_term(1))
self.assertTrue(g.have_nonterm(S))
self.assertFalse(g.have_nonterm(A))
self.assertFalse(g.have_nonterm(B))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Create simple test for simple removing of useless symbols<commit_after>
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 17.08.2017 14:39
:Licence GNUv3
Part of grammpy-transforms
"""
from unittest import TestCase, main
from grammpy import *
from grammpy_transforms import ContextFree
class S(Nonterminal): pass
class A(Nonterminal): pass
class B(Nonterminal): pass
class RuleSto0(Rule): rule = ([S], [0])
class RuleStoA(Rule): rule = ([S], [A])
class RuleAtoAB(Rule): rule = ([A], [A, B])
class RuleBto1(Rule): rule = ([B], [1])
class SimpleTest(TestCase):
def test_simpleTest(self):
g = Grammar(terminals=[0, 1],
nonterminals=[S, A, B],
rules=[RuleSto0, RuleStoA, RuleAtoAB, RuleBto1],
start_symbol=S)
com = ContextFree.remove_useless_symbols(g)
self.assertTrue(com.have_term(0))
self.assertFalse(com.have_term(1))
self.assertTrue(com.have_nonterm(S))
self.assertFalse(com.have_nonterm(A))
self.assertFalse(com.have_nonterm(B))
def test_simpleTestShouldNotChange(self):
g = Grammar(terminals=[0, 1],
nonterminals=[S, A, B],
rules=[RuleSto0, RuleStoA, RuleAtoAB, RuleBto1],
start_symbol=S)
ContextFree.remove_useless_symbols(g)
self.assertTrue(g.have_term([0, 1]))
self.assertTrue(g.have_nonterm([S, A, B]))
def test_simpleTestShouldChange(self):
g = Grammar(terminals=[0, 1],
nonterminals=[S, A, B],
rules=[RuleSto0, RuleStoA, RuleAtoAB, RuleBto1],
start_symbol=S)
ContextFree.remove_useless_symbols(g)
self.assertTrue(g.have_term(0))
self.assertFalse(g.have_term(1))
self.assertTrue(g.have_nonterm(S))
self.assertFalse(g.have_nonterm(A))
self.assertFalse(g.have_nonterm(B))
if __name__ == '__main__':
main()
|
Create simple test for simple removing of useless symbols#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 17.08.2017 14:39
:Licence GNUv3
Part of grammpy-transforms
"""
from unittest import TestCase, main
from grammpy import *
from grammpy_transforms import ContextFree
class S(Nonterminal): pass
class A(Nonterminal): pass
class B(Nonterminal): pass
class RuleSto0(Rule): rule = ([S], [0])
class RuleStoA(Rule): rule = ([S], [A])
class RuleAtoAB(Rule): rule = ([A], [A, B])
class RuleBto1(Rule): rule = ([B], [1])
class SimpleTest(TestCase):
def test_simpleTest(self):
g = Grammar(terminals=[0, 1],
nonterminals=[S, A, B],
rules=[RuleSto0, RuleStoA, RuleAtoAB, RuleBto1],
start_symbol=S)
com = ContextFree.remove_useless_symbols(g)
self.assertTrue(com.have_term(0))
self.assertFalse(com.have_term(1))
self.assertTrue(com.have_nonterm(S))
self.assertFalse(com.have_nonterm(A))
self.assertFalse(com.have_nonterm(B))
def test_simpleTestShouldNotChange(self):
g = Grammar(terminals=[0, 1],
nonterminals=[S, A, B],
rules=[RuleSto0, RuleStoA, RuleAtoAB, RuleBto1],
start_symbol=S)
ContextFree.remove_useless_symbols(g)
self.assertTrue(g.have_term([0, 1]))
self.assertTrue(g.have_nonterm([S, A, B]))
def test_simpleTestShouldChange(self):
g = Grammar(terminals=[0, 1],
nonterminals=[S, A, B],
rules=[RuleSto0, RuleStoA, RuleAtoAB, RuleBto1],
start_symbol=S)
ContextFree.remove_useless_symbols(g)
self.assertTrue(g.have_term(0))
self.assertFalse(g.have_term(1))
self.assertTrue(g.have_nonterm(S))
self.assertFalse(g.have_nonterm(A))
self.assertFalse(g.have_nonterm(B))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Create simple test for simple removing of useless symbols<commit_after>#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 17.08.2017 14:39
:Licence GNUv3
Part of grammpy-transforms
"""
from unittest import TestCase, main
from grammpy import *
from grammpy_transforms import ContextFree
class S(Nonterminal): pass
class A(Nonterminal): pass
class B(Nonterminal): pass
class RuleSto0(Rule): rule = ([S], [0])
class RuleStoA(Rule): rule = ([S], [A])
class RuleAtoAB(Rule): rule = ([A], [A, B])
class RuleBto1(Rule): rule = ([B], [1])
class SimpleTest(TestCase):
def test_simpleTest(self):
g = Grammar(terminals=[0, 1],
nonterminals=[S, A, B],
rules=[RuleSto0, RuleStoA, RuleAtoAB, RuleBto1],
start_symbol=S)
com = ContextFree.remove_useless_symbols(g)
self.assertTrue(com.have_term(0))
self.assertFalse(com.have_term(1))
self.assertTrue(com.have_nonterm(S))
self.assertFalse(com.have_nonterm(A))
self.assertFalse(com.have_nonterm(B))
def test_simpleTestShouldNotChange(self):
g = Grammar(terminals=[0, 1],
nonterminals=[S, A, B],
rules=[RuleSto0, RuleStoA, RuleAtoAB, RuleBto1],
start_symbol=S)
ContextFree.remove_useless_symbols(g)
self.assertTrue(g.have_term([0, 1]))
self.assertTrue(g.have_nonterm([S, A, B]))
def test_simpleTestShouldChange(self):
g = Grammar(terminals=[0, 1],
nonterminals=[S, A, B],
rules=[RuleSto0, RuleStoA, RuleAtoAB, RuleBto1],
start_symbol=S)
ContextFree.remove_useless_symbols(g)
self.assertTrue(g.have_term(0))
self.assertFalse(g.have_term(1))
self.assertTrue(g.have_nonterm(S))
self.assertFalse(g.have_nonterm(A))
self.assertFalse(g.have_nonterm(B))
if __name__ == '__main__':
main()
|
|
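Why the first test expects A, B and the terminal 1 to be pruned, sketched with the same grammpy API the tests above use:
# A's only production is A -> A B, so no finite derivation from A ever
# reaches a terminal string: A is non-generating. Dropping A removes
# S -> A, which leaves B (and hence terminal 1) unreachable. Only
# S -> 0 survives.
g = Grammar(terminals=[0, 1], nonterminals=[S, A, B],
            rules=[RuleSto0, RuleStoA, RuleAtoAB, RuleBto1],
            start_symbol=S)
reduced = ContextFree.remove_useless_symbols(g)
assert reduced.have_term(0) and not reduced.have_term(1)
assert reduced.have_nonterm(S) and not reduced.have_nonterm(A)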
4340b4c1719761c7ebfbd0aba7a27a76604e6ddb
|
test/order/TestOrder.py
|
test/order/TestOrder.py
|
"""
Test that debug symbols have the correct order as specified by the order file.
"""
import os, time
import re
import unittest
import lldb
import lldbtest
class TestClassTypes(lldbtest.TestBase):
mydir = "order"
def test_order(self):
"""Test debug symbols follow the correct order by the order file."""
res = self.res
exe = os.path.join(os.getcwd(), "a.out")
self.ci.HandleCommand("file " + exe, res)
self.assertTrue(res.Succeeded())
# Test that the debug symbols have Function f3 before Function f1.
self.ci.HandleCommand("image dump symtab a.out", res)
self.assertTrue(res.Succeeded())
output = res.GetOutput()
mo_f3 = re.search("Function +.+f3", output)
mo_f1 = re.search("Function +.+f1", output)
# Match objects for f3 and f1 must exist and f3 must come before f1.
self.assertTrue(mo_f3 and mo_f1 and mo_f3.start() < mo_f1.start())
self.ci.HandleCommand("run", res)
self.assertTrue(res.Succeeded())
if __name__ == '__main__':
lldb.SBDebugger.Initialize()
unittest.main()
lldb.SBDebugger.Terminate()
|
Test that debug symbols have the correct order as specified by the order file.
|
Test that debug symbols have the correct order as specified by the order file.
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@107844 91177308-0d34-0410-b5e6-96231b3b80d8
|
Python
|
apache-2.0
|
apple/swift-lldb,apple/swift-lldb,llvm-mirror/lldb,apple/swift-lldb,llvm-mirror/lldb,llvm-mirror/lldb,llvm-mirror/lldb,apple/swift-lldb,llvm-mirror/lldb,apple/swift-lldb,apple/swift-lldb
|
Test that debug symbols have the correct order as specified by the order file.
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@107844 91177308-0d34-0410-b5e6-96231b3b80d8
|
"""
Test that debug symbols have the correct order as specified by the order file.
"""
import os, time
import re
import unittest
import lldb
import lldbtest
class TestClassTypes(lldbtest.TestBase):
mydir = "order"
def test_order(self):
"""Test debug symbols follow the correct order by the order file."""
res = self.res
exe = os.path.join(os.getcwd(), "a.out")
self.ci.HandleCommand("file " + exe, res)
self.assertTrue(res.Succeeded())
# Test that the debug symbols have Function f3 before Function f1.
self.ci.HandleCommand("image dump symtab a.out", res)
self.assertTrue(res.Succeeded())
output = res.GetOutput()
mo_f3 = re.search("Function +.+f3", output)
mo_f1 = re.search("Function +.+f1", output)
# Match objects for f3 and f1 must exist and f3 must come before f1.
self.assertTrue(mo_f3 and mo_f1 and mo_f3.start() < mo_f1.start())
self.ci.HandleCommand("run", res)
self.assertTrue(res.Succeeded())
if __name__ == '__main__':
lldb.SBDebugger.Initialize()
unittest.main()
lldb.SBDebugger.Terminate()
|
<commit_before><commit_msg>Test that debug symbols have the correct order as specified by the order file.
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@107844 91177308-0d34-0410-b5e6-96231b3b80d8<commit_after>
|
"""
Test that debug symbols have the correct order as specified by the order file.
"""
import os, time
import re
import unittest
import lldb
import lldbtest
class TestClassTypes(lldbtest.TestBase):
mydir = "order"
def test_order(self):
"""Test debug symbols follow the correct order by the order file."""
res = self.res
exe = os.path.join(os.getcwd(), "a.out")
self.ci.HandleCommand("file " + exe, res)
self.assertTrue(res.Succeeded())
# Test that the debug symbols have Function f3 before Function f1.
self.ci.HandleCommand("image dump symtab a.out", res)
self.assertTrue(res.Succeeded())
output = res.GetOutput()
mo_f3 = re.search("Function +.+f3", output)
mo_f1 = re.search("Function +.+f1", output)
# Match objects for f3 and f1 must exist and f3 must come before f1.
self.assertTrue(mo_f3 and mo_f1 and mo_f3.start() < mo_f1.start())
self.ci.HandleCommand("run", res)
self.assertTrue(res.Succeeded())
if __name__ == '__main__':
lldb.SBDebugger.Initialize()
unittest.main()
lldb.SBDebugger.Terminate()
|
Test that debug symbols have the correct order as specified by the order file.
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@107844 91177308-0d34-0410-b5e6-96231b3b80d8"""
Test that debug symbols have the correct order as specified by the order file.
"""
import os, time
import re
import unittest
import lldb
import lldbtest
class TestClassTypes(lldbtest.TestBase):
mydir = "order"
def test_order(self):
"""Test debug symbols follow the correct order by the order file."""
res = self.res
exe = os.path.join(os.getcwd(), "a.out")
self.ci.HandleCommand("file " + exe, res)
self.assertTrue(res.Succeeded())
# Test that the debug symbols have Function f3 before Function f1.
self.ci.HandleCommand("image dump symtab a.out", res)
self.assertTrue(res.Succeeded())
output = res.GetOutput()
mo_f3 = re.search("Function +.+f3", output)
mo_f1 = re.search("Function +.+f1", output)
# Match objects for f3 and f1 must exist and f3 must come before f1.
self.assertTrue(mo_f3 and mo_f1 and mo_f3.start() < mo_f1.start())
self.ci.HandleCommand("run", res)
self.assertTrue(res.Succeeded())
if __name__ == '__main__':
lldb.SBDebugger.Initialize()
unittest.main()
lldb.SBDebugger.Terminate()
|
<commit_before><commit_msg>Test that debug symbols have the correct order as specified by the order file.
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@107844 91177308-0d34-0410-b5e6-96231b3b80d8<commit_after>"""
Test that debug symbols have the correct order as specified by the order file.
"""
import os, time
import re
import unittest
import lldb
import lldbtest
class TestClassTypes(lldbtest.TestBase):
mydir = "order"
def test_order(self):
"""Test debug symbols follow the correct order by the order file."""
res = self.res
exe = os.path.join(os.getcwd(), "a.out")
self.ci.HandleCommand("file " + exe, res)
self.assertTrue(res.Succeeded())
# Test that the debug symbols have Function f3 before Function f1.
self.ci.HandleCommand("image dump symtab a.out", res)
self.assertTrue(res.Succeeded())
output = res.GetOutput()
mo_f3 = re.search("Function +.+f3", output)
mo_f1 = re.search("Function +.+f1", output)
# Match objects for f3 and f1 must exist and f3 must come before f1.
self.assertTrue(mo_f3 and mo_f1 and mo_f3.start() < mo_f1.start())
self.ci.HandleCommand("run", res)
self.assertTrue(res.Succeeded())
if __name__ == '__main__':
lldb.SBDebugger.Initialize()
unittest.main()
lldb.SBDebugger.Terminate()
|
|
44f5b9081fb7ce3718c879872aa73c281b0c181d
|
bin/extractpolygonfromgeojsonandaddtags.py
|
bin/extractpolygonfromgeojsonandaddtags.py
|
import json
import sys
import numpy as np
from shapely.geometry import shape, Point
f = open(sys.argv[1], 'r')
js = json.load(f)
f.close()
tags = {}
f = open(sys.argv[2], 'r')
for line in f.readlines():
tag = line.split(': ')
tags[tag[0]] = tag[1][:-1]
#for feature in js['features']:
feature = js['features'][int(tags['parcelle_lineid'])]
if feature['properties']['id'] != tags['parcelle_wayid']:
    print >> sys.stderr, "ERROR: wayid %s does not match the meta file (%s)" % (feature['properties']['id'], tags['parcelle_wayid'])
sys.exit(1)
for tag in tags.keys():
feature['properties'][tag] = tags[tag]
feature['geometry']['type'] = 'Polygon'
feature['geometry']['coordinates'] = [feature['geometry']['coordinates']]
js['features'] = [feature]
print json.dumps(js)
|
Create a geojson dedicated to the parcelle we are interested in
|
Create a geojson dedicated to the parcelle we are interested in
|
Python
|
agpl-3.0
|
24eme/cadatrava,24eme/cadatrava,24eme/cadatrava
|
Create a geojson dedicated to the parcelle we are interested in
|
import json
import sys
import numpy as np
from shapely.geometry import shape, Point
f = open(sys.argv[1], 'r')
js = json.load(f)
f.close()
tags = {}
f = open(sys.argv[2], 'r')
for line in f.readlines():
tag = line.split(': ')
tags[tag[0]] = tag[1][:-1]
#for feature in js['features']:
feature = js['features'][int(tags['parcelle_lineid'])]
if feature['properties']['id'] != tags['parcelle_wayid']:
    print >> sys.stderr, "ERROR: wayid %s does not match the meta file (%s)" % (feature['properties']['id'], tags['parcelle_wayid'])
sys.exit(1)
for tag in tags.keys():
feature['properties'][tag] = tags[tag]
feature['geometry']['type'] = 'Polygon'
feature['geometry']['coordinates'] = [feature['geometry']['coordinates']]
js['features'] = [feature]
print json.dumps(js)
|
<commit_before><commit_msg>Create a geojson dedicated to the parcelle we are interested in<commit_after>
|
import json
import sys
import numpy as np
from shapely.geometry import shape, Point
f = open(sys.argv[1], 'r')
js = json.load(f)
f.close()
tags = {}
f = open(sys.argv[2], 'r')
for line in f.readlines():
tag = line.split(': ')
tags[tag[0]] = tag[1][:-1]
#for feature in js['features']:
feature = js['features'][int(tags['parcelle_lineid'])]
if feature['properties']['id'] != tags['parcelle_wayid']:
    print >> sys.stderr, "ERROR: wayid %s does not match the meta file (%s)" % (feature['properties']['id'], tags['parcelle_wayid'])
sys.exit(1)
for tag in tags.keys():
feature['properties'][tag] = tags[tag]
feature['geometry']['type'] = 'Polygon'
feature['geometry']['coordinates'] = [feature['geometry']['coordinates']]
js['features'] = [feature]
print json.dumps(js)
|
Create a geojson dedicated to the parcelle we are interested inimport json
import sys
import numpy as np
from shapely.geometry import shape, Point
f = open(sys.argv[1], 'r')
js = json.load(f)
f.close()
tags = {}
f = open(sys.argv[2], 'r')
for line in f.readlines():
tag = line.split(': ')
tags[tag[0]] = tag[1][:-1]
#for feature in js['features']:
feature = js['features'][int(tags['parcelle_lineid'])]
if feature['properties']['id'] != tags['parcelle_wayid']:
    print >> sys.stderr, "ERROR: wayid %s does not match the meta file (%s)" % (feature['properties']['id'], tags['parcelle_wayid'])
sys.exit(1)
for tag in tags.keys():
feature['properties'][tag] = tags[tag]
feature['geometry']['type'] = 'Polygon'
feature['geometry']['coordinates'] = [feature['geometry']['coordinates']]
js['features'] = [feature]
print json.dumps(js)
|
<commit_before><commit_msg>Create a geojson dedicated to the parcelle we are interested in<commit_after>import json
import sys
import numpy as np
from shapely.geometry import shape, Point
f = open(sys.argv[1], 'r')
js = json.load(f)
f.close()
tags = {}
f = open(sys.argv[2], 'r')
for line in f.readlines():
tag = line.split(': ')
tags[tag[0]] = tag[1][:-1]
#for feature in js['features']:
feature = js['features'][int(tags['parcelle_lineid'])]
if feature['properties']['id'] != tags['parcelle_wayid']:
    print >> sys.stderr, "ERROR: wayid %s does not match the meta file (%s)" % (feature['properties']['id'], tags['parcelle_wayid'])
sys.exit(1)
for tag in tags.keys():
feature['properties'][tag] = tags[tag]
feature['geometry']['type'] = 'Polygon'
feature['geometry']['coordinates'] = [feature['geometry']['coordinates']]
js['features'] = [feature]
print json.dumps(js)
|
|
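The script above takes a GeoJSON file and a plain-text meta file parsed line by line with line.split(': '). A hypothetical meta file and invocation matching that parsing:
# meta.txt -- strictly "key: value" per line; every key becomes a
# feature property, and parcelle_lineid / parcelle_wayid are required.
#   parcelle_lineid: 42
#   parcelle_wayid: way/123456789
#   commune: Availles-sur-Seiche
#
# Invocation sketch (filenames illustrative):
#   python extractpolygonfromgeojsonandaddtags.py parcelles.geojson meta.txt > parcelle.geojson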
5486fead4457aa6b704ae1fbbffa0635a040b425
|
toolbox/sporo_data_preparation.py
|
toolbox/sporo_data_preparation.py
|
import vigra
import argparse
import numpy as np
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Take two tiff files, one for the sporozyte channel and one for the nucleus channel, \
and create two files needed for further processing: a 3-channel hdf5 volume and a 1-channel nucleus HDF5.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--sporo', required=True, type=str, dest='sporoFilename',
help='Filename of the sporozyte tiff')
parser.add_argument('--nucleus', required=True, type=str, dest='nucleusFilename',
help='Filename of the nucleus tiff')
parser.add_argument('--3channel-out', type=str, dest='threeChannelOut', required=True, help='Filename of the resulting 3 channel HDF5')
parser.add_argument('--nucleus-channel-out', type=str, dest='nucleusChannelOut', required=True, help='Filename of the resulting nucleus channel HDF5')
args = parser.parse_args()
sporoChannel = vigra.impex.readVolume(args.sporoFilename)
nucleusChannel = vigra.impex.readVolume(args.nucleusFilename)
resultVolume = np.zeros((nucleusChannel.shape[0], nucleusChannel.shape[1], nucleusChannel.shape[2], 3), dtype='float32')
resultVolume[...,1] = sporoChannel[...,0]
resultVolume[...,2] = nucleusChannel[...,0]
vigra.impex.writeHDF5(resultVolume, args.threeChannelOut, 'exported_data')
vigra.impex.writeHDF5(nucleusChannel, args.nucleusChannelOut, 'exported_data')
|
Add script to convert the sporozyte data to something ilastik and our pipeline can use
|
Add script to convert the sporozyte data to something ilastik and our pipeline can use
|
Python
|
mit
|
chaubold/hytra,chaubold/hytra,chaubold/hytra
|
Add script to convert the sporozyte data to something ilastik and our pipeline can use
|
import vigra
import argparse
import numpy as np
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Take two tiff files, one for the sporozyte channel and one for the nucleus channel, \
and create two files needed for further processing: a 3-channel hdf5 volume and a 1-channel nucleus HDF5.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--sporo', required=True, type=str, dest='sporoFilename',
help='Filename of the sporozyte tiff')
parser.add_argument('--nucleus', required=True, type=str, dest='nucleusFilename',
help='Filename of the nucleus tiff')
parser.add_argument('--3channel-out', type=str, dest='threeChannelOut', required=True, help='Filename of the resulting 3 channel HDF5')
parser.add_argument('--nucleus-channel-out', type=str, dest='nucleusChannelOut', required=True, help='Filename of the resulting nucleus channel HDF5')
args = parser.parse_args()
sporoChannel = vigra.impex.readVolume(args.sporoFilename)
nucleusChannel = vigra.impex.readVolume(args.nucleusFilename)
resultVolume = np.zeros((nucleusChannel.shape[0], nucleusChannel.shape[1], nucleusChannel.shape[2], 3), dtype='float32')
resultVolume[...,1] = sporoChannel[...,0]
resultVolume[...,2] = nucleusChannel[...,0]
vigra.impex.writeHDF5(resultVolume, args.threeChannelOut, 'exported_data')
vigra.impex.writeHDF5(nucleusChannel, args.nucleusChannelOut, 'exported_data')
|
<commit_before><commit_msg>Add script to convert the sporozyte data to something ilastik and our pipeline can use<commit_after>
|
import vigra
import argparse
import numpy as np
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Take two tiff files, one for the sporozyte channel and one for the nucleus channel, \
and create two files needed for further processing: a 3-channel hdf5 volume and a 1-channel nucleus HDF5.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--sporo', required=True, type=str, dest='sporoFilename',
help='Filename of the sporozyte tiff')
parser.add_argument('--nucleus', required=True, type=str, dest='nucleusFilename',
help='Filename of the nucleus tiff')
parser.add_argument('--3channel-out', type=str, dest='threeChannelOut', required=True, help='Filename of the resulting 3 channel HDF5')
parser.add_argument('--nucleus-channel-out', type=str, dest='nucleusChannelOut', required=True, help='Filename of the resulting nucleus channel HDF5')
args = parser.parse_args()
sporoChannel = vigra.impex.readVolume(args.sporoFilename)
nucleusChannel = vigra.impex.readVolume(args.nucleusFilename)
resultVolume = np.zeros((nucleusChannel.shape[0], nucleusChannel.shape[1], nucleusChannel.shape[2], 3), dtype='float32')
resultVolume[...,1] = sporoChannel[...,0]
resultVolume[...,2] = nucleusChannel[...,0]
vigra.impex.writeHDF5(resultVolume, args.threeChannelOut, 'exported_data')
vigra.impex.writeHDF5(nucleusChannel, args.nucleusChannelOut, 'exported_data')
|
Add script to convert the sporozyte data to something ilastik and our pipeline can useimport vigra
import argparse
import numpy as np
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Take two tiff files, one for the sporozyte channel and one for the nucleus channel, \
and create two files needed for further processing: a 3-channel hdf5 volume and a 1-channel nucleus HDF5.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--sporo', required=True, type=str, dest='sporoFilename',
help='Filename of the sporozyte tiff')
parser.add_argument('--nucleus', required=True, type=str, dest='nucleusFilename',
help='Filename of the nucleus tiff')
parser.add_argument('--3channel-out', type=str, dest='threeChannelOut', required=True, help='Filename of the resulting 3 channel HDF5')
parser.add_argument('--nucleus-channel-out', type=str, dest='nucleusChannelOut', required=True, help='Filename of the resulting nucleus channel HDF5')
args = parser.parse_args()
sporoChannel = vigra.impex.readVolume(args.sporoFilename)
nucleusChannel = vigra.impex.readVolume(args.nucleusFilename)
resultVolume = np.zeros((nucleusChannel.shape[0], nucleusChannel.shape[1], nucleusChannel.shape[2], 3), dtype='float32')
resultVolume[...,1] = sporoChannel[...,0]
resultVolume[...,2] = nucleusChannel[...,0]
vigra.impex.writeHDF5(resultVolume, args.threeChannelOut, 'exported_data')
vigra.impex.writeHDF5(nucleusChannel, args.nucleusChannelOut, 'exported_data')
|
<commit_before><commit_msg>Add script to convert the sporozyte data to something ilastik and our pipeline can use<commit_after>import vigra
import argparse
import numpy as np
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Take two tiff files, one for the sporozyte channel and one for the nucleus channel, \
and create two files needed for further processing: a 3-channel hdf5 volume and a 1-channel nucleus HDF5.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--sporo', required=True, type=str, dest='sporoFilename',
help='Filename of the sporozyte tiff')
parser.add_argument('--nucleus', required=True, type=str, dest='nucleusFilename',
help='Filename of the nucleus tiff')
parser.add_argument('--3channel-out', type=str, dest='threeChannelOut', required=True, help='Filename of the resulting 3 channel HDF5')
parser.add_argument('--nucleus-channel-out', type=str, dest='nucleusChannelOut', required=True, help='Filename of the resulting nucleus channel HDF5')
args = parser.parse_args()
sporoChannel = vigra.impex.readVolume(args.sporoFilename)
nucleusChannel = vigra.impex.readVolume(args.nucleusFilename)
resultVolume = np.zeros((nucleusChannel.shape[0], nucleusChannel.shape[1], nucleusChannel.shape[2], 3), dtype='float32')
resultVolume[...,1] = sporoChannel[...,0]
resultVolume[...,2] = nucleusChannel[...,0]
vigra.impex.writeHDF5(resultVolume, args.threeChannelOut, 'exported_data')
vigra.impex.writeHDF5(nucleusChannel, args.nucleusChannelOut, 'exported_data')
|
|
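A usage sketch for the conversion script above; the filenames are illustrative, and note the script needs numpy (imported as np above) because it builds the 3-channel volume with np.zeros:
#   python sporo_data_preparation.py \
#       --sporo sporo_channel.tif --nucleus nucleus_channel.tif \
#       --3channel-out volume_3ch.h5 --nucleus-channel-out nucleus.h5
# Channel 0 of the 3-channel output is left at zero; channels 1 and 2
# carry the sporozyte and nucleus data respectively.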
d3619cc31188e289f0b867bb029ff6db84cbb579
|
dictionary/management/commands/writeLiblouisTables.py
|
dictionary/management/commands/writeLiblouisTables.py
|
from daisyproducer.dictionary.brailleTables import writeWhiteListTables, writeLocalTables, writeWordSplitTable
from daisyproducer.dictionary.models import Word
from daisyproducer.documents.models import Document
from django.core.management.base import BaseCommand
class Command(BaseCommand):
args = ''
help = 'Write Liblouis tables from the confirmed words in the dictionary'
def handle(self, *args, **options):
# write new global white lists
writeWhiteListTables(Word.objects.filter(isConfirmed=True).filter(isLocal=False).order_by('untranslated'))
# update local tables
writeLocalTables(Document.objects.all())
# write new word split table
writeWordSplitTable(Word.objects.filter(isConfirmed=True).filter(isLocal=False).filter(use_for_word_splitting=True).order_by('untranslated'))
|
Add a management command to generate the liblouis tables
|
Add a management command to generate the liblouis tables
based on the contents of the dictionary database
|
Python
|
agpl-3.0
|
sbsdev/daisyproducer,sbsdev/daisyproducer,sbsdev/daisyproducer,sbsdev/daisyproducer
|
Add a management command to generate the liblouis tables
based on the contents of the dictionary database
|
from daisyproducer.dictionary.brailleTables import writeWhiteListTables, writeLocalTables, writeWordSplitTable
from daisyproducer.dictionary.models import Word
from daisyproducer.documents.models import Document
from django.core.management.base import BaseCommand
class Command(BaseCommand):
args = ''
help = 'Write Liblouis tables from the confirmed words in the dictionary'
def handle(self, *args, **options):
# write new global white lists
writeWhiteListTables(Word.objects.filter(isConfirmed=True).filter(isLocal=False).order_by('untranslated'))
# update local tables
writeLocalTables(Document.objects.all())
# write new word split table
writeWordSplitTable(Word.objects.filter(isConfirmed=True).filter(isLocal=False).filter(use_for_word_splitting=True).order_by('untranslated'))
|
<commit_before><commit_msg>Add a management command to generate the liblouis tables
based on the contents of the dictionary database<commit_after>
|
from daisyproducer.dictionary.brailleTables import writeWhiteListTables, writeLocalTables, writeWordSplitTable
from daisyproducer.dictionary.models import Word
from daisyproducer.documents.models import Document
from django.core.management.base import BaseCommand
class Command(BaseCommand):
args = ''
help = 'Write Liblouis tables from the confirmed words in the dictionary'
def handle(self, *args, **options):
# write new global white lists
writeWhiteListTables(Word.objects.filter(isConfirmed=True).filter(isLocal=False).order_by('untranslated'))
# update local tables
writeLocalTables(Document.objects.all())
# write new word split table
writeWordSplitTable(Word.objects.filter(isConfirmed=True).filter(isLocal=False).filter(use_for_word_splitting=True).order_by('untranslated'))
|
Add a management command to generate the liblouis tables
based on the contents of the dictionary databasefrom daisyproducer.dictionary.brailleTables import writeWhiteListTables, writeLocalTables, writeWordSplitTable
from daisyproducer.dictionary.models import Word
from daisyproducer.documents.models import Document
from django.core.management.base import BaseCommand
class Command(BaseCommand):
args = ''
help = 'Write Liblouis tables from the confirmed words in the dictionary'
def handle(self, *args, **options):
# write new global white lists
writeWhiteListTables(Word.objects.filter(isConfirmed=True).filter(isLocal=False).order_by('untranslated'))
# update local tables
writeLocalTables(Document.objects.all())
# write new word split table
writeWordSplitTable(Word.objects.filter(isConfirmed=True).filter(isLocal=False).filter(use_for_word_splitting=True).order_by('untranslated'))
|
<commit_before><commit_msg>Add a management command to generate the liblouis tables
based on the contents of the dictionary database<commit_after>from daisyproducer.dictionary.brailleTables import writeWhiteListTables, writeLocalTables, writeWordSplitTable
from daisyproducer.dictionary.models import Word
from daisyproducer.documents.models import Document
from django.core.management.base import BaseCommand
class Command(BaseCommand):
args = ''
help = 'Write Liblouis tables from the confirmed words in the dictionary'
def handle(self, *args, **options):
# write new global white lists
writeWhiteListTables(Word.objects.filter(isConfirmed=True).filter(isLocal=False).order_by('untranslated'))
# update local tables
writeLocalTables(Document.objects.all())
# write new word split table
writeWordSplitTable(Word.objects.filter(isConfirmed=True).filter(isLocal=False).filter(use_for_word_splitting=True).order_by('untranslated'))
|
|
a4492dbf7f215d605c4e171f2dfe51b3bddc715b
|
tests/web/test_disps.py
|
tests/web/test_disps.py
|
#!/usr/bin/env python
from circuits.web import Controller
from circuits.web import BaseServer
from circuits.core.handlers import handler
from circuits.core.components import BaseComponent
from circuits.core.manager import Manager
from .helpers import urlopen
from circuits.web.dispatchers.dispatcher import Dispatcher
try:
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
class PrefixingDispatcher(BaseComponent):
"""Forward to another Dispatcher based on the channel."""
def __init__(self, channel):
super(PrefixingDispatcher, self).__init__(channel=channel)
@handler("request", filter=True, priority=1.0)
def _on_request(self, event, request, response):
path = request.path.strip("/")
path = urljoin("/%s/" % self.channel, path)
request.path = path
class DummyRoot(Controller):
channel = "/"
def index(self):
return "Not used"
class Root1(Controller):
channel = "/site1"
def index(self):
return "Hello from site 1!"
class Root2(Controller):
channel = "/site2"
def index(self):
return "Hello from site 2!"
def test_disps():
manager = Manager()
server1 = BaseServer(("localhost", 8000), channel="site1")
    server1.register(manager)
PrefixingDispatcher(channel="site1").register(server1)
Dispatcher(channel="site1").register(server1)
Root1().register(manager)
server2 = BaseServer(("localhost", 8001), channel="site2")
    server2.register(manager)
PrefixingDispatcher(channel="site2").register(server2)
Dispatcher(channel="site2").register(server2)
Root2().register(manager)
DummyRoot().register(manager)
manager.start()
f = urlopen(server1.base, timeout=3)
s = f.read()
assert s == b"Hello from site 1!"
f = urlopen(server2.base, timeout=3)
s = f.read()
assert s == b"Hello from site 2!"
|
Test case for two web servers with different channels.
|
Test case for two web servers with different channels.
|
Python
|
mit
|
eriol/circuits,treemo/circuits,treemo/circuits,nizox/circuits,treemo/circuits,eriol/circuits,eriol/circuits
|
Test case for two web servers with different channels.
|
#!/usr/bin/env python
from circuits.web import Controller
from circuits.web import BaseServer
from circuits.core.handlers import handler
from circuits.core.components import BaseComponent
from circuits.core.manager import Manager
from .helpers import urlopen
from circuits.web.dispatchers.dispatcher import Dispatcher
try:
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
class PrefixingDispatcher(BaseComponent):
"""Forward to another Dispatcher based on the channel."""
def __init__(self, channel):
super(PrefixingDispatcher, self).__init__(channel=channel)
@handler("request", filter=True, priority=1.0)
def _on_request(self, event, request, response):
path = request.path.strip("/")
path = urljoin("/%s/" % self.channel, path)
request.path = path
class DummyRoot(Controller):
channel = "/"
def index(self):
return "Not used"
class Root1(Controller):
channel = "/site1"
def index(self):
return "Hello from site 1!"
class Root2(Controller):
channel = "/site2"
def index(self):
return "Hello from site 2!"
def test_disps():
manager = Manager()
server1 = BaseServer(("localhost", 8000), channel="site1")
    server1.register(manager)
PrefixingDispatcher(channel="site1").register(server1)
Dispatcher(channel="site1").register(server1)
Root1().register(manager)
server2 = BaseServer(("localhost", 8001), channel="site2")
    server2.register(manager)
PrefixingDispatcher(channel="site2").register(server2)
Dispatcher(channel="site2").register(server2)
Root2().register(manager)
DummyRoot().register(manager)
manager.start()
f = urlopen(server1.base, timeout=3)
s = f.read()
assert s == b"Hello from site 1!"
f = urlopen(server2.base, timeout=3)
s = f.read()
assert s == b"Hello from site 2!"
|
<commit_before><commit_msg>Test case for two web servers with different channels.<commit_after>
|
#!/usr/bin/env python
from circuits.web import Controller
from circuits.web import BaseServer
from circuits.core.handlers import handler
from circuits.core.components import BaseComponent
from circuits.core.manager import Manager
from .helpers import urlopen
from circuits.web.dispatchers.dispatcher import Dispatcher
try:
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
class PrefixingDispatcher(BaseComponent):
"""Forward to another Dispatcher based on the channel."""
def __init__(self, channel):
super(PrefixingDispatcher, self).__init__(channel=channel)
@handler("request", filter=True, priority=1.0)
def _on_request(self, event, request, response):
path = request.path.strip("/")
path = urljoin("/%s/" % self.channel, path)
request.path = path
class DummyRoot(Controller):
channel = "/"
def index(self):
return "Not used"
class Root1(Controller):
channel = "/site1"
def index(self):
return "Hello from site 1!"
class Root2(Controller):
channel = "/site2"
def index(self):
return "Hello from site 2!"
def test_disps():
manager = Manager()
server1 = BaseServer(("localhost", 8000), channel="site1")
server1.register(manager)
PrefixingDispatcher(channel="site1").register(server1)
Dispatcher(channel="site1").register(server1)
Root1().register(manager)
server2 = BaseServer(("localhost", 8001), channel="site2")
server2.register(manager)
PrefixingDispatcher(channel="site2").register(server2)
Dispatcher(channel="site2").register(server2)
Root2().register(manager)
DummyRoot().register(manager)
manager.start()
f = urlopen(server1.base, timeout=3)
s = f.read()
assert s == b"Hello from site 1!"
f = urlopen(server2.base, timeout=3)
s = f.read()
assert s == b"Hello from site 2!"
|
Test case for two web servers with different channels.#!/usr/bin/env python
from circuits.web import Controller
from circuits.web import BaseServer
from circuits.core.handlers import handler
from circuits.core.components import BaseComponent
from circuits.core.manager import Manager
from .helpers import urlopen
from circuits.web.dispatchers.dispatcher import Dispatcher
try:
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
class PrefixingDispatcher(BaseComponent):
"""Forward to another Dispatcher based on the channel."""
def __init__(self, channel):
super(PrefixingDispatcher, self).__init__(channel=channel)
@handler("request", filter=True, priority=1.0)
def _on_request(self, event, request, response):
path = request.path.strip("/")
path = urljoin("/%s/" % self.channel, path)
request.path = path
class DummyRoot(Controller):
channel = "/"
def index(self):
return "Not used"
class Root1(Controller):
channel = "/site1"
def index(self):
return "Hello from site 1!"
class Root2(Controller):
channel = "/site2"
def index(self):
return "Hello from site 2!"
def test_disps():
manager = Manager()
server1 = BaseServer(("localhost", 8000), channel="site1")
server1.register(manager)
PrefixingDispatcher(channel="site1").register(server1)
Dispatcher(channel="site1").register(server1)
Root1().register(manager)
server2 = BaseServer(("localhost", 8001), channel="site2")
server2.register(manager)
PrefixingDispatcher(channel="site2").register(server2)
Dispatcher(channel="site2").register(server2)
Root2().register(manager)
DummyRoot().register(manager)
manager.start()
f = urlopen(server1.base, timeout=3)
s = f.read()
assert s == b"Hello from site 1!"
f = urlopen(server2.base, timeout=3)
s = f.read()
assert s == b"Hello from site 2!"
|
<commit_before><commit_msg>Test case for two web servers with different channels.<commit_after>#!/usr/bin/env python
from circuits.web import Controller
from circuits.web import BaseServer
from circuits.core.handlers import handler
from circuits.core.components import BaseComponent
from circuits.core.manager import Manager
from .helpers import urlopen
from circuits.web.dispatchers.dispatcher import Dispatcher
try:
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
class PrefixingDispatcher(BaseComponent):
"""Forward to another Dispatcher based on the channel."""
def __init__(self, channel):
super(PrefixingDispatcher, self).__init__(channel=channel)
@handler("request", filter=True, priority=1.0)
def _on_request(self, event, request, response):
path = request.path.strip("/")
path = urljoin("/%s/" % self.channel, path)
request.path = path
class DummyRoot(Controller):
channel = "/"
def index(self):
return "Not used"
class Root1(Controller):
channel = "/site1"
def index(self):
return "Hello from site 1!"
class Root2(Controller):
channel = "/site2"
def index(self):
return "Hello from site 2!"
def test_disps():
manager = Manager()
server1 = BaseServer(("localhost", 8000), channel="site1")
server1.register(manager)
PrefixingDispatcher(channel="site1").register(server1)
Dispatcher(channel="site1").register(server1)
Root1().register(manager)
server2 = BaseServer(("localhost", 8001), channel="site2")
server2.register(manager)
PrefixingDispatcher(channel="site2").register(server2)
Dispatcher(channel="site2").register(server2)
Root2().register(manager)
DummyRoot().register(manager)
manager.start()
f = urlopen(server1.base, timeout=3)
s = f.read()
assert s == b"Hello from site 1!"
f = urlopen(server2.base, timeout=3)
s = f.read()
assert s == b"Hello from site 2!"
|
|
307abd40d1cd78d77512ef6704ffadda79d50d94
|
examples/calculations/Parse_Angles.py
|
examples/calculations/Parse_Angles.py
|
# Copyright (c) 2015-2018 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
Parse angles
============
Demonstrate how to convert direction strings to angles.
The code below shows how to parse directional text into angles.
It also demonstrates the function's flexibility
in handling various string formats.
"""
import metpy.calc as mpcalc
###########################################
# Create a test value of a directional text
dir_str = 'SOUTH SOUTH EAST'
print(dir_str)
###########################################
# Now throw that string into the function to calculate
# the corresponding angle
angle_deg = mpcalc.parse_angle(dir_str)
print(angle_deg)
###########################################
# The function can also handle arrays of strings
# in many different abbreviations and capitalizations
dir_str_list = ['ne', 'NE', 'NORTHEAST', 'NORTH_EAST', 'NORTH east']
angle_deg_list = mpcalc.parse_angle(dir_str_list)
print(angle_deg_list)
|
Add parse_angle() example to calculations page.
|
Add parse_angle() example to calculations page.
|
Python
|
bsd-3-clause
|
jrleeman/MetPy,Unidata/MetPy,ahaberlie/MetPy,dopplershift/MetPy,Unidata/MetPy,ahaberlie/MetPy,jrleeman/MetPy,dopplershift/MetPy,ShawnMurd/MetPy
|
Add parse_angle() example to calculations page.
|
# Copyright (c) 2015-2018 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
Parse angles
============
Demonstrate how to convert direction strings to angles.
The code below shows how to parse directional text into angles.
It also demonstrates the function's flexibility
in handling various string formats.
"""
import metpy.calc as mpcalc
###########################################
# Create a test value of a directional text
dir_str = 'SOUTH SOUTH EAST'
print(dir_str)
###########################################
# Now throw that string into the function to calculate
# the corresponding angle
angle_deg = mpcalc.parse_angle(dir_str)
print(angle_deg)
###########################################
# The function can also handle arrays of strings
# in many different abbreviations and capitalizations
dir_str_list = ['ne', 'NE', 'NORTHEAST', 'NORTH_EAST', 'NORTH east']
angle_deg_list = mpcalc.parse_angle(dir_str_list)
print(angle_deg_list)
|
<commit_before><commit_msg>Add parse_angle() example to calculations page.<commit_after>
|
# Copyright (c) 2015-2018 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
Parse angles
============
Demonstrate how to convert direction strings to angles.
The code below shows how to parse directional text into angles.
It also demonstrates the function's flexibility
in handling various string formats.
"""
import metpy.calc as mpcalc
###########################################
# Create a test value of a directional text
dir_str = 'SOUTH SOUTH EAST'
print(dir_str)
###########################################
# Now throw that string into the function to calculate
# the corresponding angle
angle_deg = mpcalc.parse_angle(dir_str)
print(angle_deg)
###########################################
# The function can also handle arrays of strings
# in many different abbreviations and capitalizations
dir_str_list = ['ne', 'NE', 'NORTHEAST', 'NORTH_EAST', 'NORTH east']
angle_deg_list = mpcalc.parse_angle(dir_str_list)
print(angle_deg_list)
|
Add parse_angle() example to calculations page.# Copyright (c) 2015-2018 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
Parse angles
============
Demonstrate how to convert direction strings to angles.
The code below shows how to parse directional text into angles.
It also demonstrates the function's flexibility
in handling various string formats.
"""
import metpy.calc as mpcalc
###########################################
# Create a test value of a directional text
dir_str = 'SOUTH SOUTH EAST'
print(dir_str)
###########################################
# Now throw that string into the function to calculate
# the corresponding angle
angle_deg = mpcalc.parse_angle(dir_str)
print(angle_deg)
###########################################
# The function can also handle arrays of strings
# in many different abbreviations and capitalizations
dir_str_list = ['ne', 'NE', 'NORTHEAST', 'NORTH_EAST', 'NORTH east']
angle_deg_list = mpcalc.parse_angle(dir_str_list)
print(angle_deg_list)
|
<commit_before><commit_msg>Add parse_angle() example to calculations page.<commit_after># Copyright (c) 2015-2018 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
Parse angles
============
Demonstrate how to convert direction strings to angles.
The code below shows how to parse directional text into angles.
It also demonstrates the function's flexibility
in handling various string formats.
"""
import metpy.calc as mpcalc
###########################################
# Create a test value of a directional text
dir_str = 'SOUTH SOUTH EAST'
print(dir_str)
###########################################
# Now throw that string into the function to calculate
# the corresponding angle
angle_deg = mpcalc.parse_angle(dir_str)
print(angle_deg)
###########################################
# The function can also handle arrays of strings
# in many different abbreviations and capitalizations
dir_str_list = ['ne', 'NE', 'NORTHEAST', 'NORTH_EAST', 'NORTH east']
angle_deg_list = mpcalc.parse_angle(dir_str_list)
print(angle_deg_list)
|
|
4731266a177ec1c634b7e0c6c1332ced0462fa11
|
tests/test_arraylist.py
|
tests/test_arraylist.py
|
from __future__ import absolute_import
import unittest
from jnius import autoclass
class ArrayListTest(unittest.TestCase):
def test_output(self):
alist = autoclass('java.util.ArrayList')()
args = [0, 1, 5, -1, -5, 0.0, 1.0, 5.0, -1.0, -5.0, True, False]
for arg in args:
alist.add(arg)
for idx, arg in enumerate(args):
if isinstance(arg, bool):
self.assertEqual(str(alist[idx]), str(int(arg)))
else:
self.assertEqual(str(alist[idx]), str(arg))
if __name__ == '__main__':
unittest.main()
|
Add test for Python numeric types as parameters
|
Add test for Python numeric types as parameters
|
Python
|
mit
|
kivy/pyjnius,kivy/pyjnius,kivy/pyjnius
|
Add test for Python numeric types as parameters
|
from __future__ import absolute_import
import unittest
from jnius import autoclass
class ArrayListTest(unittest.TestCase):
def test_output(self):
alist = autoclass('java.util.ArrayList')()
args = [0, 1, 5, -1, -5, 0.0, 1.0, 5.0, -1.0, -5.0, True, False]
for arg in args:
alist.add(arg)
for idx, arg in enumerate(args):
if isinstance(arg, bool):
self.assertEqual(str(alist[idx]), str(int(arg)))
else:
self.assertEqual(str(alist[idx]), str(arg))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test for Python numeric types as parameters<commit_after>
|
from __future__ import absolute_import
import unittest
from jnius import autoclass
class ArrayListTest(unittest.TestCase):
def test_output(self):
alist = autoclass('java.util.ArrayList')()
args = [0, 1, 5, -1, -5, 0.0, 1.0, 5.0, -1.0, -5.0, True, False]
for arg in args:
alist.add(arg)
for idx, arg in enumerate(args):
if isinstance(arg, bool):
self.assertEqual(str(alist[idx]), str(int(arg)))
else:
self.assertEqual(str(alist[idx]), str(arg))
if __name__ == '__main__':
unittest.main()
|
Add test for Python numeric types as parametersfrom __future__ import absolute_import
import unittest
from jnius import autoclass
class ArrayListTest(unittest.TestCase):
def test_output(self):
alist = autoclass('java.util.ArrayList')()
args = [0, 1, 5, -1, -5, 0.0, 1.0, 5.0, -1.0, -5.0, True, False]
for arg in args:
alist.add(arg)
for idx, arg in enumerate(args):
if isinstance(arg, bool):
self.assertEqual(str(alist[idx]), str(int(arg)))
else:
self.assertEqual(str(alist[idx]), str(arg))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test for Python numeric types as parameters<commit_after>from __future__ import absolute_import
import unittest
from jnius import autoclass
class ArrayListTest(unittest.TestCase):
def test_output(self):
alist = autoclass('java.util.ArrayList')()
args = [0, 1, 5, -1, -5, 0.0, 1.0, 5.0, -1.0, -5.0, True, False]
for arg in args:
alist.add(arg)
for idx, arg in enumerate(args):
if isinstance(arg, bool):
self.assertEqual(str(alist[idx]), str(int(arg)))
else:
self.assertEqual(str(alist[idx]), str(arg))
if __name__ == '__main__':
unittest.main()
|
|
54c9e64d1f45e0c81009adca2bff4f404b445799
|
terminal.py
|
terminal.py
|
import sys
from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import Qt
class Terminal(QtGui.QWidget):
VSIZE = 25
HSIZE = 80
HEIGHT = 378
WIDTH = 644
def __init__(self):
super(Terminal, self).__init__()
self.buffer = []
for y in range(self.VSIZE):
self.buffer.append([])
for _ in range(self.HSIZE):
self.buffer[y].append(" ")
self.resize(self.WIDTH, self.HEIGHT)
self.setWindowTitle("DCPU-16 terminal")
def paintEvent(self, event):
qp = QtGui.QPainter()
qp.begin(self)
qp.fillRect(0, 0, self.WIDTH, self.HEIGHT, QtGui.QBrush(QtGui.QColor(0, 0, 0)))
text = "\n".join("".join(line) for line in self.buffer)
qp.setPen(QtGui.QColor(255, 255, 255))
qp.setFont(QtGui.QFont("Monospace", 10))
qp.drawText(1, 1, self.WIDTH, self.HEIGHT, Qt.AlignLeft | Qt.AlignTop, text)
qp.end()
def main():
app = QtGui.QApplication(sys.argv)
term = Terminal()
term.show()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
|
Add Terminal class based on Qt
|
Add Terminal class based on Qt
|
Python
|
mit
|
jtauber/dcpu16py,Olical/dcpu16py,mceier/dcpu16py,n8mob/dcpu16py
|
Add Terminal class based on Qt
|
import sys
from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import Qt
class Terminal(QtGui.QWidget):
VSIZE = 25
HSIZE = 80
HEIGHT = 378
WIDTH = 644
def __init__(self):
super(Terminal, self).__init__()
self.buffer = []
for y in range(self.VSIZE):
self.buffer.append([])
for _ in range(self.HSIZE):
self.buffer[y].append(" ")
self.resize(self.WIDTH, self.HEIGHT)
self.setWindowTitle("DCPU-16 terminal")
def paintEvent(self, event):
qp = QtGui.QPainter()
qp.begin(self)
qp.fillRect(0, 0, self.WIDTH, self.HEIGHT, QtGui.QBrush(QtGui.QColor(0, 0, 0)))
text = "\n".join("".join(line) for line in self.buffer)
qp.setPen(QtGui.QColor(255, 255, 255))
qp.setFont(QtGui.QFont("Monospace", 10))
qp.drawText(1, 1, self.WIDTH, self.HEIGHT, Qt.AlignLeft | Qt.AlignTop, text)
qp.end()
def main():
app = QtGui.QApplication(sys.argv)
term = Terminal()
term.show()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add Terminal class based on Qt<commit_after>
|
import sys
from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import Qt
class Terminal(QtGui.QWidget):
VSIZE = 25
HSIZE = 80
HEIGHT = 378
WIDTH = 644
def __init__(self):
super(Terminal, self).__init__()
self.buffer = []
for y in range(self.VSIZE):
self.buffer.append([])
for _ in range(self.HSIZE):
self.buffer[y].append(" ")
self.resize(self.WIDTH, self.HEIGHT)
self.setWindowTitle("DCPU-16 terminal")
def paintEvent(self, event):
qp = QtGui.QPainter()
qp.begin(self)
qp.fillRect(0, 0, self.WIDTH, self.HEIGHT, QtGui.QBrush(QtGui.QColor(0, 0, 0)))
text = "\n".join("".join(line) for line in self.buffer)
qp.setPen(QtGui.QColor(255, 255, 255))
qp.setFont(QtGui.QFont("Monospace", 10))
qp.drawText(1, 1, self.WIDTH, self.HEIGHT, Qt.AlignLeft | Qt.AlignTop, text)
qp.end()
def main():
app = QtGui.QApplication(sys.argv)
term = Terminal()
term.show()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
|
Add Terminal class based on Qtimport sys
from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import Qt
class Terminal(QtGui.QWidget):
VSIZE = 25
HSIZE = 80
HEIGHT = 378
WIDTH = 644
def __init__(self):
super(Terminal, self).__init__()
self.buffer = []
for y in range(self.VSIZE):
self.buffer.append([])
for _ in range(self.HSIZE):
self.buffer[y].append(" ")
self.resize(self.WIDTH, self.HEIGHT)
self.setWindowTitle("DCPU-16 terminal")
def paintEvent(self, event):
qp = QtGui.QPainter()
qp.begin(self)
qp.fillRect(0, 0, self.WIDTH, self.HEIGHT, QtGui.QBrush(QtGui.QColor(0, 0, 0)))
text = "\n".join("".join(line) for line in self.buffer)
qp.setPen(QtGui.QColor(255, 255, 255))
qp.setFont(QtGui.QFont("Monospace", 10))
qp.drawText(1, 1, self.WIDTH, self.HEIGHT, Qt.AlignLeft | Qt.AlignTop, text)
qp.end()
def main():
app = QtGui.QApplication(sys.argv)
term = Terminal()
term.show()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add Terminal class based on Qt<commit_after>import sys
from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import Qt
class Terminal(QtGui.QWidget):
VSIZE = 25
HSIZE = 80
HEIGHT = 378
WIDTH = 644
def __init__(self):
super(Terminal, self).__init__()
self.buffer = []
for y in range(self.VSIZE):
self.buffer.append([])
for _ in range(self.HSIZE):
self.buffer[y].append(" ")
self.resize(self.WIDTH, self.HEIGHT)
self.setWindowTitle("DCPU-16 terminal")
def paintEvent(self, event):
qp = QtGui.QPainter()
qp.begin(self)
qp.fillRect(0, 0, self.WIDTH, self.HEIGHT, QtGui.QBrush(QtGui.QColor(0, 0, 0)))
text = "\n".join("".join(line) for line in self.buffer)
qp.setPen(QtGui.QColor(255, 255, 255))
qp.setFont(QtGui.QFont("Monospace", 10))
qp.drawText(1, 1, self.WIDTH, self.HEIGHT, Qt.AlignLeft | Qt.AlignTop, text)
qp.end()
def main():
app = QtGui.QApplication(sys.argv)
term = Terminal()
term.show()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
|
|
db25f3cbab70f8367aadb8c17adf342fb687f075
|
d1_common_python/src/d1_common/node.py
|
d1_common_python/src/d1_common/node.py
|
# -*- coding: utf-8 -*-
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2016 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for handling the DataONE Node and NodeList types
"""
def pyxb_to_dict(node_list_pyxb):
"""Return a dict representation of {node_list_pyxb}, keyed on
the Node identifier (urn:node:*). E.g.:
{
u'urn:node:ARCTIC': {
'base_url': u'https://arcticdata.io/metacat/d1/mn',
'description': u'The US National Science Foundation...',
'name': u'Arctic Data Center',
'ping': None,
'replicate': 0,
'state': u'up',
'synchronize': 1,
'type': u'mn'
},
u'urn:node:BCODMO': {
'base_url': u'https://www.bco-dmo.org/d1/mn',
'description': u'Biological and Chemical Oceanography Data...',
'name': u'Biological and Chemical Oceanography Data...',
'ping': None,
'replicate': 0,
'state': u'up',
'synchronize': 1,
'type': u'mn'
},
}
"""
f_dict = {}
for f_pyxb in sorted(node_list_pyxb.node, key=lambda x: x.identifier.value()):
f_dict[f_pyxb.identifier.value()] = {
'name': f_pyxb.name,
'description': f_pyxb.description,
'base_url': f_pyxb.baseURL,
'ping': f_pyxb.ping,
'replicate': f_pyxb.replicate,
'synchronize': f_pyxb.synchronize,
'type': f_pyxb.type,
'state': f_pyxb.state,
}
# TODO:
# f_pyxb.services
# f_pyxb.synchronization
# f_pyxb.subject
# f_pyxb.contactSubject
# f_pyxb.nodeReplicationPolicy,
return f_dict
|
Add utility module for working with the Node type
|
Add utility module for working with the Node type
|
Python
|
apache-2.0
|
DataONEorg/d1_python,DataONEorg/d1_python,DataONEorg/d1_python,DataONEorg/d1_python
|
Add utility module for working with the Node type
|
# -*- coding: utf-8 -*-
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2016 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for handling the DataONE Node and NodeList types
"""
def pyxb_to_dict(node_list_pyxb):
"""Return a dict representation of {node_list_pyxb}, keyed on
the Node identifier (urn:node:*). E.g.:
{
u'urn:node:ARCTIC': {
'base_url': u'https://arcticdata.io/metacat/d1/mn',
'description': u'The US National Science Foundation...',
'name': u'Arctic Data Center',
'ping': None,
'replicate': 0,
'state': u'up',
'synchronize': 1,
'type': u'mn'
},
u'urn:node:BCODMO': {
'base_url': u'https://www.bco-dmo.org/d1/mn',
'description': u'Biological and Chemical Oceanography Data...',
'name': u'Biological and Chemical Oceanography Data...',
'ping': None,
'replicate': 0,
'state': u'up',
'synchronize': 1,
'type': u'mn'
},
}
"""
f_dict = {}
for f_pyxb in sorted(node_list_pyxb.node, key=lambda x: x.identifier.value()):
f_dict[f_pyxb.identifier.value()] = {
'name': f_pyxb.name,
'description': f_pyxb.description,
'base_url': f_pyxb.baseURL,
'ping': f_pyxb.ping,
'replicate': f_pyxb.replicate,
'synchronize': f_pyxb.synchronize,
'type': f_pyxb.type,
'state': f_pyxb.state,
}
# TODO:
# f_pyxb.services
# f_pyxb.synchronization
# f_pyxb.subject
# f_pyxb.contactSubject
# f_pyxb.nodeReplicationPolicy,
return f_dict
|
<commit_before><commit_msg>Add utility module for working with the Node type<commit_after>
|
# -*- coding: utf-8 -*-
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2016 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for handling the DataONE Node and NodeList types
"""
def pyxb_to_dict(node_list_pyxb):
"""Return a dict representation of {node_list_pyxb}, keyed on
the Node identifier (urn:node:*). E.g.:
{
u'urn:node:ARCTIC': {
'base_url': u'https://arcticdata.io/metacat/d1/mn',
'description': u'The US National Science Foundation...',
'name': u'Arctic Data Center',
'ping': None,
'replicate': 0,
'state': u'up',
'synchronize': 1,
'type': u'mn'
},
u'urn:node:BCODMO': {
'base_url': u'https://www.bco-dmo.org/d1/mn',
'description': u'Biological and Chemical Oceanography Data...',
'name': u'Biological and Chemical Oceanography Data...',
'ping': None,
'replicate': 0,
'state': u'up',
'synchronize': 1,
'type': u'mn'
},
}
"""
f_dict = {}
for f_pyxb in sorted(node_list_pyxb.node, key=lambda x: x.identifier.value()):
f_dict[f_pyxb.identifier.value()] = {
'name': f_pyxb.name,
'description': f_pyxb.description,
'base_url': f_pyxb.baseURL,
'ping': f_pyxb.ping,
'replicate': f_pyxb.replicate,
'synchronize': f_pyxb.synchronize,
'type': f_pyxb.type,
'state': f_pyxb.state,
}
# TODO:
# f_pyxb.services
# f_pyxb.synchronization
# f_pyxb.subject
# f_pyxb.contactSubject
# f_pyxb.nodeReplicationPolicy,
return f_dict
|
Add utility module for working with the Node type# -*- coding: utf-8 -*-
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2016 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for handling the DataONE Node and NodeList types
"""
def pyxb_to_dict(node_list_pyxb):
"""Return a dict representation of {node_list_pyxb}, keyed on
the Node identifier (urn:node:*). E.g.:
{
u'urn:node:ARCTIC': {
'base_url': u'https://arcticdata.io/metacat/d1/mn',
'description': u'The US National Science Foundation...',
'name': u'Arctic Data Center',
'ping': None,
'replicate': 0,
'state': u'up',
'synchronize': 1,
'type': u'mn'
},
u'urn:node:BCODMO': {
'base_url': u'https://www.bco-dmo.org/d1/mn',
'description': u'Biological and Chemical Oceanography Data...',
'name': u'Biological and Chemical Oceanography Data...',
'ping': None,
'replicate': 0,
'state': u'up',
'synchronize': 1,
'type': u'mn'
},
}
"""
f_dict = {}
for f_pyxb in sorted(node_list_pyxb.node, key=lambda x: x.identifier.value()):
f_dict[f_pyxb.identifier.value()] = {
'name': f_pyxb.name,
'description': f_pyxb.description,
'base_url': f_pyxb.baseURL,
'ping': f_pyxb.ping,
'replicate': f_pyxb.replicate,
'synchronize': f_pyxb.synchronize,
'type': f_pyxb.type,
'state': f_pyxb.state,
}
# TODO:
# f_pyxb.services
# f_pyxb.synchronization
# f_pyxb.subject
# f_pyxb.contactSubject
# f_pyxb.nodeReplicationPolicy,
return f_dict
|
<commit_before><commit_msg>Add utility module for working with the Node type<commit_after># -*- coding: utf-8 -*-
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2016 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for handling the DataONE Node and NodeList types
"""
def pyxb_to_dict(node_list_pyxb):
"""Return a dict representation of {node_list_pyxb}, keyed on
the Node identifier (urn:node:*). E.g.:
{
u'urn:node:ARCTIC': {
'base_url': u'https://arcticdata.io/metacat/d1/mn',
'description': u'The US National Science Foundation...',
'name': u'Arctic Data Center',
'ping': None,
'replicate': 0,
'state': u'up',
'synchronize': 1,
'type': u'mn'
},
u'urn:node:BCODMO': {
'base_url': u'https://www.bco-dmo.org/d1/mn',
'description': u'Biological and Chemical Oceanography Data...',
'name': u'Biological and Chemical Oceanography Data...',
'ping': None,
'replicate': 0,
'state': u'up',
'synchronize': 1,
'type': u'mn'
},
}
"""
f_dict = {}
for f_pyxb in sorted(node_list_pyxb.node, key=lambda x: x.identifier.value()):
f_dict[f_pyxb.identifier.value()] = {
'name': f_pyxb.name,
'description': f_pyxb.description,
'base_url': f_pyxb.baseURL,
'ping': f_pyxb.ping,
'replicate': f_pyxb.replicate,
'synchronize': f_pyxb.synchronize,
'type': f_pyxb.type,
'state': f_pyxb.state,
}
# TODO:
# f_pyxb.services
# f_pyxb.synchronization
# f_pyxb.subject
# f_pyxb.contactSubject
# f_pyxb.nodeReplicationPolicy,
return f_dict
|
|
9d69b5498b8fb4d983ba213edbc9ec1b5f102f92
|
yunity/tests/test_crossbar_authorizer.py
|
yunity/tests/test_crossbar_authorizer.py
|
from django.core.cache import cache
from django.test import TestCase
from utils.crossbar import YunityAuthorizer
from utils.session import SharedSessionData
class TestCrossbarAuthorizer(TestCase):
def setUp(self):
self.s = YunityAuthorizer()
self.session_data = SharedSessionData(False)
self.session_data.r.flushall()
def test_default(self):
self.assertFalse(self.s.authorize('1234', 'yunity.misc', 'subscribe'))
def test_public(self):
self.assertTrue(self.s.authorize('1234', 'yunity.public.blub', 'subscribe'))
def test_user_wrong(self):
self.assertFalse(self.s.authorize('1234', 'yunity.user.23.bla', 'subscribe'))
def test_user_right(self):
self.session_data.set_user_session('1234', 23)
self.assertTrue(self.s.authorize('1234', 'yunity.user.23.bla', 'subscribe'))
def test_user_right_publish(self):
self.session_data.set_user_session('1234', 23)
self.assertFalse(self.s.authorize('1234', 'yunity.user.23.bla', 'publish'))
|
Add Unit/Integration test for Crossbar authorizer
|
Add Unit/Integration test for Crossbar authorizer
|
Python
|
agpl-3.0
|
yunity/foodsaving-backend,yunity/yunity-core,yunity/foodsaving-backend,yunity/yunity-core,yunity/foodsaving-backend
|
Add Unit/Integration test for Crossbar authorizer
|
from django.core.cache import cache
from django.test import TestCase
from utils.crossbar import YunityAuthorizer
from utils.session import SharedSessionData
class TestCrossbarAuthorizer(TestCase):
def setUp(self):
self.s = YunityAuthorizer()
self.session_data = SharedSessionData(False)
self.session_data.r.flushall()
def test_default(self):
self.assertFalse(self.s.authorize('1234', 'yunity.misc', 'subscribe'))
def test_public(self):
self.assertTrue(self.s.authorize('1234', 'yunity.public.blub', 'subscribe'))
def test_user_wrong(self):
self.assertFalse(self.s.authorize('1234', 'yunity.user.23.bla', 'subscribe'))
def test_user_right(self):
self.session_data.set_user_session('1234', 23)
self.assertTrue(self.s.authorize('1234', 'yunity.user.23.bla', 'subscribe'))
def test_user_right_publish(self):
self.session_data.set_user_session('1234', 23)
self.assertFalse(self.s.authorize('1234', 'yunity.user.23.bla', 'publish'))
|
<commit_before><commit_msg>Add Unit/Integration test for Crossbar authorizer<commit_after>
|
from django.core.cache import cache
from django.test import TestCase
from utils.crossbar import YunityAuthorizer
from utils.session import SharedSessionData
class TestCrossbarAuthorizer(TestCase):
def setUp(self):
self.s = YunityAuthorizer()
self.session_data = SharedSessionData(False)
self.session_data.r.flushall()
def test_default(self):
self.assertFalse(self.s.authorize('1234', 'yunity.misc', 'subscribe'))
def test_public(self):
self.assertTrue(self.s.authorize('1234', 'yunity.public.blub', 'subscribe'))
def test_user_wrong(self):
self.assertFalse(self.s.authorize('1234', 'yunity.user.23.bla', 'subscribe'))
def test_user_right(self):
self.session_data.set_user_session('1234', 23)
self.assertTrue(self.s.authorize('1234', 'yunity.user.23.bla', 'subscribe'))
def test_user_right_publish(self):
self.session_data.set_user_session('1234', 23)
self.assertFalse(self.s.authorize('1234', 'yunity.user.23.bla', 'publish'))
|
Add Unit/Integration test for Crossbar authorizerfrom django.core.cache import cache
from django.test import TestCase
from utils.crossbar import YunityAuthorizer
from utils.session import SharedSessionData
class TestCrossbarAuthorizer(TestCase):
def setUp(self):
self.s = YunityAuthorizer()
self.session_data = SharedSessionData(False)
self.session_data.r.flushall()
def test_default(self):
self.assertFalse(self.s.authorize('1234', 'yunity.misc', 'subscribe'))
def test_public(self):
self.assertTrue(self.s.authorize('1234', 'yunity.public.blub', 'subscribe'))
def test_user_wrong(self):
self.assertFalse(self.s.authorize('1234', 'yunity.user.23.bla', 'subscribe'))
def test_user_right(self):
self.session_data.set_user_session('1234', 23)
self.assertTrue(self.s.authorize('1234', 'yunity.user.23.bla', 'subscribe'))
def test_user_right_publish(self):
self.session_data.set_user_session('1234', 23)
self.assertFalse(self.s.authorize('1234', 'yunity.user.23.bla', 'publish'))
|
<commit_before><commit_msg>Add Unit/Integration test for Crossbar authorizer<commit_after>from django.core.cache import cache
from django.test import TestCase
from utils.crossbar import YunityAuthorizer
from utils.session import SharedSessionData
class TestCrossbarAuthorizer(TestCase):
def setUp(self):
self.s = YunityAuthorizer()
self.session_data = SharedSessionData(False)
self.session_data.r.flushall()
def test_default(self):
self.assertFalse(self.s.authorize('1234', 'yunity.misc', 'subscribe'))
def test_public(self):
self.assertTrue(self.s.authorize('1234', 'yunity.public.blub', 'subscribe'))
def test_user_wrong(self):
self.assertFalse(self.s.authorize('1234', 'yunity.user.23.bla', 'subscribe'))
def test_user_right(self):
self.session_data.set_user_session('1234', 23)
self.assertTrue(self.s.authorize('1234', 'yunity.user.23.bla', 'subscribe'))
def test_user_right_publish(self):
self.session_data.set_user_session('1234', 23)
self.assertFalse(self.s.authorize('1234', 'yunity.user.23.bla', 'publish'))
|
|
36716fe51800a19567c49e734d320b38d441054e
|
zerver/migrations/0003_custom_indexes.py
|
zerver/migrations/0003_custom_indexes.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('zerver', '0002_django_1_8'),
]
operations = [
migrations.RunSQL("CREATE INDEX upper_subject_idx ON zerver_message ((upper(subject)));",
reverse_sql="DROP INDEX upper_subject_idx;"),
migrations.RunSQL("CREATE INDEX upper_stream_name_idx ON zerver_stream ((upper(name)));",
reverse_sql="DROP INDEX upper_stream_name_idx;")
]
|
Add remaining custom indexes that were created by South migrations
|
Add remaining custom indexes that were created by South migrations
(imported from commit 9798afa8161af4ae6b3fa0c5f4894a3211b77cd4)
|
Python
|
apache-2.0
|
punchagan/zulip,yocome/zulip,bluesea/zulip,armooo/zulip,zacps/zulip,vabs22/zulip,Juanvulcano/zulip,hj3938/zulip,zwily/zulip,grave-w-grave/zulip,dxq-git/zulip,LeeRisk/zulip,so0k/zulip,jerryge/zulip,technicalpickles/zulip,guiquanz/zulip,alliejones/zulip,firstblade/zulip,Drooids/zulip,xuanhan863/zulip,praveenaki/zulip,ApsOps/zulip,thomasboyt/zulip,vakila/zulip,mdavid/zulip,sonali0901/zulip,hustlzp/zulip,LeeRisk/zulip,andersk/zulip,vikas-parashar/zulip,zacps/zulip,jainayush975/zulip,umkay/zulip,ahmadassaf/zulip,wavelets/zulip,Juanvulcano/zulip,SmartPeople/zulip,joshisa/zulip,ericzhou2008/zulip,nicholasbs/zulip,LeeRisk/zulip,wavelets/zulip,shaunstanislaus/zulip,xuxiao/zulip,lfranchi/zulip,jessedhillon/zulip,ahmadassaf/zulip,esander91/zulip,stamhe/zulip,johnny9/zulip,Diptanshu8/zulip,akuseru/zulip,Vallher/zulip,eeshangarg/zulip,joyhchen/zulip,karamcnair/zulip,JanzTam/zulip,dawran6/zulip,m1ssou/zulip,Jianchun1/zulip,wangdeshui/zulip,Batterfii/zulip,udxxabp/zulip,hengqujushi/zulip,bluesea/zulip,dawran6/zulip,rishig/zulip,themass/zulip,AZtheAsian/zulip,hayderimran7/zulip,schatt/zulip,atomic-labs/zulip,praveenaki/zulip,seapasulli/zulip,firstblade/zulip,jessedhillon/zulip,Frouk/zulip,rht/zulip,MariaFaBella85/zulip,Suninus/zulip,voidException/zulip,JanzTam/zulip,zulip/zulip,zofuthan/zulip,udxxabp/zulip,lfranchi/zulip,amyliu345/zulip,kokoar/zulip,peguin40/zulip,wangdeshui/zulip,noroot/zulip,Drooids/zulip,glovebx/zulip,wavelets/zulip,AZtheAsian/zulip,bitemyapp/zulip,punchagan/zulip,bowlofstew/zulip,he15his/zulip,TigorC/zulip,codeKonami/zulip,rishig/zulip,ipernet/zulip,amallia/zulip,tdr130/zulip,luyifan/zulip,amyliu345/zulip,johnnygaddarr/zulip,bssrdf/zulip,Jianchun1/zulip,dwrpayne/zulip,krtkmj/zulip,aakash-cr7/zulip,Cheppers/zulip,yuvipanda/zulip,PhilSk/zulip,johnnygaddarr/zulip,aliceriot/zulip,isht3/zulip,DazWorrall/zulip,bastianh/zulip,DazWorrall/zulip,jonesgithub/zulip,souravbadami/zulip,kokoar/zulip,jphilipsen05/zulip,Vallher/zulip,amanharitsh123/zulip,Juanvulcano/zulip,armooo/zulip,he15his/zulip,dxq-git/zulip,Gabriel0402/zulip,KingxBanana/zulip,ikasumiwt/zulip,EasonYi/zulip,dnmfarrell/zulip,zachallaun/zulip,jonesgithub/zulip,hayderimran7/zulip,mohsenSy/zulip,huangkebo/zulip,adnanh/zulip,swinghu/zulip,so0k/zulip,hackerkid/zulip,akuseru/zulip,jerryge/zulip,PhilSk/zulip,sharmaeklavya2/zulip,hackerkid/zulip,saitodisse/zulip,dxq-git/zulip,jrowan/zulip,brainwane/zulip,brockwhittaker/zulip,Suninus/zulip,kaiyuanheshang/zulip,udxxabp/zulip,amallia/zulip,adnanh/zulip,armooo/zulip,AZtheAsian/zulip,verma-varsha/zulip,zwily/zulip,nicholasbs/zulip,susansls/zulip,xuxiao/zulip,huangkebo/zulip,rht/zulip,JanzTam/zulip,dhcrzf/zulip,ipernet/zulip,mahim97/zulip,he15his/zulip,rishig/zulip,jackrzhang/zulip,atomic-labs/zulip,akuseru/zulip,so0k/zulip,zhaoweigg/zulip,avastu/zulip,timabbott/zulip,jimmy54/zulip,technicalpickles/zulip,johnny9/zulip,eastlhu/zulip,ipernet/zulip,adnanh/zulip,calvinleenyc/zulip,wdaher/zulip,dawran6/zulip,vaidap/zulip,MariaFaBella85/zulip,glovebx/zulip,tdr130/zulip,showell/zulip,hayderimran7/zulip,gkotian/zulip,RobotCaleb/zulip,babbage/zulip,bluesea/zulip,ashwinirudrappa/zulip,mohsenSy/zulip,christi3k/zulip,souravbadami/zulip,technicalpickles/zulip,mdavid/zulip,joshisa/zulip,wavelets/zulip,jimmy54/zulip,shubhamdhama/zulip,shaunstanislaus/zulip,thomasboyt/zulip,stamhe/zulip,itnihao/zulip,vabs22/zulip,andersk/zulip,jerryge/zulip,sup95/zulip,MayB/zulip,bastianh/zulip,esander91/zulip,saitodisse/zulip,aakash-cr7/zulip,calvinleenyc/zulip,kaiyuanheshang/zulip,shubhamdhama/zulip,souravbadami/zulip,susansls/
zulip,seapasulli/zulip,arpitpanwar/zulip,firstblade/zulip,eastlhu/zulip,Suninus/zulip,kaiyuanheshang/zulip,tiansiyuan/zulip,sup95/zulip,dnmfarrell/zulip,noroot/zulip,RobotCaleb/zulip,sharmaeklavya2/zulip,bitemyapp/zulip,mahim97/zulip,technicalpickles/zulip,zhaoweigg/zulip,armooo/zulip,Cheppers/zulip,xuxiao/zulip,vikas-parashar/zulip,saitodisse/zulip,thomasboyt/zulip,jonesgithub/zulip,sonali0901/zulip,kaiyuanheshang/zulip,noroot/zulip,nicholasbs/zulip,Diptanshu8/zulip,vakila/zulip,dxq-git/zulip,shubhamdhama/zulip,ufosky-server/zulip,dnmfarrell/zulip,hackerkid/zulip,arpitpanwar/zulip,AZtheAsian/zulip,JPJPJPOPOP/zulip,Suninus/zulip,christi3k/zulip,cosmicAsymmetry/zulip,christi3k/zulip,pradiptad/zulip,KingxBanana/zulip,proliming/zulip,adnanh/zulip,vabs22/zulip,hustlzp/zulip,praveenaki/zulip,ericzhou2008/zulip,zorojean/zulip,mansilladev/zulip,fw1121/zulip,willingc/zulip,KingxBanana/zulip,bowlofstew/zulip,avastu/zulip,dhcrzf/zulip,vikas-parashar/zulip,LeeRisk/zulip,themass/zulip,kou/zulip,developerfm/zulip,sharmaeklavya2/zulip,dxq-git/zulip,tdr130/zulip,zachallaun/zulip,adnanh/zulip,bitemyapp/zulip,rht/zulip,moria/zulip,aliceriot/zulip,jphilipsen05/zulip,moria/zulip,dotcool/zulip,ryansnowboarder/zulip,esander91/zulip,sup95/zulip,grave-w-grave/zulip,eeshangarg/zulip,gigawhitlocks/zulip,KJin99/zulip,synicalsyntax/zulip,akuseru/zulip,tbutter/zulip,Cheppers/zulip,Diptanshu8/zulip,punchagan/zulip,dotcool/zulip,samatdav/zulip,timabbott/zulip,Galexrt/zulip,fw1121/zulip,zorojean/zulip,thomasboyt/zulip,zhaoweigg/zulip,littledogboy/zulip,EasonYi/zulip,jackrzhang/zulip,hafeez3000/zulip,easyfmxu/zulip,showell/zulip,mdavid/zulip,hackerkid/zulip,amallia/zulip,dattatreya303/zulip,mohsenSy/zulip,jeffcao/zulip,suxinde2009/zulip,tiansiyuan/zulip,christi3k/zulip,gkotian/zulip,susansls/zulip,Juanvulcano/zulip,tiansiyuan/zulip,wweiradio/zulip,LAndreas/zulip,hustlzp/zulip,andersk/zulip,brainwane/zulip,dhcrzf/zulip,JPJPJPOPOP/zulip,krtkmj/zulip,ufosky-server/zulip,pradiptad/zulip,lfranchi/zulip,amanharitsh123/zulip,niftynei/zulip,technicalpickles/zulip,paxapy/zulip,kou/zulip,PaulPetring/zulip,cosmicAsymmetry/zulip,zulip/zulip,umkay/zulip,calvinleenyc/zulip,fw1121/zulip,sup95/zulip,joyhchen/zulip,glovebx/zulip,yocome/zulip,Cheppers/zulip,ericzhou2008/zulip,cosmicAsymmetry/zulip,fw1121/zulip,Frouk/zulip,sonali0901/zulip,vakila/zulip,rishig/zulip,nicholasbs/zulip,j831/zulip,ufosky-server/zulip,shrikrishnaholla/zulip,yocome/zulip,wangdeshui/zulip,aliceriot/zulip,proliming/zulip,jainayush975/zulip,LAndreas/zulip,punchagan/zulip,ryanbackman/zulip,synicalsyntax/zulip,hustlzp/zulip,Qgap/zulip,bssrdf/zulip,levixie/zulip,easyfmxu/zulip,natanovia/zulip,peguin40/zulip,amanharitsh123/zulip,so0k/zulip,brockwhittaker/zulip,TigorC/zulip,huangkebo/zulip,MayB/zulip,glovebx/zulip,timabbott/zulip,tommyip/zulip,zofuthan/zulip,ahmadassaf/zulip,samatdav/zulip,peguin40/zulip,armooo/zulip,nicholasbs/zulip,nicholasbs/zulip,levixie/zulip,karamcnair/zulip,shaunstanislaus/zulip,zofuthan/zulip,TigorC/zulip,tbutter/zulip,paxapy/zulip,yuvipanda/zulip,zorojean/zulip,showell/zulip,kokoar/zulip,LAndreas/zulip,tommyip/zulip,littledogboy/zulip,verma-varsha/zulip,jessedhillon/zulip,alliejones/zulip,amyliu345/zulip,ericzhou2008/zulip,voidException/zulip,willingc/zulip,swinghu/zulip,jessedhillon/zulip,bssrdf/zulip,pradiptad/zulip,showell/zulip,dotcool/zulip,umkay/zulip,sharmaeklavya2/zulip,deer-hope/zulip,shrikrishnaholla/zulip,KJin99/zulip,gigawhitlocks/zulip,peiwei/zulip,tbutter/zulip,xuanhan863/zulip,jackrzhang/zulip,vikas-parashar/zulip,bluesea/zulip,jos
hisa/zulip,jerryge/zulip,qq1012803704/zulip,shaunstanislaus/zulip,bluesea/zulip,vikas-parashar/zulip,showell/zulip,ashwinirudrappa/zulip,natanovia/zulip,kou/zulip,vaidap/zulip,niftynei/zulip,brainwane/zulip,dnmfarrell/zulip,stamhe/zulip,tommyip/zulip,itnihao/zulip,ryansnowboarder/zulip,amallia/zulip,tommyip/zulip,hayderimran7/zulip,Cheppers/zulip,AZtheAsian/zulip,firstblade/zulip,christi3k/zulip,firstblade/zulip,atomic-labs/zulip,Jianchun1/zulip,armooo/zulip,Galexrt/zulip,dwrpayne/zulip,thomasboyt/zulip,Gabriel0402/zulip,ufosky-server/zulip,zofuthan/zulip,codeKonami/zulip,wweiradio/zulip,JPJPJPOPOP/zulip,dotcool/zulip,schatt/zulip,tommyip/zulip,reyha/zulip,moria/zulip,avastu/zulip,SmartPeople/zulip,udxxabp/zulip,tiansiyuan/zulip,SmartPeople/zulip,themass/zulip,littledogboy/zulip,dattatreya303/zulip,eastlhu/zulip,Gabriel0402/zulip,gigawhitlocks/zulip,seapasulli/zulip,Jianchun1/zulip,fw1121/zulip,ericzhou2008/zulip,codeKonami/zulip,easyfmxu/zulip,zwily/zulip,verma-varsha/zulip,niftynei/zulip,itnihao/zulip,jerryge/zulip,alliejones/zulip,hj3938/zulip,isht3/zulip,Jianchun1/zulip,Drooids/zulip,natanovia/zulip,rht/zulip,Frouk/zulip,susansls/zulip,arpith/zulip,ryansnowboarder/zulip,Jianchun1/zulip,armooo/zulip,Batterfii/zulip,sup95/zulip,hengqujushi/zulip,eastlhu/zulip,Frouk/zulip,Gabriel0402/zulip,brockwhittaker/zulip,ahmadassaf/zulip,JPJPJPOPOP/zulip,deer-hope/zulip,Qgap/zulip,ahmadassaf/zulip,jphilipsen05/zulip,jonesgithub/zulip,bssrdf/zulip,kaiyuanheshang/zulip,bluesea/zulip,dawran6/zulip,firstblade/zulip,mahim97/zulip,wdaher/zulip,esander91/zulip,peiwei/zulip,amyliu345/zulip,EasonYi/zulip,hayderimran7/zulip,alliejones/zulip,guiquanz/zulip,lfranchi/zulip,MariaFaBella85/zulip,thomasboyt/zulip,zwily/zulip,JanzTam/zulip,guiquanz/zulip,LAndreas/zulip,grave-w-grave/zulip,levixie/zulip,deer-hope/zulip,jessedhillon/zulip,shrikrishnaholla/zulip,easyfmxu/zulip,eastlhu/zulip,jimmy54/zulip,peiwei/zulip,dattatreya303/zulip,luyifan/zulip,alliejones/zulip,aps-sids/zulip,jimmy54/zulip,jackrzhang/zulip,pradiptad/zulip,ericzhou2008/zulip,vaidap/zulip,synicalsyntax/zulip,karamcnair/zulip,blaze225/zulip,xuanhan863/zulip,eastlhu/zulip,brainwane/zulip,ashwinirudrappa/zulip,isht3/zulip,JanzTam/zulip,bitemyapp/zulip,littledogboy/zulip,mohsenSy/zulip,kou/zulip,LeeRisk/zulip,Vallher/zulip,DazWorrall/zulip,proliming/zulip,eeshangarg/zulip,moria/zulip,aps-sids/zulip,joshisa/zulip,ericzhou2008/zulip,atomic-labs/zulip,tiansiyuan/zulip,Drooids/zulip,ufosky-server/zulip,MayB/zulip,umkay/zulip,developerfm/zulip,atomic-labs/zulip,hayderimran7/zulip,pradiptad/zulip,easyfmxu/zulip,m1ssou/zulip,EasonYi/zulip,schatt/zulip,ipernet/zulip,brockwhittaker/zulip,dotcool/zulip,ikasumiwt/zulip,qq1012803704/zulip,esander91/zulip,ApsOps/zulip,sonali0901/zulip,calvinleenyc/zulip,kokoar/zulip,DazWorrall/zulip,EasonYi/zulip,arpith/zulip,tommyip/zulip,ryansnowboarder/zulip,zacps/zulip,qq1012803704/zulip,vakila/zulip,easyfmxu/zulip,rishig/zulip,Suninus/zulip,amanharitsh123/zulip,deer-hope/zulip,johnnygaddarr/zulip,ApsOps/zulip,stamhe/zulip,dnmfarrell/zulip,developerfm/zulip,ryanbackman/zulip,swinghu/zulip,vakila/zulip,thomasboyt/zulip,rht/zulip,bssrdf/zulip,eeshangarg/zulip,fw1121/zulip,kokoar/zulip,MayB/zulip,brainwane/zulip,littledogboy/zulip,noroot/zulip,johnny9/zulip,esander91/zulip,timabbott/zulip,seapasulli/zulip,bitemyapp/zulip,shubhamdhama/zulip,rishig/zulip,amyliu345/zulip,SmartPeople/zulip,Frouk/zulip,Gabriel0402/zulip,zorojean/zulip,ryansnowboarder/zulip,wweiradio/zulip,hafeez3000/zulip,babbage/zulip,levixie/zulip,Qgap/zulip,shaunstan
islaus/zulip,krtkmj/zulip,babbage/zulip,seapasulli/zulip,wweiradio/zulip,hengqujushi/zulip,KJin99/zulip,Diptanshu8/zulip,hafeez3000/zulip,LeeRisk/zulip,samatdav/zulip,littledogboy/zulip,udxxabp/zulip,zofuthan/zulip,zulip/zulip,AZtheAsian/zulip,saitodisse/zulip,swinghu/zulip,esander91/zulip,luyifan/zulip,johnnygaddarr/zulip,vabs22/zulip,johnny9/zulip,atomic-labs/zulip,johnnygaddarr/zulip,karamcnair/zulip,zulip/zulip,shrikrishnaholla/zulip,brainwane/zulip,dattatreya303/zulip,eeshangarg/zulip,amallia/zulip,adnanh/zulip,Suninus/zulip,moria/zulip,zacps/zulip,Qgap/zulip,noroot/zulip,amallia/zulip,qq1012803704/zulip,j831/zulip,j831/zulip,jeffcao/zulip,vikas-parashar/zulip,kaiyuanheshang/zulip,KJin99/zulip,ikasumiwt/zulip,codeKonami/zulip,ApsOps/zulip,dotcool/zulip,jainayush975/zulip,bluesea/zulip,ashwinirudrappa/zulip,yocome/zulip,jrowan/zulip,jerryge/zulip,MayB/zulip,KJin99/zulip,LAndreas/zulip,wangdeshui/zulip,jimmy54/zulip,dotcool/zulip,peiwei/zulip,mdavid/zulip,ryansnowboarder/zulip,udxxabp/zulip,glovebx/zulip,bastianh/zulip,hackerkid/zulip,isht3/zulip,KJin99/zulip,itnihao/zulip,arpith/zulip,paxapy/zulip,hengqujushi/zulip,punchagan/zulip,deer-hope/zulip,souravbadami/zulip,TigorC/zulip,stamhe/zulip,amanharitsh123/zulip,levixie/zulip,yocome/zulip,zachallaun/zulip,glovebx/zulip,aps-sids/zulip,hj3938/zulip,zhaoweigg/zulip,hafeez3000/zulip,samatdav/zulip,zulip/zulip,brockwhittaker/zulip,KJin99/zulip,akuseru/zulip,DazWorrall/zulip,aliceriot/zulip,ashwinirudrappa/zulip,arpith/zulip,peiwei/zulip,hj3938/zulip,xuanhan863/zulip,andersk/zulip,dnmfarrell/zulip,swinghu/zulip,xuxiao/zulip,calvinleenyc/zulip,kokoar/zulip,Galexrt/zulip,Gabriel0402/zulip,bssrdf/zulip,joshisa/zulip,niftynei/zulip,mahim97/zulip,karamcnair/zulip,ryanbackman/zulip,paxapy/zulip,qq1012803704/zulip,zwily/zulip,karamcnair/zulip,tommyip/zulip,grave-w-grave/zulip,itnihao/zulip,susansls/zulip,krtkmj/zulip,hj3938/zulip,joshisa/zulip,aliceriot/zulip,MayB/zulip,MariaFaBella85/zulip,andersk/zulip,avastu/zulip,rishig/zulip,vaidap/zulip,luyifan/zulip,peguin40/zulip,jphilipsen05/zulip,mansilladev/zulip,jeffcao/zulip,babbage/zulip,gigawhitlocks/zulip,kokoar/zulip,RobotCaleb/zulip,developerfm/zulip,dhcrzf/zulip,PaulPetring/zulip,arpith/zulip,wdaher/zulip,yocome/zulip,vakila/zulip,ApsOps/zulip,dattatreya303/zulip,so0k/zulip,dwrpayne/zulip,Cheppers/zulip,zacps/zulip,eastlhu/zulip,RobotCaleb/zulip,aps-sids/zulip,natanovia/zulip,hafeez3000/zulip,RobotCaleb/zulip,shrikrishnaholla/zulip,tbutter/zulip,he15his/zulip,guiquanz/zulip,Qgap/zulip,voidException/zulip,adnanh/zulip,natanovia/zulip,wangdeshui/zulip,bowlofstew/zulip,codeKonami/zulip,Batterfii/zulip,ufosky-server/zulip,shubhamdhama/zulip,wweiradio/zulip,wdaher/zulip,jackrzhang/zulip,yuvipanda/zulip,arpitpanwar/zulip,joshisa/zulip,mdavid/zulip,firstblade/zulip,huangkebo/zulip,avastu/zulip,umkay/zulip,andersk/zulip,tbutter/zulip,tiansiyuan/zulip,verma-varsha/zulip,ryanbackman/zulip,arpitpanwar/zulip,tdr130/zulip,xuanhan863/zulip,themass/zulip,samatdav/zulip,verma-varsha/zulip,niftynei/zulip,deer-hope/zulip,jainayush975/zulip,samatdav/zulip,Batterfii/zulip,synicalsyntax/zulip,aakash-cr7/zulip,JPJPJPOPOP/zulip,cosmicAsymmetry/zulip,shubhamdhama/zulip,PaulPetring/zulip,bssrdf/zulip,johnny9/zulip,akuseru/zulip,bastianh/zulip,tdr130/zulip,itnihao/zulip,guiquanz/zulip,yuvipanda/zulip,MariaFaBella85/zulip,Juanvulcano/zulip,shrikrishnaholla/zulip,ahmadassaf/zulip,Batterfii/zulip,zwily/zulip,JPJPJPOPOP/zulip,Drooids/zulip,zorojean/zulip,mohsenSy/zulip,he15his/zulip,babbage/zulip,suxinde2009/zulip,gkotian/zulip
,fw1121/zulip,seapasulli/zulip,schatt/zulip,lfranchi/zulip,RobotCaleb/zulip,JanzTam/zulip,souravbadami/zulip,shrikrishnaholla/zulip,gigawhitlocks/zulip,hengqujushi/zulip,KingxBanana/zulip,m1ssou/zulip,JanzTam/zulip,synicalsyntax/zulip,xuanhan863/zulip,hj3938/zulip,praveenaki/zulip,punchagan/zulip,Vallher/zulip,jimmy54/zulip,hayderimran7/zulip,gigawhitlocks/zulip,brainwane/zulip,gigawhitlocks/zulip,tbutter/zulip,sup95/zulip,ikasumiwt/zulip,Vallher/zulip,Vallher/zulip,christi3k/zulip,itnihao/zulip,alliejones/zulip,PaulPetring/zulip,MariaFaBella85/zulip,reyha/zulip,jerryge/zulip,noroot/zulip,developerfm/zulip,glovebx/zulip,dawran6/zulip,susansls/zulip,hustlzp/zulip,dxq-git/zulip,voidException/zulip,ipernet/zulip,avastu/zulip,mdavid/zulip,xuanhan863/zulip,atomic-labs/zulip,johnny9/zulip,joyhchen/zulip,jonesgithub/zulip,akuseru/zulip,willingc/zulip,bowlofstew/zulip,peguin40/zulip,jonesgithub/zulip,johnny9/zulip,hackerkid/zulip,rht/zulip,MariaFaBella85/zulip,PhilSk/zulip,showell/zulip,developerfm/zulip,PhilSk/zulip,jphilipsen05/zulip,m1ssou/zulip,bastianh/zulip,KingxBanana/zulip,Juanvulcano/zulip,levixie/zulip,jeffcao/zulip,SmartPeople/zulip,arpitpanwar/zulip,PaulPetring/zulip,swinghu/zulip,m1ssou/zulip,sharmaeklavya2/zulip,jrowan/zulip,jonesgithub/zulip,sonali0901/zulip,pradiptad/zulip,zhaoweigg/zulip,themass/zulip,ryanbackman/zulip,peiwei/zulip,ashwinirudrappa/zulip,swinghu/zulip,xuxiao/zulip,blaze225/zulip,zachallaun/zulip,proliming/zulip,willingc/zulip,jeffcao/zulip,eeshangarg/zulip,amallia/zulip,reyha/zulip,kaiyuanheshang/zulip,TigorC/zulip,xuxiao/zulip,stamhe/zulip,dxq-git/zulip,zofuthan/zulip,hafeez3000/zulip,isht3/zulip,krtkmj/zulip,wweiradio/zulip,codeKonami/zulip,LAndreas/zulip,showell/zulip,luyifan/zulip,jrowan/zulip,easyfmxu/zulip,mohsenSy/zulip,so0k/zulip,dwrpayne/zulip,wdaher/zulip,schatt/zulip,zacps/zulip,aakash-cr7/zulip,mansilladev/zulip,zhaoweigg/zulip,blaze225/zulip,alliejones/zulip,zwily/zulip,zachallaun/zulip,zulip/zulip,j831/zulip,yuvipanda/zulip,johnnygaddarr/zulip,ufosky-server/zulip,he15his/zulip,dwrpayne/zulip,hengqujushi/zulip,technicalpickles/zulip,andersk/zulip,grave-w-grave/zulip,praveenaki/zulip,aps-sids/zulip,hengqujushi/zulip,ikasumiwt/zulip,wangdeshui/zulip,Suninus/zulip,Drooids/zulip,bowlofstew/zulip,peiwei/zulip,mansilladev/zulip,zhaoweigg/zulip,johnnygaddarr/zulip,jrowan/zulip,dhcrzf/zulip,seapasulli/zulip,bitemyapp/zulip,willingc/zulip,hafeez3000/zulip,voidException/zulip,themass/zulip,krtkmj/zulip,praveenaki/zulip,wangdeshui/zulip,Frouk/zulip,hackerkid/zulip,calvinleenyc/zulip,voidException/zulip,blaze225/zulip,stamhe/zulip,willingc/zulip,technicalpickles/zulip,Diptanshu8/zulip,jeffcao/zulip,peguin40/zulip,joyhchen/zulip,aps-sids/zulip,zorojean/zulip,tiansiyuan/zulip,PhilSk/zulip,yuvipanda/zulip,qq1012803704/zulip,yocome/zulip,paxapy/zulip,vabs22/zulip,EasonYi/zulip,Gabriel0402/zulip,wavelets/zulip,bitemyapp/zulip,pradiptad/zulip,reyha/zulip,tdr130/zulip,karamcnair/zulip,wavelets/zulip,m1ssou/zulip,krtkmj/zulip,timabbott/zulip,Vallher/zulip,RobotCaleb/zulip,timabbott/zulip,suxinde2009/zulip,zachallaun/zulip,grave-w-grave/zulip,jainayush975/zulip,voidException/zulip,yuvipanda/zulip,synicalsyntax/zulip,souravbadami/zulip,dhcrzf/zulip,sharmaeklavya2/zulip,dhcrzf/zulip,blaze225/zulip,zulip/zulip,rht/zulip,PhilSk/zulip,mansilladev/zulip,ahmadassaf/zulip,nicholasbs/zulip,jrowan/zulip,reyha/zulip,ApsOps/zulip,tbutter/zulip,noroot/zulip,KingxBanana/zulip,verma-varsha/zulip,kou/zulip,blaze225/zulip,m1ssou/zulip,saitodisse/zulip,jessedhillon/zulip,j831/zulip,kou/zulip
,ryansnowboarder/zulip,dwrpayne/zulip,levixie/zulip,PaulPetring/zulip,wweiradio/zulip,moria/zulip,dattatreya303/zulip,amanharitsh123/zulip,jainayush975/zulip,luyifan/zulip,lfranchi/zulip,shaunstanislaus/zulip,saitodisse/zulip,tdr130/zulip,Cheppers/zulip,gkotian/zulip,deer-hope/zulip,jackrzhang/zulip,he15his/zulip,aakash-cr7/zulip,suxinde2009/zulip,zofuthan/zulip,suxinde2009/zulip,ikasumiwt/zulip,punchagan/zulip,codeKonami/zulip,jeffcao/zulip,MayB/zulip,umkay/zulip,isht3/zulip,saitodisse/zulip,cosmicAsymmetry/zulip,themass/zulip,vaidap/zulip,DazWorrall/zulip,paxapy/zulip,dnmfarrell/zulip,willingc/zulip,developerfm/zulip,LeeRisk/zulip,proliming/zulip,EasonYi/zulip,joyhchen/zulip,bastianh/zulip,schatt/zulip,sonali0901/zulip,niftynei/zulip,qq1012803704/zulip,mansilladev/zulip,brockwhittaker/zulip,jessedhillon/zulip,ikasumiwt/zulip,so0k/zulip,natanovia/zulip,synicalsyntax/zulip,dwrpayne/zulip,amyliu345/zulip,guiquanz/zulip,mahim97/zulip,natanovia/zulip,PaulPetring/zulip,timabbott/zulip,jphilipsen05/zulip,Galexrt/zulip,ApsOps/zulip,schatt/zulip,moria/zulip,shubhamdhama/zulip,littledogboy/zulip,jackrzhang/zulip,Batterfii/zulip,j831/zulip,jimmy54/zulip,Galexrt/zulip,shaunstanislaus/zulip,mahim97/zulip,wdaher/zulip,aliceriot/zulip,umkay/zulip,kou/zulip,aps-sids/zulip,hustlzp/zulip,vaidap/zulip,bowlofstew/zulip,arpitpanwar/zulip,wdaher/zulip,arpitpanwar/zulip,mansilladev/zulip,gkotian/zulip,eeshangarg/zulip,LAndreas/zulip,DazWorrall/zulip,proliming/zulip,hustlzp/zulip,SmartPeople/zulip,joyhchen/zulip,Qgap/zulip,babbage/zulip,ipernet/zulip,huangkebo/zulip,praveenaki/zulip,lfranchi/zulip,hj3938/zulip,guiquanz/zulip,suxinde2009/zulip,Drooids/zulip,bastianh/zulip,ashwinirudrappa/zulip,suxinde2009/zulip,Galexrt/zulip,gkotian/zulip,babbage/zulip,vakila/zulip,cosmicAsymmetry/zulip,proliming/zulip,Galexrt/zulip,aliceriot/zulip,ipernet/zulip,xuxiao/zulip,Qgap/zulip,vabs22/zulip,luyifan/zulip,ryanbackman/zulip,Diptanshu8/zulip,udxxabp/zulip,wavelets/zulip,gkotian/zulip,reyha/zulip,zorojean/zulip,dawran6/zulip,Frouk/zulip,huangkebo/zulip,TigorC/zulip,bowlofstew/zulip,zachallaun/zulip,avastu/zulip,aakash-cr7/zulip,huangkebo/zulip,mdavid/zulip,arpith/zulip,Batterfii/zulip
|
Add remaining custom indexes that were created by South migrations
(imported from commit 9798afa8161af4ae6b3fa0c5f4894a3211b77cd4)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('zerver', '0002_django_1_8'),
]
operations = [
migrations.RunSQL("CREATE INDEX upper_subject_idx ON zerver_message ((upper(subject)));",
reverse_sql="DROP INDEX upper_subject_idx;"),
migrations.RunSQL("CREATE INDEX upper_stream_name_idx ON zerver_stream ((upper(name)));",
reverse_sql="DROP INDEX upper_stream_name_idx;")
]
|
<commit_before><commit_msg>Add remaining custom indexes that were created by South migrations
(imported from commit 9798afa8161af4ae6b3fa0c5f4894a3211b77cd4)<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('zerver', '0002_django_1_8'),
]
operations = [
migrations.RunSQL("CREATE INDEX upper_subject_idx ON zerver_message ((upper(subject)));",
reverse_sql="DROP INDEX upper_subject_idx;"),
migrations.RunSQL("CREATE INDEX upper_stream_name_idx ON zerver_stream ((upper(name)));",
reverse_sql="DROP INDEX upper_stream_name_idx;")
]
|
Add remaining custom indexes that were created by South migrations
(imported from commit 9798afa8161af4ae6b3fa0c5f4894a3211b77cd4)# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('zerver', '0002_django_1_8'),
]
operations = [
migrations.RunSQL("CREATE INDEX upper_subject_idx ON zerver_message ((upper(subject)));",
reverse_sql="DROP INDEX upper_subject_idx;"),
migrations.RunSQL("CREATE INDEX upper_stream_name_idx ON zerver_stream ((upper(name)));",
reverse_sql="DROP INDEX upper_stream_name_idx;")
]
|
<commit_before><commit_msg>Add remaining custom indexes that were created by South migrations
(imported from commit 9798afa8161af4ae6b3fa0c5f4894a3211b77cd4)<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('zerver', '0002_django_1_8'),
]
operations = [
migrations.RunSQL("CREATE INDEX upper_subject_idx ON zerver_message ((upper(subject)));",
reverse_sql="DROP INDEX upper_subject_idx;"),
migrations.RunSQL("CREATE INDEX upper_stream_name_idx ON zerver_stream ((upper(name)));",
reverse_sql="DROP INDEX upper_stream_name_idx;")
]
|
|
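The record above pairs each CREATE INDEX with a reverse_sql DROP so the migration stays reversible when rolled back. As a minimal sketch of the same pattern, factored into a helper (the name functional_index is hypothetical; Django itself only provides migrations.RunSQL):

from django.db import migrations

def functional_index(table, column, index_name, func='upper'):
    # Build a reversible RunSQL operation for an expression index.
    create = 'CREATE INDEX {0} ON {1} (({2}({3})));'.format(
        index_name, table, func, column)
    return migrations.RunSQL(create,
                             reverse_sql='DROP INDEX {0};'.format(index_name))

Such upper() expression indexes let PostgreSQL use an index scan for case-insensitive lookups of the form WHERE upper(subject) = upper(%s).
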
d7349cb444b5774b24096eec64996ff2fa334392
|
libnamebench/config_test.py
|
libnamebench/config_test.py
|
#!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the config module."""
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
import unittest
import config
class ConfigTest(unittest.TestCase):
def testParseFullLine(self):
line = '129.250.35.251=NTT (2) # y.ns.gin.ntt.net,39.569,-104.8582 (Englewood/CO/US)'
expected = {'name': 'NTT (2)', 'service': 'NTT', 'ip': '129.250.35.251',
'lon': '-104.8582', 'instance': '2', 'country_code': 'US',
'lat': '39.569'}
self.assertEquals(config._ParseServerLine(line), expected)
def testOpenDNSLine(self):
line = '208.67.220.220=OpenDNS # resolver2.opendns.com'
expected = {'name': 'OpenDNS', 'service': 'OpenDNS', 'ip': '208.67.220.220',
'lon': None, 'instance': None, 'country_code': None,
'lat': None}
self.assertEquals(config._ParseServerLine(line), expected)
def testLineWithNoRegion(self):
line = '4.2.2.2=Level/GTEI-2 (3) # vnsc-bak.sys.gtei.net,38.0,-97.0 (US) '
expected = {'name': 'Level/GTEI-2 (3)', 'service': 'Level/GTEI-2',
'ip': '4.2.2.2', 'lon': '-97.0', 'instance': '3',
'country_code': 'US', 'lat': '38.0'}
self.assertEquals(config._ParseServerLine(line), expected)
if __name__ == '__main__':
unittest.main()
|
Add some tests for dns config parsing
|
Add some tests for dns config parsing
|
Python
|
apache-2.0
|
google/namebench,google/namebench,rogers0/namebench,google/namebench,protron/namebench
|
Add some tests for dns config parsing
|
#!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the config module."""
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
import unittest
import config
class ConfigTest(unittest.TestCase):
def testParseFullLine(self):
line = '129.250.35.251=NTT (2) # y.ns.gin.ntt.net,39.569,-104.8582 (Englewood/CO/US)'
expected = {'name': 'NTT (2)', 'service': 'NTT', 'ip': '129.250.35.251',
'lon': '-104.8582', 'instance': '2', 'country_code': 'US',
'lat': '39.569'}
self.assertEquals(config._ParseServerLine(line), expected)
def testOpenDNSLine(self):
line = '208.67.220.220=OpenDNS # resolver2.opendns.com'
expected = {'name': 'OpenDNS', 'service': 'OpenDNS', 'ip': '208.67.220.220',
'lon': None, 'instance': None, 'country_code': None,
'lat': None}
self.assertEquals(config._ParseServerLine(line), expected)
def testLineWithNoRegion(self):
line = '4.2.2.2=Level/GTEI-2 (3) # vnsc-bak.sys.gtei.net,38.0,-97.0 (US) '
expected = {'name': 'Level/GTEI-2 (3)', 'service': 'Level/GTEI-2',
'ip': '4.2.2.2', 'lon': '-97.0', 'instance': '3',
'country_code': 'US', 'lat': '38.0'}
self.assertEquals(config._ParseServerLine(line), expected)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add some tests for dns config parsing<commit_after>
|
#!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the config module."""
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
import unittest
import config
class ConfigTest(unittest.TestCase):
def testParseFullLine(self):
line = '129.250.35.251=NTT (2) # y.ns.gin.ntt.net,39.569,-104.8582 (Englewood/CO/US)'
expected = {'name': 'NTT (2)', 'service': 'NTT', 'ip': '129.250.35.251',
'lon': '-104.8582', 'instance': '2', 'country_code': 'US',
'lat': '39.569'}
self.assertEquals(config._ParseServerLine(line), expected)
def testOpenDNSLine(self):
line = '208.67.220.220=OpenDNS # resolver2.opendns.com'
expected = {'name': 'OpenDNS', 'service': 'OpenDNS', 'ip': '208.67.220.220',
'lon': None, 'instance': None, 'country_code': None,
'lat': None}
self.assertEquals(config._ParseServerLine(line), expected)
def testLineWithNoRegion(self):
line = '4.2.2.2=Level/GTEI-2 (3) # vnsc-bak.sys.gtei.net,38.0,-97.0 (US) '
expected = {'name': 'Level/GTEI-2 (3)', 'service': 'Level/GTEI-2',
'ip': '4.2.2.2', 'lon': '-97.0', 'instance': '3',
'country_code': 'US', 'lat': '38.0'}
self.assertEquals(config._ParseServerLine(line), expected)
if __name__ == '__main__':
unittest.main()
|
Add some tests for dns config parsing#!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the config module."""
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
import unittest
import config
class ConfigTest(unittest.TestCase):
def testParseFullLine(self):
line = '129.250.35.251=NTT (2) # y.ns.gin.ntt.net,39.569,-104.8582 (Englewood/CO/US)'
expected = {'name': 'NTT (2)', 'service': 'NTT', 'ip': '129.250.35.251',
'lon': '-104.8582', 'instance': '2', 'country_code': 'US',
'lat': '39.569'}
self.assertEquals(config._ParseServerLine(line), expected)
def testOpenDNSLine(self):
line = '208.67.220.220=OpenDNS # resolver2.opendns.com'
expected = {'name': 'OpenDNS', 'service': 'OpenDNS', 'ip': '208.67.220.220',
'lon': None, 'instance': None, 'country_code': None,
'lat': None}
self.assertEquals(config._ParseServerLine(line), expected)
def testLineWithNoRegion(self):
line = '4.2.2.2=Level/GTEI-2 (3) # vnsc-bak.sys.gtei.net,38.0,-97.0 (US) '
expected = {'name': 'Level/GTEI-2 (3)', 'service': 'Level/GTEI-2',
'ip': '4.2.2.2', 'lon': '-97.0', 'instance': '3',
'country_code': 'US', 'lat': '38.0'}
self.assertEquals(config._ParseServerLine(line), expected)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add some tests for dns config parsing<commit_after>#!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the config module."""
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
import unittest
import config
class ConfigTest(unittest.TestCase):
def testParseFullLine(self):
line = '129.250.35.251=NTT (2) # y.ns.gin.ntt.net,39.569,-104.8582 (Englewood/CO/US)'
expected = {'name': 'NTT (2)', 'service': 'NTT', 'ip': '129.250.35.251',
'lon': '-104.8582', 'instance': '2', 'country_code': 'US',
'lat': '39.569'}
self.assertEquals(config._ParseServerLine(line), expected)
def testOpenDNSLine(self):
line = '208.67.220.220=OpenDNS # resolver2.opendns.com'
expected = {'name': 'OpenDNS', 'service': 'OpenDNS', 'ip': '208.67.220.220',
'lon': None, 'instance': None, 'country_code': None,
'lat': None}
self.assertEquals(config._ParseServerLine(line), expected)
def testLineWithNoRegion(self):
line = '4.2.2.2=Level/GTEI-2 (3) # vnsc-bak.sys.gtei.net,38.0,-97.0 (US) '
expected = {'name': 'Level/GTEI-2 (3)', 'service': 'Level/GTEI-2',
'ip': '4.2.2.2', 'lon': '-97.0', 'instance': '3',
'country_code': 'US', 'lat': '38.0'}
self.assertEquals(config._ParseServerLine(line), expected)
if __name__ == '__main__':
unittest.main()
|
|
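These tests pin down the expected output of config._ParseServerLine without showing its body. A regex-based sketch that satisfies the three cases above (this is a guess at the implementation, constrained only by the assertions, not namebench's actual code):

import re

_LINE_RE = re.compile(
    r'^(?P<ip>[\d.]+)=(?P<name>[^#]+?)\s*#\s*(?P<host>[^,\s]+)'
    r'(?:,(?P<lat>-?[\d.]+),(?P<lon>-?[\d.]+))?'
    r'(?:\s*\((?P<loc>[^)]*)\))?\s*$')

def parse_server_line(line):
    # Returns the dict layout asserted in ConfigTest, or None on no match.
    match = _LINE_RE.match(line.strip())
    if not match:
        return None
    name = match.group('name').strip()
    instance = re.search(r'\((\d+)\)\s*$', name)
    loc = match.group('loc')
    return {
        'ip': match.group('ip'),
        'name': name,
        'service': re.sub(r'\s*\(\d+\)\s*$', '', name),
        'instance': instance.group(1) if instance else None,
        'lat': match.group('lat'),
        'lon': match.group('lon'),
        # The location field looks like City/Region/CC; keep the last segment.
        'country_code': loc.split('/')[-1] if loc else None,
    }
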
353817ad22c5c3f1102474e573032383309545de
|
py/image-smoother.py
|
py/image-smoother.py
|
class Solution(object):
def imageSmoother(self, M):
"""
:type M: List[List[int]]
:rtype: List[List[int]]
"""
if not M or not M[0]:
return M
R = len(M)
C = len(M[0])
subsum = [[0] * (len(M[0]) + 3) for _ in xrange(len(M) + 3)]
for r in xrange(2, R + 3):
for c in xrange(2, C + 3):
v = M[r - 2][c - 2] if r - 2 < R and c - 2 < C else 0
subsum[r][c] = subsum[r][c - 1] + subsum[r - 1][c] - subsum[r - 1][c - 1] + v
for r, row in enumerate(M):
for c, v in enumerate(row):
row[c] = (subsum[r + 3][c + 3] - subsum[r + 3][c] - subsum[r][c + 3] + subsum[r][c]) / (min(r + 2, 3, R - r + 1, R) * min(c + 2, 3, C - c + 1, C))
return M
|
Add py solution for 661. Image Smoother
|
Add py solution for 661. Image Smoother
661. Image Smoother: https://leetcode.com/problems/image-smoother/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 661. Image Smoother
661. Image Smoother: https://leetcode.com/problems/image-smoother/
|
class Solution(object):
def imageSmoother(self, M):
"""
:type M: List[List[int]]
:rtype: List[List[int]]
"""
if not M or not M[0]:
return M
R = len(M)
C = len(M[0])
subsum = [[0] * (len(M[0]) + 3) for _ in xrange(len(M) + 3)]
for r in xrange(2, R + 3):
for c in xrange(2, C + 3):
v = M[r - 2][c - 2] if r - 2 < R and c - 2 < C else 0
subsum[r][c] = subsum[r][c - 1] + subsum[r - 1][c] - subsum[r - 1][c - 1] + v
for r, row in enumerate(M):
for c, v in enumerate(row):
row[c] = (subsum[r + 3][c + 3] - subsum[r + 3][c] - subsum[r][c + 3] + subsum[r][c]) / (min(r + 2, 3, R - r + 1, R) * min(c + 2, 3, C - c + 1, C))
return M
|
<commit_before><commit_msg>Add py solution for 661. Image Smoother
661. Image Smoother: https://leetcode.com/problems/image-smoother/<commit_after>
|
class Solution(object):
def imageSmoother(self, M):
"""
:type M: List[List[int]]
:rtype: List[List[int]]
"""
if not M or not M[0]:
return M
R = len(M)
C = len(M[0])
subsum = [[0] * (len(M[0]) + 3) for _ in xrange(len(M) + 3)]
for r in xrange(2, R + 3):
for c in xrange(2, C + 3):
v = M[r - 2][c - 2] if r - 2 < R and c - 2 < C else 0
subsum[r][c] = subsum[r][c - 1] + subsum[r - 1][c] - subsum[r - 1][c - 1] + v
for r, row in enumerate(M):
for c, v in enumerate(row):
row[c] = (subsum[r + 3][c + 3] - subsum[r + 3][c] - subsum[r][c + 3] + subsum[r][c]) / (min(r + 2, 3, R - r + 1, R) * min(c + 2, 3, C - c + 1, C))
return M
|
Add py solution for 661. Image Smoother
661. Image Smoother: https://leetcode.com/problems/image-smoother/class Solution(object):
def imageSmoother(self, M):
"""
:type M: List[List[int]]
:rtype: List[List[int]]
"""
if not M or not M[0]:
return M
R = len(M)
C = len(M[0])
subsum = [[0] * (len(M[0]) + 3) for _ in xrange(len(M) + 3)]
for r in xrange(2, R + 3):
for c in xrange(2, C + 3):
v = M[r - 2][c - 2] if r - 2 < R and c - 2 < C else 0
subsum[r][c] = subsum[r][c - 1] + subsum[r - 1][c] - subsum[r - 1][c - 1] + v
for r, row in enumerate(M):
for c, v in enumerate(row):
row[c] = (subsum[r + 3][c + 3] - subsum[r + 3][c] - subsum[r][c + 3] + subsum[r][c]) / (min(r + 2, 3, R - r + 1, R) * min(c + 2, 3, C - c + 1, C))
return M
|
<commit_before><commit_msg>Add py solution for 661. Image Smoother
661. Image Smoother: https://leetcode.com/problems/image-smoother/<commit_after>class Solution(object):
def imageSmoother(self, M):
"""
:type M: List[List[int]]
:rtype: List[List[int]]
"""
if not M or not M[0]:
return M
R = len(M)
C = len(M[0])
subsum = [[0] * (len(M[0]) + 3) for _ in xrange(len(M) + 3)]
for r in xrange(2, R + 3):
for c in xrange(2, C + 3):
v = M[r - 2][c - 2] if r - 2 < R and c - 2 < C else 0
subsum[r][c] = subsum[r][c - 1] + subsum[r - 1][c] - subsum[r - 1][c - 1] + v
for r, row in enumerate(M):
for c, v in enumerate(row):
row[c] = (subsum[r + 3][c + 3] - subsum[r + 3][c] - subsum[r][c + 3] + subsum[r][c]) / (min(r + 2, 3, R - r + 1, R) * min(c + 2, 3, C - c + 1, C))
return M
|
|
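The solution builds a 2-D prefix-sum (summed-area) table so each 3x3 window sum costs four lookups instead of nine reads, giving O(R*C) overall; note the code is Python 2 (xrange, integer /). An equivalent Python 3 sketch with the window clipping made explicit:

def image_smoother(matrix):
    # sat[r][c] holds the sum of matrix[0:r][0:c] (exclusive bounds).
    if not matrix or not matrix[0]:
        return matrix
    rows, cols = len(matrix), len(matrix[0])
    sat = [[0] * (cols + 1) for _ in range(rows + 1)]
    for r in range(rows):
        for c in range(cols):
            sat[r + 1][c + 1] = (sat[r][c + 1] + sat[r + 1][c]
                                 - sat[r][c] + matrix[r][c])
    out = [[0] * cols for _ in range(rows)]
    for r in range(rows):
        for c in range(cols):
            r0, r1 = max(0, r - 1), min(rows, r + 2)   # clipped window rows
            c0, c1 = max(0, c - 1), min(cols, c + 2)   # clipped window cols
            window = sat[r1][c1] - sat[r0][c1] - sat[r1][c0] + sat[r0][c0]
            out[r][c] = window // ((r1 - r0) * (c1 - c0))
    return out
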
011b97d60c6e3464a668ca525efda1f5414c0516
|
tests/test_recursion.py
|
tests/test_recursion.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import properties
class TestRecursion(unittest.TestCase):
def test_basic_recursion(self):
class HasHasProps(properties.HasProperties):
my_hp = properties.Instance('dangerous', properties.HasProperties)
my_int = properties.Integer('an int')
hhp = HasHasProps(my_int=5)
with self.assertRaises(ValueError):
hhp.validate()
hhp.my_hp = hhp
hhp.validate()
hhp.my_int = properties.undefined
with self.assertRaises(ValueError):
hhp.validate()
def test_list_recursion(self):
class HasInteger(properties.HasProperties):
my_int = properties.Integer('an int')
class HasHasProps(properties.HasProperties):
my_hp = properties.Instance('dangerous', properties.HasProperties)
class HasHasPropsList(properties.HasProperties):
my_list = properties.List('dangerous', properties.HasProperties)
hi_valid = HasInteger(my_int=5)
hi_invalid = HasInteger()
hhp = HasHasProps()
hhpl = HasHasPropsList()
hhp.my_hp = hi_valid
hhp.validate()
hhpl.my_list = [hhp]
hhpl.validate()
hhpl.my_list += [hi_invalid]
with self.assertRaises(ValueError):
hhpl.validate()
hhpl.my_list[1] = hi_valid
hhpl.validate()
hhp.my_hp = hhpl
hhpl.validate()
hhpl.my_list += [hi_invalid]
with self.assertRaises(ValueError):
hhpl.validate()
hhpl.serialize()
if __name__ == '__main__':
unittest.main()
|
Add test for infinite recursion
|
Add test for infinite recursion
|
Python
|
mit
|
aranzgeo/properties,3ptscience/properties
|
Add test for infinite recursion
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import properties
class TestRecursion(unittest.TestCase):
def test_basic_recursion(self):
class HasHasProps(properties.HasProperties):
my_hp = properties.Instance('dangerous', properties.HasProperties)
my_int = properties.Integer('an int')
hhp = HasHasProps(my_int=5)
with self.assertRaises(ValueError):
hhp.validate()
hhp.my_hp = hhp
hhp.validate()
hhp.my_int = properties.undefined
with self.assertRaises(ValueError):
hhp.validate()
def test_list_recursion(self):
class HasInteger(properties.HasProperties):
my_int = properties.Integer('an int')
class HasHasProps(properties.HasProperties):
my_hp = properties.Instance('dangerous', properties.HasProperties)
class HasHasPropsList(properties.HasProperties):
my_list = properties.List('dangerous', properties.HasProperties)
hi_valid = HasInteger(my_int=5)
hi_invalid = HasInteger()
hhp = HasHasProps()
hhpl = HasHasPropsList()
hhp.my_hp = hi_valid
hhp.validate()
hhpl.my_list = [hhp]
hhpl.validate()
hhpl.my_list += [hi_invalid]
with self.assertRaises(ValueError):
hhpl.validate()
hhpl.my_list[1] = hi_valid
hhpl.validate()
hhp.my_hp = hhpl
hhpl.validate()
hhpl.my_list += [hi_invalid]
with self.assertRaises(ValueError):
hhpl.validate()
hhpl.serialize()
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test for infinite recursion<commit_after>
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import properties
class TestRecursion(unittest.TestCase):
def test_basic_recursion(self):
class HasHasProps(properties.HasProperties):
my_hp = properties.Instance('dangerous', properties.HasProperties)
my_int = properties.Integer('an int')
hhp = HasHasProps(my_int=5)
with self.assertRaises(ValueError):
hhp.validate()
hhp.my_hp = hhp
hhp.validate()
hhp.my_int = properties.undefined
with self.assertRaises(ValueError):
hhp.validate()
def test_list_recursion(self):
class HasInteger(properties.HasProperties):
my_int = properties.Integer('an int')
class HasHasProps(properties.HasProperties):
my_hp = properties.Instance('dangerous', properties.HasProperties)
class HasHasPropsList(properties.HasProperties):
my_list = properties.List('dangerous', properties.HasProperties)
hi_valid = HasInteger(my_int=5)
hi_invalid = HasInteger()
hhp = HasHasProps()
hhpl = HasHasPropsList()
hhp.my_hp = hi_valid
hhp.validate()
hhpl.my_list = [hhp]
hhpl.validate()
hhpl.my_list += [hi_invalid]
with self.assertRaises(ValueError):
hhpl.validate()
hhpl.my_list[1] = hi_valid
hhpl.validate()
hhp.my_hp = hhpl
hhpl.validate()
hhpl.my_list += [hi_invalid]
with self.assertRaises(ValueError):
hhpl.validate()
hhpl.serialize()
if __name__ == '__main__':
unittest.main()
|
Add test for infinite recursionfrom __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import properties
class TestRecursion(unittest.TestCase):
def test_basic_recursion(self):
class HasHasProps(properties.HasProperties):
my_hp = properties.Instance('dangerous', properties.HasProperties)
my_int = properties.Integer('an int')
hhp = HasHasProps(my_int=5)
with self.assertRaises(ValueError):
hhp.validate()
hhp.my_hp = hhp
hhp.validate()
hhp.my_int = properties.undefined
with self.assertRaises(ValueError):
hhp.validate()
def test_list_recursion(self):
class HasInteger(properties.HasProperties):
my_int = properties.Integer('an int')
class HasHasProps(properties.HasProperties):
my_hp = properties.Instance('dangerous', properties.HasProperties)
class HasHasPropsList(properties.HasProperties):
my_list = properties.List('dangerous', properties.HasProperties)
hi_valid = HasInteger(my_int=5)
hi_invalid = HasInteger()
hhp = HasHasProps()
hhpl = HasHasPropsList()
hhp.my_hp = hi_valid
hhp.validate()
hhpl.my_list = [hhp]
hhpl.validate()
hhpl.my_list += [hi_invalid]
with self.assertRaises(ValueError):
hhpl.validate()
hhpl.my_list[1] = hi_valid
hhpl.validate()
hhp.my_hp = hhpl
hhpl.validate()
hhpl.my_list += [hi_invalid]
with self.assertRaises(ValueError):
hhpl.validate()
hhpl.serialize()
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test for infinite recursion<commit_after>from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import properties
class TestRecursion(unittest.TestCase):
def test_basic_recursion(self):
class HasHasProps(properties.HasProperties):
my_hp = properties.Instance('dangerous', properties.HasProperties)
my_int = properties.Integer('an int')
hhp = HasHasProps(my_int=5)
with self.assertRaises(ValueError):
hhp.validate()
hhp.my_hp = hhp
hhp.validate()
hhp.my_int = properties.undefined
with self.assertRaises(ValueError):
hhp.validate()
def test_list_recursion(self):
class HasInteger(properties.HasProperties):
my_int = properties.Integer('an int')
class HasHasProps(properties.HasProperties):
my_hp = properties.Instance('dangerous', properties.HasProperties)
class HasHasPropsList(properties.HasProperties):
my_list = properties.List('dangerous', properties.HasProperties)
hi_valid = HasInteger(my_int=5)
hi_invalid = HasInteger()
hhp = HasHasProps()
hhpl = HasHasPropsList()
hhp.my_hp = hi_valid
hhp.validate()
hhpl.my_list = [hhp]
hhpl.validate()
hhpl.my_list += [hi_invalid]
with self.assertRaises(ValueError):
hhpl.validate()
hhpl.my_list[1] = hi_valid
hhpl.validate()
hhp.my_hp = hhpl
hhpl.validate()
hhpl.my_list += [hi_invalid]
with self.assertRaises(ValueError):
hhpl.validate()
hhpl.serialize()
if __name__ == '__main__':
unittest.main()
|
|
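The test exercises validation of self-referencing object graphs; terminating on cycles typically requires a re-entrancy guard. A minimal standalone sketch of that idea (it mirrors the behaviour the test expects, not the properties library's actual internals):

class Node(object):
    def __init__(self):
        self.children = []
        self._validating = False

    def validate(self):
        # A node already on the validation stack is skipped, so
        # cyclic graphs terminate instead of recursing forever.
        if self._validating:
            return True
        self._validating = True
        try:
            for child in self.children:
                child.validate()
        finally:
            self._validating = False
        return True

node = Node()
node.children.append(node)   # self-loop
assert node.validate()       # terminates
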
4e66df468b104d8e95184322082a7775838a4f87
|
example_lxd.py
|
example_lxd.py
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libcloud.container.types import Provider
from libcloud.container.providers import get_driver
def main():
# LXD API specification can be found at:
# https://github.com/lxc/lxd/blob/master/doc/rest-api.md#10containersnamemetadata
# LXD host; change accordingly
host_lxd = 'https://192.168.2.4'
# port that LXD server is listening at
# change this according to your configuration
port_id = 8443
# get the libcloud LXD driver
lxd_driver = get_driver(Provider.LXD)
# acquire the connection.
# certificates should have been added to the LXD server
# here we assume they are in the same directory; change
# accordingly
conn = lxd_driver(key='', secret='', secure=False,
host=host_lxd, port=port_id, key_file='lxd.key', cert_file='lxd.crt')
# this API call does not require authentication
api_end_points = conn.get_api_endpoints()
print(api_end_points.parse_body())
# this API call is allowed for everyone (but result varies)
api_version = conn.get_to_version()
print(api_version.parse_body())
# get the list of the containers
containers = conn.list_containers()
print(containers)
if __name__ == '__main__':
main()
|
Add basic example for using the LXDContainerDriver
|
Add basic example for using the LXDContainerDriver
|
Python
|
apache-2.0
|
Kami/libcloud,apache/libcloud,andrewsomething/libcloud,apache/libcloud,Kami/libcloud,Kami/libcloud,andrewsomething/libcloud,mistio/libcloud,mistio/libcloud,apache/libcloud,mistio/libcloud,andrewsomething/libcloud
|
Add basic example for using the LXDContainerDriver
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libcloud.container.types import Provider
from libcloud.container.providers import get_driver
def main():
# LXD API specification can be found at:
# https://github.com/lxc/lxd/blob/master/doc/rest-api.md#10containersnamemetadata
# LXD host; change accordingly
host_lxd = 'https://192.168.2.4'
# port that LXD server is listening at
# change this according to your configuration
port_id = 8443
# get the libcloud LXD driver
lxd_driver = get_driver(Provider.LXD)
# acquire the connection.
# certificates should have been added to the LXD server
# here we assume they are in the same directory; change
# accordingly
conn = lxd_driver(key='', secret='', secure=False,
host=host_lxd, port=port_id, key_file='lxd.key', cert_file='lxd.crt')
# this API call does not require authentication
api_end_points = conn.get_api_endpoints()
print(api_end_points.parse_body())
# this API call is allowed for everyone (but result varies)
api_version = conn.get_to_version()
print(api_version.parse_body())
# get the list of the containers
containers = conn.list_containers()
print(containers)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add basic example for using the LXDContainerDriver<commit_after>
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libcloud.container.types import Provider
from libcloud.container.providers import get_driver
def main():
# LXD API specification can be found at:
# https://github.com/lxc/lxd/blob/master/doc/rest-api.md#10containersnamemetadata
# LXD host; change accordingly
host_lxd = 'https://192.168.2.4'
# port that LXD server is listening at
# change this according to your configuration
port_id = 8443
# get the libcloud LXD driver
lxd_driver = get_driver(Provider.LXD)
# acquire the connection.
# certificates should have been added to the LXD server
# here we assume they are in the same directory; change
# accordingly
conn = lxd_driver(key='', secret='', secure=False,
host=host_lxd, port=port_id, key_file='lxd.key', cert_file='lxd.crt')
# this API call does not require authentication
api_end_points = conn.get_api_endpoints()
print(api_end_points.parse_body())
# this API call is allowed for everyone (but result varies)
api_version = conn.get_to_version()
print(api_version.parse_body())
# get the list of the containers
containers = conn.list_containers()
print(containers)
if __name__ == '__main__':
main()
|
Add basic example for using the LXDContainerDriver# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libcloud.container.types import Provider
from libcloud.container.providers import get_driver
def main():
# LXD API specification can be found at:
# https://github.com/lxc/lxd/blob/master/doc/rest-api.md#10containersnamemetadata
# LXD host; change accordingly
host_lxd = 'https://192.168.2.4'
# port that LXD server is listening at
# change this according to your configuration
port_id = 8443
# get the libcloud LXD driver
lxd_driver = get_driver(Provider.LXD)
# acquire the connection.
# certificates should have been added to the LXD server
# here we assume they are in the same directory; change
# accordingly
conn = lxd_driver(key='', secret='', secure=False,
host=host_lxd, port=port_id, key_file='lxd.key', cert_file='lxd.crt')
# this API call does not require authentication
api_end_points = conn.get_api_endpoints()
print(api_end_points.parse_body())
# this API call is allowed for everyone (but result varies)
api_version = conn.get_to_version()
print(api_version.parse_body())
# get the list of the containers
containers = conn.list_containers()
print(containers)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add basic example for using the LXDContainerDriver<commit_after># Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libcloud.container.types import Provider
from libcloud.container.providers import get_driver
def main():
# LXD API specification can be found at:
# https://github.com/lxc/lxd/blob/master/doc/rest-api.md#10containersnamemetadata
# LXD host; change accordingly
host_lxd = 'https://192.168.2.4'
# port that LXD server is listening at
# change this according to your configuration
port_id = 8443
# get the libcloud LXD driver
lxd_driver = get_driver(Provider.LXD)
# acquire the connection.
# certificates should have been added to the LXD server
# here we assume they are in the same directory; change
# accordingly
conn = lxd_driver(key='', secret='', secure=False,
host=host_lxd, port=port_id, key_file='lxd.key', cert_file='lxd.crt')
# this API call does not require authentication
api_end_points = conn.get_api_endpoints()
print(api_end_points.parse_body())
# this API call is allowed for everyone (but result varies)
api_version = conn.get_to_version()
print(api_version.parse_body())
# get the list of the containers
containers = conn.list_containers()
print(containers)
if __name__ == '__main__':
main()
|
|
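The connection boilerplate above is the reusable part of the example; wrapped as a function it reads as below. This sketch uses only the calls shown in the record and assumes libcloud with the LXD driver is installed and the client certificate is already trusted by the LXD server:

from libcloud.container.types import Provider
from libcloud.container.providers import get_driver

def list_lxd_containers(host, port, key_file, cert_file):
    # key/secret are unused for certificate-based LXD authentication.
    driver_cls = get_driver(Provider.LXD)
    conn = driver_cls(key='', secret='', secure=False,
                      host=host, port=port,
                      key_file=key_file, cert_file=cert_file)
    return conn.list_containers()

containers = list_lxd_containers('https://192.168.2.4', 8443,
                                 'lxd.key', 'lxd.crt')
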
317ff51fd9edbcd16f950c1f75ba07d531bc8e4b
|
support/update-converity-branch.py
|
support/update-converity-branch.py
|
#!/usr/bin/env python
# Update the coverity branch from the master branch.
# It is not done automatically because Coverity Scan limits
# the number of submissions per day.
from __future__ import print_function
import shutil, tempfile
from subprocess import check_call
class Git:
def __init__(self, dir):
self.dir = dir
def __call__(self, *args):
check_call(['git'] + list(args), cwd=self.dir)
dir = tempfile.mkdtemp()
try:
git = Git(dir)
git('clone', '-b', 'coverity', 'git@github.com:cppformat/cppformat.git', dir)
git('merge', '-X', 'theirs', '--no-commit', 'origin/master')
git('reset', 'HEAD', '.travis.yml')
git('checkout', '--', '.travis.yml')
git('commit', '-m', 'Update coverity branch')
git('push')
finally:
shutil.rmtree(dir)
|
Add a script to update the coverity branch
|
Add a script to update the coverity branch
|
Python
|
bsd-2-clause
|
alabuzhev/fmt,dean0x7d/cppformat,lightslife/cppformat,cppformat/cppformat,Jopie64/cppformat,mojoBrendan/fmt,alabuzhev/fmt,Jopie64/cppformat,cppformat/cppformat,mojoBrendan/fmt,dean0x7d/cppformat,lightslife/cppformat,lightslife/cppformat,cppformat/cppformat,alabuzhev/fmt,mojoBrendan/fmt,Jopie64/cppformat,dean0x7d/cppformat
|
Add a script to update the coverity branch
|
#!/usr/bin/env python
# Update the coverity branch from the master branch.
# It is not done automatically because Coverity Scan limits
# the number of submissions per day.
from __future__ import print_function
import shutil, tempfile
from subprocess import check_call
class Git:
def __init__(self, dir):
self.dir = dir
def __call__(self, *args):
check_call(['git'] + list(args), cwd=self.dir)
dir = tempfile.mkdtemp()
try:
git = Git(dir)
git('clone', '-b', 'coverity', 'git@github.com:cppformat/cppformat.git', dir)
git('merge', '-X', 'theirs', '--no-commit', 'origin/master')
git('reset', 'HEAD', '.travis.yml')
git('checkout', '--', '.travis.yml')
git('commit', '-m', 'Update coverity branch')
git('push')
finally:
shutil.rmtree(dir)
|
<commit_before><commit_msg>Add a script to update the coverity branch<commit_after>
|
#!/usr/bin/env python
# Update the coverity branch from the master branch.
# It is not done automatically because Coverity Scan limits
# the number of submissions per day.
from __future__ import print_function
import shutil, tempfile
from subprocess import check_call
class Git:
def __init__(self, dir):
self.dir = dir
def __call__(self, *args):
check_call(['git'] + list(args), cwd=self.dir)
dir = tempfile.mkdtemp()
try:
git = Git(dir)
git('clone', '-b', 'coverity', 'git@github.com:cppformat/cppformat.git', dir)
git('merge', '-X', 'theirs', '--no-commit', 'origin/master')
git('reset', 'HEAD', '.travis.yml')
git('checkout', '--', '.travis.yml')
git('commit', '-m', 'Update coverity branch')
git('push')
finally:
shutil.rmtree(dir)
|
Add a script to update the coverity branch#!/usr/bin/env python
# Update the coverity branch from the master branch.
# It is not done automatically because Coverity Scan limits
# the number of submissions per day.
from __future__ import print_function
import shutil, tempfile
from subprocess import check_call
class Git:
def __init__(self, dir):
self.dir = dir
def __call__(self, *args):
check_call(['git'] + list(args), cwd=self.dir)
dir = tempfile.mkdtemp()
try:
git = Git(dir)
git('clone', '-b', 'coverity', 'git@github.com:cppformat/cppformat.git', dir)
git('merge', '-X', 'theirs', '--no-commit', 'origin/master')
git('reset', 'HEAD', '.travis.yml')
git('checkout', '--', '.travis.yml')
git('commit', '-m', 'Update coverity branch')
git('push')
finally:
shutil.rmtree(dir)
|
<commit_before><commit_msg>Add a script to update the coverity branch<commit_after>#!/usr/bin/env python
# Update the coverity branch from the master branch.
# It is not done automatically because Coverity Scan limits
# the number of submissions per day.
from __future__ import print_function
import shutil, tempfile
from subprocess import check_call
class Git:
def __init__(self, dir):
self.dir = dir
def __call__(self, *args):
check_call(['git'] + list(args), cwd=self.dir)
dir = tempfile.mkdtemp()
try:
git = Git(dir)
git('clone', '-b', 'coverity', 'git@github.com:cppformat/cppformat.git', dir)
git('merge', '-X', 'theirs', '--no-commit', 'origin/master')
git('reset', 'HEAD', '.travis.yml')
git('checkout', '--', '.travis.yml')
git('commit', '-m', 'Update coverity branch')
git('push')
finally:
shutil.rmtree(dir)
|
|
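The Git class is a thin subprocess wrapper that pins every call to one working directory. If the script later needs to read command output rather than just run commands, the same pattern extends naturally (the output method below is an illustrative addition, not part of the script above):

import subprocess

class Git:
    def __init__(self, dir):
        self.dir = dir

    def __call__(self, *args):
        subprocess.check_call(['git'] + list(args), cwd=self.dir)

    def output(self, *args):
        # check_output returns bytes; decode for string handling.
        raw = subprocess.check_output(['git'] + list(args), cwd=self.dir)
        return raw.decode('utf-8', 'replace')

# e.g.: Git('/tmp/repo').output('rev-parse', '--abbrev-ref', 'HEAD')
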
7589819fd2e2ef0c1e2ca193fbb43c858017210f
|
test/src/unittests/stats/test_entropy.py
|
test/src/unittests/stats/test_entropy.py
|
#!/usr/bin/env python
# Copyright (C) 2006-2022 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
import os.path
import random
from essentia_test import *
import numpy as np
import math
class TestEntropy(TestCase):
def testEmpty(self):
with self.assertRaises(EssentiaException):
Entropy()(np.empty(0, dtype=np.single))
def testNegative(self):
with self.assertRaises(EssentiaException):
Entropy()(np.array([-1], dtype=np.single))
with self.assertRaises(EssentiaException):
Entropy()(np.sin(np.linspace(0, np.pi * 1000, num=44100, dtype=np.single)))
def testConstructedData(self):
# Array with identical values
self.assertAlmostEqual(0.0, Entropy()(np.zeros(100, dtype=np.single)))
self.assertAlmostEqual(-math.log2(0.01), Entropy()(np.ones(100, dtype=np.single)), precision=1e-5)
self.assertAlmostEqual(-math.log2(0.01), Entropy()(np.full(100, 5, dtype=np.single)), precision=1e-5)
# Trivial distribution
arr = np.zeros(100, dtype=np.single)
arr[0] = 100
self.assertAlmostEqual(0.0, Entropy()(arr))
del arr
def testRandomData(self):
def calc_entropy(arr):
arr /= np.sum(arr)
return -np.sum(np.nan_to_num(np.log2(arr)) * arr)
for _ in range(10):
arr = np.array([random.random() for i in range(100)], dtype=np.single)
self.assertAlmostEqual(calc_entropy(arr), Entropy()(arr), precision=1e-5)
suite = allTests(TestEntropy)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
|
Add test of Entropy algorithm
|
Add test of Entropy algorithm
|
Python
|
agpl-3.0
|
MTG/essentia,MTG/essentia,MTG/essentia,MTG/essentia,MTG/essentia
|
Add test of Entropy algorithm
|
#!/usr/bin/env python
# Copyright (C) 2006-2022 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
import os.path
import random
from essentia_test import *
import numpy as np
import math
class TestEntropy(TestCase):
def testEmpty(self):
with self.assertRaises(EssentiaException):
Entropy()(np.empty(0, dtype=np.single))
def testNegative(self):
with self.assertRaises(EssentiaException):
Entropy()(np.array([-1], dtype=np.single))
with self.assertRaises(EssentiaException):
Entropy()(np.sin(np.linspace(0, np.pi * 1000, num=44100, dtype=np.single)))
def testConstructedData(self):
# Array with identical values
self.assertAlmostEqual(0.0, Entropy()(np.zeros(100, dtype=np.single)))
self.assertAlmostEqual(-math.log2(0.01), Entropy()(np.ones(100, dtype=np.single)), precision=1e-5)
self.assertAlmostEqual(-math.log2(0.01), Entropy()(np.full(100, 5, dtype=np.single)), precision=1e-5)
# Trivial distribution
arr = np.zeros(100, dtype=np.single)
arr[0] = 100
self.assertAlmostEqual(0.0, Entropy()(arr))
del arr
def testRandomData(self):
def calc_entropy(arr):
arr /= np.sum(arr)
return -np.sum(np.nan_to_num(np.log2(arr)) * arr)
for _ in range(10):
arr = np.array([random.random() for i in range(100)], dtype=np.single)
self.assertAlmostEqual(calc_entropy(arr), Entropy()(arr), precision=1e-5)
suite = allTests(TestEntropy)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
|
<commit_before><commit_msg>Add test of Entropy algorithm<commit_after>
|
#!/usr/bin/env python
# Copyright (C) 2006-2022 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
import os.path
import random
from essentia_test import *
import numpy as np
import math
class TestEntropy(TestCase):
def testEmpty(self):
with self.assertRaises(EssentiaException):
Entropy()(np.empty(0, dtype=np.single))
def testNegative(self):
with self.assertRaises(EssentiaException):
Entropy()(np.array([-1], dtype=np.single))
with self.assertRaises(EssentiaException):
Entropy()(np.sin(np.linspace(0, np.pi * 1000, num=44100, dtype=np.single)))
def testConstructedData(self):
# Array with identical values
self.assertAlmostEqual(0.0, Entropy()(np.zeros(100, dtype=np.single)))
self.assertAlmostEqual(-math.log2(0.01), Entropy()(np.ones(100, dtype=np.single)), precision=1e-5)
self.assertAlmostEqual(-math.log2(0.01), Entropy()(np.full(100, 5, dtype=np.single)), precision=1e-5)
# Trivial distribution
arr = np.zeros(100, dtype=np.single)
arr[0] = 100
self.assertAlmostEqual(0.0, Entropy()(arr))
del arr
def testRandomData(self):
def calc_entropy(arr):
arr /= np.sum(arr)
return -np.sum(np.nan_to_num(np.log2(arr)) * arr)
for _ in range(10):
arr = np.array([random.random() for i in range(100)], dtype=np.single)
self.assertAlmostEqual(calc_entropy(arr), Entropy()(arr), precision=1e-5)
suite = allTests(TestEntropy)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
|
Add test of Entropy algorithm#!/usr/bin/env python
# Copyright (C) 2006-2022 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
import os.path
import random
from essentia_test import *
import numpy as np
import math
class TestEntropy(TestCase):
def testEmpty(self):
with self.assertRaises(EssentiaException):
Entropy()(np.empty(0, dtype=np.single))
def testNegative(self):
with self.assertRaises(EssentiaException):
Entropy()(np.array([-1], dtype=np.single))
with self.assertRaises(EssentiaException):
Entropy()(np.sin(np.linspace(0, np.pi * 1000, num=44100, dtype=np.single)))
def testConstructedData(self):
# Array with identical values
self.assertAlmostEqual(0.0, Entropy()(np.zeros(100, dtype=np.single)))
self.assertAlmostEqual(-math.log2(0.01), Entropy()(np.ones(100, dtype=np.single)), precision=1e-5)
self.assertAlmostEqual(-math.log2(0.01), Entropy()(np.full(100, 5, dtype=np.single)), precision=1e-5)
# Trivial distribution
arr = np.zeros(100, dtype=np.single)
arr[0] = 100
self.assertAlmostEqual(0.0, Entropy()(arr))
del arr
def testRandomData(self):
def calc_entropy(arr):
arr /= np.sum(arr)
return -np.sum(np.nan_to_num(np.log2(arr)) * arr)
for _ in range(10):
arr = np.array([random.random() for i in range(100)], dtype=np.single)
self.assertAlmostEqual(calc_entropy(arr), Entropy()(arr), precision=1e-5)
suite = allTests(TestEntropy)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
|
<commit_before><commit_msg>Add test of Entropy algorithm<commit_after>#!/usr/bin/env python
# Copyright (C) 2006-2022 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
import os.path
import random
from essentia_test import *
import numpy as np
import math
class TestEntropy(TestCase):
def testEmpty(self):
with self.assertRaises(EssentiaException):
Entropy()(np.empty(0, dtype=np.single))
def testNegative(self):
with self.assertRaises(EssentiaException):
Entropy()(np.array([-1], dtype=np.single))
with self.assertRaises(EssentiaException):
Entropy()(np.sin(np.linspace(0, np.pi * 1000, num=44100, dtype=np.single)))
def testConstructedData(self):
# Array with identical values
self.assertAlmostEqual(0.0, Entropy()(np.zeros(100, dtype=np.single)))
self.assertAlmostEqual(-math.log2(0.01), Entropy()(np.ones(100, dtype=np.single)), precision=1e-5)
self.assertAlmostEqual(-math.log2(0.01), Entropy()(np.full(100, 5, dtype=np.single)), precision=1e-5)
# Trivial distribution
arr = np.zeros(100, dtype=np.single)
arr[0] = 100
self.assertAlmostEqual(0.0, Entropy()(arr))
del arr
def testRandomData(self):
def calc_entropy(arr):
arr /= np.sum(arr)
return -np.sum(np.nan_to_num(np.log2(arr)) * arr)
for _ in range(10):
arr = np.array([random.random() for i in range(100)], dtype=np.single)
self.assertAlmostEqual(calc_entropy(arr), Entropy()(arr), precision=1e-5)
suite = allTests(TestEntropy)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
|
|
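testRandomData re-derives Shannon entropy, H = -sum(p_i * log2 p_i) over the normalised array, and compares it with the algorithm's output. The reference computation in isolation (a sketch of the same maths; np.nan_to_num in the test plays the role of the p > 0 mask here):

import numpy as np

def shannon_entropy(values):
    p = np.asarray(values, dtype=np.float64)
    p = p / p.sum()              # normalise to a probability distribution
    p = p[p > 0]                 # 0 * log2(0) is taken as 0
    return float(-(p * np.log2(p)).sum())

# 100 equal bins -> log2(100), matching the -math.log2(0.01) assertions:
assert abs(shannon_entropy(np.ones(100)) - np.log2(100)) < 1e-9
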
bb527e99074db4085abda9bcf31e8eb11bcf52d9
|
notification/tests/test_notification_urls.py
|
notification/tests/test_notification_urls.py
|
from django.core.urlresolvers import resolve
from unittest import TestCase
from ..views import device_token_receive
class NotificationUrlsTestCase(TestCase):
def test_url_resolves_to_device_token_receive(self):
found = resolve('/receive')
self.assertEqual(found.func, device_token_receive)
|
Add notification's url test case
|
Add notification's url test case
|
Python
|
mit
|
nnsnodnb/django-ios-notifications,nnsnodnb/django-ios-notifications
|
Add notification's url test case
|
from django.core.urlresolvers import resolve
from unittest import TestCase
from ..views import device_token_receive
class NotificationUrlsTestCase(TestCase):
def test_url_resolves_to_device_token_receive(self):
found = resolve('/receive')
self.assertEqual(found.func, device_token_receive)
|
<commit_before><commit_msg>Add notification's url test case<commit_after>
|
from django.core.urlresolvers import resolve
from unittest import TestCase
from ..views import device_token_receive
class NotificationUrlsTestCase(TestCase):
def test_url_resolves_to_device_token_receive(self):
found = resolve('/receive')
self.assertEqual(found.func, device_token_receive)
|
Add notification's url test casefrom django.core.urlresolvers import resolve
from unittest import TestCase
from ..views import device_token_receive
class NotificationUrlsTestCase(TestCase):
def test_url_resolves_to_device_token_receive(self):
found = resolve('/receive')
self.assertEqual(found.func, device_token_receive)
|
<commit_before><commit_msg>Add notification's url test case<commit_after>from django.core.urlresolvers import resolve
from unittest import TestCase
from ..views import device_token_receive
class NotificationUrlsTestCase(TestCase):
def test_url_resolves_to_device_token_receive(self):
found = resolve('/receive')
self.assertEqual(found.func, device_token_receive)
|
|
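django.core.urlresolvers is the pre-2.0 import path; it was removed in Django 2.0. On current Django the same test would import from django.urls (sketch; it still needs a configured ROOT_URLCONF to run):

# Django >= 2.0 equivalent import
from django.urls import resolve

found = resolve('/receive')   # same ResolverMatch API as before
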
7c7da87ab1a703227aa0365708e6506cbd064004
|
py/employee-importance.py
|
py/employee-importance.py
|
"""
# Employee info
class Employee(object):
def __init__(self, id, importance, subordinates):
# It's the unique id of each node.
# unique id of this employee
self.id = id
# the importance value of this employee
self.importance = importance
# the id of direct subordinates
self.subordinates = subordinates
"""
class Solution(object):
def getImportance(self, employees, id):
"""
:type employees: Employee
:type id: int
:rtype: int
"""
self.employees = {e.id: e for e in employees}
return self.do_getImportance(id)
def do_getImportance(self, id):
return self.employees[id].importance + sum(map(self.do_getImportance, self.employees[id].subordinates))
|
Add py solution for 690. Employee Importance
|
Add py solution for 690. Employee Importance
690. Employee Importance: https://leetcode.com/problems/employee-importance/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 690. Employee Importance
690. Employee Importance: https://leetcode.com/problems/employee-importance/
|
"""
# Employee info
class Employee(object):
def __init__(self, id, importance, subordinates):
# It's the unique id of each node.
# unique id of this employee
self.id = id
# the importance value of this employee
self.importance = importance
# the id of direct subordinates
self.subordinates = subordinates
"""
class Solution(object):
def getImportance(self, employees, id):
"""
:type employees: Employee
:type id: int
:rtype: int
"""
self.employees = {e.id: e for e in employees}
return self.do_getImportance(id)
def do_getImportance(self, id):
return self.employees[id].importance + sum(map(self.do_getImportance, self.employees[id].subordinates))
|
<commit_before><commit_msg>Add py solution for 690. Employee Importance
690. Employee Importance: https://leetcode.com/problems/employee-importance/<commit_after>
|
"""
# Employee info
class Employee(object):
def __init__(self, id, importance, subordinates):
# It's the unique id of each node.
# unique id of this employee
self.id = id
# the importance value of this employee
self.importance = importance
# the id of direct subordinates
self.subordinates = subordinates
"""
class Solution(object):
def getImportance(self, employees, id):
"""
:type employees: Employee
:type id: int
:rtype: int
"""
self.employees = {e.id: e for e in employees}
return self.do_getImportance(id)
def do_getImportance(self, id):
return self.employees[id].importance + sum(map(self.do_getImportance, self.employees[id].subordinates))
|
Add py solution for 690. Employee Importance
690. Employee Importance: https://leetcode.com/problems/employee-importance/"""
# Employee info
class Employee(object):
def __init__(self, id, importance, subordinates):
# It's the unique id of each node.
# unique id of this employee
self.id = id
# the importance value of this employee
self.importance = importance
# the id of direct subordinates
self.subordinates = subordinates
"""
class Solution(object):
def getImportance(self, employees, id):
"""
:type employees: Employee
:type id: int
:rtype: int
"""
self.employees = {e.id: e for e in employees}
return self.do_getImportance(id)
def do_getImportance(self, id):
return self.employees[id].importance + sum(map(self.do_getImportance, self.employees[id].subordinates))
|
<commit_before><commit_msg>Add py solution for 690. Employee Importance
690. Employee Importance: https://leetcode.com/problems/employee-importance/<commit_after>"""
# Employee info
class Employee(object):
def __init__(self, id, importance, subordinates):
# It's the unique id of each node.
# unique id of this employee
self.id = id
# the importance value of this employee
self.importance = importance
# the id of direct subordinates
self.subordinates = subordinates
"""
class Solution(object):
def getImportance(self, employees, id):
"""
:type employees: Employee
:type id: int
:rtype: int
"""
self.employees = {e.id: e for e in employees}
return self.do_getImportance(id)
def do_getImportance(self, id):
return self.employees[id].importance + sum(map(self.do_getImportance, self.employees[id].subordinates))
|
|
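do_getImportance is a direct recursive DFS over the subordinate tree; on very deep hierarchies it would hit Python's recursion limit. An iterative breadth-first sketch of the same computation (assuming the same Employee objects as above):

from collections import deque

def get_importance(employees, root_id):
    by_id = {e.id: e for e in employees}
    total, queue = 0, deque([root_id])
    while queue:
        emp = by_id[queue.popleft()]
        total += emp.importance
        queue.extend(emp.subordinates)   # enqueue direct reports
    return total
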
c504069e34eb6c932567e023d23bdd980be9daf5
|
tests/test_cli_parse.py
|
tests/test_cli_parse.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Dominik Gresch <greschd@gmx.ch>
import os
import pytest
import tempfile
from click.testing import CliRunner
import tbmodels
from tbmodels._cli import cli
from parameters import SAMPLES_DIR
@pytest.mark.parametrize('prefix', ['silicon', 'bi'])
def test_cli_parse(models_equal, prefix):
runner = CliRunner()
with tempfile.TemporaryDirectory() as d:
out_file = os.path.join(d, 'model_out.hdf5')
runner.invoke(cli, ['parse', '-o', out_file, '-f', SAMPLES_DIR, '-p', prefix])
model_res = tbmodels.Model.from_hdf5_file(out_file)
model_reference = tbmodels.Model.from_wannier_folder(folder=SAMPLES_DIR, prefix=prefix)
models_equal(model_res, model_reference)
|
Add test for CLI parse
|
Add test for CLI parse
|
Python
|
apache-2.0
|
Z2PackDev/TBmodels,Z2PackDev/TBmodels
|
Add test for CLI parse
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Dominik Gresch <greschd@gmx.ch>
import os
import pytest
import tempfile
from click.testing import CliRunner
import tbmodels
from tbmodels._cli import cli
from parameters import SAMPLES_DIR
@pytest.mark.parametrize('prefix', ['silicon', 'bi'])
def test_cli_parse(models_equal, prefix):
runner = CliRunner()
with tempfile.TemporaryDirectory() as d:
out_file = os.path.join(d, 'model_out.hdf5')
runner.invoke(cli, ['parse', '-o', out_file, '-f', SAMPLES_DIR, '-p', prefix])
model_res = tbmodels.Model.from_hdf5_file(out_file)
model_reference = tbmodels.Model.from_wannier_folder(folder=SAMPLES_DIR, prefix=prefix)
models_equal(model_res, model_reference)
|
<commit_before><commit_msg>Add test for CLI parse<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Dominik Gresch <greschd@gmx.ch>
import os
import pytest
import tempfile
from click.testing import CliRunner
import tbmodels
from tbmodels._cli import cli
from parameters import SAMPLES_DIR
@pytest.mark.parametrize('prefix', ['silicon', 'bi'])
def test_cli_parse(models_equal, prefix):
runner = CliRunner()
with tempfile.TemporaryDirectory() as d:
out_file = os.path.join(d, 'model_out.hdf5')
runner.invoke(cli, ['parse', '-o', out_file, '-f', SAMPLES_DIR, '-p', prefix])
model_res = tbmodels.Model.from_hdf5_file(out_file)
model_reference = tbmodels.Model.from_wannier_folder(folder=SAMPLES_DIR, prefix=prefix)
models_equal(model_res, model_reference)
|
Add test for CLI parse#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Dominik Gresch <greschd@gmx.ch>
import os
import pytest
import tempfile
from click.testing import CliRunner
import tbmodels
from tbmodels._cli import cli
from parameters import SAMPLES_DIR
@pytest.mark.parametrize('prefix', ['silicon', 'bi'])
def test_cli_parse(models_equal, prefix):
runner = CliRunner()
with tempfile.TemporaryDirectory() as d:
out_file = os.path.join(d, 'model_out.hdf5')
runner.invoke(cli, ['parse', '-o', out_file, '-f', SAMPLES_DIR, '-p', prefix])
model_res = tbmodels.Model.from_hdf5_file(out_file)
model_reference = tbmodels.Model.from_wannier_folder(folder=SAMPLES_DIR, prefix=prefix)
models_equal(model_res, model_reference)
|
<commit_before><commit_msg>Add test for CLI parse<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Dominik Gresch <greschd@gmx.ch>
import os
import pytest
import tempfile
from click.testing import CliRunner
import tbmodels
from tbmodels._cli import cli
from parameters import SAMPLES_DIR
@pytest.mark.parametrize('prefix', ['silicon', 'bi'])
def test_cli_parse(models_equal, prefix):
runner = CliRunner()
with tempfile.TemporaryDirectory() as d:
out_file = os.path.join(d, 'model_out.hdf5')
runner.invoke(cli, ['parse', '-o', out_file, '-f', SAMPLES_DIR, '-p', prefix])
model_res = tbmodels.Model.from_hdf5_file(out_file)
model_reference = tbmodels.Model.from_wannier_folder(folder=SAMPLES_DIR, prefix=prefix)
models_equal(model_res, model_reference)
|
|
73b1eb250cbb263e7b7efc523fb94443fae8b7ff
|
pombola/nigeria/management/commands/nigeria_update_party_memberships.py
|
pombola/nigeria/management/commands/nigeria_update_party_memberships.py
|
"""Change party of everyone currently in ACN, ANPP and CPP to APC.
Update Positions of every current member of ACN, ANPP, and CPP to
have an end_date of 2013-07-31, and for each of them, make a new
Position with that as the start date, no end date, and organisation
APC.
This is all very hard-coded, but might be useful as the bones of
a change party command later on.
"""
from django.core.management.base import NoArgsCommand
from django_date_extensions.fields import ApproximateDate
from pombola.core.models import Position, Organisation
class Command(NoArgsCommand):
def handle_noargs(self, **options):
# acn, anpp, cpc
old_party_slugs = (
'action-congress-of-nigeria',
'all-nigeria-peoples-party',
'congress-for-progressive-change',
)
change_date = ApproximateDate(2013, 7, 31)
new_party_slug = 'all-progressives-congress-apc'
new_organisation = Organisation.objects.get(slug=new_party_slug)
positions = (Position.objects.filter(
title__slug='member',
organisation__slug__in=old_party_slugs,
)
.currently_active()
)
# Not using a bulk update because we want the save
# method of the Positions to be called
for position in positions:
position.end_date = change_date
position.save()
# Blank the primary key of the Position so that it will
# get a new one when saved.
position.pk = None
position.start_date = change_date
position.end_date = None
position.organisation = new_organisation
position.save()
|
Add script to change party memberships from ACN, ANPP and CPC to APC.
|
Add script to change party memberships from ACN, ANPP and CPC to APC.
This is currently a very hard-coded script for Nigeria, but could be
worked on and moved later to make something more general.
|
Python
|
agpl-3.0
|
mysociety/pombola,geoffkilpin/pombola,patricmutwiri/pombola,patricmutwiri/pombola,hzj123/56th,hzj123/56th,ken-muturi/pombola,hzj123/56th,ken-muturi/pombola,patricmutwiri/pombola,geoffkilpin/pombola,ken-muturi/pombola,patricmutwiri/pombola,geoffkilpin/pombola,hzj123/56th,mysociety/pombola,ken-muturi/pombola,hzj123/56th,ken-muturi/pombola,ken-muturi/pombola,hzj123/56th,geoffkilpin/pombola,patricmutwiri/pombola,mysociety/pombola,mysociety/pombola,mysociety/pombola,mysociety/pombola,patricmutwiri/pombola,geoffkilpin/pombola,geoffkilpin/pombola
|
Add script to change party memberships from ACN, ANPP and CPC to APC.
This is currently a very hard-coded script for Nigeria, but could be
worked on and moved later to make something more general.
|
"""Change party of everyone currently in ACN, ANPP and CPP to APC.
Update Positions of every current member of ACN, ANPP, and CPP to
have an end_date of 2013-07-31, and for each of them, make a new
Position with that as the start date, no end date, and organisation
APC.
This is all very hard-coded, but might be useful as the bones of
a change party command later on.
"""
from django.core.management.base import NoArgsCommand
from django_date_extensions.fields import ApproximateDate
from pombola.core.models import Position, Organisation
class Command(NoArgsCommand):
def handle_noargs(self, **options):
# acn, anpp, cpc
old_party_slugs = (
'action-congress-of-nigeria',
'all-nigeria-peoples-party',
'congress-for-progressive-change',
)
change_date = ApproximateDate(2013, 7, 31)
new_party_slug = 'all-progressives-congress-apc'
new_organisation = Organisation.objects.get(slug=new_party_slug)
positions = (Position.objects.filter(
title__slug='member',
organisation__slug__in=old_party_slugs,
)
.currently_active()
)
# Not using a bulk update because we want the save
# method of the Positions to be called
for position in positions:
position.end_date = change_date
position.save()
# Blank the primary key of the Position so that it will
# get a new one when saved.
position.pk = None
position.start_date = change_date
position.end_date = None
position.organisation = new_organisation
position.save()
|
<commit_before><commit_msg>Add script to change party memberships from ACN, ANPP and CPC to APC.
This is currently a very hard-coded script for Nigeria, but could be
worked on and moved later to make something more general.<commit_after>
|
"""Change party of everyone currently in ACN, ANPP and CPP to APC.
Update Positions of every current member of ACN, ANPP, and CPP to
have an end_date of 2013-07-31, and for each of them, make a new
Position with that as the start date, no end date, and organisation
APC.
This is all very hard-coded, but might be useful as the bones of
a change party command later on.
"""
from django.core.management.base import NoArgsCommand
from django_date_extensions.fields import ApproximateDate
from pombola.core.models import Position, Organisation
class Command(NoArgsCommand):
def handle_noargs(self, **options):
# acn, anpp, cpc
old_party_slugs = (
'action-congress-of-nigeria',
'all-nigeria-peoples-party',
'congress-for-progressive-change',
)
change_date = ApproximateDate(2013, 7, 31)
new_party_slug = 'all-progressives-congress-apc'
new_organisation = Organisation.objects.get(slug=new_party_slug)
positions = (Position.objects.filter(
title__slug='member',
organisation__slug__in=old_party_slugs,
)
.currently_active()
)
# Not using a bulk update because we want the save
# method of the Positions to be called
for position in positions:
position.end_date = change_date
position.save()
# Blank the primary key of the Position so that it will
# get a new one when saved.
position.pk = None
position.start_date = change_date
position.end_date = None
position.organisation = new_organisation
position.save()
|
Add script to change party memberships from ACN, ANPP and CPC to APC.
This is currently a very hard-coded script for Nigeria, but could be
worked on and moved later to make something more general."""Change party of everyone currently in ACN, ANPP and CPC to APC.
Update Positions of every current member of ACN, ANPP, and CPC to
have an end_date of 2013-07-31, and for each of them, make a new
Position with that as the start date, no end date, and organisation
APC.
This is all very hard-coded, but might be useful as the bones of
a change party command later on.
"""
from django.core.management.base import NoArgsCommand
from django_date_extensions.fields import ApproximateDate
from pombola.core.models import Position, Organisation
class Command(NoArgsCommand):
def handle_noargs(self, **options):
# acn, anpp, cpc
old_party_slugs = (
'action-congress-of-nigeria',
'all-nigeria-peoples-party',
'congress-for-progressive-change',
)
change_date = ApproximateDate(2013, 7, 31)
new_party_slug = 'all-progressives-congress-apc'
new_organisation = Organisation.objects.get(slug=new_party_slug)
positions = (Position.objects.filter(
title__slug='member',
organisation__slug__in=old_party_slugs,
)
.currently_active()
)
# Not using a bulk update because we want the save
# method of the Positions to be called
for position in positions:
position.end_date = change_date
position.save()
# Blank the primary key of the Position so that it will
# get a new one when saved.
position.pk = None
position.start_date = change_date
position.end_date = None
position.organisation = new_organisation
position.save()
|
<commit_before><commit_msg>Add script to change party memberships from ACN, ANPP and CPC to APC.
This is currently a very hard-coded script for Nigeria, but could be
worked on and moved later to make something more general.<commit_after>"""Change party of everyone currently in ACN, ANPP and CPC to APC.
Update Positions of every current member of ACN, ANPP, and CPC to
have an end_date of 2013-07-31, and for each of them, make a new
Position with that as the start date, no end date, and organisation
APC.
This is all very hard-coded, but might be useful as the bones of
a change party command later on.
"""
from django.core.management.base import NoArgsCommand
from django_date_extensions.fields import ApproximateDate
from pombola.core.models import Position, Organisation
class Command(NoArgsCommand):
def handle_noargs(self, **options):
# acn, anpp, cpc
old_party_slugs = (
'action-congress-of-nigeria',
'all-nigeria-peoples-party',
'congress-for-progressive-change',
)
change_date = ApproximateDate(2013, 7, 31)
new_party_slug = 'all-progressives-congress-apc'
new_organisation = Organisation.objects.get(slug=new_party_slug)
positions = (Position.objects.filter(
title__slug='member',
organisation__slug__in=old_party_slugs,
)
.currently_active()
)
# Not using a bulk update because we want the save
# method of the Positions to be called
for position in positions:
position.end_date = change_date
position.save()
# Blank the primary key of the Position so that it will
# get a new one when saved.
position.pk = None
position.start_date = change_date
position.end_date = None
position.organisation = new_organisation
position.save()
|
|
a7045df1b90b7b352541151e0de2462eb0301422
|
tests/test_gene_page.py
|
tests/test_gene_page.py
|
import unittest
import runserver
class GenePageTestCase(unittest.TestCase):
def setUp(self):
runserver.app.config['TESTING'] = True
self.app = runserver.app.test_client()
def tearDown(self):
pass
# import this TODO LMTW
def login(self, username, password):
return self.app.post('/login', data=dict(
username=username,
password=password
), follow_redirects=True)
def gene_page(self, geneName):
return self.app.get('/gene/'+geneName, follow_redirects=True)
def test_gene_page(self):
rv = self.login('demo', 'demo123')
rv = self.gene_page('TTLL5')
assert 'TTLL5' in rv.data
if __name__ == '__main__':
unittest.main()
|
Test of TTLL5 page passes.
|
Test of TTLL5 page passes.
|
Python
|
mit
|
pontikos/phenopolis,phenopolis/phenopolis,pontikos/phenopolis,pontikos/phenopolis,logust79/phenopolis,logust79/phenopolis,Withington/phenopolis,pontikos/phenopolis,phenopolis/phenopolis,Withington/phenopolis,phenopolis/phenopolis,logust79/phenopolis,logust79/phenopolis,phenopolis/phenopolis,Withington/phenopolis,pontikos/phenopolis,Withington/phenopolis
|
Test of TTLL5 page passes.
|
import unittest
import runserver
class GenePageTestCase(unittest.TestCase):
def setUp(self):
runserver.app.config['TESTING'] = True
self.app = runserver.app.test_client()
def tearDown(self):
pass
# import this TODO LMTW
def login(self, username, password):
return self.app.post('/login', data=dict(
username=username,
password=password
), follow_redirects=True)
def gene_page(self, geneName):
return self.app.get('/gene/'+geneName, follow_redirects=True)
def test_gene_page(self):
rv = self.login('demo', 'demo123')
rv = self.gene_page('TTLL5')
assert 'TTLL5' in rv.data
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Test of TTLL5 page passes.<commit_after>
|
import unittest
import runserver
class GenePageTestCase(unittest.TestCase):
def setUp(self):
runserver.app.config['TESTING'] = True
self.app = runserver.app.test_client()
def tearDown(self):
pass
# import this TODO LMTW
def login(self, username, password):
return self.app.post('/login', data=dict(
username=username,
password=password
), follow_redirects=True)
def gene_page(self, geneName):
return self.app.get('/gene/'+geneName, follow_redirects=True)
def test_gene_page(self):
rv = self.login('demo', 'demo123')
rv = self.gene_page('TTLL5')
assert 'TTLL5' in rv.data
if __name__ == '__main__':
unittest.main()
|
Test of TTLL5 page passes.
import unittest
import runserver
class GenePageTestCase(unittest.TestCase):
def setUp(self):
runserver.app.config['TESTING'] = True
self.app = runserver.app.test_client()
def tearDown(self):
pass
# import this TODO LMTW
def login(self, username, password):
return self.app.post('/login', data=dict(
username=username,
password=password
), follow_redirects=True)
def gene_page(self, geneName):
return self.app.get('/gene/'+geneName, follow_redirects=True)
def test_gene_page(self):
rv = self.login('demo', 'demo123')
rv = self.gene_page('TTLL5')
assert 'TTLL5' in rv.data
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Test of TTLL5 page passes.<commit_after>
import unittest
import runserver
class GenePageTestCase(unittest.TestCase):
def setUp(self):
runserver.app.config['TESTING'] = True
self.app = runserver.app.test_client()
def tearDown(self):
pass
# import this TODO LMTW
def login(self, username, password):
return self.app.post('/login', data=dict(
username=username,
password=password
), follow_redirects=True)
def gene_page(self, geneName):
return self.app.get('/gene/'+geneName, follow_redirects=True)
def test_gene_page(self):
rv = self.login('demo', 'demo123')
rv = self.gene_page('TTLL5')
assert 'TTLL5' in rv.data
if __name__ == '__main__':
unittest.main()
|
|
7db7f5bd85f5702fdd9bdeba82f1faa6ced24ba3
|
tools/change_version.py
|
tools/change_version.py
|
"""Change version."""
import fileinput
import sys
VERSION = sys.argv[1]
for line in fileinput.input('../PKGBUILD', inplace=True):
if 'pkgver=' in line:
line = 'pkgver=%s' % VERSION
print line.rstrip('\n')
for line in fileinput.input('../setup.py', inplace=True):
if 'VERSION = ' in line:
line = 'VERSION = \'%s\'' % VERSION
print line.rstrip('\n')
for file_path in ['build_arch.sh', 'vagrant_build_ubuntu.sh']:
for line in fileinput.input(file_path, inplace=True):
if 'VERSION=' in line:
line = 'VERSION=\'%s\'' % VERSION
print line.rstrip('\n')
|
Add script to change version
|
Add script to change version
|
Python
|
agpl-3.0
|
drivnal/drivnal,drivnal/drivnal,drivnal/drivnal
|
Add script to change version
|
"""Change version."""
import fileinput
import sys
VERSION = sys.argv[1]
for line in fileinput.input('../PKGBUILD', inplace=True):
if 'pkgver=' in line:
line = 'pkgver=%s' % VERSION
print line.rstrip('\n')
for line in fileinput.input('../setup.py', inplace=True):
if 'VERSION = ' in line:
line = 'VERSION = \'%s\'' % VERSION
print line.rstrip('\n')
for file_path in ['build_arch.sh', 'vagrant_build_ubuntu.sh']:
for line in fileinput.input(file_path, inplace=True):
if 'VERSION=' in line:
line = 'VERSION=\'%s\'' % VERSION
print line.rstrip('\n')
|
<commit_before><commit_msg>Add script to change version<commit_after>
|
"""Change version."""
import fileinput
import sys
VERSION = sys.argv[1]
for line in fileinput.input('../PKGBUILD', inplace=True):
if 'pkgver=' in line:
line = 'pkgver=%s' % VERSION
print line.rstrip('\n')
for line in fileinput.input('../setup.py', inplace=True):
if 'VERSION = ' in line:
line = 'VERSION = \'%s\'' % VERSION
print line.rstrip('\n')
for file_path in ['build_arch.sh', 'vagrant_build_ubuntu.sh']:
for line in fileinput.input(file_path, inplace=True):
if 'VERSION=' in line:
line = 'VERSION=\'%s\'' % VERSION
print line.rstrip('\n')
|
Add script to change version"""Change version."""
import fileinput
import sys
VERSION = sys.argv[1]
for line in fileinput.input('../PKGBUILD', inplace=True):
if 'pkgver=' in line:
line = 'pkgver=%s' % VERSION
print line.rstrip('\n')
for line in fileinput.input('../setup.py', inplace=True):
if 'VERSION = ' in line:
line = 'VERSION = \'%s\'' % VERSION
print line.rstrip('\n')
for file_path in ['build_arch.sh', 'vagrant_build_ubuntu.sh']:
for line in fileinput.input(file_path, inplace=True):
if 'VERSION=' in line:
line = 'VERSION=\'%s\'' % VERSION
print line.rstrip('\n')
|
<commit_before><commit_msg>Add script to change version<commit_after>"""Change version."""
import fileinput
import sys
VERSION = sys.argv[1]
for line in fileinput.input('../PKGBUILD', inplace=True):
if 'pkgver=' in line:
line = 'pkgver=%s' % VERSION
print line.rstrip('\n')
for line in fileinput.input('../setup.py', inplace=True):
if 'VERSION = ' in line:
line = 'VERSION = \'%s\'' % VERSION
print line.rstrip('\n')
for file_path in ['build_arch.sh', 'vagrant_build_ubuntu.sh']:
for line in fileinput.input(file_path, inplace=True):
if 'VERSION=' in line:
line = 'VERSION=\'%s\'' % VERSION
print line.rstrip('\n')
|
|
ff0215544c8ee175c76f9bcc938dd6c4e0675109
|
examples/restartable.py
|
examples/restartable.py
|
import os
import sys
import asyncio
import signal
import logbook.more
import saltyrtc
def env(name, default=None):
return os.environ.get(name, default)
def require_env(name):
value = env(name)
if value is None:
print("Missing '{}' env variable".format(name))
sys.exit(1)
return value
def main():
"""
Run the SaltyRTC server until Ctrl+C has been pressed.
The signal *HUP* will restart the server.
"""
while True:
loop = asyncio.get_event_loop()
# Create SSL context
ssl_context = saltyrtc.util.create_ssl_context(
certfile=require_env('SALTYRTC_TLS_CERT'),
keyfile=require_env('SALTYRTC_TLS_KEY'),
)
# Start server
coroutine = saltyrtc.serve(ssl_context, port=8765)
server = loop.run_until_complete(coroutine)
# Restart server on HUP signal
restart_signal = asyncio.Future(loop=loop)
def restart_signal_handler(*_):
restart_signal.set_result(True)
# Register restart server routine
signal.signal(signal.SIGHUP, restart_signal_handler)
# Wait until Ctrl+C has been pressed
try:
loop.run_until_complete(restart_signal)
except KeyboardInterrupt:
pass
# Wait until server is closed and close the event loop
server.close()
loop.run_until_complete(server.wait_closed())
# Stop?
if not restart_signal.done():
break
if __name__ == '__main__':
# Enable asyncio debug logging
os.environ['PYTHONASYNCIODEBUG'] = '1'
# Enable logging
saltyrtc.util.enable_logging(level=logbook.TRACE, redirect_loggers={
'asyncio': logbook.DEBUG,
'websockets': logbook.DEBUG,
})
# Run 'main'
logging_handler = logbook.more.ColorizedStderrHandler()
with logging_handler.applicationbound():
main()
|
Add example server that restarts on SIGHUP
|
Add example server that restarts on SIGHUP
|
Python
|
mit
|
saltyrtc/saltyrtc-server-python,saltyrtc/saltyrtc-server-python
|
Add example server that restarts on SIGHUP
|
import os
import sys
import asyncio
import signal
import logbook.more
import saltyrtc
def env(name, default=None):
return os.environ.get(name, default)
def require_env(name):
value = env(name)
if value is None:
print("Missing '{}' env variable".format(name))
sys.exit(1)
return value
def main():
"""
Run the SaltyRTC server until Ctrl+C has been pressed.
The signal *HUP* will restart the server.
"""
while True:
loop = asyncio.get_event_loop()
# Create SSL context
ssl_context = saltyrtc.util.create_ssl_context(
certfile=require_env('SALTYRTC_TLS_CERT'),
keyfile=require_env('SALTYRTC_TLS_KEY'),
)
# Start server
coroutine = saltyrtc.serve(ssl_context, port=8765)
server = loop.run_until_complete(coroutine)
# Restart server on HUP signal
restart_signal = asyncio.Future(loop=loop)
def restart_signal_handler(*_):
restart_signal.set_result(True)
# Register restart server routine
signal.signal(signal.SIGHUP, restart_signal_handler)
# Wait until Ctrl+C has been pressed
try:
loop.run_until_complete(restart_signal)
except KeyboardInterrupt:
pass
# Wait until server is closed and close the event loop
server.close()
loop.run_until_complete(server.wait_closed())
# Stop?
if not restart_signal.done():
break
if __name__ == '__main__':
# Enable asyncio debug logging
os.environ['PYTHONASYNCIODEBUG'] = '1'
# Enable logging
saltyrtc.util.enable_logging(level=logbook.TRACE, redirect_loggers={
'asyncio': logbook.DEBUG,
'websockets': logbook.DEBUG,
})
# Run 'main'
logging_handler = logbook.more.ColorizedStderrHandler()
with logging_handler.applicationbound():
main()
|
<commit_before><commit_msg>Add example server that restarts on SIGHUP<commit_after>
|
import os
import sys
import asyncio
import signal
import logbook.more
import saltyrtc
def env(name, default=None):
return os.environ.get(name, default)
def require_env(name):
value = env(name)
if value is None:
print("Missing '{}' env variable".format(name))
sys.exit(1)
return value
def main():
"""
Run the SaltyRTC server until Ctrl+C has been pressed.
The signal *HUP* will restart the server.
"""
while True:
loop = asyncio.get_event_loop()
# Create SSL context
ssl_context = saltyrtc.util.create_ssl_context(
certfile=require_env('SALTYRTC_TLS_CERT'),
keyfile=require_env('SALTYRTC_TLS_KEY'),
)
# Start server
coroutine = saltyrtc.serve(ssl_context, port=8765)
server = loop.run_until_complete(coroutine)
# Restart server on HUP signal
restart_signal = asyncio.Future(loop=loop)
def restart_signal_handler(*_):
restart_signal.set_result(True)
# Register restart server routine
signal.signal(signal.SIGHUP, restart_signal_handler)
# Wait until Ctrl+C has been pressed
try:
loop.run_until_complete(restart_signal)
except KeyboardInterrupt:
pass
# Wait until server is closed and close the event loop
server.close()
loop.run_until_complete(server.wait_closed())
# Stop?
if not restart_signal.done():
break
if __name__ == '__main__':
# Enable asyncio debug logging
os.environ['PYTHONASYNCIODEBUG'] = '1'
# Enable logging
saltyrtc.util.enable_logging(level=logbook.TRACE, redirect_loggers={
'asyncio': logbook.DEBUG,
'websockets': logbook.DEBUG,
})
# Run 'main'
logging_handler = logbook.more.ColorizedStderrHandler()
with logging_handler.applicationbound():
main()
|
Add example server that restarts on SIGHUPimport os
import sys
import asyncio
import signal
import logbook.more
import saltyrtc
def env(name, default=None):
return os.environ.get(name, default)
def require_env(name):
value = env(name)
if value is None:
print("Missing '{}' env variable".format(name))
sys.exit(1)
return value
def main():
"""
Run the SaltyRTC server until Ctrl+C has been pressed.
The signal *HUP* will restart the server.
"""
while True:
loop = asyncio.get_event_loop()
# Create SSL context
ssl_context = saltyrtc.util.create_ssl_context(
certfile=require_env('SALTYRTC_TLS_CERT'),
keyfile=require_env('SALTYRTC_TLS_KEY'),
)
# Start server
coroutine = saltyrtc.serve(ssl_context, port=8765)
server = loop.run_until_complete(coroutine)
# Restart server on HUP signal
restart_signal = asyncio.Future(loop=loop)
def restart_signal_handler(*_):
restart_signal.set_result(True)
# Register restart server routine
signal.signal(signal.SIGHUP, restart_signal_handler)
# Wait until Ctrl+C has been pressed
try:
loop.run_until_complete(restart_signal)
except KeyboardInterrupt:
pass
# Wait until server is closed and close the event loop
server.close()
loop.run_until_complete(server.wait_closed())
# Stop?
if not restart_signal.done():
break
if __name__ == '__main__':
# Enable asyncio debug logging
os.environ['PYTHONASYNCIODEBUG'] = '1'
# Enable logging
saltyrtc.util.enable_logging(level=logbook.TRACE, redirect_loggers={
'asyncio': logbook.DEBUG,
'websockets': logbook.DEBUG,
})
# Run 'main'
logging_handler = logbook.more.ColorizedStderrHandler()
with logging_handler.applicationbound():
main()
|
<commit_before><commit_msg>Add example server that restarts on SIGHUP<commit_after>import os
import sys
import asyncio
import signal
import logbook.more
import saltyrtc
def env(name, default=None):
return os.environ.get(name, default)
def require_env(name):
value = env(name)
if value is None:
print("Missing '{}' env variable".format(name))
sys.exit(1)
return value
def main():
"""
Run the SaltyRTC server until Ctrl+C has been pressed.
The signal *HUP* will restart the server.
"""
while True:
loop = asyncio.get_event_loop()
# Create SSL context
ssl_context = saltyrtc.util.create_ssl_context(
certfile=require_env('SALTYRTC_TLS_CERT'),
keyfile=require_env('SALTYRTC_TLS_KEY'),
)
# Start server
coroutine = saltyrtc.serve(ssl_context, port=8765)
server = loop.run_until_complete(coroutine)
# Restart server on HUP signal
restart_signal = asyncio.Future(loop=loop)
def restart_signal_handler(*_):
restart_signal.set_result(True)
# Register restart server routine
signal.signal(signal.SIGHUP, restart_signal_handler)
# Wait until Ctrl+C has been pressed
try:
loop.run_until_complete(restart_signal)
except KeyboardInterrupt:
pass
# Wait until server is closed and close the event loop
server.close()
loop.run_until_complete(server.wait_closed())
# Stop?
if not restart_signal.done():
break
if __name__ == '__main__':
# Enable asyncio debug logging
os.environ['PYTHONASYNCIODEBUG'] = '1'
# Enable logging
saltyrtc.util.enable_logging(level=logbook.TRACE, redirect_loggers={
'asyncio': logbook.DEBUG,
'websockets': logbook.DEBUG,
})
# Run 'main'
logging_handler = logbook.more.ColorizedStderrHandler()
with logging_handler.applicationbound():
main()
|
|
8b0b0fb1e18dae98737a7de65ee014403da71b67
|
bmi_tester/bmipytest.py
|
bmi_tester/bmipytest.py
|
#! /usr/bin/env python
import os
import sys
import textwrap
import argparse
import pkg_resources
import pytest
def test(package, input_file=None, verbosity=None, bmi_version='1.1'):
tests = [
pkg_resources.resource_filename(__name__, os.path.join('tests_pytest'))
]
os.environ['BMITEST_CLASS'] = package
os.environ['BMITEST_INPUT_FILE'] = input_file
os.environ['BMI_VERSION_STRING'] = bmi_version
if verbosity:
tests += ['-' + 'v' * verbosity]
pytest.main(tests)
def configure_parser_test(sub_parsers=None):
help = "Test a BMI class."
example = textwrap.dedent("""
Examples:
bmi test bmimodule:BmiClass
""")
if sub_parsers is None:
p = argparse.ArgumentParser(
description=help,
fromfile_prefix_chars='@',
epilog=example,
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
else:
p = sub_parsers.add_parser(
'test',
help=help,
description=help,
epilog=example,
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
p.add_argument(
'cls',
help='Full name of class to test.',
)
p.add_argument(
'--infile',
default='',
help='Name of input file for init method.',
)
p.add_argument(
'--bmi-version',
default='1.1',
help='BMI version to test against',
)
p.add_argument(
'-v', '--verbose',
action='count',
dest='verbose',
default=1,
help='increase verbosity',
)
p.add_argument(
'--no-doctests',
action='store_false',
dest='doctests',
default=True,
help='Do not run doctests in module',
)
p.set_defaults(func=execute)
return p
def execute(args):
test(args.cls, input_file=args.infile, verbosity=args.verbose,
bmi_version=args.bmi_version)
def main():
p = configure_parser_test()
args = p.parse_args()
sys.exit(args.func(args))
|
Add bmi-test command that uses pytest.
|
Add bmi-test command that uses pytest.
|
Python
|
mit
|
csdms/bmi-tester
|
Add bmi-test command that uses pytest.
|
#! /usr/bin/env python
import os
import sys
import textwrap
import argparse
import pkg_resources
import pytest
def test(package, input_file=None, verbosity=None, bmi_version='1.1'):
tests = [
pkg_resources.resource_filename(__name__, os.path.join('tests_pytest'))
]
os.environ['BMITEST_CLASS'] = package
os.environ['BMITEST_INPUT_FILE'] = input_file
os.environ['BMI_VERSION_STRING'] = bmi_version
if verbosity:
tests += ['-' + 'v' * verbosity]
pytest.main(tests)
def configure_parser_test(sub_parsers=None):
help = "Test a BMI class."
example = textwrap.dedent("""
Examples:
bmi test bmimodule:BmiClass
""")
if sub_parsers is None:
p = argparse.ArgumentParser(
description=help,
fromfile_prefix_chars='@',
epilog=example,
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
else:
p = sub_parsers.add_parser(
'test',
help=help,
description=help,
epilog=example,
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
p.add_argument(
'cls',
help='Full name of class to test.',
)
p.add_argument(
'--infile',
default='',
help='Name of input file for init method.',
)
p.add_argument(
'--bmi-version',
default='1.1',
help='BMI version to test against',
)
p.add_argument(
'-v', '--verbose',
action='count',
dest='verbose',
default=1,
help='increase verbosity',
)
p.add_argument(
'--no-doctests',
action='store_false',
dest='doctests',
default=True,
help='Do not run doctests in module',
)
p.set_defaults(func=execute)
return p
def execute(args):
test(args.cls, input_file=args.infile, verbosity=args.verbose,
bmi_version=args.bmi_version)
def main():
p = configure_parser_test()
args = p.parse_args()
sys.exit(args.func(args))
|
<commit_before><commit_msg>Add bmi-test command that uses pytest.<commit_after>
|
#! /usr/bin/env python
import os
import sys
import textwrap
import argparse
import pkg_resources
import pytest
def test(package, input_file=None, verbosity=None, bmi_version='1.1'):
tests = [
pkg_resources.resource_filename(__name__, os.path.join('tests_pytest'))
]
os.environ['BMITEST_CLASS'] = package
os.environ['BMITEST_INPUT_FILE'] = input_file
os.environ['BMI_VERSION_STRING'] = bmi_version
if verbosity:
tests += ['-' + 'v' * verbosity]
pytest.main(tests)
def configure_parser_test(sub_parsers=None):
help = "Test a BMI class."
example = textwrap.dedent("""
Examples:
bmi test bmimodule:BmiClass
""")
if sub_parsers is None:
p = argparse.ArgumentParser(
description=help,
fromfile_prefix_chars='@',
epilog=example,
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
else:
p = sub_parsers.add_parser(
'test',
help=help,
description=help,
epilog=example,
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
p.add_argument(
'cls',
help='Full name of class to test.',
)
p.add_argument(
'--infile',
default='',
help='Name of input file for init method.',
)
p.add_argument(
'--bmi-version',
default='1.1',
help='BMI version to test against',
)
p.add_argument(
'-v', '--verbose',
action='count',
dest='verbose',
default=1,
help='increase verbosity',
)
p.add_argument(
'--no-doctests',
action='store_false',
dest='doctests',
default=True,
help='Do not run doctests in module',
)
p.set_defaults(func=execute)
return p
def execute(args):
test(args.cls, input_file=args.infile, verbosity=args.verbose,
bmi_version=args.bmi_version)
def main():
p = configure_parser_test()
args = p.parse_args()
sys.exit(args.func(args))
|
Add bmi-test command that uses pytest.#! /usr/bin/env python
import os
import sys
import textwrap
import argparse
import pkg_resources
import pytest
def test(package, input_file=None, verbosity=None, bmi_version='1.1'):
tests = [
pkg_resources.resource_filename(__name__, os.path.join('tests_pytest'))
]
os.environ['BMITEST_CLASS'] = package
os.environ['BMITEST_INPUT_FILE'] = input_file
os.environ['BMI_VERSION_STRING'] = bmi_version
if verbosity:
tests += ['-' + 'v' * verbosity]
pytest.main(tests)
def configure_parser_test(sub_parsers=None):
help = "Test a BMI class."
example = textwrap.dedent("""
Examples:
bmi test bmimodule:BmiClass
""")
if sub_parsers is None:
p = argparse.ArgumentParser(
description=help,
fromfile_prefix_chars='@',
epilog=example,
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
else:
p = sub_parsers.add_parser(
'test',
help=help,
description=help,
epilog=example,
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
p.add_argument(
'cls',
help='Full name of class to test.',
)
p.add_argument(
'--infile',
default='',
help='Name of input file for init method.',
)
p.add_argument(
'--bmi-version',
default='1.1',
help='BMI version to test against',
)
p.add_argument(
'-v', '--verbose',
action='count',
dest='verbose',
default=1,
help='increase verbosity',
)
p.add_argument(
'--no-doctests',
action='store_false',
dest='doctests',
default=True,
help='Do not run doctests in module',
)
p.set_defaults(func=execute)
return p
def execute(args):
test(args.cls, input_file=args.infile, verbosity=args.verbose,
bmi_version=args.bmi_version)
def main():
p = configure_parser_test()
args = p.parse_args()
sys.exit(args.func(args))
|
<commit_before><commit_msg>Add bmi-test command that uses pytest.<commit_after>#! /usr/bin/env python
import os
import sys
import textwrap
import argparse
import pkg_resources
import pytest
def test(package, input_file=None, verbosity=None, bmi_version='1.1'):
tests = [
pkg_resources.resource_filename(__name__, os.path.join('tests_pytest'))
]
os.environ['BMITEST_CLASS'] = package
os.environ['BMITEST_INPUT_FILE'] = input_file
os.environ['BMI_VERSION_STRING'] = bmi_version
if verbosity:
tests += ['-' + 'v' * verbosity]
pytest.main(tests)
def configure_parser_test(sub_parsers=None):
help = "Test a BMI class."
example = textwrap.dedent("""
Examples:
bmi test bmimodule:BmiClass
""")
if sub_parsers is None:
p = argparse.ArgumentParser(
description=help,
fromfile_prefix_chars='@',
epilog=example,
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
else:
p = sub_parsers.add_parser(
'test',
help=help,
description=help,
epilog=example,
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
p.add_argument(
'cls',
help='Full name of class to test.',
)
p.add_argument(
'--infile',
default='',
help='Name of input file for init method.',
)
p.add_argument(
'--bmi-version',
default='1.1',
help='BMI version to test against',
)
p.add_argument(
'-v', '--verbose',
action='count',
dest='verbose',
default=1,
help='increase verbosity',
)
p.add_argument(
'--no-doctests',
action='store_false',
dest='doctests',
default=True,
help='Do not run doctests in module',
)
p.set_defaults(func=execute)
return p
def execute(args):
test(args.cls, input_file=args.infile, verbosity=args.verbose,
bmi_version=args.bmi_version)
def main():
p = configure_parser_test()
args = p.parse_args()
sys.exit(args.func(args))
|
|
25d616b6c20baa22c47becb71a6669e2982d5980
|
bin/filter_gtf_for_genes_in_genome.py
|
bin/filter_gtf_for_genes_in_genome.py
|
#!/usr/bin/env python
from __future__ import print_function
import logging
from itertools import groupby
import argparse
# Create a logger
logging.basicConfig(format='%(name)s - %(asctime)s %(levelname)s: %(message)s')
logger = logging.getLogger(__file__)
logger.setLevel(logging.INFO)
def extract_fasta_seq_names(fasta_name):
"""
modified from Brent Pedersen
Correct Way To Parse A Fasta File In Python
given a fasta file, yield the sequence header names
from https://www.biostars.org/p/710/
"""
# first open the file outside the loop
fh = open(fasta_name)
# ditch the boolean (x[0]) and just keep the header or sequence since
# we know they alternate.
faiter = (x[1] for x in groupby(fh, lambda line: line[0] == ">"))
for header in faiter:
# drop the ">"
headerStr = header.__next__()[1:].strip()
yield headerStr
def extract_genes_in_genome(fasta, gtf_in, gtf_out):
seq_names_in_genome = set(extract_fasta_seq_names(fasta))
logger.info("Extracted chromosome sequence names from : %s" % fasta)
n_total_lines = 0
n_lines_in_genome = 0
with open(gtf_out, 'w') as f:
with open(gtf_in) as g:
for line in g.readlines():
n_total_lines += 1
if line.split('\t')[0] in seq_names_in_genome:
n_lines_in_genome += 1
f.write(line)
logger.info("Extracted %d / %d lines from %s matching sequences in %s" %
(n_lines_in_genome, n_total_lines, gtf_in, fasta))
logger.info("Wrote matching lines to %s" % gtf_out)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="""Filter GTF only for features in the genome""")
parser.add_argument("--gtf", type=str, help="GTF file")
parser.add_argument("--fasta", type=str, help="Genome fasta file")
parser.add_argument("-o", "--output", dest='output',
default='genes_in_genome.gtf',
type=str, help="GTF features on fasta genome sequences")
args = parser.parse_args()
extract_genes_in_genome(args.fasta, args.gtf, args.output)
|
Add script to filter gtf on seqnames in genome fasta
|
Add script to filter gtf on seqnames in genome fasta
|
Python
|
mit
|
ewels/NGI-RNAseq,ewels/NGI-RNAseq,ewels/NGI-RNAseq,ewels/NGI-RNAseq,ewels/NGI-RNAseq
|
Add script to filter gtf on seqnames in genome fasta
|
#!/usr/bin/env python
from __future__ import print_function
import logging
from itertools import groupby
import argparse
# Create a logger
logging.basicConfig(format='%(name)s - %(asctime)s %(levelname)s: %(message)s')
logger = logging.getLogger(__file__)
logger.setLevel(logging.INFO)
def extract_fasta_seq_names(fasta_name):
"""
modified from Brent Pedersen
Correct Way To Parse A Fasta File In Python
given a fasta file, yield the sequence header names
from https://www.biostars.org/p/710/
"""
# first open the file outside the loop
fh = open(fasta_name)
# ditch the boolean (x[0]) and just keep the header or sequence since
# we know they alternate.
faiter = (x[1] for x in groupby(fh, lambda line: line[0] == ">"))
for header in faiter:
# drop the ">"
headerStr = header.__next__()[1:].strip()
yield headerStr
def extract_genes_in_genome(fasta, gtf_in, gtf_out):
seq_names_in_genome = set(extract_fasta_seq_names(fasta))
logger.info("Extracted chromosome sequence names from : %s" % fasta)
n_total_lines = 0
n_lines_in_genome = 0
with open(gtf_out, 'w') as f:
with open(gtf_in) as g:
for line in g.readlines():
n_total_lines += 1
if line.split('\t')[0] in seq_names_in_genome:
n_lines_in_genome += 1
f.write(line)
logger.info("Extracted %d / %d lines from %s matching sequences in %s" %
(n_lines_in_genome, n_total_lines, gtf_in, fasta))
logger.info("Wrote matching lines to %s" % gtf_out)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="""Filter GTF only for features in the genome""")
parser.add_argument("--gtf", type=str, help="GTF file")
parser.add_argument("--fasta", type=str, help="Genome fasta file")
parser.add_argument("-o", "--output", dest='output',
default='genes_in_genome.gtf',
type=str, help="GTF features on fasta genome sequences")
args = parser.parse_args()
extract_genes_in_genome(args.fasta, args.gtf, args.output)
|
<commit_before><commit_msg>Add script to filter gtf on seqnames in genome fasta<commit_after>
|
#!/usr/bin/env python
from __future__ import print_function
import logging
from itertools import groupby
import argparse
# Create a logger
logging.basicConfig(format='%(name)s - %(asctime)s %(levelname)s: %(message)s')
logger = logging.getLogger(__file__)
logger.setLevel(logging.INFO)
def extract_fasta_seq_names(fasta_name):
"""
modified from Brent Pedersen
Correct Way To Parse A Fasta File In Python
given a fasta file, yield the sequence header names
from https://www.biostars.org/p/710/
"""
# first open the file outside the loop
fh = open(fasta_name)
# ditch the boolean (x[0]) and just keep the header or sequence since
# we know they alternate.
faiter = (x[1] for x in groupby(fh, lambda line: line[0] == ">"))
for header in faiter:
# drop the ">"
headerStr = header.__next__()[1:].strip()
yield headerStr
def extract_genes_in_genome(fasta, gtf_in, gtf_out):
seq_names_in_genome = set(extract_fasta_seq_names(fasta))
logger.info("Extracted chromosome sequence names from : %s" % fasta)
n_total_lines = 0
n_lines_in_genome = 0
with open(gtf_out, 'w') as f:
with open(gtf_in) as g:
for line in g.readlines():
n_total_lines += 1
if line.split('\t')[0] in seq_names_in_genome:
n_lines_in_genome += 1
f.write(line)
logger.info("Extracted %d / %d lines from %s matching sequences in %s" %
(n_lines_in_genome, n_total_lines, gtf_in, fasta))
logger.info("Wrote matching lines to %s" % gtf_out)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="""Filter GTF only for features in the genome""")
parser.add_argument("--gtf", type=str, help="GTF file")
parser.add_argument("--fasta", type=str, help="Genome fasta file")
parser.add_argument("-o", "--output", dest='output',
default='genes_in_genome.gtf',
type=str, help="GTF features on fasta genome sequences")
args = parser.parse_args()
extract_genes_in_genome(args.fasta, args.gtf, args.output)
|
Add script to filter gtf on seqnames in genome fasta#!/usr/bin/env python
from __future__ import print_function
import logging
from itertools import groupby
import argparse
# Create a logger
logging.basicConfig(format='%(name)s - %(asctime)s %(levelname)s: %(message)s')
logger = logging.getLogger(__file__)
logger.setLevel(logging.INFO)
def extract_fasta_seq_names(fasta_name):
"""
modified from Brent Pedersen
Correct Way To Parse A Fasta File In Python
given a fasta file, yield the sequence header names
from https://www.biostars.org/p/710/
"""
# first open the file outside the loop
fh = open(fasta_name)
# ditch the boolean (x[0]) and just keep the header or sequence since
# we know they alternate.
faiter = (x[1] for x in groupby(fh, lambda line: line[0] == ">"))
for header in faiter:
# drop the ">"
headerStr = header.__next__()[1:].strip()
yield headerStr
def extract_genes_in_genome(fasta, gtf_in, gtf_out):
seq_names_in_genome = set(extract_fasta_seq_names(fasta))
logger.info("Extracted chromosome sequence names from : %s" % fasta)
n_total_lines = 0
n_lines_in_genome = 0
with open(gtf_out, 'w') as f:
with open(gtf_in) as g:
for line in g.readlines():
n_total_lines += 1
if line.split('\t')[0] in seq_names_in_genome:
n_lines_in_genome += 1
f.write(line)
logger.info("Extracted %d / %d lines from %s matching sequences in %s" %
(n_lines_in_genome, n_total_lines, gtf_in, fasta))
logger.info("Wrote matching lines to %s" % gtf_out)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="""Filter GTF only for features in the genome""")
parser.add_argument("--gtf", type=str, help="GTF file")
parser.add_argument("--fasta", type=str, help="Genome fasta file")
parser.add_argument("-o", "--output", dest='output',
default='genes_in_genome.gtf',
type=str, help="GTF features on fasta genome sequences")
args = parser.parse_args()
extract_genes_in_genome(args.fasta, args.gtf, args.output)
|
<commit_before><commit_msg>Add script to filter gtf on seqnames in genome fasta<commit_after>#!/usr/bin/env python
from __future__ import print_function
import logging
from itertools import groupby
import argparse
# Create a logger
logging.basicConfig(format='%(name)s - %(asctime)s %(levelname)s: %(message)s')
logger = logging.getLogger(__file__)
logger.setLevel(logging.INFO)
def extract_fasta_seq_names(fasta_name):
"""
modified from Brent Pedersen
Correct Way To Parse A Fasta File In Python
given a fasta file, yield the sequence header names
from https://www.biostars.org/p/710/
"""
# first open the file outside the loop
fh = open(fasta_name)
# ditch the boolean (x[0]) and just keep the header or sequence since
# we know they alternate.
faiter = (x[1] for x in groupby(fh, lambda line: line[0] == ">"))
for header in faiter:
# drop the ">"
headerStr = header.__next__()[1:].strip()
yield headerStr
def extract_genes_in_genome(fasta, gtf_in, gtf_out):
seq_names_in_genome = set(extract_fasta_seq_names(fasta))
logger.info("Extracted chromosome sequence names from : %s" % fasta)
n_total_lines = 0
n_lines_in_genome = 0
with open(gtf_out, 'w') as f:
with open(gtf_in) as g:
for line in g.readlines():
n_total_lines += 1
if line.split('\t')[0] in seq_names_in_genome:
n_lines_in_genome += 1
f.write(line)
logger.info("Extracted %d / %d lines from %s matching sequences in %s" %
(n_lines_in_genome, n_total_lines, gtf_in, fasta))
logger.info("Wrote matching lines to %s" % gtf_out)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="""Filter GTF only for features in the genome""")
parser.add_argument("--gtf", type=str, help="GTF file")
parser.add_argument("--fasta", type=str, help="Genome fasta file")
parser.add_argument("-o", "--output", dest='output',
default='genes_in_genome.gtf',
type=str, help="GTF features on fasta genome sequences")
args = parser.parse_args()
extract_genes_in_genome(args.fasta, args.gtf, args.output)
|
|
f0ef5834e8cf877a333453152b0ed567b41ef2c3
|
scripts/motion_sensor_log.py
|
scripts/motion_sensor_log.py
|
import os
import time
from datetime import datetime
import RPi.GPIO as GPIO
from blink2 import Blink
pir_pin = 23
log = True
log_file = 'motion_log'
speed = 2
with open(log_file, 'w') as log:
time_now = datetime.now().strftime('%d-%m-%Y | %H:%M:%S')
log.write('Starting log... %s\n' % time_now)
GPIO.setmode(GPIO.BCM)
GPIO.setup(pir_pin, GPIO.IN) # activate input
i = 0
while True:
if GPIO.input(pir_pin):
i += 1
print("%i PIR ALARM!" % i)
Blink(27, 3, 0.3)
GPIO.setmode(GPIO.BCM)
GPIO.setup(pir_pin, GPIO.IN)
with open(log_file, 'a') as log:
time_now = datetime.now().strftime('%d-%m-%Y | %H:%M:%S')
log.write('%3i | %s\n' % (i, time_now))
time.sleep(speed)
|
Add motion sensor log script.
|
Add motion sensor log script.
|
Python
|
bsd-3-clause
|
kbsezginel/raspberry-pi,kbsezginel/raspberry-pi,kbsezginel/raspberry-pi,kbsezginel/raspberry-pi
|
Add motion sensor log script.
|
import os
import time
from datetime import datetime
import RPi.GPIO as GPIO
from blink2 import Blink
pir_pin = 23
log = True
log_file = 'motion_log'
speed = 2
with open(log_file, 'w') as log:
time_now = datetime.now().strftime('%d-%m-%Y | %H:%M:%S')
log.write('Starting log... %s\n' % time_now)
GPIO.setmode(GPIO.BCM)
GPIO.setup(pir_pin, GPIO.IN) # activate input
i = 0
while True:
if GPIO.input(pir_pin):
i += 1
print("%i PIR ALARM!" % i)
Blink(27, 3, 0.3)
GPIO.setmode(GPIO.BCM)
GPIO.setup(pir_pin, GPIO.IN)
with open(log_file, 'a') as log:
time_now = datetime.now().strftime('%d-%m-%Y | %H:%M:%S')
log.write('%3i | %s\n' % (i, time_now))
time.sleep(speed)
|
<commit_before><commit_msg>Add motion sensor log script.<commit_after>
|
import os
import time
from datetime import datetime
import RPi.GPIO as GPIO
from blink2 import Blink
pir_pin = 23
log = True
log_file = 'motion_log'
speed = 2
with open(log_file, 'w') as log:
time_now = datetime.now().strftime('%d-%m-%Y | %H:%M:%S')
log.write('Starting log... %s\n' % time_now)
GPIO.setmode(GPIO.BCM)
GPIO.setup(pir_pin, GPIO.IN) # activate input
i = 0
while True:
if GPIO.input(pir_pin):
i += 1
print("%i PIR ALARM!" % i)
Blink(27, 3, 0.3)
GPIO.setmode(GPIO.BCM)
GPIO.setup(pir_pin, GPIO.IN)
with open(log_file, 'a') as log:
time_now = datetime.now().strftime('%d-%m-%Y | %H:%M:%S')
log.write('%3i | %s\n' % (i, time_now))
time.sleep(speed)
|
Add motion sensor log script.import os
import time
from datetime import datetime
import RPi.GPIO as GPIO
from blink2 import Blink
pir_pin = 23
log = True
log_file = 'motion_log'
speed = 2
with open(log_file, 'w') as log:
time_now = datetime.now().strftime('%d-%m-%Y | %H:%M:%S')
log.write('Starting log... %s\n' % time_now)
GPIO.setmode(GPIO.BCM)
GPIO.setup(pir_pin, GPIO.IN) # activate input
i = 0
while True:
if GPIO.input(pir_pin):
i += 1
print("%i PIR ALARM!" % i)
Blink(27, 3, 0.3)
GPIO.setmode(GPIO.BCM)
GPIO.setup(pir_pin, GPIO.IN)
with open(log_file, 'a') as log:
time_now = datetime.now().strftime('%d-%m-%Y | %H:%M:%S')
log.write('%3i | %s\n' % (i, time_now))
time.sleep(speed)
|
<commit_before><commit_msg>Add motion sensor log script.<commit_after>import os
import time
from datetime import datetime
import RPi.GPIO as GPIO
from blink2 import Blink
pir_pin = 23
log = True
log_file = 'motion_log'
speed = 2
with open(log_file, 'w') as log:
time_now = datetime.now().strftime('%d-%m-%Y | %H:%M:%S')
log.write('Starting log... %s\n' % time_now)
GPIO.setmode(GPIO.BCM)
GPIO.setup(pir_pin, GPIO.IN) # activate input
i = 0
while True:
if GPIO.input(pir_pin):
i += 1
print("%i PIR ALARM!" % i)
Blink(27, 3, 0.3)
GPIO.setmode(GPIO.BCM)
GPIO.setup(pir_pin, GPIO.IN)
with open(log_file, 'a') as log:
time_now = datetime.now().strftime('%d-%m-%Y | %H:%M:%S')
log.write('%3i | %s\n' % (i, time_now))
time.sleep(speed)
|