| Column | Type | Range |
|---|---|---|
| commit | stringlengths | 40–40 |
| old_file | stringlengths | 4–118 |
| new_file | stringlengths | 4–118 |
| old_contents | stringlengths | 0–2.94k |
| new_contents | stringlengths | 1–4.43k |
| subject | stringlengths | 15–444 |
| message | stringlengths | 16–3.45k |
| lang | stringclasses | 1 value |
| license | stringclasses | 13 values |
| repos | stringlengths | 5–43.2k |
| prompt | stringlengths | 17–4.58k |
| response | stringlengths | 1–4.43k |
| prompt_tagged | stringlengths | 58–4.62k |
| response_tagged | stringlengths | 1–4.43k |
| text | stringlengths | 132–7.29k |
| text_tagged | stringlengths | 173–7.33k |
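The schema above follows the Hugging Face dataset-viewer convention (per-column string-length and string-class statistics). As a minimal sketch of how a dataset with this schema could be loaded and inspected; the repository id "user/commit-dataset" below is a placeholder, not the dataset's actual id:

```python
# Minimal sketch, assuming the dataset is published on the Hugging Face Hub.
# "user/commit-dataset" is a placeholder id; substitute the real repository.
from datasets import load_dataset

ds = load_dataset("user/commit-dataset", split="train")

row = ds[0]
print(row["commit"])          # 40-character commit hash
print(row["subject"])         # one-line commit subject
print(row["prompt"][:200])    # old file contents plus commit message
print(row["response"][:200])  # new file contents after the commit
```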
ff1ed506bba57de35424362a1661cac5236b2f4b
|
tools/add_model.py
|
tools/add_model.py
|
#! /usr/bin/env python
import os
import sys
import json
import clstm
import shutil
from kraken.lib.models import load_clstm
rnn = load_clstm(sys.argv[1])
alphabet = []
cls = clstm.Classes()
cls.resize(1)
for i in range(1, rnn.rnn.noutput()):
    cls[0] = i
    alphabet.append(rnn.rnn.decode(cls))
s = {}
s['summary'] = raw_input('summary: ')
s['description'] = raw_input('description: ')
s['author'] = raw_input('author: ')
s['author-email'] = raw_input('author-email: ')
s['license'] = raw_input('license: ')
s['url'] = raw_input('url: ')
s['script'] = raw_input('scripts (split by whitespace): ').split(' ')
s['graphemes'] = alphabet
s['name'] = os.path.basename(sys.argv[1])
odir = os.path.splitext(os.path.basename(sys.argv[1]))[0]
try:
    os.mkdir(odir)
except:
    pass
with open(os.path.join(odir, 'DESCRIPTION'), 'wb') as fp:
    json.dump(s, fp, sort_keys=True, indent=4, separators=(',', ': '))
shutil.copyfile(sys.argv[1], os.path.join(odir, os.path.basename(sys.argv[1])))
|
Add simple tools for creating metadata
|
Add simple tools for creating metadata
|
Python
|
apache-2.0
|
mittagessen/kraken-models
|
Add simple tools for creating metadata
|
#! /usr/bin/env python
import os
import sys
import json
import clstm
import shutil
from kraken.lib.models import load_clstm
rnn = load_clstm(sys.argv[1])
alphabet = []
cls = clstm.Classes()
cls.resize(1)
for i in range(1, rnn.rnn.noutput()):
    cls[0] = i
    alphabet.append(rnn.rnn.decode(cls))
s = {}
s['summary'] = raw_input('summary: ')
s['description'] = raw_input('description: ')
s['author'] = raw_input('author: ')
s['author-email'] = raw_input('author-email: ')
s['license'] = raw_input('license: ')
s['url'] = raw_input('url: ')
s['script'] = raw_input('scripts (split by whitespace): ').split(' ')
s['graphemes'] = alphabet
s['name'] = os.path.basename(sys.argv[1])
odir = os.path.splitext(os.path.basename(sys.argv[1]))[0]
try:
    os.mkdir(odir)
except:
    pass
with open(os.path.join(odir, 'DESCRIPTION'), 'wb') as fp:
    json.dump(s, fp, sort_keys=True, indent=4, separators=(',', ': '))
shutil.copyfile(sys.argv[1], os.path.join(odir, os.path.basename(sys.argv[1])))
|
<commit_before><commit_msg>Add simple tools for creating metadata<commit_after>
|
#! /usr/bin/env python
import os
import sys
import json
import clstm
import shutil
from kraken.lib.models import load_clstm
rnn = load_clstm(sys.argv[1])
alphabet = []
cls = clstm.Classes()
cls.resize(1)
for i in range(1, rnn.rnn.noutput()):
    cls[0] = i
    alphabet.append(rnn.rnn.decode(cls))
s = {}
s['summary'] = raw_input('summary: ')
s['description'] = raw_input('description: ')
s['author'] = raw_input('author: ')
s['author-email'] = raw_input('author-email: ')
s['license'] = raw_input('license: ')
s['url'] = raw_input('url: ')
s['script'] = raw_input('scripts (split by whitespace): ').split(' ')
s['graphemes'] = alphabet
s['name'] = os.path.basename(sys.argv[1])
odir = os.path.splitext(os.path.basename(sys.argv[1]))[0]
try:
    os.mkdir(odir)
except:
    pass
with open(os.path.join(odir, 'DESCRIPTION'), 'wb') as fp:
    json.dump(s, fp, sort_keys=True, indent=4, separators=(',', ': '))
shutil.copyfile(sys.argv[1], os.path.join(odir, os.path.basename(sys.argv[1])))
|
Add simple tools for creating metadata#! /usr/bin/env python
import os
import sys
import json
import clstm
import shutil
from kraken.lib.models import load_clstm
rnn = load_clstm(sys.argv[1])
alphabet = []
cls = clstm.Classes()
cls.resize(1)
for i in range(1, rnn.rnn.noutput()):
    cls[0] = i
    alphabet.append(rnn.rnn.decode(cls))
s = {}
s['summary'] = raw_input('summary: ')
s['description'] = raw_input('description: ')
s['author'] = raw_input('author: ')
s['author-email'] = raw_input('author-email: ')
s['license'] = raw_input('license: ')
s['url'] = raw_input('url: ')
s['script'] = raw_input('scripts (split by whitespace): ').split(' ')
s['graphemes'] = alphabet
s['name'] = os.path.basename(sys.argv[1])
odir = os.path.splitext(os.path.basename(sys.argv[1]))[0]
try:
    os.mkdir(odir)
except:
    pass
with open(os.path.join(odir, 'DESCRIPTION'), 'wb') as fp:
    json.dump(s, fp, sort_keys=True, indent=4, separators=(',', ': '))
shutil.copyfile(sys.argv[1], os.path.join(odir, os.path.basename(sys.argv[1])))
|
<commit_before><commit_msg>Add simple tools for creating metadata<commit_after>#! /usr/bin/env python
import os
import sys
import json
import clstm
import shutil
from kraken.lib.models import load_clstm
rnn = load_clstm(sys.argv[1])
alphabet = []
cls = clstm.Classes()
cls.resize(1)
for i in range(1, rnn.rnn.noutput()):
    cls[0] = i
    alphabet.append(rnn.rnn.decode(cls))
s = {}
s['summary'] = raw_input('summary: ')
s['description'] = raw_input('description: ')
s['author'] = raw_input('author: ')
s['author-email'] = raw_input('author-email: ')
s['license'] = raw_input('license: ')
s['url'] = raw_input('url: ')
s['script'] = raw_input('scripts (split by whitespace): ').split(' ')
s['graphemes'] = alphabet
s['name'] = os.path.basename(sys.argv[1])
odir = os.path.splitext(os.path.basename(sys.argv[1]))[0]
try:
    os.mkdir(odir)
except:
    pass
with open(os.path.join(odir, 'DESCRIPTION'), 'wb') as fp:
    json.dump(s, fp, sort_keys=True, indent=4, separators=(',', ': '))
shutil.copyfile(sys.argv[1], os.path.join(odir, os.path.basename(sys.argv[1])))
|
|
0eb5f0f5e971a9e3bb74c774f8582c0fb8b82378
|
kubernetes/test/test_api_client.py
|
kubernetes/test/test_api_client.py
|
# coding: utf-8
import atexit
import weakref
import unittest
import kubernetes
class TestApiClient(unittest.TestCase):
    def test_context_manager_closes_threadpool(self):
        with kubernetes.client.ApiClient() as client:
            self.assertIsNotNone(client.pool)
            pool_ref = weakref.ref(client._pool)
            self.assertIsNotNone(pool_ref())
        self.assertIsNone(pool_ref())

    def test_atexit_closes_threadpool(self):
        client = kubernetes.client.ApiClient()
        self.assertIsNotNone(client.pool)
        self.assertIsNotNone(client._pool)
        atexit._run_exitfuncs()
        self.assertIsNone(client._pool)
|
Add test to ensure kubernetes client threadpool is cleaned up
|
Add test to ensure kubernetes client threadpool is cleaned up
|
Python
|
apache-2.0
|
kubernetes-client/python,kubernetes-client/python
|
Add test to ensure kubernetes client threadpool is cleaned up
|
# coding: utf-8
import atexit
import weakref
import unittest
import kubernetes
class TestApiClient(unittest.TestCase):
    def test_context_manager_closes_threadpool(self):
        with kubernetes.client.ApiClient() as client:
            self.assertIsNotNone(client.pool)
            pool_ref = weakref.ref(client._pool)
            self.assertIsNotNone(pool_ref())
        self.assertIsNone(pool_ref())

    def test_atexit_closes_threadpool(self):
        client = kubernetes.client.ApiClient()
        self.assertIsNotNone(client.pool)
        self.assertIsNotNone(client._pool)
        atexit._run_exitfuncs()
        self.assertIsNone(client._pool)
|
<commit_before><commit_msg>Add test to ensure kubernetes client threadpool is cleaned up<commit_after>
|
# coding: utf-8
import atexit
import weakref
import unittest
import kubernetes
class TestApiClient(unittest.TestCase):
    def test_context_manager_closes_threadpool(self):
        with kubernetes.client.ApiClient() as client:
            self.assertIsNotNone(client.pool)
            pool_ref = weakref.ref(client._pool)
            self.assertIsNotNone(pool_ref())
        self.assertIsNone(pool_ref())

    def test_atexit_closes_threadpool(self):
        client = kubernetes.client.ApiClient()
        self.assertIsNotNone(client.pool)
        self.assertIsNotNone(client._pool)
        atexit._run_exitfuncs()
        self.assertIsNone(client._pool)
|
Add test to ensure kubernetes client threadpool is cleaned up# coding: utf-8
import atexit
import weakref
import unittest
import kubernetes
class TestApiClient(unittest.TestCase):
    def test_context_manager_closes_threadpool(self):
        with kubernetes.client.ApiClient() as client:
            self.assertIsNotNone(client.pool)
            pool_ref = weakref.ref(client._pool)
            self.assertIsNotNone(pool_ref())
        self.assertIsNone(pool_ref())

    def test_atexit_closes_threadpool(self):
        client = kubernetes.client.ApiClient()
        self.assertIsNotNone(client.pool)
        self.assertIsNotNone(client._pool)
        atexit._run_exitfuncs()
        self.assertIsNone(client._pool)
|
<commit_before><commit_msg>Add test to ensure kubernetes client threadpool is cleaned up<commit_after># coding: utf-8
import atexit
import weakref
import unittest
import kubernetes
class TestApiClient(unittest.TestCase):
    def test_context_manager_closes_threadpool(self):
        with kubernetes.client.ApiClient() as client:
            self.assertIsNotNone(client.pool)
            pool_ref = weakref.ref(client._pool)
            self.assertIsNotNone(pool_ref())
        self.assertIsNone(pool_ref())

    def test_atexit_closes_threadpool(self):
        client = kubernetes.client.ApiClient()
        self.assertIsNotNone(client.pool)
        self.assertIsNotNone(client._pool)
        atexit._run_exitfuncs()
        self.assertIsNone(client._pool)
|
|
8e4da46443d9766bc4cdc22f49d55daf4837c5e6
|
Basics/challenge_1.py
|
Basics/challenge_1.py
|
#!/usr/bin/env python
import codecs
def hex_2_b64(s):
    return codecs.encode(codecs.decode(s, 'hex'), 'base64').decode()

if __name__ == '__main__':
    print(hex_2_b64("49276d206b696c6c696e6720796f757220627261696e206c696b65206120706f69736f6e6f7573206d757368726f6f6d")),
|
Set 1 - Challenge 1
|
Set 1 - Challenge 1
|
Python
|
apache-2.0
|
Scythe14/Crypto
|
Set 1 - Challenge 1
|
#!/usr/bin/env python
import codecs
def hex_2_b64(s):
    return codecs.encode(codecs.decode(s, 'hex'), 'base64').decode()

if __name__ == '__main__':
    print(hex_2_b64("49276d206b696c6c696e6720796f757220627261696e206c696b65206120706f69736f6e6f7573206d757368726f6f6d")),
|
<commit_before><commit_msg>Set 1 - Challenge 1<commit_after>
|
#!/usr/bin/env python
import codecs
def hex_2_b64(s):
    return codecs.encode(codecs.decode(s, 'hex'), 'base64').decode()

if __name__ == '__main__':
    print(hex_2_b64("49276d206b696c6c696e6720796f757220627261696e206c696b65206120706f69736f6e6f7573206d757368726f6f6d")),
|
Set 1 - Challenge 1#!/usr/bin/env python
import codecs
def hex_2_b64(s):
    return codecs.encode(codecs.decode(s, 'hex'), 'base64').decode()

if __name__ == '__main__':
    print(hex_2_b64("49276d206b696c6c696e6720796f757220627261696e206c696b65206120706f69736f6e6f7573206d757368726f6f6d")),
|
<commit_before><commit_msg>Set 1 - Challenge 1<commit_after>#!/usr/bin/env python
import codecs
def hex_2_b64(s):
    return codecs.encode(codecs.decode(s, 'hex'), 'base64').decode()

if __name__ == '__main__':
    print(hex_2_b64("49276d206b696c6c696e6720796f757220627261696e206c696b65206120706f69736f6e6f7573206d757368726f6f6d")),
|
|
32996b01b0689cccb5ea12e4c6171b2880688684
|
watchdog_kj_kultura/organizations_requests/migrations/0004_template_description.py
|
watchdog_kj_kultura/organizations_requests/migrations/0004_template_description.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-29 20:36
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    dependencies = [
        ('organizations_requests', '0003_template_introduction'),
    ]

    operations = [
        migrations.AddField(
            model_name='template',
            name='description',
            field=models.TextField(default='', help_text='Short description of the potential use of the template.', verbose_name='Description'),
            preserve_default=False,
        ),
    ]
|
Add missing migration for Template
|
Add missing migration for Template
|
Python
|
mit
|
watchdogpolska/watchdog-kj-kultura,watchdogpolska/watchdog-kj-kultura,watchdogpolska/watchdog-kj-kultura
|
Add missing migration for Template
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-29 20:36
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    dependencies = [
        ('organizations_requests', '0003_template_introduction'),
    ]

    operations = [
        migrations.AddField(
            model_name='template',
            name='description',
            field=models.TextField(default='', help_text='Short description of the potential use of the template.', verbose_name='Description'),
            preserve_default=False,
        ),
    ]
|
<commit_before><commit_msg>Add missing migration for Template<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-29 20:36
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    dependencies = [
        ('organizations_requests', '0003_template_introduction'),
    ]

    operations = [
        migrations.AddField(
            model_name='template',
            name='description',
            field=models.TextField(default='', help_text='Short description of the potential use of the template.', verbose_name='Description'),
            preserve_default=False,
        ),
    ]
|
Add missing migration for Template# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-29 20:36
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    dependencies = [
        ('organizations_requests', '0003_template_introduction'),
    ]

    operations = [
        migrations.AddField(
            model_name='template',
            name='description',
            field=models.TextField(default='', help_text='Short description of the potential use of the template.', verbose_name='Description'),
            preserve_default=False,
        ),
    ]
|
<commit_before><commit_msg>Add missing migration for Template<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-29 20:36
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    dependencies = [
        ('organizations_requests', '0003_template_introduction'),
    ]

    operations = [
        migrations.AddField(
            model_name='template',
            name='description',
            field=models.TextField(default='', help_text='Short description of the potential use of the template.', verbose_name='Description'),
            preserve_default=False,
        ),
    ]
|
|
20924798362dee42c4de75057d3eb257fe507e72
|
CvViewer.py
|
CvViewer.py
|
#! /usr/bin/env python
# -*- coding:utf-8 -*-
#
# Test application to display live images from an Allied Vision camera using OpenCV 3
#
# External dependencies
import cv2
import Vimba
# Initialize Vimba
Vimba.VmbStartup()
# Initialize the camera
camera = Vimba.VmbCamera( '50-0503323406' )
# Open the camera
camera.Open()
# Image acquisition indicator
streaming = False
# Image callback function
def ProcessImage( frame ) :
    # Check the frame
    if not frame.is_valid :
        print( 'Invalid frame...' )
        return
    # Resize image for display
    image = cv2.resize( frame.image, None, fx=0.4, fy=0.4 )
    # Display the image
    cv2.imshow( camera.id, image )
    # Keyboard interruption
    if cv2.waitKey( 1 ) & 0xFF == 27 :
        global streaming
        streaming = False
# Start image acquisition
streaming = True
camera.StartCapture( ProcessImage )
# Streaming loop
while streaming : pass
# Stop image acquisition
camera.StopCapture()
# Close the camera
camera.Close()
# Shutdown Vimba
Vimba.VmbShutdown()
# Close OpenCV windows
cv2.destroyAllWindows()
|
Add a viewer for the Allied Vision camera using OpenCV.
|
Add a viewer for the Allied Vision camera using OpenCV.
|
Python
|
mit
|
microy/Vimba
|
Add a viewer for the Allied Vision camera using OpenCV.
|
#! /usr/bin/env python
# -*- coding:utf-8 -*-
#
# Test application to display live images from an Allied Vision camera using OpenCV 3
#
# External dependencies
import cv2
import Vimba
# Initialize Vimba
Vimba.VmbStartup()
# Initialize the camera
camera = Vimba.VmbCamera( '50-0503323406' )
# Open the camera
camera.Open()
# Image acquisition indicator
streaming = False
# Image callback function
def ProcessImage( frame ) :
    # Check the frame
    if not frame.is_valid :
        print( 'Invalid frame...' )
        return
    # Resize image for display
    image = cv2.resize( frame.image, None, fx=0.4, fy=0.4 )
    # Display the image
    cv2.imshow( camera.id, image )
    # Keyboard interruption
    if cv2.waitKey( 1 ) & 0xFF == 27 :
        global streaming
        streaming = False
# Start image acquisition
streaming = True
camera.StartCapture( ProcessImage )
# Streaming loop
while streaming : pass
# Stop image acquisition
camera.StopCapture()
# Close the camera
camera.Close()
# Shutdown Vimba
Vimba.VmbShutdown()
# Close OpenCV windows
cv2.destroyAllWindows()
|
<commit_before><commit_msg>Add a viewer for the Allied Vision camera using OpenCV.<commit_after>
|
#! /usr/bin/env python
# -*- coding:utf-8 -*-
#
# Test application to display live images from an Allied Vision camera using OpenCV 3
#
# External dependencies
import cv2
import Vimba
# Initialize Vimba
Vimba.VmbStartup()
# Initialize the camera
camera = Vimba.VmbCamera( '50-0503323406' )
# Open the camera
camera.Open()
# Image acquisition indicator
streaming = False
# Image callback function
def ProcessImage( frame ) :
    # Check the frame
    if not frame.is_valid :
        print( 'Invalid frame...' )
        return
    # Resize image for display
    image = cv2.resize( frame.image, None, fx=0.4, fy=0.4 )
    # Display the image
    cv2.imshow( camera.id, image )
    # Keyboard interruption
    if cv2.waitKey( 1 ) & 0xFF == 27 :
        global streaming
        streaming = False
# Start image acquisition
streaming = True
camera.StartCapture( ProcessImage )
# Streaming loop
while streaming : pass
# Stop image acquisition
camera.StopCapture()
# Close the camera
camera.Close()
# Shutdown Vimba
Vimba.VmbShutdown()
# Close OpenCV windows
cv2.destroyAllWindows()
|
Add a viewer for the Allied Vision camera using OpenCV.#! /usr/bin/env python
# -*- coding:utf-8 -*-
#
# Test application to display live images from an Allied Vision camera using OpenCV 3
#
# External dependencies
import cv2
import Vimba
# Initialize Vimba
Vimba.VmbStartup()
# Initialize the camera
camera = Vimba.VmbCamera( '50-0503323406' )
# Open the camera
camera.Open()
# Image acquisition indicator
streaming = False
# Image callback function
def ProcessImage( frame ) :
    # Check the frame
    if not frame.is_valid :
        print( 'Invalid frame...' )
        return
    # Resize image for display
    image = cv2.resize( frame.image, None, fx=0.4, fy=0.4 )
    # Display the image
    cv2.imshow( camera.id, image )
    # Keyboard interruption
    if cv2.waitKey( 1 ) & 0xFF == 27 :
        global streaming
        streaming = False
# Start image acquisition
streaming = True
camera.StartCapture( ProcessImage )
# Streaming loop
while streaming : pass
# Stop image acquisition
camera.StopCapture()
# Close the camera
camera.Close()
# Shutdown Vimba
Vimba.VmbShutdown()
# Close OpenCV windows
cv2.destroyAllWindows()
|
<commit_before><commit_msg>Add a viewer for the Allied Vision camera using OpenCV.<commit_after>#! /usr/bin/env python
# -*- coding:utf-8 -*-
#
# Test application to display live images from an Allied Vision camera using OpenCV 3
#
# External dependencies
import cv2
import Vimba
# Initialize Vimba
Vimba.VmbStartup()
# Initialize the camera
camera = Vimba.VmbCamera( '50-0503323406' )
# Open the camera
camera.Open()
# Image acquisition indicator
streaming = False
# Image callback function
def ProcessImage( frame ) :
    # Check the frame
    if not frame.is_valid :
        print( 'Invalid frame...' )
        return
    # Resize image for display
    image = cv2.resize( frame.image, None, fx=0.4, fy=0.4 )
    # Display the image
    cv2.imshow( camera.id, image )
    # Keyboard interruption
    if cv2.waitKey( 1 ) & 0xFF == 27 :
        global streaming
        streaming = False
# Start image acquisition
streaming = True
camera.StartCapture( ProcessImage )
# Streaming loop
while streaming : pass
# Stop image acquisition
camera.StopCapture()
# Close the camera
camera.Close()
# Shutdown Vimba
Vimba.VmbShutdown()
# Close OpenCV windows
cv2.destroyAllWindows()
|
|
e52d780b15e43ce7bade0b662284fad28f0fba83
|
setup.py
|
setup.py
|
#!/usr/bin/python
from os.path import isfile, join
import glob
import os
import re
from setuptools import setup
if isfile("MANIFEST"):
    os.unlink("MANIFEST")

VERSION = re.search('__version__ = "([^"]+)"',
                    open("dateutil/__init__.py").read()).group(1)

setup(name="python-dateutil",
      version = VERSION,
      description = "Extensions to the standard python 2.3+ datetime module",
      author = "Gustavo Niemeyer",
      author_email = "gustavo@niemeyer.net",
      url = "http://labix.org/python-dateutil",
      license = "PSF License",
      long_description =
"""\
The dateutil module provides powerful extensions to the standard
datetime module, available in Python 2.3+.
""",
      packages = ["dateutil", "dateutil.zoneinfo"],
      package_data={"": ["*.tar.gz"]},
      include_package_data=True,
      )
|
#!/usr/bin/python
from os.path import isfile, join
import glob
import os
import re
from setuptools import setup
if isfile("MANIFEST"):
    os.unlink("MANIFEST")

VERSION = re.search('__version__ = "([^"]+)"',
                    open("dateutil/__init__.py").read()).group(1)

setup(name="python-dateutil",
      version = VERSION,
      description = "Extensions to the standard python 2.3+ datetime module",
      author = "Gustavo Niemeyer",
      author_email = "gustavo@niemeyer.net",
      url = "http://labix.org/python-dateutil",
      license = "PSF License",
      long_description =
"""\
The dateutil module provides powerful extensions to the standard
datetime module, available in Python 2.3+.
""",
      packages = ["dateutil", "dateutil.zoneinfo"],
      package_data={"": ["*.tar.gz"]},
      include_package_data=True,
      zip_safe=False,
      )
|
Use zip_safe=False, as suggested by Stephan Richter.
|
Use zip_safe=False, as suggested by Stephan Richter.
|
Python
|
bsd-3-clause
|
adamgreig/python-dateutil,sprymix/dateutil,pganssle/dateutil-test-codecov,emsoftware/python-dateutil,Bachmann1234/dateutil,jenshnielsen/dateutil,pganssle/dateutil-test-codecov,sprymix/python-dateutil,abalkin/dateutil,mjschultz/dateutil,abalkin/dateutil
|
#!/usr/bin/python
from os.path import isfile, join
import glob
import os
import re
from setuptools import setup
if isfile("MANIFEST"):
    os.unlink("MANIFEST")

VERSION = re.search('__version__ = "([^"]+)"',
                    open("dateutil/__init__.py").read()).group(1)

setup(name="python-dateutil",
      version = VERSION,
      description = "Extensions to the standard python 2.3+ datetime module",
      author = "Gustavo Niemeyer",
      author_email = "gustavo@niemeyer.net",
      url = "http://labix.org/python-dateutil",
      license = "PSF License",
      long_description =
"""\
The dateutil module provides powerful extensions to the standard
datetime module, available in Python 2.3+.
""",
      packages = ["dateutil", "dateutil.zoneinfo"],
      package_data={"": ["*.tar.gz"]},
      include_package_data=True,
      )
Use zip_safe=False, as suggested by Stephan Richter.
|
#!/usr/bin/python
from os.path import isfile, join
import glob
import os
import re
from setuptools import setup
if isfile("MANIFEST"):
    os.unlink("MANIFEST")

VERSION = re.search('__version__ = "([^"]+)"',
                    open("dateutil/__init__.py").read()).group(1)

setup(name="python-dateutil",
      version = VERSION,
      description = "Extensions to the standard python 2.3+ datetime module",
      author = "Gustavo Niemeyer",
      author_email = "gustavo@niemeyer.net",
      url = "http://labix.org/python-dateutil",
      license = "PSF License",
      long_description =
"""\
The dateutil module provides powerful extensions to the standard
datetime module, available in Python 2.3+.
""",
      packages = ["dateutil", "dateutil.zoneinfo"],
      package_data={"": ["*.tar.gz"]},
      include_package_data=True,
      zip_safe=False,
      )
|
<commit_before>#!/usr/bin/python
from os.path import isfile, join
import glob
import os
import re
from setuptools import setup
if isfile("MANIFEST"):
    os.unlink("MANIFEST")

VERSION = re.search('__version__ = "([^"]+)"',
                    open("dateutil/__init__.py").read()).group(1)

setup(name="python-dateutil",
      version = VERSION,
      description = "Extensions to the standard python 2.3+ datetime module",
      author = "Gustavo Niemeyer",
      author_email = "gustavo@niemeyer.net",
      url = "http://labix.org/python-dateutil",
      license = "PSF License",
      long_description =
"""\
The dateutil module provides powerful extensions to the standard
datetime module, available in Python 2.3+.
""",
      packages = ["dateutil", "dateutil.zoneinfo"],
      package_data={"": ["*.tar.gz"]},
      include_package_data=True,
      )
<commit_msg>Use zip_safe=False, as suggested by Stephan Richter.<commit_after>
|
#!/usr/bin/python
from os.path import isfile, join
import glob
import os
import re
from setuptools import setup
if isfile("MANIFEST"):
    os.unlink("MANIFEST")

VERSION = re.search('__version__ = "([^"]+)"',
                    open("dateutil/__init__.py").read()).group(1)

setup(name="python-dateutil",
      version = VERSION,
      description = "Extensions to the standard python 2.3+ datetime module",
      author = "Gustavo Niemeyer",
      author_email = "gustavo@niemeyer.net",
      url = "http://labix.org/python-dateutil",
      license = "PSF License",
      long_description =
"""\
The dateutil module provides powerful extensions to the standard
datetime module, available in Python 2.3+.
""",
      packages = ["dateutil", "dateutil.zoneinfo"],
      package_data={"": ["*.tar.gz"]},
      include_package_data=True,
      zip_safe=False,
      )
|
#!/usr/bin/python
from os.path import isfile, join
import glob
import os
import re
from setuptools import setup
if isfile("MANIFEST"):
    os.unlink("MANIFEST")

VERSION = re.search('__version__ = "([^"]+)"',
                    open("dateutil/__init__.py").read()).group(1)

setup(name="python-dateutil",
      version = VERSION,
      description = "Extensions to the standard python 2.3+ datetime module",
      author = "Gustavo Niemeyer",
      author_email = "gustavo@niemeyer.net",
      url = "http://labix.org/python-dateutil",
      license = "PSF License",
      long_description =
"""\
The dateutil module provides powerful extensions to the standard
datetime module, available in Python 2.3+.
""",
      packages = ["dateutil", "dateutil.zoneinfo"],
      package_data={"": ["*.tar.gz"]},
      include_package_data=True,
      )
Use zip_safe=False, as suggested by Stephan Richter.#!/usr/bin/python
from os.path import isfile, join
import glob
import os
import re
from setuptools import setup
if isfile("MANIFEST"):
    os.unlink("MANIFEST")

VERSION = re.search('__version__ = "([^"]+)"',
                    open("dateutil/__init__.py").read()).group(1)

setup(name="python-dateutil",
      version = VERSION,
      description = "Extensions to the standard python 2.3+ datetime module",
      author = "Gustavo Niemeyer",
      author_email = "gustavo@niemeyer.net",
      url = "http://labix.org/python-dateutil",
      license = "PSF License",
      long_description =
"""\
The dateutil module provides powerful extensions to the standard
datetime module, available in Python 2.3+.
""",
      packages = ["dateutil", "dateutil.zoneinfo"],
      package_data={"": ["*.tar.gz"]},
      include_package_data=True,
      zip_safe=False,
      )
|
<commit_before>#!/usr/bin/python
from os.path import isfile, join
import glob
import os
import re
from setuptools import setup
if isfile("MANIFEST"):
    os.unlink("MANIFEST")

VERSION = re.search('__version__ = "([^"]+)"',
                    open("dateutil/__init__.py").read()).group(1)

setup(name="python-dateutil",
      version = VERSION,
      description = "Extensions to the standard python 2.3+ datetime module",
      author = "Gustavo Niemeyer",
      author_email = "gustavo@niemeyer.net",
      url = "http://labix.org/python-dateutil",
      license = "PSF License",
      long_description =
"""\
The dateutil module provides powerful extensions to the standard
datetime module, available in Python 2.3+.
""",
      packages = ["dateutil", "dateutil.zoneinfo"],
      package_data={"": ["*.tar.gz"]},
      include_package_data=True,
      )
<commit_msg>Use zip_safe=False, as suggested by Stephan Richter.<commit_after>#!/usr/bin/python
from os.path import isfile, join
import glob
import os
import re
from setuptools import setup
if isfile("MANIFEST"):
    os.unlink("MANIFEST")

VERSION = re.search('__version__ = "([^"]+)"',
                    open("dateutil/__init__.py").read()).group(1)

setup(name="python-dateutil",
      version = VERSION,
      description = "Extensions to the standard python 2.3+ datetime module",
      author = "Gustavo Niemeyer",
      author_email = "gustavo@niemeyer.net",
      url = "http://labix.org/python-dateutil",
      license = "PSF License",
      long_description =
"""\
The dateutil module provides powerful extensions to the standard
datetime module, available in Python 2.3+.
""",
      packages = ["dateutil", "dateutil.zoneinfo"],
      package_data={"": ["*.tar.gz"]},
      include_package_data=True,
      zip_safe=False,
      )
|
281bbe58981dea111a432bbd98ff5e32a7107b66
|
models.py
|
models.py
|
from sqlalchemy import Column, Integer, Float, String
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class NbaGame(Base):
    __tablename__ = 'nba_game'

    id = Column(Integer, primary_key=True)
    date = Column(String)
    opp = Column(String)
    score = Column(String)
    minutes = Column(String)
    fgm = Column(Integer)
    fga = Column(Integer)
    fg_pct = Column(Float)
    three_pm = Column(Integer)
    three_pa = Column(Integer)
    three_pct = Column(Float)
    ftm = Column(Integer)
    fta = Column(Integer)
    ft_pct = Column(Float)
    off_reb = Column(Integer)
    def_reb = Column(Integer)
    total_reb = Column(Integer)
    ast = Column(Integer)
    to = Column(Integer)
    stl = Column(Integer)
    blk = Column(Integer)
    pf = Column(Integer)
    pts = Column(Integer)
|
Install sqlalchemy, add nba game model
|
Install sqlalchemy, add nba game model
|
Python
|
mit
|
arosenberg01/asdata
|
Install sqlalchemy, add nba game model
|
from sqlalchemy import Column, Integer, Float, String
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class NbaGame(Base):
    __tablename__ = 'nba_game'

    id = Column(Integer, primary_key=True)
    date = Column(String)
    opp = Column(String)
    score = Column(String)
    minutes = Column(String)
    fgm = Column(Integer)
    fga = Column(Integer)
    fg_pct = Column(Float)
    three_pm = Column(Integer)
    three_pa = Column(Integer)
    three_pct = Column(Float)
    ftm = Column(Integer)
    fta = Column(Integer)
    ft_pct = Column(Float)
    off_reb = Column(Integer)
    def_reb = Column(Integer)
    total_reb = Column(Integer)
    ast = Column(Integer)
    to = Column(Integer)
    stl = Column(Integer)
    blk = Column(Integer)
    pf = Column(Integer)
    pts = Column(Integer)
|
<commit_before><commit_msg>Install sqlalchemy, add nba game model<commit_after>
|
from sqlalchemy import Column, Integer, Float, String
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class NbaGame(Base):
    __tablename__ = 'nba_game'

    id = Column(Integer, primary_key=True)
    date = Column(String)
    opp = Column(String)
    score = Column(String)
    minutes = Column(String)
    fgm = Column(Integer)
    fga = Column(Integer)
    fg_pct = Column(Float)
    three_pm = Column(Integer)
    three_pa = Column(Integer)
    three_pct = Column(Float)
    ftm = Column(Integer)
    fta = Column(Integer)
    ft_pct = Column(Float)
    off_reb = Column(Integer)
    def_reb = Column(Integer)
    total_reb = Column(Integer)
    ast = Column(Integer)
    to = Column(Integer)
    stl = Column(Integer)
    blk = Column(Integer)
    pf = Column(Integer)
    pts = Column(Integer)
|
Install sqlalchemy, add nba game modelfrom sqlalchemy import Column, Integer, Float, String
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class NbaGame(Base):
    __tablename__ = 'nba_game'

    id = Column(Integer, primary_key=True)
    date = Column(String)
    opp = Column(String)
    score = Column(String)
    minutes = Column(String)
    fgm = Column(Integer)
    fga = Column(Integer)
    fg_pct = Column(Float)
    three_pm = Column(Integer)
    three_pa = Column(Integer)
    three_pct = Column(Float)
    ftm = Column(Integer)
    fta = Column(Integer)
    ft_pct = Column(Float)
    off_reb = Column(Integer)
    def_reb = Column(Integer)
    total_reb = Column(Integer)
    ast = Column(Integer)
    to = Column(Integer)
    stl = Column(Integer)
    blk = Column(Integer)
    pf = Column(Integer)
    pts = Column(Integer)
|
<commit_before><commit_msg>Install sqlalchemy, add nba game model<commit_after>from sqlalchemy import Column, Integer, Float, String
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class NbaGame(Base):
    __tablename__ = 'nba_game'

    id = Column(Integer, primary_key=True)
    date = Column(String)
    opp = Column(String)
    score = Column(String)
    minutes = Column(String)
    fgm = Column(Integer)
    fga = Column(Integer)
    fg_pct = Column(Float)
    three_pm = Column(Integer)
    three_pa = Column(Integer)
    three_pct = Column(Float)
    ftm = Column(Integer)
    fta = Column(Integer)
    ft_pct = Column(Float)
    off_reb = Column(Integer)
    def_reb = Column(Integer)
    total_reb = Column(Integer)
    ast = Column(Integer)
    to = Column(Integer)
    stl = Column(Integer)
    blk = Column(Integer)
    pf = Column(Integer)
    pts = Column(Integer)
|
|
cb484a45562e14d2cb25f53c839973c6a99549a1
|
tests/nts/conftest.py
|
tests/nts/conftest.py
|
# -*- coding: utf-8 -*-
"""
tests.nts.conftest
~~~~~~~~~~~~~~~~~~
"""
import pathlib
import pytest
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import TestCase
NTS_TEST_SUITE_PATH = pathlib.Path(__file__).parent
@pytest.hookimpl(hookwrapper=True, trylast=True)
def pytest_collection_modifyitems(config, items):
"""
called after collection has been performed, may filter or re-order
the items in-place.
:param _pytest.main.Session session: the pytest session object
:param _pytest.config.Config config: pytest config object
:param List[_pytest.nodes.Item] items: list of item objects
"""
# Let PyTest or other plugins handle the initial collection
yield
# Check each collected item that's under this package to ensure that none is using TestCase as the base class
for item in items:
if not str(item.fspath).startswith(str(NTS_TEST_SUITE_PATH)):
continue
if not item.cls:
# The test item is not part of a class
continue
if issubclass(item.cls, TestCase):
raise RuntimeError(
"The tests under {} MUST NOT use unittest's TestCase class".format(
pathlib.Path(str(item.fspath)).relative_to(RUNTIME_VARS.CODE_DIR)
)
)
|
Enforce non TestCase usage under `tests/nts`
|
Enforce non TestCase usage under `tests/nts`
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
Enforce non TestCase usage under `tests/nts`
|
# -*- coding: utf-8 -*-
"""
tests.nts.conftest
~~~~~~~~~~~~~~~~~~
"""
import pathlib
import pytest
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import TestCase
NTS_TEST_SUITE_PATH = pathlib.Path(__file__).parent
@pytest.hookimpl(hookwrapper=True, trylast=True)
def pytest_collection_modifyitems(config, items):
"""
called after collection has been performed, may filter or re-order
the items in-place.
:param _pytest.main.Session session: the pytest session object
:param _pytest.config.Config config: pytest config object
:param List[_pytest.nodes.Item] items: list of item objects
"""
# Let PyTest or other plugins handle the initial collection
yield
# Check each collected item that's under this package to ensure that none is using TestCase as the base class
for item in items:
if not str(item.fspath).startswith(str(NTS_TEST_SUITE_PATH)):
continue
if not item.cls:
# The test item is not part of a class
continue
if issubclass(item.cls, TestCase):
raise RuntimeError(
"The tests under {} MUST NOT use unittest's TestCase class".format(
pathlib.Path(str(item.fspath)).relative_to(RUNTIME_VARS.CODE_DIR)
)
)
|
<commit_before><commit_msg>Enforce non TestCase usage under `tests/nts`<commit_after>
|
# -*- coding: utf-8 -*-
"""
tests.nts.conftest
~~~~~~~~~~~~~~~~~~
"""
import pathlib
import pytest
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import TestCase
NTS_TEST_SUITE_PATH = pathlib.Path(__file__).parent
@pytest.hookimpl(hookwrapper=True, trylast=True)
def pytest_collection_modifyitems(config, items):
"""
called after collection has been performed, may filter or re-order
the items in-place.
:param _pytest.main.Session session: the pytest session object
:param _pytest.config.Config config: pytest config object
:param List[_pytest.nodes.Item] items: list of item objects
"""
# Let PyTest or other plugins handle the initial collection
yield
# Check each collected item that's under this package to ensure that none is using TestCase as the base class
for item in items:
if not str(item.fspath).startswith(str(NTS_TEST_SUITE_PATH)):
continue
if not item.cls:
# The test item is not part of a class
continue
if issubclass(item.cls, TestCase):
raise RuntimeError(
"The tests under {} MUST NOT use unittest's TestCase class".format(
pathlib.Path(str(item.fspath)).relative_to(RUNTIME_VARS.CODE_DIR)
)
)
|
Enforce non TestCase usage under `tests/nts`# -*- coding: utf-8 -*-
"""
tests.nts.conftest
~~~~~~~~~~~~~~~~~~
"""
import pathlib
import pytest
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import TestCase
NTS_TEST_SUITE_PATH = pathlib.Path(__file__).parent
@pytest.hookimpl(hookwrapper=True, trylast=True)
def pytest_collection_modifyitems(config, items):
"""
called after collection has been performed, may filter or re-order
the items in-place.
:param _pytest.main.Session session: the pytest session object
:param _pytest.config.Config config: pytest config object
:param List[_pytest.nodes.Item] items: list of item objects
"""
# Let PyTest or other plugins handle the initial collection
yield
# Check each collected item that's under this package to ensure that none is using TestCase as the base class
for item in items:
if not str(item.fspath).startswith(str(NTS_TEST_SUITE_PATH)):
continue
if not item.cls:
# The test item is not part of a class
continue
if issubclass(item.cls, TestCase):
raise RuntimeError(
"The tests under {} MUST NOT use unittest's TestCase class".format(
pathlib.Path(str(item.fspath)).relative_to(RUNTIME_VARS.CODE_DIR)
)
)
|
<commit_before><commit_msg>Enforce non TestCase usage under `tests/nts`<commit_after># -*- coding: utf-8 -*-
"""
tests.nts.conftest
~~~~~~~~~~~~~~~~~~
"""
import pathlib
import pytest
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import TestCase
NTS_TEST_SUITE_PATH = pathlib.Path(__file__).parent
@pytest.hookimpl(hookwrapper=True, trylast=True)
def pytest_collection_modifyitems(config, items):
"""
called after collection has been performed, may filter or re-order
the items in-place.
:param _pytest.main.Session session: the pytest session object
:param _pytest.config.Config config: pytest config object
:param List[_pytest.nodes.Item] items: list of item objects
"""
# Let PyTest or other plugins handle the initial collection
yield
# Check each collected item that's under this package to ensure that none is using TestCase as the base class
for item in items:
if not str(item.fspath).startswith(str(NTS_TEST_SUITE_PATH)):
continue
if not item.cls:
# The test item is not part of a class
continue
if issubclass(item.cls, TestCase):
raise RuntimeError(
"The tests under {} MUST NOT use unittest's TestCase class".format(
pathlib.Path(str(item.fspath)).relative_to(RUNTIME_VARS.CODE_DIR)
)
)
|
|
b59e2bc59a0c8a648189e01047cf729c82aadfe0
|
tests/currencycloud/test_resource.py
|
tests/currencycloud/test_resource.py
|
import pytest
from mock import patch
from currencycloud import Client, Config
from currencycloud.http import Http
from currencycloud.resources.resource import Resource
from currencycloud.resources.actions import UpdateMixin, DeleteMixin
class TestResource:
    class PersonClient(Http):
        def delete(self, resource_id):
            pass

        def update(self, resource_id, **kwargs):
            pass

    class Person(DeleteMixin, UpdateMixin, Resource):
        pass

    def setup_method(self):
        self.config = Config(None, None, Config.ENV_DEMONSTRATION)
        self.client = TestResource.PersonClient(self.config)

    def test_resource_save_only_updates_changed_records(self):
        person = TestResource.Person(self.client, id=1, name="Some", surname="One")

        with patch.object(Http, 'post') as person_post:
            def post_check(url, **kargs):
                assert url == 1
                assert len(kargs) == 1
                assert kargs['name'] == 'Penelope'

            person_post.side_effect = post_check

            person.name = 'Penelope'
            assert person.update() == person

    def test_resource_delete_calls_delete_on_resource(self):
        person = TestResource.Person(self.client, id=1, name="Some", surname="One")

        with patch.object(Http, 'post') as person_post:
            def post_check(url, **kargs):
                assert url == '1/delete'

            person_post.side_effect = post_check

            person.delete()
|
Test update and delete actions
|
Test update and delete actions
|
Python
|
mit
|
CurrencyCloud/currencycloud-python
|
Test update and delete actions
|
import pytest
from mock import patch
from currencycloud import Client, Config
from currencycloud.http import Http
from currencycloud.resources.resource import Resource
from currencycloud.resources.actions import UpdateMixin, DeleteMixin
class TestResource:
    class PersonClient(Http):
        def delete(self, resource_id):
            pass

        def update(self, resource_id, **kwargs):
            pass

    class Person(DeleteMixin, UpdateMixin, Resource):
        pass

    def setup_method(self):
        self.config = Config(None, None, Config.ENV_DEMONSTRATION)
        self.client = TestResource.PersonClient(self.config)

    def test_resource_save_only_updates_changed_records(self):
        person = TestResource.Person(self.client, id=1, name="Some", surname="One")

        with patch.object(Http, 'post') as person_post:
            def post_check(url, **kargs):
                assert url == 1
                assert len(kargs) == 1
                assert kargs['name'] == 'Penelope'

            person_post.side_effect = post_check

            person.name = 'Penelope'
            assert person.update() == person

    def test_resource_delete_calls_delete_on_resource(self):
        person = TestResource.Person(self.client, id=1, name="Some", surname="One")

        with patch.object(Http, 'post') as person_post:
            def post_check(url, **kargs):
                assert url == '1/delete'

            person_post.side_effect = post_check

            person.delete()
|
<commit_before><commit_msg>Test update and delete actions<commit_after>
|
import pytest
from mock import patch
from currencycloud import Client, Config
from currencycloud.http import Http
from currencycloud.resources.resource import Resource
from currencycloud.resources.actions import UpdateMixin, DeleteMixin
class TestResource:
    class PersonClient(Http):
        def delete(self, resource_id):
            pass

        def update(self, resource_id, **kwargs):
            pass

    class Person(DeleteMixin, UpdateMixin, Resource):
        pass

    def setup_method(self):
        self.config = Config(None, None, Config.ENV_DEMONSTRATION)
        self.client = TestResource.PersonClient(self.config)

    def test_resource_save_only_updates_changed_records(self):
        person = TestResource.Person(self.client, id=1, name="Some", surname="One")

        with patch.object(Http, 'post') as person_post:
            def post_check(url, **kargs):
                assert url == 1
                assert len(kargs) == 1
                assert kargs['name'] == 'Penelope'

            person_post.side_effect = post_check

            person.name = 'Penelope'
            assert person.update() == person

    def test_resource_delete_calls_delete_on_resource(self):
        person = TestResource.Person(self.client, id=1, name="Some", surname="One")

        with patch.object(Http, 'post') as person_post:
            def post_check(url, **kargs):
                assert url == '1/delete'

            person_post.side_effect = post_check

            person.delete()
|
Test update and delete actionsimport pytest
from mock import patch
from currencycloud import Client, Config
from currencycloud.http import Http
from currencycloud.resources.resource import Resource
from currencycloud.resources.actions import UpdateMixin, DeleteMixin
class TestResource:
    class PersonClient(Http):
        def delete(self, resource_id):
            pass

        def update(self, resource_id, **kwargs):
            pass

    class Person(DeleteMixin, UpdateMixin, Resource):
        pass

    def setup_method(self):
        self.config = Config(None, None, Config.ENV_DEMONSTRATION)
        self.client = TestResource.PersonClient(self.config)

    def test_resource_save_only_updates_changed_records(self):
        person = TestResource.Person(self.client, id=1, name="Some", surname="One")

        with patch.object(Http, 'post') as person_post:
            def post_check(url, **kargs):
                assert url == 1
                assert len(kargs) == 1
                assert kargs['name'] == 'Penelope'

            person_post.side_effect = post_check

            person.name = 'Penelope'
            assert person.update() == person

    def test_resource_delete_calls_delete_on_resource(self):
        person = TestResource.Person(self.client, id=1, name="Some", surname="One")

        with patch.object(Http, 'post') as person_post:
            def post_check(url, **kargs):
                assert url == '1/delete'

            person_post.side_effect = post_check

            person.delete()
|
<commit_before><commit_msg>Test update and delete actions<commit_after>import pytest
from mock import patch
from currencycloud import Client, Config
from currencycloud.http import Http
from currencycloud.resources.resource import Resource
from currencycloud.resources.actions import UpdateMixin, DeleteMixin
class TestResource:
    class PersonClient(Http):
        def delete(self, resource_id):
            pass

        def update(self, resource_id, **kwargs):
            pass

    class Person(DeleteMixin, UpdateMixin, Resource):
        pass

    def setup_method(self):
        self.config = Config(None, None, Config.ENV_DEMONSTRATION)
        self.client = TestResource.PersonClient(self.config)

    def test_resource_save_only_updates_changed_records(self):
        person = TestResource.Person(self.client, id=1, name="Some", surname="One")

        with patch.object(Http, 'post') as person_post:
            def post_check(url, **kargs):
                assert url == 1
                assert len(kargs) == 1
                assert kargs['name'] == 'Penelope'

            person_post.side_effect = post_check

            person.name = 'Penelope'
            assert person.update() == person

    def test_resource_delete_calls_delete_on_resource(self):
        person = TestResource.Person(self.client, id=1, name="Some", surname="One")

        with patch.object(Http, 'post') as person_post:
            def post_check(url, **kargs):
                assert url == '1/delete'

            person_post.side_effect = post_check

            person.delete()
|
|
7a03f8012518ce56f214242c97fe3ce958f098e4
|
setup.py
|
setup.py
|
#!/usr/bin/env python
from distutils.core import setup
import os
def read(fname):
    return open(os.path.join(os.path.dirname(__file__), fname)).read()

setup(
    name='facebook-sdk',
    version='0.3.2',
    description='This client library is designed to support the Facebook '
                'Graph API and the official Facebook JavaScript SDK, which '
                'is the canonical way to implement Facebook authentication.',
    author='Facebook',
    url='http://github.com/pythonforfacebook/facebook-sdk',
    license='Apache',
    py_modules=[
        'facebook',
    ],
    long_description=read("README.rst"),
    classifiers=[
        'License :: OSI Approved :: Apache Software License',
    ],
)
|
#!/usr/bin/env python
from distutils.core import setup
import os
def read(fname):
    return open(os.path.join(os.path.dirname(__file__), fname)).read()

setup(
    name='facebook-sdk',
    version='0.3.2',
    description='This client library is designed to support the Facebook '
                'Graph API and the official Facebook JavaScript SDK, which '
                'is the canonical way to implement Facebook authentication.',
    author='Facebook',
    url='https://github.com/pythonforfacebook/facebook-sdk',
    license='Apache',
    py_modules=[
        'facebook',
    ],
    long_description=read("README.rst"),
    classifiers=[
        'License :: OSI Approved :: Apache Software License',
    ],
)
|
Use HTTPS URL for repository.
|
Use HTTPS URL for repository.
|
Python
|
apache-2.0
|
Aloomaio/facebook-sdk,mobolic/facebook-sdk
|
#!/usr/bin/env python
from distutils.core import setup
import os
def read(fname):
    return open(os.path.join(os.path.dirname(__file__), fname)).read()

setup(
    name='facebook-sdk',
    version='0.3.2',
    description='This client library is designed to support the Facebook '
                'Graph API and the official Facebook JavaScript SDK, which '
                'is the canonical way to implement Facebook authentication.',
    author='Facebook',
    url='http://github.com/pythonforfacebook/facebook-sdk',
    license='Apache',
    py_modules=[
        'facebook',
    ],
    long_description=read("README.rst"),
    classifiers=[
        'License :: OSI Approved :: Apache Software License',
    ],
)
Use HTTPS URL for repository.
|
#!/usr/bin/env python
from distutils.core import setup
import os
def read(fname):
    return open(os.path.join(os.path.dirname(__file__), fname)).read()

setup(
    name='facebook-sdk',
    version='0.3.2',
    description='This client library is designed to support the Facebook '
                'Graph API and the official Facebook JavaScript SDK, which '
                'is the canonical way to implement Facebook authentication.',
    author='Facebook',
    url='https://github.com/pythonforfacebook/facebook-sdk',
    license='Apache',
    py_modules=[
        'facebook',
    ],
    long_description=read("README.rst"),
    classifiers=[
        'License :: OSI Approved :: Apache Software License',
    ],
)
|
<commit_before>#!/usr/bin/env python
from distutils.core import setup
import os
def read(fname):
    return open(os.path.join(os.path.dirname(__file__), fname)).read()

setup(
    name='facebook-sdk',
    version='0.3.2',
    description='This client library is designed to support the Facebook '
                'Graph API and the official Facebook JavaScript SDK, which '
                'is the canonical way to implement Facebook authentication.',
    author='Facebook',
    url='http://github.com/pythonforfacebook/facebook-sdk',
    license='Apache',
    py_modules=[
        'facebook',
    ],
    long_description=read("README.rst"),
    classifiers=[
        'License :: OSI Approved :: Apache Software License',
    ],
)
<commit_msg>Use HTTPS URL for repository.<commit_after>
|
#!/usr/bin/env python
from distutils.core import setup
import os
def read(fname):
    return open(os.path.join(os.path.dirname(__file__), fname)).read()

setup(
    name='facebook-sdk',
    version='0.3.2',
    description='This client library is designed to support the Facebook '
                'Graph API and the official Facebook JavaScript SDK, which '
                'is the canonical way to implement Facebook authentication.',
    author='Facebook',
    url='https://github.com/pythonforfacebook/facebook-sdk',
    license='Apache',
    py_modules=[
        'facebook',
    ],
    long_description=read("README.rst"),
    classifiers=[
        'License :: OSI Approved :: Apache Software License',
    ],
)
|
#!/usr/bin/env python
from distutils.core import setup
import os
def read(fname):
    return open(os.path.join(os.path.dirname(__file__), fname)).read()

setup(
    name='facebook-sdk',
    version='0.3.2',
    description='This client library is designed to support the Facebook '
                'Graph API and the official Facebook JavaScript SDK, which '
                'is the canonical way to implement Facebook authentication.',
    author='Facebook',
    url='http://github.com/pythonforfacebook/facebook-sdk',
    license='Apache',
    py_modules=[
        'facebook',
    ],
    long_description=read("README.rst"),
    classifiers=[
        'License :: OSI Approved :: Apache Software License',
    ],
)
Use HTTPS URL for repository.#!/usr/bin/env python
from distutils.core import setup
import os
def read(fname):
    return open(os.path.join(os.path.dirname(__file__), fname)).read()

setup(
    name='facebook-sdk',
    version='0.3.2',
    description='This client library is designed to support the Facebook '
                'Graph API and the official Facebook JavaScript SDK, which '
                'is the canonical way to implement Facebook authentication.',
    author='Facebook',
    url='https://github.com/pythonforfacebook/facebook-sdk',
    license='Apache',
    py_modules=[
        'facebook',
    ],
    long_description=read("README.rst"),
    classifiers=[
        'License :: OSI Approved :: Apache Software License',
    ],
)
|
<commit_before>#!/usr/bin/env python
from distutils.core import setup
import os
def read(fname):
    return open(os.path.join(os.path.dirname(__file__), fname)).read()

setup(
    name='facebook-sdk',
    version='0.3.2',
    description='This client library is designed to support the Facebook '
                'Graph API and the official Facebook JavaScript SDK, which '
                'is the canonical way to implement Facebook authentication.',
    author='Facebook',
    url='http://github.com/pythonforfacebook/facebook-sdk',
    license='Apache',
    py_modules=[
        'facebook',
    ],
    long_description=read("README.rst"),
    classifiers=[
        'License :: OSI Approved :: Apache Software License',
    ],
)
<commit_msg>Use HTTPS URL for repository.<commit_after>#!/usr/bin/env python
from distutils.core import setup
import os
def read(fname):
    return open(os.path.join(os.path.dirname(__file__), fname)).read()

setup(
    name='facebook-sdk',
    version='0.3.2',
    description='This client library is designed to support the Facebook '
                'Graph API and the official Facebook JavaScript SDK, which '
                'is the canonical way to implement Facebook authentication.',
    author='Facebook',
    url='https://github.com/pythonforfacebook/facebook-sdk',
    license='Apache',
    py_modules=[
        'facebook',
    ],
    long_description=read("README.rst"),
    classifiers=[
        'License :: OSI Approved :: Apache Software License',
    ],
)
|
bb0e8faf73298d3b5ce78853b9a70cb8c34b9965
|
trace_viewer/trace_viewer_project.py
|
trace_viewer/trace_viewer_project.py
|
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import os
from tvcm import project as project_module
class TraceViewerProject(project_module.Project):
  trace_viewer_path = os.path.abspath(os.path.join(
      os.path.dirname(__file__), '..'))

  src_path = os.path.abspath(os.path.join(
      trace_viewer_path, 'trace_viewer'))

  trace_viewer_third_party_path = os.path.abspath(os.path.join(
      trace_viewer_path, 'third_party'))

  jszip_path = os.path.abspath(os.path.join(
      trace_viewer_third_party_path, 'jszip'))

  test_data_path = os.path.join(trace_viewer_path, 'test_data')
  skp_data_path = os.path.join(trace_viewer_path, 'skp_data')

  def __init__(self):
    super(TraceViewerProject, self).__init__(
        [self.src_path, self.jszip_path])
|
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import os
from tvcm import project as project_module
class TraceViewerProject(project_module.Project):
  trace_viewer_path = os.path.abspath(os.path.join(
      os.path.dirname(__file__), '..'))

  src_path = os.path.abspath(os.path.join(
      trace_viewer_path, 'trace_viewer'))

  trace_viewer_third_party_path = os.path.abspath(os.path.join(
      trace_viewer_path, 'third_party'))

  jszip_path = os.path.abspath(os.path.join(
      trace_viewer_third_party_path, 'jszip'))

  test_data_path = os.path.join(trace_viewer_path, 'test_data')
  skp_data_path = os.path.join(trace_viewer_path, 'skp_data')

  def __init__(self, other_paths=None):
    paths = [self.src_path, self.jszip_path]
    if other_paths:
      paths.extend(other_paths)

    super(TraceViewerProject, self).__init__(
        paths)
|
Allow other_paths to be passed into TraceViewerProject
|
Allow other_paths to be passed into TraceViewerProject
This allows external embedders to subclass TraceViewerProject and thus
use trace viewer.
|
Python
|
bsd-3-clause
|
catapult-project/catapult-csm,sahiljain/catapult,catapult-project/catapult-csm,sahiljain/catapult,catapult-project/catapult-csm,dstockwell/catapult,danbeam/catapult,catapult-project/catapult-csm,0x90sled/catapult,dstockwell/catapult,catapult-project/catapult,scottmcmaster/catapult,sahiljain/catapult,sahiljain/catapult,danbeam/catapult,benschmaus/catapult,zeptonaut/catapult,scottmcmaster/catapult,catapult-project/catapult,SummerLW/Perf-Insight-Report,vmpstr/trace-viewer,benschmaus/catapult,SummerLW/Perf-Insight-Report,dstockwell/catapult,modulexcite/catapult,vmpstr/trace-viewer,benschmaus/catapult,vmpstr/trace-viewer,catapult-project/catapult,sahiljain/catapult,danbeam/catapult,SummerLW/Perf-Insight-Report,catapult-project/catapult-csm,benschmaus/catapult,SummerLW/Perf-Insight-Report,catapult-project/catapult,SummerLW/Perf-Insight-Report,dstockwell/catapult,scottmcmaster/catapult,catapult-project/catapult-csm,0x90sled/catapult,catapult-project/catapult,danbeam/catapult,benschmaus/catapult,modulexcite/catapult,catapult-project/catapult,SummerLW/Perf-Insight-Report,catapult-project/catapult,benschmaus/catapult,sahiljain/catapult,zeptonaut/catapult,catapult-project/catapult-csm,0x90sled/catapult,benschmaus/catapult,zeptonaut/catapult,modulexcite/catapult
|
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import os
from tvcm import project as project_module
class TraceViewerProject(project_module.Project):
  trace_viewer_path = os.path.abspath(os.path.join(
      os.path.dirname(__file__), '..'))

  src_path = os.path.abspath(os.path.join(
      trace_viewer_path, 'trace_viewer'))

  trace_viewer_third_party_path = os.path.abspath(os.path.join(
      trace_viewer_path, 'third_party'))

  jszip_path = os.path.abspath(os.path.join(
      trace_viewer_third_party_path, 'jszip'))

  test_data_path = os.path.join(trace_viewer_path, 'test_data')
  skp_data_path = os.path.join(trace_viewer_path, 'skp_data')

  def __init__(self):
    super(TraceViewerProject, self).__init__(
        [self.src_path, self.jszip_path])
Allow other_paths to be passed into TraceViewerProject
This allows external embedders to subclass TraceViewerProject and thus
use trace viewer.
|
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import os
from tvcm import project as project_module
class TraceViewerProject(project_module.Project):
  trace_viewer_path = os.path.abspath(os.path.join(
      os.path.dirname(__file__), '..'))

  src_path = os.path.abspath(os.path.join(
      trace_viewer_path, 'trace_viewer'))

  trace_viewer_third_party_path = os.path.abspath(os.path.join(
      trace_viewer_path, 'third_party'))

  jszip_path = os.path.abspath(os.path.join(
      trace_viewer_third_party_path, 'jszip'))

  test_data_path = os.path.join(trace_viewer_path, 'test_data')
  skp_data_path = os.path.join(trace_viewer_path, 'skp_data')

  def __init__(self, other_paths=None):
    paths = [self.src_path, self.jszip_path]
    if other_paths:
      paths.extend(other_paths)

    super(TraceViewerProject, self).__init__(
        paths)
|
<commit_before># Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import os
from tvcm import project as project_module
class TraceViewerProject(project_module.Project):
trace_viewer_path = os.path.abspath(os.path.join(
os.path.dirname(__file__), '..'))
src_path = os.path.abspath(os.path.join(
trace_viewer_path, 'trace_viewer'))
trace_viewer_third_party_path = os.path.abspath(os.path.join(
trace_viewer_path, 'third_party'))
jszip_path = os.path.abspath(os.path.join(
trace_viewer_third_party_path, 'jszip'))
test_data_path = os.path.join(trace_viewer_path, 'test_data')
skp_data_path = os.path.join(trace_viewer_path, 'skp_data')
def __init__(self):
super(TraceViewerProject, self).__init__(
[self.src_path, self.jszip_path])
<commit_msg>Allow other_paths to be passed into TraceViewerProject
This allows external embedders to subclass TraceViewerProject and thus
use trace viewer.<commit_after>
|
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import os
from tvcm import project as project_module
class TraceViewerProject(project_module.Project):
trace_viewer_path = os.path.abspath(os.path.join(
os.path.dirname(__file__), '..'))
src_path = os.path.abspath(os.path.join(
trace_viewer_path, 'trace_viewer'))
trace_viewer_third_party_path = os.path.abspath(os.path.join(
trace_viewer_path, 'third_party'))
jszip_path = os.path.abspath(os.path.join(
trace_viewer_third_party_path, 'jszip'))
test_data_path = os.path.join(trace_viewer_path, 'test_data')
skp_data_path = os.path.join(trace_viewer_path, 'skp_data')
def __init__(self, other_paths=None):
paths = [self.src_path, self.jszip_path]
if other_paths:
paths.extend(other_paths)
super(TraceViewerProject, self).__init__(
paths)
|
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import os
from tvcm import project as project_module
class TraceViewerProject(project_module.Project):
trace_viewer_path = os.path.abspath(os.path.join(
os.path.dirname(__file__), '..'))
src_path = os.path.abspath(os.path.join(
trace_viewer_path, 'trace_viewer'))
trace_viewer_third_party_path = os.path.abspath(os.path.join(
trace_viewer_path, 'third_party'))
jszip_path = os.path.abspath(os.path.join(
trace_viewer_third_party_path, 'jszip'))
test_data_path = os.path.join(trace_viewer_path, 'test_data')
skp_data_path = os.path.join(trace_viewer_path, 'skp_data')
def __init__(self):
super(TraceViewerProject, self).__init__(
[self.src_path, self.jszip_path])
Allow other_paths to be passed into TraceViewerProject
This allows external embedders to subclass TraceViewerProject and thus
use trace viewer.# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import os
from tvcm import project as project_module
class TraceViewerProject(project_module.Project):
trace_viewer_path = os.path.abspath(os.path.join(
os.path.dirname(__file__), '..'))
src_path = os.path.abspath(os.path.join(
trace_viewer_path, 'trace_viewer'))
trace_viewer_third_party_path = os.path.abspath(os.path.join(
trace_viewer_path, 'third_party'))
jszip_path = os.path.abspath(os.path.join(
trace_viewer_third_party_path, 'jszip'))
test_data_path = os.path.join(trace_viewer_path, 'test_data')
skp_data_path = os.path.join(trace_viewer_path, 'skp_data')
def __init__(self, other_paths=None):
paths = [self.src_path, self.jszip_path]
if other_paths:
paths.extend(other_paths)
super(TraceViewerProject, self).__init__(
paths)
|
<commit_before># Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import os
from tvcm import project as project_module
class TraceViewerProject(project_module.Project):
trace_viewer_path = os.path.abspath(os.path.join(
os.path.dirname(__file__), '..'))
src_path = os.path.abspath(os.path.join(
trace_viewer_path, 'trace_viewer'))
trace_viewer_third_party_path = os.path.abspath(os.path.join(
trace_viewer_path, 'third_party'))
jszip_path = os.path.abspath(os.path.join(
trace_viewer_third_party_path, 'jszip'))
test_data_path = os.path.join(trace_viewer_path, 'test_data')
skp_data_path = os.path.join(trace_viewer_path, 'skp_data')
def __init__(self):
super(TraceViewerProject, self).__init__(
[self.src_path, self.jszip_path])
<commit_msg>Allow other_paths to be passed into TraceViewerProject
This allows external embedders to subclass TraceViewerProject and thus
use trace viewer.<commit_after># Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import os
from tvcm import project as project_module
class TraceViewerProject(project_module.Project):
trace_viewer_path = os.path.abspath(os.path.join(
os.path.dirname(__file__), '..'))
src_path = os.path.abspath(os.path.join(
trace_viewer_path, 'trace_viewer'))
trace_viewer_third_party_path = os.path.abspath(os.path.join(
trace_viewer_path, 'third_party'))
jszip_path = os.path.abspath(os.path.join(
trace_viewer_third_party_path, 'jszip'))
test_data_path = os.path.join(trace_viewer_path, 'test_data')
skp_data_path = os.path.join(trace_viewer_path, 'skp_data')
def __init__(self, other_paths=None):
paths = [self.src_path, self.jszip_path]
if other_paths:
paths.extend(other_paths)
super(TraceViewerProject, self).__init__(
paths)
|
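The commit above exists so external embedders can extend the module search path. A minimal sketch of what that looks like from the embedder's side; the subclass name, import path, and directory below are illustrative assumptions, not trace-viewer API:

import os
# The import path is an assumption; it depends on how the embedder
# checks out trace viewer.
from trace_viewer_project import TraceViewerProject

class EmbedderProject(TraceViewerProject):
    def __init__(self):
        # Hypothetical directory holding the embedder's own modules.
        embedder_src = os.path.abspath('embedder_src')
        super(EmbedderProject, self).__init__(other_paths=[embedder_src])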
b5dec2c004e380da6214bffdd3761b0e46dab3a0
|
tests/test_archive.py
|
tests/test_archive.py
|
from json import load
from django_archive import __version__
from .base import BaseArchiveTestCase
from .sample.models import Sample
class ArchiveTestCase(BaseArchiveTestCase):
"""
Test that the archive command includes correct data in the archive
"""
def setUp(self):
Sample().save()
super().setUp()
def test_data(self):
"""
Confirm that the model was archived
"""
with self.tarfile.extractfile('data.json') as fileobj:
data = load(fileobj)
self.assertEqual(len(data), 1)
self.assertEqual(data[0]['model'], 'sample.sample')
def test_meta(self):
"""
Confirm that meta information is present
"""
with self.tarfile.extractfile('meta.json') as fileobj:
data = load(fileobj)
self.assertEqual(data['version'], __version__)
|
Add test for verifying archive contents.
|
Add test for verifying archive contents.
|
Python
|
mit
|
nathan-osman/django-archive,nathan-osman/django-archive
|
Add test for verifying archive contents.
|
from json import load
from django_archive import __version__
from .base import BaseArchiveTestCase
from .sample.models import Sample
class ArchiveTestCase(BaseArchiveTestCase):
"""
Test that the archive command includes correct data in the archive
"""
def setUp(self):
Sample().save()
super().setUp()
def test_data(self):
"""
Confirm that the model was archived
"""
with self.tarfile.extractfile('data.json') as fileobj:
data = load(fileobj)
self.assertEqual(len(data), 1)
self.assertEqual(data[0]['model'], 'sample.sample')
def test_meta(self):
"""
Confirm that meta information is present
"""
with self.tarfile.extractfile('meta.json') as fileobj:
data = load(fileobj)
self.assertEqual(data['version'], __version__)
|
<commit_before><commit_msg>Add test for verifying archive contents.<commit_after>
|
from json import load
from django_archive import __version__
from .base import BaseArchiveTestCase
from .sample.models import Sample
class ArchiveTestCase(BaseArchiveTestCase):
"""
Test that the archive command includes correct data in the archive
"""
def setUp(self):
Sample().save()
super().setUp()
def test_data(self):
"""
Confirm that the model was archived
"""
with self.tarfile.extractfile('data.json') as fileobj:
data = load(fileobj)
self.assertEqual(len(data), 1)
self.assertEqual(data[0]['model'], 'sample.sample')
def test_meta(self):
"""
Confirm that meta information is present
"""
with self.tarfile.extractfile('meta.json') as fileobj:
data = load(fileobj)
self.assertEqual(data['version'], __version__)
|
Add test for verifying archive contents.from json import load
from django_archive import __version__
from .base import BaseArchiveTestCase
from .sample.models import Sample
class ArchiveTestCase(BaseArchiveTestCase):
"""
Test that the archive command includes correct data in the archive
"""
def setUp(self):
Sample().save()
super().setUp()
def test_data(self):
"""
Confirm that the model was archived
"""
with self.tarfile.extractfile('data.json') as fileobj:
data = load(fileobj)
self.assertEqual(len(data), 1)
self.assertEqual(data[0]['model'], 'sample.sample')
def test_meta(self):
"""
Confirm that meta information is present
"""
with self.tarfile.extractfile('meta.json') as fileobj:
data = load(fileobj)
self.assertEqual(data['version'], __version__)
|
<commit_before><commit_msg>Add test for verifying archive contents.<commit_after>from json import load
from django_archive import __version__
from .base import BaseArchiveTestCase
from .sample.models import Sample
class ArchiveTestCase(BaseArchiveTestCase):
"""
Test that the archive command includes correct data in the archive
"""
def setUp(self):
Sample().save()
super().setUp()
def test_data(self):
"""
Confirm that the model was archived
"""
with self.tarfile.extractfile('data.json') as fileobj:
data = load(fileobj)
self.assertEqual(len(data), 1)
self.assertEqual(data[0]['model'], 'sample.sample')
def test_meta(self):
"""
Confirm that meta information is present
"""
with self.tarfile.extractfile('meta.json') as fileobj:
data = load(fileobj)
self.assertEqual(data['version'], __version__)
|
|
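The tarfile attribute used in these tests presumably comes from BaseArchiveTestCase (not shown here). Outside the test harness, the same checks can be run directly against a generated archive; a sketch, where the archive filename is a hypothetical placeholder since django-archive names its output by configured format and timestamp:

import json
import tarfile

# 'backup.tar.bz2' is illustrative only.
with tarfile.open('backup.tar.bz2', 'r:bz2') as archive:
    with archive.extractfile('data.json') as fileobj:
        data = json.load(fileobj)
    assert any(obj['model'] == 'sample.sample' for obj in data)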
6d65637dec129863ac4a96bb28b476f032bbff84
|
nova/db/sqlalchemy/migrate_repo/versions/143_rename_instance_info_cache_sequence.py
|
nova/db/sqlalchemy/migrate_repo/versions/143_rename_instance_info_cache_sequence.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from migrate.changeset import UniqueConstraint
from sqlalchemy import MetaData, Table
OLD_MYSQL_NAME = 'instance_id'
NEW_MYSQL_NAME = 'instance_uuid'
OLD_PG_NAME = 'instance_info_caches_instance_id_key'
NEW_PG_NAME = 'instance_info_caches_instance_uuid_key'
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
# NOTE(dprince): Rename the unique key constraints for both MySQL
# and PostgreSQL so they reflect the most recent UUID conversions
# from Folsom.
instance_info_caches = Table('instance_info_caches', meta, autoload=True)
if migrate_engine.name == "mysql":
UniqueConstraint('instance_uuid', table=instance_info_caches,
name=NEW_MYSQL_NAME).create()
UniqueConstraint('instance_uuid', table=instance_info_caches,
name=OLD_MYSQL_NAME).drop()
if migrate_engine.name == "postgresql":
UniqueConstraint('instance_uuid', table=instance_info_caches,
name=NEW_PG_NAME).create()
UniqueConstraint('instance_uuid', table=instance_info_caches,
name=OLD_PG_NAME).drop()
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instance_info_caches = Table('instance_info_caches', meta, autoload=True)
if migrate_engine.name == "mysql":
UniqueConstraint('instance_uuid', table=instance_info_caches,
name=OLD_MYSQL_NAME).create()
UniqueConstraint('instance_uuid', table=instance_info_caches,
name=NEW_MYSQL_NAME).drop()
if migrate_engine.name == "postgresql":
UniqueConstraint('instance_uuid', table=instance_info_caches,
name=OLD_PG_NAME).create()
UniqueConstraint('instance_uuid', table=instance_info_caches,
name=NEW_PG_NAME).drop()
|
Rename instance_info_cache unique key constraints.
|
Rename instance_info_cache unique key constraints.
Rename (via drop and recreate) the instance_uuid unique
constraints on the instance_info_cache table so they reflect
the column change name (UUID conversion) we complete in Folsom.
Fixes LP Bug #1080837.
Change-Id: I9dd01ebc896c1d7b51c212980e48db16bad18dec
|
Python
|
apache-2.0
|
plumgrid/plumgrid-nova,redhat-openstack/nova,yrobla/nova,gooddata/openstack-nova,orbitfp7/nova,edulramirez/nova,imsplitbit/nova,leilihh/novaha,cernops/nova,cloudbase/nova,shahar-stratoscale/nova,akash1808/nova,zaina/nova,orbitfp7/nova,jianghuaw/nova,whitepages/nova,hanlind/nova,cloudbase/nova,spring-week-topos/nova-week,leilihh/novaha,cloudbase/nova,yrobla/nova,Francis-Liu/animated-broccoli,silenceli/nova,hanlind/nova,BeyondTheClouds/nova,joker946/nova,JianyuWang/nova,gspilio/nova,tudorvio/nova,MountainWei/nova,rahulunair/nova,TwinkleChawla/nova,jianghuaw/nova,tanglei528/nova,takeshineshiro/nova,aristanetworks/arista-ovs-nova,JioCloud/nova,aristanetworks/arista-ovs-nova,affo/nova,ewindisch/nova,Stavitsky/nova,eayunstack/nova,gspilio/nova,dstroppa/openstack-smartos-nova-grizzly,belmiromoreira/nova,dims/nova,fajoy/nova,luogangyi/bcec-nova,angdraug/nova,sridevikoushik31/nova,raildo/nova,Juniper/nova,dims/nova,Yusuke1987/openstack_template,whitepages/nova,tealover/nova,Stavitsky/nova,JioCloud/nova,dawnpower/nova,citrix-openstack-build/nova,fajoy/nova,Triv90/Nova,openstack/nova,virtualopensystems/nova,CEG-FYP-OpenStack/scheduler,cernops/nova,BeyondTheClouds/nova,felixma/nova,openstack/nova,JioCloud/nova_test_latest,mgagne/nova,adelina-t/nova,devendermishrajio/nova_test_latest,maheshp/novatest,sacharya/nova,viggates/nova,iuliat/nova,rahulunair/nova,ruslanloman/nova,rajalokan/nova,maoy/zknova,felixma/nova,Brocade-OpenSource/OpenStack-DNRM-Nova,SUSE-Cloud/nova,alaski/nova,scripnichenko/nova,yrobla/nova,qwefi/nova,yatinkumbhare/openstack-nova,TieWei/nova,vmturbo/nova,belmiromoreira/nova,berrange/nova,berrange/nova,leilihh/nova,varunarya10/nova_test_latest,thomasem/nova,Triv90/Nova,akash1808/nova,eayunstack/nova,ntt-sic/nova,NeCTAR-RC/nova,jeffrey4l/nova,cloudbase/nova-virtualbox,cloudbau/nova,sridevikoushik31/openstack,bigswitch/nova,dstroppa/openstack-smartos-nova-grizzly,LoHChina/nova,vmturbo/nova,petrutlucian94/nova,kimjaejoong/nova,varunarya10/nova_test_latest,affo/nova,yosshy/nova,mahak/nova,badock/nova,alvarolopez/nova,sridevikoushik31/nova,nikesh-mahalka/nova,sridevikoushik31/openstack,tangfeixiong/nova,raildo/nova,saleemjaveds/https-github.com-openstack-nova,hanlind/nova,rrader/nova-docker-plugin,scripnichenko/nova,watonyweng/nova,cloudbase/nova-virtualbox,shootstar/novatest,Metaswitch/calico-nova,petrutlucian94/nova,tangfeixiong/nova,isyippee/nova,Triv90/Nova,shootstar/novatest,maoy/zknova,spring-week-topos/nova-week,luogangyi/bcec-nova,alvarolopez/nova,aristanetworks/arista-ovs-nova,Tehsmash/nova,CCI-MOC/nova,rickerc/nova_audit,gooddata/openstack-nova,zhimin711/nova,jianghuaw/nova,tanglei528/nova,yatinkumbhare/openstack-nova,sebrandon1/nova,CiscoSystems/nova,viggates/nova,petrutlucian94/nova_dev,ted-gould/nova,maelnor/nova,mmnelemane/nova,mandeepdhami/nova,barnsnake351/nova,akash1808/nova_test_latest,mahak/nova,klmitch/nova,blueboxgroup/nova,devendermishrajio/nova,tianweizhang/nova,Yusuke1987/openstack_template,noironetworks/nova,silenceli/nova,klmitch/nova,adelina-t/nova,eharney/nova,cyx1231st/nova,sridevikoushik31/nova,leilihh/nova,jianghuaw/nova,fajoy/nova,kimjaejoong/nova,citrix-openstack-build/nova,mmnelemane/nova,mandeepdhami/nova,barnsnake351/nova,klmitch/nova,jeffrey4l/nova,noironetworks/nova,fnordahl/nova,eharney/nova,ted-gould/nova,bigswitch/nova,CCI-MOC/nova,CiscoSystems/nova,sebrandon1/nova,JianyuWang/nova,sacharya/nova,cyx1231st/nova,projectcalico/calico-nova,takeshineshiro/nova,openstack/nova,nikesh-mahalka/nova,akash1808/nova_test_latest,vladikr/nova_drafts,maheshp/novatest,
plumgrid/plumgrid-nova,CloudServer/nova,Juniper/nova,tianweizhang/nova,DirectXMan12/nova-hacking,LoHChina/nova,cloudbau/nova,devoid/nova,dawnpower/nova,devoid/nova,joker946/nova,blueboxgroup/nova,MountainWei/nova,bclau/nova,iuliat/nova,houshengbo/nova_vmware_compute_driver,angdraug/nova,shail2810/nova,sridevikoushik31/openstack,shail2810/nova,fnordahl/nova,maheshp/novatest,edulramirez/nova,CEG-FYP-OpenStack/scheduler,petrutlucian94/nova_dev,ruslanloman/nova,alaski/nova,double12gzh/nova,vmturbo/nova,ntt-sic/nova,OpenAcademy-OpenStack/nova-scheduler,rajalokan/nova,BeyondTheClouds/nova,gspilio/nova,rajalokan/nova,maoy/zknova,DirectXMan12/nova-hacking,zaina/nova,double12gzh/nova,Juniper/nova,mikalstill/nova,houshengbo/nova_vmware_compute_driver,apporc/nova,alexandrucoman/vbox-nova-driver,rahulunair/nova,shahar-stratoscale/nova,maelnor/nova,DirectXMan12/nova-hacking,isyippee/nova,zhimin711/nova,virtualopensystems/nova,Juniper/nova,redhat-openstack/nova,eonpatapon/nova,tealover/nova,devendermishrajio/nova_test_latest,rajalokan/nova,rickerc/nova_audit,OpenAcademy-OpenStack/nova-scheduler,mikalstill/nova,gooddata/openstack-nova,cernops/nova,JioCloud/nova_test_latest,projectcalico/calico-nova,CloudServer/nova,gooddata/openstack-nova,phenoxim/nova,bgxavier/nova,mikalstill/nova,vmturbo/nova,klmitch/nova,watonyweng/nova,j-carpentier/nova,mgagne/nova,SUSE-Cloud/nova,Brocade-OpenSource/OpenStack-DNRM-Nova,tudorvio/nova,thomasem/nova,Yuriy-Leonov/nova,phenoxim/nova,vladikr/nova_drafts,Francis-Liu/animated-broccoli,yosshy/nova,sebrandon1/nova,bclau/nova,badock/nova,apporc/nova,Metaswitch/calico-nova,rrader/nova-docker-plugin,devendermishrajio/nova,TwinkleChawla/nova,eonpatapon/nova,j-carpentier/nova,Tehsmash/nova,qwefi/nova,dstroppa/openstack-smartos-nova-grizzly,ewindisch/nova,alexandrucoman/vbox-nova-driver,mahak/nova,Yuriy-Leonov/nova,NeCTAR-RC/nova,bgxavier/nova,imsplitbit/nova,saleemjaveds/https-github.com-openstack-nova,houshengbo/nova_vmware_compute_driver,zzicewind/nova,sridevikoushik31/nova,TieWei/nova,zzicewind/nova
|
Rename instance_info_cache unique key constraints.
Rename (via drop and recreate) the instance_uuid unique
constraints on the instance_info_cache table so they reflect
the column change name (UUID conversion) we complete in Folsom.
Fixes LP Bug #1080837.
Change-Id: I9dd01ebc896c1d7b51c212980e48db16bad18dec
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from migrate.changeset import UniqueConstraint
from sqlalchemy import MetaData, Table
OLD_MYSQL_NAME = 'instance_id'
NEW_MYSQL_NAME = 'instance_uuid'
OLD_PG_NAME = 'instance_info_caches_instance_id_key'
NEW_PG_NAME = 'instance_info_caches_instance_uuid_key'
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
# NOTE(dprince): Rename the unique key constraints for both MySQL
# and PostgreSQL so they reflect the most recent UUID conversions
# from Folsom.
instance_info_caches = Table('instance_info_caches', meta, autoload=True)
if migrate_engine.name == "mysql":
UniqueConstraint('instance_uuid', table=instance_info_caches,
name=NEW_MYSQL_NAME).create()
UniqueConstraint('instance_uuid', table=instance_info_caches,
name=OLD_MYSQL_NAME).drop()
if migrate_engine.name == "postgresql":
UniqueConstraint('instance_uuid', table=instance_info_caches,
name=NEW_PG_NAME).create()
UniqueConstraint('instance_uuid', table=instance_info_caches,
name=OLD_PG_NAME).drop()
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instance_info_caches = Table('instance_info_caches', meta, autoload=True)
if migrate_engine.name == "mysql":
UniqueConstraint('instance_uuid', table=instance_info_caches,
name=OLD_MYSQL_NAME).create()
UniqueConstraint('instance_uuid', table=instance_info_caches,
name=NEW_MYSQL_NAME).drop()
if migrate_engine.name == "postgresql":
UniqueConstraint('instance_uuid', table=instance_info_caches,
name=OLD_PG_NAME).create()
UniqueConstraint('instance_uuid', table=instance_info_caches,
name=NEW_PG_NAME).drop()
|
<commit_before><commit_msg>Rename instance_info_cache unique key constraints.
Rename (via drop and recreate) the instance_uuid unique
constraints on the instance_info_cache table so they reflect
the column change name (UUID conversion) we complete in Folsom.
Fixes LP Bug #1080837.
Change-Id: I9dd01ebc896c1d7b51c212980e48db16bad18dec<commit_after>
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from migrate.changeset import UniqueConstraint
from sqlalchemy import MetaData, Table
OLD_MYSQL_NAME = 'instance_id'
NEW_MYSQL_NAME = 'instance_uuid'
OLD_PG_NAME = 'instance_info_caches_instance_id_key'
NEW_PG_NAME = 'instance_info_caches_instance_uuid_key'
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
# NOTE(dprince): Rename the unique key constraints for both MySQL
# and PostgreSQL so they reflect the most recent UUID conversions
# from Folsom.
instance_info_caches = Table('instance_info_caches', meta, autoload=True)
if migrate_engine.name == "mysql":
UniqueConstraint('instance_uuid', table=instance_info_caches,
name=NEW_MYSQL_NAME).create()
UniqueConstraint('instance_uuid', table=instance_info_caches,
name=OLD_MYSQL_NAME).drop()
if migrate_engine.name == "postgresql":
UniqueConstraint('instance_uuid', table=instance_info_caches,
name=NEW_PG_NAME).create()
UniqueConstraint('instance_uuid', table=instance_info_caches,
name=OLD_PG_NAME).drop()
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instance_info_caches = Table('instance_info_caches', meta, autoload=True)
if migrate_engine.name == "mysql":
UniqueConstraint('instance_uuid', table=instance_info_caches,
name=OLD_MYSQL_NAME).create()
UniqueConstraint('instance_uuid', table=instance_info_caches,
name=NEW_MYSQL_NAME).drop()
if migrate_engine.name == "postgresql":
UniqueConstraint('instance_uuid', table=instance_info_caches,
name=OLD_PG_NAME).create()
UniqueConstraint('instance_uuid', table=instance_info_caches,
name=NEW_PG_NAME).drop()
|
Rename instance_info_cache unique key constraints.
Rename (via drop and recreate) the instance_uuid unique
constraints on the instance_info_cache table so they reflect
the column change name (UUID conversion) we complete in Folsom.
Fixes LP Bug #1080837.
Change-Id: I9dd01ebc896c1d7b51c212980e48db16bad18dec# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from migrate.changeset import UniqueConstraint
from sqlalchemy import MetaData, Table
OLD_MYSQL_NAME = 'instance_id'
NEW_MYSQL_NAME = 'instance_uuid'
OLD_PG_NAME = 'instance_info_caches_instance_id_key'
NEW_PG_NAME = 'instance_info_caches_instance_uuid_key'
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
# NOTE(dprince): Rename the unique key constraints for both MySQL
# and PostgreSQL so they reflect the most recent UUID conversions
# from Folsom.
instance_info_caches = Table('instance_info_caches', meta, autoload=True)
if migrate_engine.name == "mysql":
UniqueConstraint('instance_uuid', table=instance_info_caches,
name=NEW_MYSQL_NAME).create()
UniqueConstraint('instance_uuid', table=instance_info_caches,
name=OLD_MYSQL_NAME).drop()
if migrate_engine.name == "postgresql":
UniqueConstraint('instance_uuid', table=instance_info_caches,
name=NEW_PG_NAME).create()
UniqueConstraint('instance_uuid', table=instance_info_caches,
name=OLD_PG_NAME).drop()
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instance_info_caches = Table('instance_info_caches', meta, autoload=True)
if migrate_engine.name == "mysql":
UniqueConstraint('instance_uuid', table=instance_info_caches,
name=OLD_MYSQL_NAME).create()
UniqueConstraint('instance_uuid', table=instance_info_caches,
name=NEW_MYSQL_NAME).drop()
if migrate_engine.name == "postgresql":
UniqueConstraint('instance_uuid', table=instance_info_caches,
name=OLD_PG_NAME).create()
UniqueConstraint('instance_uuid', table=instance_info_caches,
name=NEW_PG_NAME).drop()
|
<commit_before><commit_msg>Rename instance_info_cache unique key constraints.
Rename (via drop and recreate) the instance_uuid unique
constraints on the instance_info_cache table so they reflect
the column change name (UUID conversion) we complete in Folsom.
Fixes LP Bug #1080837.
Change-Id: I9dd01ebc896c1d7b51c212980e48db16bad18dec<commit_after># vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from migrate.changeset import UniqueConstraint
from sqlalchemy import MetaData, Table
OLD_MYSQL_NAME = 'instance_id'
NEW_MYSQL_NAME = 'instance_uuid'
OLD_PG_NAME = 'instance_info_caches_instance_id_key'
NEW_PG_NAME = 'instance_info_caches_instance_uuid_key'
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
# NOTE(dprince): Rename the unique key constraints for both MySQL
# and PostgreSQL so they reflect the most recent UUID conversions
# from Folsom.
instance_info_caches = Table('instance_info_caches', meta, autoload=True)
if migrate_engine.name == "mysql":
UniqueConstraint('instance_uuid', table=instance_info_caches,
name=NEW_MYSQL_NAME).create()
UniqueConstraint('instance_uuid', table=instance_info_caches,
name=OLD_MYSQL_NAME).drop()
if migrate_engine.name == "postgresql":
UniqueConstraint('instance_uuid', table=instance_info_caches,
name=NEW_PG_NAME).create()
UniqueConstraint('instance_uuid', table=instance_info_caches,
name=OLD_PG_NAME).drop()
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instance_info_caches = Table('instance_info_caches', meta, autoload=True)
if migrate_engine.name == "mysql":
UniqueConstraint('instance_uuid', table=instance_info_caches,
name=OLD_MYSQL_NAME).create()
UniqueConstraint('instance_uuid', table=instance_info_caches,
name=NEW_MYSQL_NAME).drop()
if migrate_engine.name == "postgresql":
UniqueConstraint('instance_uuid', table=instance_info_caches,
name=OLD_PG_NAME).create()
UniqueConstraint('instance_uuid', table=instance_info_caches,
name=NEW_PG_NAME).drop()
|
|
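Because sqlalchemy-migrate offers no portable rename for unique constraints, the migration above emulates one by creating the constraint under the new name and then dropping the old one. The same pattern can be factored into a small helper; a sketch using the same calls as the migration (the helper name is illustrative):

from migrate.changeset import UniqueConstraint

def rename_unique_constraint(table, column, old_name, new_name):
    # Emulate a rename: create the new-named constraint first, then drop
    # the old one, so the column is never left without the constraint.
    UniqueConstraint(column, table=table, name=new_name).create()
    UniqueConstraint(column, table=table, name=old_name).drop()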
41ab8dbe59cc3ffb41b352cf228fb62e00612d1b
|
test_bvg_cli.py
|
test_bvg_cli.py
|
import httpretty
from bvg_cli import BVG_URL
from bvg_cli import request_departures, request_station_ids
from html_dumps import DEPARTURE_HTML, STATION_HTML
@httpretty.activate
def test_request_station_server_error():
httpretty.register_uri(httpretty.GET, BVG_URL, status=500)
_, ok = request_station_ids('any station')
assert ok is False
@httpretty.activate
def test_request_departures_server_error():
httpretty.register_uri(httpretty.GET, BVG_URL, status=500)
_, ok = request_departures('any id', limit=10)
assert ok is False
@httpretty.activate
def test_parameter_station_name():
httpretty.register_uri(httpretty.GET, BVG_URL, status=201)
_, ok = request_station_ids('anystation')
request = httpretty.last_request()
assert b'input=anystation' in request.body
@httpretty.activate
def test_station_name():
httpretty.register_uri(httpretty.GET, BVG_URL, body=STATION_HTML)
stations, ok = request_station_ids('Weber')
assert ok is True
assert len(stations) == 8
assert len(stations[0]) == 2
@httpretty.activate
def test_parameter_limit():
httpretty.register_uri(httpretty.GET, BVG_URL, body=DEPARTURE_HTML)
_, ok = request_departures('any id', limit=2)
request = httpretty.last_request()
assert hasattr(request, 'querystring')
assert 'maxJourneys' in request.querystring
assert '2' in request.querystring['maxJourneys']
@httpretty.activate
def test_limit():
httpretty.register_uri(httpretty.GET, BVG_URL, body=DEPARTURE_HTML)
departures, ok = request_departures('9120025', limit=2)
assert ok is True
assert len(departures) == 2
|
Add tests with mocked http responses
|
Add tests with mocked http responses
|
Python
|
mit
|
behrtam/bvg-cli,behrtam/bvg-cli
|
Add tests with mocked http responses
|
import httpretty
from bvg_cli import BVG_URL
from bvg_cli import request_departures, request_station_ids
from html_dumps import DEPARTURE_HTML, STATION_HTML
@httpretty.activate
def test_request_station_server_error():
httpretty.register_uri(httpretty.GET, BVG_URL, status=500)
_, ok = request_station_ids('any station')
assert ok is False
@httpretty.activate
def test_request_departures_server_error():
httpretty.register_uri(httpretty.GET, BVG_URL, status=500)
_, ok = request_departures('any id', limit=10)
assert ok is False
@httpretty.activate
def test_parameter_station_name():
httpretty.register_uri(httpretty.GET, BVG_URL, status=201)
_, ok = request_station_ids('anystation')
request = httpretty.last_request()
assert b'input=anystation' in request.body
@httpretty.activate
def test_station_name():
httpretty.register_uri(httpretty.GET, BVG_URL, body=STATION_HTML)
stations, ok = request_station_ids('Weber')
assert ok is True
assert len(stations) == 8
assert len(stations[0]) == 2
@httpretty.activate
def test_parameter_limit():
httpretty.register_uri(httpretty.GET, BVG_URL, body=DEPARTURE_HTML)
_, ok = request_departures('any id', limit=2)
request = httpretty.last_request()
assert hasattr(request, 'querystring')
assert 'maxJourneys' in request.querystring
assert '2' in request.querystring['maxJourneys']
@httpretty.activate
def test_limit():
httpretty.register_uri(httpretty.GET, BVG_URL, body=DEPARTURE_HTML)
departures, ok = request_departures('9120025', limit=2)
assert ok is True
assert len(departures) == 2
|
<commit_before><commit_msg>Add tests with mocked http responses<commit_after>
|
import httpretty
from bvg_cli import BVG_URL
from bvg_cli import request_departures, request_station_ids
from html_dumps import DEPARTURE_HTML, STATION_HTML
@httpretty.activate
def test_request_station_server_error():
httpretty.register_uri(httpretty.GET, BVG_URL, status=500)
_, ok = request_station_ids('any station')
assert ok is False
@httpretty.activate
def test_request_departures_server_error():
httpretty.register_uri(httpretty.GET, BVG_URL, status=500)
_, ok = request_departures('any id', limit=10)
assert ok is False
@httpretty.activate
def test_parameter_station_name():
httpretty.register_uri(httpretty.GET, BVG_URL, status=201)
_, ok = request_station_ids('anystation')
request = httpretty.last_request()
assert b'input=anystation' in request.body
@httpretty.activate
def test_station_name():
httpretty.register_uri(httpretty.GET, BVG_URL, body=STATION_HTML)
stations, ok = request_station_ids('Weber')
assert ok is True
assert len(stations) == 8
assert len(stations[0]) == 2
@httpretty.activate
def test_parameter_limit():
httpretty.register_uri(httpretty.GET, BVG_URL, body=DEPARTURE_HTML)
_, ok = request_departures('any id', limit=2)
request = httpretty.last_request()
assert hasattr(request, 'querystring')
assert 'maxJourneys' in request.querystring
assert '2' in request.querystring['maxJourneys']
@httpretty.activate
def test_limit():
httpretty.register_uri(httpretty.GET, BVG_URL, body=DEPARTURE_HTML)
departures, ok = request_departures('9120025', limit=2)
assert ok is True
assert len(departures) == 2
|
Add tests with mocked http responsesimport httpretty
from bvg_cli import BVG_URL
from bvg_cli import request_departures, request_station_ids
from html_dumps import DEPARTURE_HTML, STATION_HTML
@httpretty.activate
def test_request_station_server_error():
httpretty.register_uri(httpretty.GET, BVG_URL, status=500)
_, ok = request_station_ids('any station')
assert ok is False
@httpretty.activate
def test_request_departures_server_error():
httpretty.register_uri(httpretty.GET, BVG_URL, status=500)
_, ok = request_departures('any id', limit=10)
assert ok is False
@httpretty.activate
def test_parameter_station_name():
httpretty.register_uri(httpretty.GET, BVG_URL, status=201)
_, ok = request_station_ids('anystation')
request = httpretty.last_request()
assert b'input=anystation' in request.body
@httpretty.activate
def test_station_name():
httpretty.register_uri(httpretty.GET, BVG_URL, body=STATION_HTML)
stations, ok = request_station_ids('Weber')
assert ok is True
assert len(stations) == 8
assert len(stations[0]) == 2
@httpretty.activate
def test_parameter_limit():
httpretty.register_uri(httpretty.GET, BVG_URL, body=DEPARTURE_HTML)
_, ok = request_departures('any id', limit=2)
request = httpretty.last_request()
assert hasattr(request, 'querystring')
assert 'maxJourneys' in request.querystring
assert '2' in request.querystring['maxJourneys']
@httpretty.activate
def test_limit():
httpretty.register_uri(httpretty.GET, BVG_URL, body=DEPARTURE_HTML)
departures, ok = request_departures('9120025', limit=2)
assert ok is True
assert len(departures) == 2
|
<commit_before><commit_msg>Add tests with mocked http responses<commit_after>import httpretty
from bvg_cli import BVG_URL
from bvg_cli import request_departures, request_station_ids
from html_dumps import DEPARTURE_HTML, STATION_HTML
@httpretty.activate
def test_request_station_server_error():
httpretty.register_uri(httpretty.GET, BVG_URL, status=500)
_, ok = request_station_ids('any station')
assert ok is False
@httpretty.activate
def test_request_departures_server_error():
httpretty.register_uri(httpretty.GET, BVG_URL, status=500)
_, ok = request_departures('any id', limit=10)
assert ok is False
@httpretty.activate
def test_parameter_station_name():
httpretty.register_uri(httpretty.GET, BVG_URL, status=201)
_, ok = request_station_ids('anystation')
request = httpretty.last_request()
assert b'input=anystation' in request.body
@httpretty.activate
def test_station_name():
httpretty.register_uri(httpretty.GET, BVG_URL, body=STATION_HTML)
stations, ok = request_station_ids('Weber')
assert ok is True
assert len(stations) == 8
assert len(stations[0]) == 2
@httpretty.activate
def test_parameter_limit():
httpretty.register_uri(httpretty.GET, BVG_URL, body=DEPARTURE_HTML)
_, ok = request_departures('any id', limit=2)
request = httpretty.last_request()
assert hasattr(request, 'querystring')
assert 'maxJourneys' in request.querystring
assert '2' in request.querystring['maxJourneys']
@httpretty.activate
def test_limit():
httpretty.register_uri(httpretty.GET, BVG_URL, body=DEPARTURE_HTML)
departures, ok = request_departures('9120025', limit=2)
assert ok is True
assert len(departures) == 2
|
|
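These tests all follow the standard httpretty workflow: register a canned response for the URL, exercise the client code, then inspect httpretty.last_request() for the outgoing parameters. A standalone sketch of the pattern; the URL is illustrative and the requests library is assumed to be available:

import httpretty
import requests

@httpretty.activate
def test_pattern_sketch():
    # Any socket traffic to this URL is intercepted and answered locally.
    httpretty.register_uri(httpretty.GET, 'http://example.com/api',
                           body='ok', status=200)
    response = requests.get('http://example.com/api', params={'q': '1'})
    assert response.status_code == 200
    # The captured request exposes the query string the client sent.
    assert 'q' in httpretty.last_request().querystring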
ae02408361b2f6c426138799da756af56ada67f0
|
utils.py
|
utils.py
|
"""
Utility classes and functions for template matching framework.
"""
class BoundingBox(object):
def __init__(self, lr, ul):
self.lrx = lr[0]
self.lry = lr[1]
self.lr = lr
self.ulx = ul[0]
self.uly = ul[1]
self.ul = ul
self.llx = ul[0]
self.lly = lr[1]
self.ll = (self.llx, self.lly)
self.urx = lr[0]
self.ury = ul[1]
self.ur = (self.urx, self.ury)
self.corners = [self.ul, self.ll, self.ur, self.lr]
def contains(self, point):
intersect_x = point[0] >= self.ulx and point[0] <= self.lrx
intersect_y = point[1] >= self.lry and point[1] <= self.uly
return intersect_x and intersect_y
def intersects(self, bbox):
for corner in bbox.corners:
if self.contains(corner):
return True
return False
|
Move utilities to separate file
|
Move utilities to separate file
|
Python
|
mit
|
rmsare/scarplet,stgl/scarplet
|
Move utilities to separate file
|
"""
Utility classes and functions for template matching framework.
"""
class BoundingBox(object):
def __init__(self, lr, ul):
self.lrx = lr[0]
self.lry = lr[1]
self.lr = lr
self.ulx = ul[0]
self.uly = ul[1]
self.ul = ul
self.llx = ul[0]
self.lly = lr[1]
self.ll = (self.llx, self.lly)
self.urx = lr[0]
self.ury = ul[1]
self.ur = (self.urx, self.ury)
self.corners = [self.ul, self.ll, self.ur, self.lr]
def contains(self, point):
intersect_x = point[0] >= self.ulx and point[0] <= self.lrx
intersect_y = point[1] >= self.lry and point[1] <= self.uly
return intersect_x and intersect_y
def intersects(self, bbox):
for corner in bbox.corners:
if self.contains(corner):
return True
return False
|
<commit_before><commit_msg>Move utilities to separate file<commit_after>
|
"""
Utility classes and functions for template matching framework.
"""
class BoundingBox(object):
def __init__(self, lr, ul):
self.lrx = lr[0]
self.lry = lr[1]
self.lr = lr
self.ulx = ul[0]
self.uly = ul[1]
self.ul = ul
self.llx = ul[0]
self.lly = lr[1]
self.ll = (self.llx, self.lly)
self.urx = lr[0]
self.ury = ul[1]
self.ur = (self.urx, self.ury)
self.corners = [self.ul, self.ll, self.ur, self.lr]
def contains(self, point):
intersect_x = point[0] >= self.ulx and point[0] <= self.lrx
intersect_y = point[1] >= self.lry and point[1] <= self.uly
return intersect_x and intersect_y
def intersects(self, bbox):
for corner in bbox.corners:
if self.contains(corner):
return True
return False
|
Move utilities to separate file"""
Utility classes and functions for template matching framework.
"""
class BoundingBox(object):
def __init__(self, lr, ul):
self.lrx = lr[0]
self.lry = lr[1]
self.lr = lr
self.ulx = ul[0]
self.uly = ul[1]
self.ul = ul
self.llx = ul[0]
self.lly = lr[1]
self.ll = (self.llx, self.lly)
self.urx = lr[0]
self.ury = ul[1]
self.ur = (self.urx, self.ury)
self.corners = [self.ul, self.ll, self.ur, self.lr]
def contains(self, point):
intersect_x = point[0] >= self.ulx and point[0] <= self.lrx
intersect_y = point[1] >= self.lry and point[1] <= self.uly
return intersect_x and intersect_y
def intersects(self, bbox):
for corner in bbox.corners:
if self.contains(corner):
return True
return False
|
<commit_before><commit_msg>Move utilities to separate file<commit_after>"""
Utility classes and functions for template matching framework.
"""
class BoundingBox(object):
def __init__(self, lr, ul):
self.lrx = lr[0]
self.lry = lr[1]
self.lr = lr
self.ulx = ul[0]
self.uly = ul[1]
self.ul = ul
self.llx = ul[0]
self.lly = lr[1]
self.ll = (self.llx, self.lly)
self.urx = lr[0]
self.ury = ul[1]
self.ur = (self.urx, self.ury)
self.corners = [self.ul, self.ll, self.ur, self.lr]
def contains(self, point):
intersect_x = point[0] >= self.ulx and point[0] <= self.lrx
intersect_y = point[1] >= self.lry and point[1] <= self.uly
return intersect_x and intersect_y
def intersects(self, bbox):
for corner in bbox.corners:
if self.contains(corner):
return True
return False
|
|
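Note that the corner-based intersects check above misses overlaps where neither box contains a corner of the other, for example a cross-shaped overlap, or the case where self lies entirely inside bbox. A standard axis-aligned interval-overlap test covers all of these; a sketch using the same corner conventions as the class above:

def intersects(self, bbox):
    # Two axis-aligned boxes intersect iff their x intervals and their
    # y intervals both overlap (ul = min x / max y, lr = max x / min y).
    overlap_x = self.ulx <= bbox.lrx and bbox.ulx <= self.lrx
    overlap_y = self.lry <= bbox.uly and bbox.lry <= self.uly
    return overlap_x and overlap_y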
f9988a1419e6adefa163ef0dd92c9f33f2ccf562
|
anchorhub/tests/test_main.py
|
anchorhub/tests/test_main.py
|
"""
test_main.py - Tests for main.py
main.py:
http://www.github.com/samjabrahams/anchorhub/main.py
"""
import anchorhub.main as main
def test_one():
"""
main.py: Test defaults with local directory as input.
"""
main.main(['.'])
|
Add basic tests for main.py
|
Add basic tests for main.py
Needs to be updated as main.py becomes complete.
|
Python
|
apache-2.0
|
samjabrahams/anchorhub
|
Add basic tests for main.py
Needs to be updated as main.py becomes complete.
|
"""
test_main.py - Tests for main.py
main.py:
http://www.github.com/samjabrahams/anchorhub/main.py
"""
import anchorhub.main as main
def test_one():
"""
main.py: Test defaults with local directory as input.
"""
main.main(['.'])
|
<commit_before><commit_msg>Add basic tests for main.py
Needs to be updated as main.py becomes complete.<commit_after>
|
"""
test_main.py - Tests for main.py
main.py:
http://www.github.com/samjabrahams/anchorhub/main.py
"""
import anchorhub.main as main
def test_one():
"""
main.py: Test defaults with local directory as input.
"""
main.main(['.'])
|
Add basic tests for main.py
Needs to be updated as main.py becomes complete."""
test_main.py - Tests for main.py
main.py:
http://www.github.com/samjabrahams/anchorhub/main.py
"""
import anchorhub.main as main
def test_one():
"""
main.py: Test defaults with local directory as input.
"""
main.main(['.'])
|
<commit_before><commit_msg>Add basic tests for main.py
Needs to be updated as main.py becomes complete.<commit_after>"""
test_main.py - Tests for main.py
main.py:
http://www.github.com/samjabrahams/anchorhub/main.py
"""
import anchorhub.main as main
def test_one():
"""
main.py: Test defaults with local directory as input.
"""
main.main(['.'])
|
|
bc781453ac14c58fa64f51b5ab91e78dd300621a
|
tests/devices_test/size_test.py
|
tests/devices_test/size_test.py
|
import unittest
from blivet.devices import StorageDevice
from blivet import errors
from blivet.formats import getFormat
from blivet.size import Size
class StorageDeviceSizeTest(unittest.TestCase):
def _getDevice(self, *args, **kwargs):
return StorageDevice(*args, **kwargs)
def testSizeSetter(self):
initial_size = Size('10 GiB')
new_size = Size('2 GiB')
##
## setter sets the size
##
dev = self._getDevice('sizetest', size=initial_size)
self.assertEqual(dev.size, initial_size)
dev.size = new_size
self.assertEqual(dev.size, new_size)
##
## setter raises exn if size outside of format limits
##
dev.format._maxSize = Size("5 GiB")
with self.assertRaises(errors.DeviceError):
dev.size = Size("6 GiB")
##
## new formats' min size is checked against device size
##
fmt = getFormat(None)
fmt._minSize = Size("10 GiB")
with self.assertRaises(errors.DeviceError):
dev.format = fmt
# the format assignment should succeed without the min size conflict
fmt._minSize = Size(0)
dev.format = fmt
##
## new formats' max size is checked against device size
##
fmt = getFormat(None)
fmt._maxSize = Size("10 MiB")
with self.assertRaises(errors.DeviceError):
dev.format = fmt
# the format assignment should succeed without the max size conflict
fmt._maxSize = Size(0)
dev.format = fmt
def testSizeGetter(self):
initial_size = Size("10 GiB")
new_size = Size("5 GiB")
dev = self._getDevice('sizetest', size=initial_size)
##
## getter returns the size in the basic case for non-existing devices
##
self.assertEqual(dev.size, initial_size)
# create a new device that exists
dev = self._getDevice('sizetest', size=initial_size, exists=True)
##
## getter returns the size in the basic case for existing devices
##
self.assertEqual(dev.size, initial_size)
##
## size does not reflect target size for non-resizable devices
##
# bypass the setter since the min/max will be the current size for a
# non-resizable device
dev._targetSize = new_size
self.assertEqual(dev.size, initial_size)
##
## getter returns target size when device is resizable and target size
## is non-zero
##
dev._resizable = True
dev.targetSize = new_size # verify that the target size setter works
self.assertEqual(dev.size, new_size)
self.assertEqual(dev.size, dev.targetSize)
self.assertNotEqual(dev._size, dev.targetSize)
##
## getter returns current size when device is resizable and target size
## is zero
##
dev.targetSize = Size(0)
self.assertEqual(dev.size, initial_size)
self.assertEqual(dev.size, dev.currentSize)
|
Add unit tests for device size setters and getters.
|
Add unit tests for device size setters and getters.
|
Python
|
lgpl-2.1
|
rvykydal/blivet,rvykydal/blivet,vojtechtrefny/blivet,vpodzime/blivet,vojtechtrefny/blivet,jkonecny12/blivet,AdamWill/blivet,rhinstaller/blivet,vpodzime/blivet,AdamWill/blivet,jkonecny12/blivet,rhinstaller/blivet
|
Add unit tests for device size setters and getters.
|
import unittest
from blivet.devices import StorageDevice
from blivet import errors
from blivet.formats import getFormat
from blivet.size import Size
class StorageDeviceSizeTest(unittest.TestCase):
def _getDevice(self, *args, **kwargs):
return StorageDevice(*args, **kwargs)
def testSizeSetter(self):
initial_size = Size('10 GiB')
new_size = Size('2 GiB')
##
## setter sets the size
##
dev = self._getDevice('sizetest', size=initial_size)
self.assertEqual(dev.size, initial_size)
dev.size = new_size
self.assertEqual(dev.size, new_size)
##
## setter raises exn if size outside of format limits
##
dev.format._maxSize = Size("5 GiB")
with self.assertRaises(errors.DeviceError):
dev.size = Size("6 GiB")
##
## new formats' min size is checked against device size
##
fmt = getFormat(None)
fmt._minSize = Size("10 GiB")
with self.assertRaises(errors.DeviceError):
dev.format = fmt
# the format assignment should succeed without the min size conflict
fmt._minSize = Size(0)
dev.format = fmt
##
## new formats' max size is checked against device size
##
fmt = getFormat(None)
fmt._maxSize = Size("10 MiB")
with self.assertRaises(errors.DeviceError):
dev.format = fmt
# the format assignment should succeed without the max size conflict
fmt._maxSize = Size(0)
dev.format = fmt
def testSizeGetter(self):
initial_size = Size("10 GiB")
new_size = Size("5 GiB")
dev = self._getDevice('sizetest', size=initial_size)
##
## getter returns the size in the basic case for non-existing devices
##
self.assertEqual(dev.size, initial_size)
# create a new device that exists
dev = self._getDevice('sizetest', size=initial_size, exists=True)
##
## getter returns the size in the basic case for existing devices
##
self.assertEqual(dev.size, initial_size)
##
## size does not reflect target size for non-resizable devices
##
# bypass the setter since the min/max will be the current size for a
# non-resizable device
dev._targetSize = new_size
self.assertEqual(dev.size, initial_size)
##
## getter returns target size when device is resizable and target size
## is non-zero
##
dev._resizable = True
dev.targetSize = new_size # verify that the target size setter works
self.assertEqual(dev.size, new_size)
self.assertEqual(dev.size, dev.targetSize)
self.assertNotEqual(dev._size, dev.targetSize)
##
## getter returns current size when device is resizable and target size
## is zero
##
dev.targetSize = Size(0)
self.assertEqual(dev.size, initial_size)
self.assertEqual(dev.size, dev.currentSize)
|
<commit_before><commit_msg>Add unit tests for device size setters and getters.<commit_after>
|
import unittest
from blivet.devices import StorageDevice
from blivet import errors
from blivet.formats import getFormat
from blivet.size import Size
class StorageDeviceSizeTest(unittest.TestCase):
def _getDevice(self, *args, **kwargs):
return StorageDevice(*args, **kwargs)
def testSizeSetter(self):
initial_size = Size('10 GiB')
new_size = Size('2 GiB')
##
## setter sets the size
##
dev = self._getDevice('sizetest', size=initial_size)
self.assertEqual(dev.size, initial_size)
dev.size = new_size
self.assertEqual(dev.size, new_size)
##
## setter raises exn if size outside of format limits
##
dev.format._maxSize = Size("5 GiB")
with self.assertRaises(errors.DeviceError):
dev.size = Size("6 GiB")
##
## new formats' min size is checked against device size
##
fmt = getFormat(None)
fmt._minSize = Size("10 GiB")
with self.assertRaises(errors.DeviceError):
dev.format = fmt
# the format assignment should succeed without the min size conflict
fmt._minSize = Size(0)
dev.format = fmt
##
## new formats' max size is checked against device size
##
fmt = getFormat(None)
fmt._maxSize = Size("10 MiB")
with self.assertRaises(errors.DeviceError):
dev.format = fmt
# the format assignment should succeed without the max size conflict
fmt._maxSize = Size(0)
dev.format = fmt
def testSizeGetter(self):
initial_size = Size("10 GiB")
new_size = Size("5 GiB")
dev = self._getDevice('sizetest', size=initial_size)
##
## getter returns the size in the basic case for non-existing devices
##
self.assertEqual(dev.size, initial_size)
# create a new device that exists
dev = self._getDevice('sizetest', size=initial_size, exists=True)
##
## getter returns the size in the basic case for existing devices
##
self.assertEqual(dev.size, initial_size)
##
## size does not reflect target size for non-resizable devices
##
# bypass the setter since the min/max will be the current size for a
# non-resizable device
dev._targetSize = new_size
self.assertEqual(dev.size, initial_size)
##
## getter returns target size when device is resizable and target size
## is non-zero
##
dev._resizable = True
dev.targetSize = new_size # verify that the target size setter works
self.assertEqual(dev.size, new_size)
self.assertEqual(dev.size, dev.targetSize)
self.assertNotEqual(dev._size, dev.targetSize)
##
## getter returns current size when device is resizable and target size
## is zero
##
dev.targetSize = Size(0)
self.assertEqual(dev.size, initial_size)
self.assertEqual(dev.size, dev.currentSize)
|
Add unit tests for device size setters and getters.
import unittest
from blivet.devices import StorageDevice
from blivet import errors
from blivet.formats import getFormat
from blivet.size import Size
class StorageDeviceSizeTest(unittest.TestCase):
def _getDevice(self, *args, **kwargs):
return StorageDevice(*args, **kwargs)
def testSizeSetter(self):
initial_size = Size('10 GiB')
new_size = Size('2 GiB')
##
## setter sets the size
##
dev = self._getDevice('sizetest', size=initial_size)
self.assertEqual(dev.size, initial_size)
dev.size = new_size
self.assertEqual(dev.size, new_size)
##
## setter raises exn if size outside of format limits
##
dev.format._maxSize = Size("5 GiB")
with self.assertRaises(errors.DeviceError):
dev.size = Size("6 GiB")
##
## new formats' min size is checked against device size
##
fmt = getFormat(None)
fmt._minSize = Size("10 GiB")
with self.assertRaises(errors.DeviceError):
dev.format = fmt
# the format assignment should succeed without the min size conflict
fmt._minSize = Size(0)
dev.format = fmt
##
## new formats' max size is checked against device size
##
fmt = getFormat(None)
fmt._maxSize = Size("10 MiB")
with self.assertRaises(errors.DeviceError):
dev.format = fmt
# the format assignment should succeed without the max size conflict
fmt._maxSize = Size(0)
dev.format = fmt
def testSizeGetter(self):
initial_size = Size("10 GiB")
new_size = Size("5 GiB")
dev = self._getDevice('sizetest', size=initial_size)
##
## getter returns the size in the basic case for non-existing devices
##
self.assertEqual(dev.size, initial_size)
# create a new device that exists
dev = self._getDevice('sizetest', size=initial_size, exists=True)
##
## getter returns the size in the basic case for existing devices
##
self.assertEqual(dev.size, initial_size)
##
## size does not reflect target size for non-resizable devices
##
# bypass the setter since the min/max will be the current size for a
# non-resizable device
dev._targetSize = new_size
self.assertEqual(dev.size, initial_size)
##
## getter returns target size when device is resizable and target size
## is non-zero
##
dev._resizable = True
dev.targetSize = new_size # verify that the target size setter works
self.assertEqual(dev.size, new_size)
self.assertEqual(dev.size, dev.targetSize)
self.assertNotEqual(dev._size, dev.targetSize)
##
## getter returns current size when device is resizable and target size
## is zero
##
dev.targetSize = Size(0)
self.assertEqual(dev.size, initial_size)
self.assertEqual(dev.size, dev.currentSize)
|
<commit_before><commit_msg>Add unit tests for device size setters and getters.<commit_after>
import unittest
from blivet.devices import StorageDevice
from blivet import errors
from blivet.formats import getFormat
from blivet.size import Size
class StorageDeviceSizeTest(unittest.TestCase):
def _getDevice(self, *args, **kwargs):
return StorageDevice(*args, **kwargs)
def testSizeSetter(self):
initial_size = Size('10 GiB')
new_size = Size('2 GiB')
##
## setter sets the size
##
dev = self._getDevice('sizetest', size=initial_size)
self.assertEqual(dev.size, initial_size)
dev.size = new_size
self.assertEqual(dev.size, new_size)
##
## setter raises exn if size outside of format limits
##
dev.format._maxSize = Size("5 GiB")
with self.assertRaises(errors.DeviceError):
dev.size = Size("6 GiB")
##
## new formats' min size is checked against device size
##
fmt = getFormat(None)
fmt._minSize = Size("10 GiB")
with self.assertRaises(errors.DeviceError):
dev.format = fmt
# the format assignment should succeed without the min size conflict
fmt._minSize = Size(0)
dev.format = fmt
##
## new formats' max size is checked against device size
##
fmt = getFormat(None)
fmt._maxSize = Size("10 MiB")
with self.assertRaises(errors.DeviceError):
dev.format = fmt
# the format assignment should succeed without the max size conflict
fmt._maxSize = Size(0)
dev.format = fmt
def testSizeGetter(self):
initial_size = Size("10 GiB")
new_size = Size("5 GiB")
dev = self._getDevice('sizetest', size=initial_size)
##
## getter returns the size in the basic case for non-existing devices
##
self.assertEqual(dev.size, initial_size)
# create a new device that exists
dev = self._getDevice('sizetest', size=initial_size, exists=True)
##
## getter returns the size in the basic case for existing devices
##
self.assertEqual(dev.size, initial_size)
##
## size does not reflect target size for non-resizable devices
##
# bypass the setter since the min/max will be the current size for a
# non-resizable device
dev._targetSize = new_size
self.assertEqual(dev.size, initial_size)
##
## getter returns target size when device is resizable and target size
## is non-zero
##
dev._resizable = True
dev.targetSize = new_size # verify that the target size setter works
self.assertEqual(dev.size, new_size)
self.assertEqual(dev.size, dev.targetSize)
self.assertNotEqual(dev._size, dev.targetSize)
##
## getter returns current size when device is resizable and target size
## is zero
##
dev.targetSize = Size(0)
self.assertEqual(dev.size, initial_size)
self.assertEqual(dev.size, dev.currentSize)
|
|
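The getter behaviour these tests pin down reduces to one rule: a resizable device with a non-zero target size reports the target, and everything else reports the current size. Distilled into a standalone sketch (the function name is illustrative):

def effective_size(current_size, target_size, resizable):
    # Mirrors the property behaviour asserted above: a pending resize is
    # only reflected in `size` when the device can actually be resized.
    if resizable and target_size:
        return target_size
    return current_size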
685e2c52e2e8cd7c2f691cd3c9a25704a65fbc5b
|
tools/git_hooks/find_duplicates.py
|
tools/git_hooks/find_duplicates.py
|
from os import walk
from os.path import join
from argparse import ArgumentParser
if __name__ == "__main__":
parser = ArgumentParser("Find duplicate file names within a directory structure")
parser.add_argument("dirs", help="Directories to search for duplicate file names"
, nargs="*")
parser.add_argument("--silent", help="Supress printing of filenames, just return number of duplicates", action="store_true")
args = parser.parse_args()
scanned_files = {}
for dir in args.dirs:
for root, dirs, files in walk(dir):
for file in files:
scanned_files.setdefault(file, [])
scanned_files[file].append(join(root, file))
count_dupe = 0
for key, value in scanned_files.iteritems():
if len(value) > 1:
count_dupe += 1
if not args.silent:
print("Multiple files found with name {}".format(key))
for file in value:
print(" {}".format(file))
exit(count_dupe)
|
Add a git hook script that will list all duplicate files within the directories specified
|
Add a git hook script that will list all duplicate files within the directories specified
|
Python
|
apache-2.0
|
cvtsi2sd/mbed-os,mazimkhan/mbed-os,pradeep-gr/mbed-os5-onsemi,c1728p9/mbed-os,fanghuaqi/mbed,kjbracey-arm/mbed,mikaleppanen/mbed-os,NXPmicro/mbed,mbedmicro/mbed,adamgreen/mbed,bcostm/mbed-os,nvlsianpu/mbed,Archcady/mbed-os,svogl/mbed-os,adamgreen/mbed,arostm/mbed-os,mmorenobarm/mbed-os,screamerbg/mbed,mikaleppanen/mbed-os,betzw/mbed-os,screamerbg/mbed,ryankurte/mbed-os,nRFMesh/mbed-os,fanghuaqi/mbed,svogl/mbed-os,maximmbed/mbed,infinnovation/mbed-os,adamgreen/mbed,ryankurte/mbed-os,CalSol/mbed,nvlsianpu/mbed,nvlsianpu/mbed,svogl/mbed-os,cvtsi2sd/mbed-os,nRFMesh/mbed-os,bcostm/mbed-os,NXPmicro/mbed,fahhem/mbed-os,catiedev/mbed-os,pradeep-gr/mbed-os5-onsemi,theotherjimmy/mbed,CalSol/mbed,infinnovation/mbed-os,fanghuaqi/mbed,mazimkhan/mbed-os,andcor02/mbed-os,YarivCol/mbed-os,j-greffe/mbed-os,andcor02/mbed-os,fahhem/mbed-os,netzimme/mbed-os,arostm/mbed-os,radhika-raghavendran/mbed-os5.1-onsemi,pradeep-gr/mbed-os5-onsemi,maximmbed/mbed,adustm/mbed,fahhem/mbed-os,monkiineko/mbed-os,kl-cruz/mbed-os,RonEld/mbed,mazimkhan/mbed-os,j-greffe/mbed-os,monkiineko/mbed-os,netzimme/mbed-os,pradeep-gr/mbed-os5-onsemi,j-greffe/mbed-os,YarivCol/mbed-os,betzw/mbed-os,j-greffe/mbed-os,Archcady/mbed-os,mbedmicro/mbed,c1728p9/mbed-os,radhika-raghavendran/mbed-os5.1-onsemi,radhika-raghavendran/mbed-os5.1-onsemi,kjbracey-arm/mbed,maximmbed/mbed,NXPmicro/mbed,kl-cruz/mbed-os,adustm/mbed,bulislaw/mbed-os,mmorenobarm/mbed-os,HeadsUpDisplayInc/mbed,kl-cruz/mbed-os,catiedev/mbed-os,mbedmicro/mbed,nRFMesh/mbed-os,mikaleppanen/mbed-os,netzimme/mbed-os,kjbracey-arm/mbed,bcostm/mbed-os,Archcady/mbed-os,svogl/mbed-os,fahhem/mbed-os,bulislaw/mbed-os,screamerbg/mbed,kl-cruz/mbed-os,andcor02/mbed-os,infinnovation/mbed-os,bcostm/mbed-os,monkiineko/mbed-os,HeadsUpDisplayInc/mbed,andcor02/mbed-os,CalSol/mbed,c1728p9/mbed-os,RonEld/mbed,YarivCol/mbed-os,ryankurte/mbed-os,karsev/mbed-os,maximmbed/mbed,kl-cruz/mbed-os,ryankurte/mbed-os,nRFMesh/mbed-os,catiedev/mbed-os,NXPmicro/mbed,HeadsUpDisplayInc/mbed,CalSol/mbed,theotherjimmy/mbed,betzw/mbed-os,cvtsi2sd/mbed-os,betzw/mbed-os,betzw/mbed-os,nvlsianpu/mbed,ryankurte/mbed-os,bcostm/mbed-os,adamgreen/mbed,YarivCol/mbed-os,HeadsUpDisplayInc/mbed,CalSol/mbed,Archcady/mbed-os,mmorenobarm/mbed-os,monkiineko/mbed-os,nRFMesh/mbed-os,YarivCol/mbed-os,c1728p9/mbed-os,cvtsi2sd/mbed-os,kjbracey-arm/mbed,Archcady/mbed-os,bulislaw/mbed-os,catiedev/mbed-os,fanghuaqi/mbed,karsev/mbed-os,radhika-raghavendran/mbed-os5.1-onsemi,infinnovation/mbed-os,radhika-raghavendran/mbed-os5.1-onsemi,screamerbg/mbed,mazimkhan/mbed-os,bulislaw/mbed-os,karsev/mbed-os,cvtsi2sd/mbed-os,c1728p9/mbed-os,netzimme/mbed-os,catiedev/mbed-os,nRFMesh/mbed-os,netzimme/mbed-os,svogl/mbed-os,monkiineko/mbed-os,karsev/mbed-os,HeadsUpDisplayInc/mbed,NXPmicro/mbed,pradeep-gr/mbed-os5-onsemi,maximmbed/mbed,netzimme/mbed-os,theotherjimmy/mbed,NXPmicro/mbed,mazimkhan/mbed-os,CalSol/mbed,RonEld/mbed,bcostm/mbed-os,j-greffe/mbed-os,RonEld/mbed,mmorenobarm/mbed-os,ryankurte/mbed-os,fanghuaqi/mbed,j-greffe/mbed-os,screamerbg/mbed,screamerbg/mbed,adustm/mbed,HeadsUpDisplayInc/mbed,nvlsianpu/mbed,mazimkhan/mbed-os,catiedev/mbed-os,bulislaw/mbed-os,arostm/mbed-os,adustm/mbed,mbedmicro/mbed,infinnovation/mbed-os,cvtsi2sd/mbed-os,fahhem/mbed-os,arostm/mbed-os,RonEld/mbed,adustm/mbed,radhika-raghavendran/mbed-os5.1-onsemi,infinnovation/mbed-os,mikaleppanen/mbed-os,RonEld/mbed,karsev/mbed-os,
pradeep-gr/mbed-os5-onsemi,arostm/mbed-os,mmorenobarm/mbed-os,theotherjimmy/mbed,monkiineko/mbed-os,betzw/mbed-os,mikaleppanen/mbed-os,nvlsianpu/mbed,adamgreen/mbed,Archcady/mbed-os,mikaleppanen/mbed-os,andcor02/mbed-os,c1728p9/mbed-os,fahhem/mbed-os,maximmbed/mbed,mmorenobarm/mbed-os,svogl/mbed-os,adustm/mbed,arostm/mbed-os,karsev/mbed-os
|
Add a git hook script that will list all duplicate files within the directories specified
|
from os import walk
from os.path import join
from argparse import ArgumentParser
if __name__ == "__main__":
parser = ArgumentParser("Find duplicate file names within a directory structure")
parser.add_argument("dirs", help="Directories to search for duplicate file names"
, nargs="*")
parser.add_argument("--silent", help="Supress printing of filenames, just return number of duplicates", action="store_true")
args = parser.parse_args()
scanned_files = {}
for dir in args.dirs:
for root, dirs, files in walk(dir):
for file in files:
scanned_files.setdefault(file, [])
scanned_files[file].append(join(root, file))
count_dupe = 0
    for key, value in scanned_files.items():
if len(value) > 1:
count_dupe += 1
if not args.silent:
print("Multiple files found with name {}".format(key))
for file in value:
print(" {}".format(file))
exit(count_dupe)
|
<commit_before><commit_msg>Add a git hook script that will list all duplicate files within the directories specified<commit_after>
|
from os import walk
from os.path import join
from argparse import ArgumentParser
if __name__ == "__main__":
parser = ArgumentParser("Find duplicate file names within a directory structure")
parser.add_argument("dirs", help="Directories to search for duplicate file names"
, nargs="*")
parser.add_argument("--silent", help="Supress printing of filenames, just return number of duplicates", action="store_true")
args = parser.parse_args()
scanned_files = {}
for dir in args.dirs:
for root, dirs, files in walk(dir):
for file in files:
scanned_files.setdefault(file, [])
scanned_files[file].append(join(root, file))
count_dupe = 0
    for key, value in scanned_files.items():
if len(value) > 1:
count_dupe += 1
if not args.silent:
print("Multiple files found with name {}".format(key))
for file in value:
print(" {}".format(file))
exit(count_dupe)
|
Add a git hook script that will list all duplicate files within the directories specifiedfrom os import walk
from os.path import join
from argparse import ArgumentParser
if __name__ == "__main__":
parser = ArgumentParser("Find duplicate file names within a directory structure")
parser.add_argument("dirs", help="Directories to search for duplicate file names"
, nargs="*")
parser.add_argument("--silent", help="Supress printing of filenames, just return number of duplicates", action="store_true")
args = parser.parse_args()
scanned_files = {}
for dir in args.dirs:
for root, dirs, files in walk(dir):
for file in files:
scanned_files.setdefault(file, [])
scanned_files[file].append(join(root, file))
count_dupe = 0
    for key, value in scanned_files.items():
if len(value) > 1:
count_dupe += 1
if not args.silent:
print("Multiple files found with name {}".format(key))
for file in value:
print(" {}".format(file))
exit(count_dupe)
|
<commit_before><commit_msg>Add a git hook script that will list all duplicate files within the directories specified<commit_after>from os import walk
from os.path import join
from argparse import ArgumentParser
if __name__ == "__main__":
parser = ArgumentParser("Find duplicate file names within a directory structure")
parser.add_argument("dirs", help="Directories to search for duplicate file names"
, nargs="*")
parser.add_argument("--silent", help="Supress printing of filenames, just return number of duplicates", action="store_true")
args = parser.parse_args()
scanned_files = {}
for dir in args.dirs:
for root, dirs, files in walk(dir):
for file in files:
scanned_files.setdefault(file, [])
scanned_files[file].append(join(root, file))
count_dupe = 0
    for key, value in scanned_files.items():
if len(value) > 1:
count_dupe += 1
if not args.silent:
print("Multiple files found with name {}".format(key))
for file in value:
print(" {}".format(file))
exit(count_dupe)
|
|
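A note on the grouping pattern in the record above: `scanned_files.setdefault(file, [])` followed by `append` builds a name-to-paths index in a single pass over the tree. A condensed standalone sketch of the same idea using `collections.defaultdict`; the invocation comment uses a hypothetical script name, since this record does not show the file's path:
# python find_duplicate_names.py dir_a dir_b    <- hypothetical script name and directories
from collections import defaultdict
from os import walk
from os.path import join
def duplicate_names(*dirs):
    # Map each file name to every path it occurs at under the given roots.
    index = defaultdict(list)
    for d in dirs:
        for root, _, files in walk(d):
            for name in files:
                index[name].append(join(root, name))
    # Keep only names that occur more than once.
    return {name: paths for name, paths in index.items() if len(paths) > 1}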
0da1fe90725bdb1729ee8835886e86756a4a91f8
|
win/data/tools/make-portable-python.py
|
win/data/tools/make-portable-python.py
|
'''Module that when called from the command line will go through all
files in the Scripts directory of the current python and will change the
python path hardcoded in these files from a full python path, to just
python.exe.
'''
import sys
import os
from os import listdir, remove, rename
from os.path import dirname, join
from re import compile, sub
from tempfile import mkstemp
from shutil import copystat
pypat = compile('#!(.+?)python.exe')
def make_portable():
scripts = join(dirname(sys.executable), 'Scripts')
for fname in listdir(scripts):
f = join(scripts, fname)
with open(f, 'rb') as fh:
old = fh.read()
new = sub(pypat, '#!python.exe', old)
if old == new:
continue
fd, tmp = mkstemp(prefix=fname, dir=scripts)
os.close(fd)
try:
with open(tmp, 'wb') as fh:
fh.write(new)
except:
print("Couldn't write {}".format(tmp))
continue
copystat(f, tmp)
try:
remove(f)
except:
print("Couldn't remove {}".format(f))
continue
rename(tmp, f)
print('Updated {}'.format(f))
if __name__ == '__main__':
make_portable()
|
Add make python portable script.
|
Add make python portable script.
|
Python
|
mit
|
kivy/kivy-sdk-packager,kivy/kivy-sdk-packager
|
Add make python portable script.
|
'''Module that when called from the command line will go through all
files in the Scripts directory of the current python and will change the
python path hardcoded in these files from a full python path, to just
python.exe.
'''
import sys
import os
from os import listdir, remove, rename
from os.path import dirname, join
from re import compile, sub
from tempfile import mkstemp
from shutil import copystat
pypat = compile('#!(.+?)python.exe')
def make_portable():
scripts = join(dirname(sys.executable), 'Scripts')
for fname in listdir(scripts):
f = join(scripts, fname)
with open(f, 'rb') as fh:
old = fh.read()
new = sub(pypat, '#!python.exe', old)
if old == new:
continue
fd, tmp = mkstemp(prefix=fname, dir=scripts)
os.close(fd)
try:
with open(tmp, 'wb') as fh:
fh.write(new)
except:
print("Couldn't write {}".format(tmp))
continue
copystat(f, tmp)
try:
remove(f)
except:
print("Couldn't remove {}".format(f))
continue
rename(tmp, f)
print('Updated {}'.format(f))
if __name__ == '__main__':
make_portable()
|
<commit_before><commit_msg>Add make python portable script.<commit_after>
|
'''Module that when called from the command line will go through all
files in the Scripts directory of the current python and will change the
python path hardcoded in these files from a full python path, to just
python.exe.
'''
import sys
import os
from os import listdir, remove, rename
from os.path import dirname, join
from re import compile, sub
from tempfile import mkstemp
from shutil import copystat
pypat = compile('#!(.+?)python.exe')
def make_portable():
scripts = join(dirname(sys.executable), 'Scripts')
for fname in listdir(scripts):
f = join(scripts, fname)
with open(f, 'rb') as fh:
old = fh.read()
new = sub(pypat, '#!python.exe', old)
if old == new:
continue
fd, tmp = mkstemp(prefix=fname, dir=scripts)
os.close(fd)
try:
with open(tmp, 'wb') as fh:
fh.write(new)
except:
print("Couldn't write {}".format(tmp))
continue
copystat(f, tmp)
try:
remove(f)
except:
print("Couldn't remove {}".format(f))
continue
rename(tmp, f)
print('Updated {}'.format(f))
if __name__ == '__main__':
make_portable()
|
Add make python portable script.'''Module that when called from the command line will go through all
files in the Scripts directory of the current python and will change the
python path hardcoded in these files from a full python path, to just
python.exe.
'''
import sys
import os
from os import listdir, remove, rename
from os.path import dirname, join
from re import compile, sub
from tempfile import mkstemp
from shutil import copystat
pypat = compile('#!(.+?)python.exe')
def make_portable():
scripts = join(dirname(sys.executable), 'Scripts')
for fname in listdir(scripts):
f = join(scripts, fname)
with open(f, 'rb') as fh:
old = fh.read()
new = sub(pypat, '#!python.exe', old)
if old == new:
continue
fd, tmp = mkstemp(prefix=fname, dir=scripts)
os.close(fd)
try:
with open(tmp, 'wb') as fh:
fh.write(new)
except:
print("Couldn't write {}".format(tmp))
continue
copystat(f, tmp)
try:
remove(f)
except:
print("Couldn't remove {}".format(f))
continue
rename(tmp, f)
print('Updated {}'.format(f))
if __name__ == '__main__':
make_portable()
|
<commit_before><commit_msg>Add make python portable script.<commit_after>'''Module that when called from the command line will go through all
files in the Scripts directory of the current python and will change the
python path hardcoded in these files from a full python path, to just
python.exe.
'''
import sys
import os
from os import listdir, remove, rename
from os.path import dirname, join
from re import compile, sub
from tempfile import mkstemp
from shutil import copystat
pypat = compile('#!(.+?)python.exe')
def make_portable():
scripts = join(dirname(sys.executable), 'Scripts')
for fname in listdir(scripts):
f = join(scripts, fname)
with open(f, 'rb') as fh:
old = fh.read()
new = sub(pypat, '#!python.exe', old)
if old == new:
continue
fd, tmp = mkstemp(prefix=fname, dir=scripts)
os.close(fd)
try:
with open(tmp, 'wb') as fh:
fh.write(new)
except:
print("Couldn't write {}".format(tmp))
continue
copystat(f, tmp)
try:
remove(f)
except:
print("Couldn't remove {}".format(f))
continue
rename(tmp, f)
print('Updated {}'.format(f))
if __name__ == '__main__':
make_portable()
|
|
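The substitution above hinges on the lazy group in `#!(.+?)python.exe`, which swallows whatever absolute prefix the installer baked into each script's shebang. A minimal check of that behavior (the sample path is invented):
from re import compile, sub
pypat = compile('#!(.+?)python.exe')
line = '#!C:\\Python27\\python.exe'   # a typical frozen Windows shebang
assert sub(pypat, '#!python.exe', line) == '#!python.exe'
Note that the script reads and writes the files in binary mode, which only pairs with a text pattern under Python 2; a Python 3 port would need a bytes pattern such as rb'#!(.+?)python.exe' and bytes replacements.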
a5e244b630fdd8220ebb1f5228aab20e3d44ca41
|
monty/fnmatch.py
|
monty/fnmatch.py
|
# coding: utf-8
"""This module provides support for Unix shell-style wildcards"""
from __future__ import unicode_literals, absolute_import
import fnmatch
from monty.string import list_strings
class WildCard(object):
"""
This object provides an easy-to-use interface for filename matching with
shell patterns (fnmatch).
>>> w = WildCard("*.nc|*.pdf")
>>> w.filter(["foo.nc", "bar.pdf", "hello.txt"])
['foo.nc', 'bar.pdf']
>>> w.filter("foo.nc")
['foo.nc']
"""
def __init__(self, wildcard, sep="|"):
"""
Initializes a WildCard.
Args:
wildcard (str): String of tokens separated by sep. Each token
represents a pattern.
sep (str): Separator for shell patterns.
"""
self.pats = ["*"]
if wildcard:
self.pats = wildcard.split(sep)
def __str__(self):
return "<%s, patterns = %s>" % (self.__class__.__name__, self.pats)
def filter(self, names):
"""
Returns a list with the names matching the pattern.
"""
names = list_strings(names)
fnames = []
for f in names:
for pat in self.pats:
if fnmatch.fnmatch(f, pat):
fnames.append(f)
return fnames
def match(self, name):
"""
Returns True if name matches one of the patterns.
"""
for pat in self.pats:
if fnmatch.fnmatch(name, pat):
return True
return False
|
Revert "Remove WildCard module. Functionality is already available in glob. Not sure why"
|
Revert "Remove WildCard module. Functionality is already available in glob. Not sure why"
This reverts commit 44502648df007fd32d5818f2ac0d1df98d9b4550.
|
Python
|
mit
|
gmatteo/monty,gmatteo/monty,materialsvirtuallab/monty,davidwaroquiers/monty,davidwaroquiers/monty,materialsvirtuallab/monty
|
Revert "Remove WildCard module. Functionality is already available in glob. Not sure why"
This reverts commit 44502648df007fd32d5818f2ac0d1df98d9b4550.
|
# coding: utf-8
"""This module provides support for Unix shell-style wildcards"""
from __future__ import unicode_literals, absolute_import
import fnmatch
from monty.string import list_strings
class WildCard(object):
"""
This object provides an easy-to-use interface for filename matching with
shell patterns (fnmatch).
>>> w = WildCard("*.nc|*.pdf")
>>> w.filter(["foo.nc", "bar.pdf", "hello.txt"])
['foo.nc', 'bar.pdf']
>>> w.filter("foo.nc")
['foo.nc']
"""
def __init__(self, wildcard, sep="|"):
"""
Initializes a WildCard.
Args:
wildcard (str): String of tokens separated by sep. Each token
represents a pattern.
sep (str): Separator for shell patterns.
"""
self.pats = ["*"]
if wildcard:
self.pats = wildcard.split(sep)
def __str__(self):
return "<%s, patterns = %s>" % (self.__class__.__name__, self.pats)
def filter(self, names):
"""
Returns a list with the names matching the pattern.
"""
names = list_strings(names)
fnames = []
for f in names:
for pat in self.pats:
if fnmatch.fnmatch(f, pat):
fnames.append(f)
return fnames
def match(self, name):
"""
Returns True if name matches one of the patterns.
"""
for pat in self.pats:
if fnmatch.fnmatch(name, pat):
return True
return False
|
<commit_before><commit_msg>Revert "Remove WildCard module. Functionality is already available in glob. Not sure why"
This reverts commit 44502648df007fd32d5818f2ac0d1df98d9b4550.<commit_after>
|
# coding: utf-8
"""This module provides support for Unix shell-style wildcards"""
from __future__ import unicode_literals, absolute_import
import fnmatch
from monty.string import list_strings
class WildCard(object):
"""
This object provides an easy-to-use interface for filename matching with
shell patterns (fnmatch).
>>> w = WildCard("*.nc|*.pdf")
>>> w.filter(["foo.nc", "bar.pdf", "hello.txt"])
['foo.nc', 'bar.pdf']
>>> w.filter("foo.nc")
['foo.nc']
"""
def __init__(self, wildcard, sep="|"):
"""
Initializes a WildCard.
Args:
wildcard (str): String of tokens separated by sep. Each token
represents a pattern.
sep (str): Separator for shell patterns.
"""
self.pats = ["*"]
if wildcard:
self.pats = wildcard.split(sep)
def __str__(self):
return "<%s, patterns = %s>" % (self.__class__.__name__, self.pats)
def filter(self, names):
"""
Returns a list with the names matching the pattern.
"""
names = list_strings(names)
fnames = []
for f in names:
for pat in self.pats:
if fnmatch.fnmatch(f, pat):
fnames.append(f)
return fnames
def match(self, name):
"""
Returns True if name matches one of the patterns.
"""
for pat in self.pats:
if fnmatch.fnmatch(name, pat):
return True
return False
|
Revert "Remove WildCard module. Functionality is already available in glob. Not sure why"
This reverts commit 44502648df007fd32d5818f2ac0d1df98d9b4550.# coding: utf-8
"""This module provides support for Unix shell-style wildcards"""
from __future__ import unicode_literals, absolute_import
import fnmatch
from monty.string import list_strings
class WildCard(object):
"""
This object provides an easy-to-use interface for filename matching with
shell patterns (fnmatch).
>>> w = WildCard("*.nc|*.pdf")
>>> w.filter(["foo.nc", "bar.pdf", "hello.txt"])
['foo.nc', 'bar.pdf']
>>> w.filter("foo.nc")
['foo.nc']
"""
def __init__(self, wildcard, sep="|"):
"""
Initializes a WildCard.
Args:
wildcard (str): String of tokens separated by sep. Each token
represents a pattern.
sep (str): Separator for shell patterns.
"""
self.pats = ["*"]
if wildcard:
self.pats = wildcard.split(sep)
def __str__(self):
return "<%s, patterns = %s>" % (self.__class__.__name__, self.pats)
def filter(self, names):
"""
Returns a list with the names matching the pattern.
"""
names = list_strings(names)
fnames = []
for f in names:
for pat in self.pats:
if fnmatch.fnmatch(f, pat):
fnames.append(f)
return fnames
def match(self, name):
"""
Returns True if name matches one of the patterns.
"""
for pat in self.pats:
if fnmatch.fnmatch(name, pat):
return True
return False
|
<commit_before><commit_msg>Revert "Remove WildCard module. Functionality is already available in glob. Not sure why"
This reverts commit 44502648df007fd32d5818f2ac0d1df98d9b4550.<commit_after># coding: utf-8
"""This module provides support for Unix shell-style wildcards"""
from __future__ import unicode_literals, absolute_import
import fnmatch
from monty.string import list_strings
class WildCard(object):
"""
This object provides an easy-to-use interface for filename matching with
shell patterns (fnmatch).
>>> w = WildCard("*.nc|*.pdf")
>>> w.filter(["foo.nc", "bar.pdf", "hello.txt"])
['foo.nc', 'bar.pdf']
>>> w.filter("foo.nc")
['foo.nc']
"""
def __init__(self, wildcard, sep="|"):
"""
Initializes a WildCard.
Args:
wildcard (str): String of tokens separated by sep. Each token
represents a pattern.
sep (str): Separator for shell patterns.
"""
self.pats = ["*"]
if wildcard:
self.pats = wildcard.split(sep)
def __str__(self):
return "<%s, patterns = %s>" % (self.__class__.__name__, self.pats)
def filter(self, names):
"""
Returns a list with the names matching the pattern.
"""
names = list_strings(names)
fnames = []
for f in names:
for pat in self.pats:
if fnmatch.fnmatch(f, pat):
fnames.append(f)
return fnames
def match(self, name):
"""
Returns True if name matches one of the patterns.
"""
for pat in self.pats:
if fnmatch.fnmatch(name, pat):
return True
return False
|
|
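A short usage sketch for the restored class, assuming it is importable as `monty.fnmatch` to match the file path above:
from monty.fnmatch import WildCard
w = WildCard("*.py;*.sh", sep=";")   # custom pattern separator
assert w.match("run.sh")
assert not w.match("README.md")
assert w.filter(["a.py", "b.txt"]) == ["a.py"]
# Quirk: filter() appends a name once per matching pattern, so a file
# matching several patterns appears several times in the result.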
4ca382fa691ae91f531a238df9ba13234b311b55
|
utils/remove_limit.py
|
utils/remove_limit.py
|
#!/usr/bin/env python
"""
Utility to remove limit warnings from Filter API output.
If --warnings was used, you will have the following in output:
{"limit": {"track": 2530, "timestamp_ms": "1482168932301"}}
This utility removes any limit warnings from output.
Usage:
remove_limit.py aleppo.json > aleppo_no_warnings.json
"""
from __future__ import print_function
import sys
import json
import fileinput
limit_breaker = '{"limit": {"track":'
for line in fileinput.input():
if limit_breaker not in line:
print(json.dumps(line))
|
Add utility to remove limit warnings.
|
Add utility to remove limit warnings.
|
Python
|
cc0-1.0
|
remagio/twarc,DocNow/twarc,remagio/twarc,edsu/twarc,hugovk/twarc
|
Add utility to remove limit warnings.
|
#!/usr/bin/env python
"""
Utility to remove limit warnings from Filter API output.
If --warnings was used, you will have the following in output:
{"limit": {"track": 2530, "timestamp_ms": "1482168932301"}}
This utility removes any limit warnings from output.
Usage:
remove_limit.py aleppo.json > aleppo_no_warnings.json
"""
from __future__ import print_function
import sys
import json
import fileinput
limit_breaker = '{"limit": {"track":'
for line in fileinput.input():
if limit_breaker not in line:
print(json.dumps(line))
|
<commit_before><commit_msg>Add utility to remove limit warnings.<commit_after>
|
#!/usr/bin/env python
"""
Utility to remove limit warnings from Filter API output.
If --warnings was used, you will have the following in output:
{"limit": {"track": 2530, "timestamp_ms": "1482168932301"}}
This utility removes any limit warnings from output.
Usage:
remove_limit.py aleppo.json > aleppo_no_warnings.json
"""
from __future__ import print_function
import sys
import json
import fileinput
limit_breaker = '{"limit": {"track":'
for line in fileinput.input():
if limit_breaker not in line:
print(json.dumps(line))
|
Add utility to remove limit warnings.#!/usr/bin/env python
"""
Utility to remove limit warnings from Filter API output.
If --warnings was used, you will have the following in output:
{"limit": {"track": 2530, "timestamp_ms": "1482168932301"}}
This utility removes any limit warnings from output.
Usage:
remove_limit.py aleppo.json > aleppo_no_warnings.json
"""
from __future__ import print_function
import sys
import json
import fileinput
limit_breaker = '{"limit": {"track":'
for line in fileinput.input():
if limit_breaker not in line:
print(json.dumps(line))
|
<commit_before><commit_msg>Add utility to remove limit warnings.<commit_after>#!/usr/bin/env python
"""
Utility to remove limit warnings from Filter API output.
If --warnings was used, you will have the following in output:
{"limit": {"track": 2530, "timestamp_ms": "1482168932301"}}
This utility removes any limit warnings from output.
Usage:
remove_limit.py aleppo.json > aleppo_no_warnings.json
"""
from __future__ import print_function
import sys
import json
import fileinput
limit_breaker = '{"limit": {"track":'
for line in fileinput.input():
if limit_breaker not in line:
print(json.dumps(line))
|
|
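One behavior worth flagging in the record above: `json.dumps(line)` treats each raw line as a Python string and re-encodes it as a JSON string literal, quoted and escaped, so the output is no longer tweet JSON. A variant that emits surviving lines unchanged might look like this (a sketch, not part of the commit; the unused `import sys` is dropped):
import fileinput
limit_breaker = '{"limit": {"track":'
for line in fileinput.input():
    if limit_breaker not in line:
        print(line, end='')   # input lines keep their trailing newline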
6b8f84e3feab1cb4c08f3ca52269db9f2c86d36f
|
examples/endless_turn.py
|
examples/endless_turn.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# PyAX-12
# The MIT License
#
# Copyright (c) 2010,2015,2017 Jeremie DECOCK (http://www.jdhp.org)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
A PyAX-12 demo.
"""
from pyax12.connection import Connection
from pyax12.argparse_default import common_argument_parser
import pyax12.packet as pk
from pyax12 import utils
import time
def main():
"""
A PyAX-12 demo.
"""
# Parse options
parser = common_argument_parser(desc=main.__doc__)
args = parser.parse_args()
# Connect to the serial port
serial_connection = Connection(port=args.port,
baudrate=args.baudrate,
timeout=args.timeout,
rpi_gpio=args.rpi)
dynamixel_id = args.dynamixel_id
###
serial_connection.set_cw_angle_limit(dynamixel_id, 0, degrees=False)
serial_connection.set_ccw_angle_limit(dynamixel_id, 0, degrees=False)
serial_connection.set_speed(dynamixel_id, 512)
time.sleep(5) # Wait 5 seconds
serial_connection.set_speed(dynamixel_id, 0)
serial_connection.set_ccw_angle_limit(dynamixel_id, 1023, degrees=False)
serial_connection.goto(dynamixel_id, 0, speed=512, degrees=True)
###
# Close the serial connection
serial_connection.close()
if __name__ == '__main__':
main()
|
Add an endless turn demo.
|
Add an endless turn demo.
|
Python
|
mit
|
jeremiedecock/pyax12,jeremiedecock/pyax12
|
Add an endless turn demo.
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# PyAX-12
# The MIT License
#
# Copyright (c) 2010,2015,2017 Jeremie DECOCK (http://www.jdhp.org)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
A PyAX-12 demo.
"""
from pyax12.connection import Connection
from pyax12.argparse_default import common_argument_parser
import pyax12.packet as pk
from pyax12 import utils
import time
def main():
"""
A PyAX-12 demo.
"""
# Parse options
parser = common_argument_parser(desc=main.__doc__)
args = parser.parse_args()
# Connect to the serial port
serial_connection = Connection(port=args.port,
baudrate=args.baudrate,
timeout=args.timeout,
rpi_gpio=args.rpi)
dynamixel_id = args.dynamixel_id
###
serial_connection.set_cw_angle_limit(dynamixel_id, 0, degrees=False)
serial_connection.set_ccw_angle_limit(dynamixel_id, 0, degrees=False)
serial_connection.set_speed(dynamixel_id, 512)
time.sleep(5) # Wait 5 seconds
serial_connection.set_speed(dynamixel_id, 0)
serial_connection.set_ccw_angle_limit(dynamixel_id, 1023, degrees=False)
serial_connection.goto(dynamixel_id, 0, speed=512, degrees=True)
###
# Close the serial connection
serial_connection.close()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add an endless turn demo.<commit_after>
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# PyAX-12
# The MIT License
#
# Copyright (c) 2010,2015,2017 Jeremie DECOCK (http://www.jdhp.org)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
A PyAX-12 demo.
"""
from pyax12.connection import Connection
from pyax12.argparse_default import common_argument_parser
import pyax12.packet as pk
from pyax12 import utils
import time
def main():
"""
A PyAX-12 demo.
"""
# Parse options
parser = common_argument_parser(desc=main.__doc__)
args = parser.parse_args()
# Connect to the serial port
serial_connection = Connection(port=args.port,
baudrate=args.baudrate,
timeout=args.timeout,
rpi_gpio=args.rpi)
dynamixel_id = args.dynamixel_id
###
serial_connection.set_cw_angle_limit(dynamixel_id, 0, degrees=False)
serial_connection.set_ccw_angle_limit(dynamixel_id, 0, degrees=False)
serial_connection.set_speed(dynamixel_id, 512)
time.sleep(5) # Wait 5 seconds
serial_connection.set_speed(dynamixel_id, 0)
serial_connection.set_ccw_angle_limit(dynamixel_id, 1023, degrees=False)
serial_connection.goto(dynamixel_id, 0, speed=512, degrees=True)
###
# Close the serial connection
serial_connection.close()
if __name__ == '__main__':
main()
|
Add an endless turn demo.#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# PyAX-12
# The MIT License
#
# Copyright (c) 2010,2015,2017 Jeremie DECOCK (http://www.jdhp.org)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
A PyAX-12 demo.
"""
from pyax12.connection import Connection
from pyax12.argparse_default import common_argument_parser
import pyax12.packet as pk
from pyax12 import utils
import time
def main():
"""
A PyAX-12 demo.
"""
# Parse options
parser = common_argument_parser(desc=main.__doc__)
args = parser.parse_args()
# Connect to the serial port
serial_connection = Connection(port=args.port,
baudrate=args.baudrate,
timeout=args.timeout,
rpi_gpio=args.rpi)
dynamixel_id = args.dynamixel_id
###
serial_connection.set_cw_angle_limit(dynamixel_id, 0, degrees=False)
serial_connection.set_ccw_angle_limit(dynamixel_id, 0, degrees=False)
serial_connection.set_speed(dynamixel_id, 512)
time.sleep(5) # Wait 5 seconds
serial_connection.set_speed(dynamixel_id, 0)
serial_connection.set_ccw_angle_limit(dynamixel_id, 1023, degrees=False)
serial_connection.goto(dynamixel_id, 0, speed=512, degrees=True)
###
# Close the serial connection
serial_connection.close()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add an endless turn demo.<commit_after>#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# PyAX-12
# The MIT License
#
# Copyright (c) 2010,2015,2017 Jeremie DECOCK (http://www.jdhp.org)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
A PyAX-12 demo.
"""
from pyax12.connection import Connection
from pyax12.argparse_default import common_argument_parser
import pyax12.packet as pk
from pyax12 import utils
import time
def main():
"""
A PyAX-12 demo.
"""
# Parse options
parser = common_argument_parser(desc=main.__doc__)
args = parser.parse_args()
# Connect to the serial port
serial_connection = Connection(port=args.port,
baudrate=args.baudrate,
timeout=args.timeout,
rpi_gpio=args.rpi)
dynamixel_id = args.dynamixel_id
###
serial_connection.set_cw_angle_limit(dynamixel_id, 0, degrees=False)
serial_connection.set_ccw_angle_limit(dynamixel_id, 0, degrees=False)
serial_connection.set_speed(dynamixel_id, 512)
time.sleep(5) # Wait 5 seconds
serial_connection.set_speed(dynamixel_id, 0)
serial_connection.set_ccw_angle_limit(dynamixel_id, 1023, degrees=False)
serial_connection.goto(dynamixel_id, 0, speed=512, degrees=True)
###
# Close the serial connection
serial_connection.close()
if __name__ == '__main__':
main()
|
|
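For context: on the AX-12, writing 0 to both the CW and CCW angle limits switches the servo into wheel (endless-turn) mode, which is exactly the sequence the demo performs before restoring the default CCW limit of 1023. The same steps wrapped as a reusable helper, built only from calls the demo itself uses:
import time
def spin_for(conn, dynamixel_id, speed=512, duration=5):
    # Enter wheel mode, spin at `speed` for `duration` seconds,
    # then stop and restore joint mode with the stock CCW limit.
    conn.set_cw_angle_limit(dynamixel_id, 0, degrees=False)
    conn.set_ccw_angle_limit(dynamixel_id, 0, degrees=False)
    conn.set_speed(dynamixel_id, speed)
    time.sleep(duration)
    conn.set_speed(dynamixel_id, 0)
    conn.set_ccw_angle_limit(dynamixel_id, 1023, degrees=False)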
755f3f3f8e39a338ea9f56fdb2c8f39787180c49
|
tests/import_tests.py
|
tests/import_tests.py
|
from nose.tools import ok_
import oemof.db as oemofdb
def test_oemofdb_imports():
ok_(oemofdb.connection)
ok_(oemofdb.engine)
ok_(oemofdb.url)
|
Test minimal package import functionality
|
Test minimal package import functionality
Ok, it's not really minimal, but it's one way to be sure, that the
package actually exists, is importable and some of its exported things
are accessible.
|
Python
|
mit
|
oemof/oemof.db
|
Test minimal package import functionality
Ok, it's not really minimal, but it's one way to be sure, that the
package actually exists, is importable and some of its exported things
are accessible.
|
from nose.tools import ok_
import oemof.db as oemofdb
def test_oemofdb_imports():
ok_(oemofdb.connection)
ok_(oemofdb.engine)
ok_(oemofdb.url)
|
<commit_before><commit_msg>Test minimal package import functionality
Ok, it's not really minimal, but it's one way to be sure, that the
package actually exists, is importable and some of its exported things
are accessible.<commit_after>
|
from nose.tools import ok_
import oemof.db as oemofdb
def test_oemofdb_imports():
ok_(oemofdb.connection)
ok_(oemofdb.engine)
ok_(oemofdb.url)
|
Test minimal package import functionality
Ok, it's not really minimal, but it's one way to be sure, that the
package actually exists, is importable and some of its exported things
are accessible.from nose.tools import ok_
import oemof.db as oemofdb
def test_oemofdb_imports():
ok_(oemofdb.connection)
ok_(oemofdb.engine)
ok_(oemofdb.url)
|
<commit_before><commit_msg>Test minimal package import functionality
Ok, it's not really minimal, but it's one way to be sure, that the
package actually exists, is importable and some of its exported things
are accessible.<commit_after>from nose.tools import ok_
import oemof.db as oemofdb
def test_oemofdb_imports():
ok_(oemofdb.connection)
ok_(oemofdb.engine)
ok_(oemofdb.url)
|
|
5df72de23fe87d0fe20df02007db79ec5bb8f843
|
write.py
|
write.py
|
#!/usr/bin/env blender
import sys
import os
import bpy
import addon_utils
def _main(args):
print("Writing", args)
default, state = addon_utils.check("io_EDM")
if not state:
import io_EDM
io_EDM.register()
  # Call the export operator
bpy.ops.export_mesh.edm(filepath="test.edm")
if __name__ == "__main__":
if _main(sys.argv) == -1:
sys.exit()
|
Add a very simple test writing script
|
Add a very simple test writing script
|
Python
|
mit
|
ndevenish/Blender_ioEDM,ndevenish/Blender_ioEDM
|
Add a very simple test writing script
|
#!/usr/bin/env blender
import sys
import os
import bpy
import addon_utils
def _main(args):
print("Writing", args)
default, state = addon_utils.check("io_EDM")
if not state:
import io_EDM
io_EDM.register()
  # Call the export operator
bpy.ops.export_mesh.edm(filepath="test.edm")
if __name__ == "__main__":
if _main(sys.argv) == -1:
sys.exit()
|
<commit_before><commit_msg>Add a very simple test writing script<commit_after>
|
#!/usr/bin/env blender
import sys
import os
import bpy
import addon_utils
def _main(args):
print("Writing", args)
default, state = addon_utils.check("io_EDM")
if not state:
import io_EDM
io_EDM.register()
  # Call the export operator
bpy.ops.export_mesh.edm(filepath="test.edm")
if __name__ == "__main__":
if _main(sys.argv) == -1:
sys.exit()
|
Add a very simple test writing script#!/usr/bin/env blender
import sys
import os
import bpy
import addon_utils
def _main(args):
print("Writing", args)
default, state = addon_utils.check("io_EDM")
if not state:
import io_EDM
io_EDM.register()
  # Call the export operator
bpy.ops.export_mesh.edm(filepath="test.edm")
if __name__ == "__main__":
if _main(sys.argv) == -1:
sys.exit()
|
<commit_before><commit_msg>Add a very simple test writing script<commit_after>#!/usr/bin/env blender
import sys
import os
import bpy
import addon_utils
def _main(args):
print("Writing", args)
default, state = addon_utils.check("io_EDM")
if not state:
import io_EDM
io_EDM.register()
  # Call the export operator
bpy.ops.export_mesh.edm(filepath="test.edm")
if __name__ == "__main__":
if _main(sys.argv) == -1:
sys.exit()
|
|
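Despite the `#!/usr/bin/env blender` shebang, a script like this is normally launched through Blender itself so that `bpy` and `addon_utils` are importable; a hypothetical headless invocation (file names are examples):
# blender some_scene.blend --background --python write.py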
2c301d7f0d52188893c0e83326003dbf84a7424a
|
fix_movies.py
|
fix_movies.py
|
#!/usr/bin/python
# we've been outputting stuff to text so now I get to wedge it into a database
# funtimes
# set up the database with `sqlite3 netflix_genres.sqlite < create_tables.sql`
import codecs
import sqlite3
import sys
conn = sqlite3.connect('netflix_genres.sqlite')
c = conn.cursor()
# for row in c.execute('SELECT id, info FROM movies'):
# db_id = row[0]
# info = row[1].split(',')
# movie_id = info[0]
# updates.append((movie_id, db_id))
#
# # print updates
#
# for update in updates:
# print update
# c.execute('UPDATE movies SET movie_id=? WHERE id=?', update)
c.execute('SELECT DISTINCT (movie_id) FROM movies ORDER BY movie_id')
movie_ids = c.fetchall()
inserts = []
for row in movie_ids:
movie_id = row[0]
name = cover_url = movie_url = None
genre_ids = []
for entry in c.execute('SELECT id, name, cover_url, movie_url, genres FROM movies WHERE movie_id = ?', (movie_id,)):
if not name:
name = entry[1]
if name != entry[1]:
print "Row %s has variant name (%s not %s)" % (entry[0], entry[1], name)
cover_url = cover_url or entry[2]
movie_url = movie_url or entry[3]
genre_ids.append("%05i" % int(entry[4]))
# print entry
genres = ','.join(genre_ids)
print (movie_id, name)
inserts.append((name, cover_url, movie_url, genres))
#print inserts
print "... Inserting"
c.executemany('INSERT INTO movies_n (name, cover_url, movie_url, genres) VALUES(?,?,?,?)', inserts)
conn.commit()
conn.close()
|
Document work in fixing movies table
|
Document work in fixing movies table
|
Python
|
unlicense
|
george08/netflix-o-matic
|
Document work in fixing movies table
|
#!/usr/bin/python
# we've been outputting stuff to text so now I get to wedge it into a database
# funtimes
# set up the database with `sqlite3 netflix_genres.sqlite < create_tables.sql`
import codecs
import sqlite3
import sys
conn = sqlite3.connect('netflix_genres.sqlite')
c = conn.cursor()
# for row in c.execute('SELECT id, info FROM movies'):
# db_id = row[0]
# info = row[1].split(',')
# movie_id = info[0]
# updates.append((movie_id, db_id))
#
# # print updates
#
# for update in updates:
# print update
# c.execute('UPDATE movies SET movie_id=? WHERE id=?', update)
c.execute('SELECT DISTINCT (movie_id) FROM movies ORDER BY movie_id')
movie_ids = c.fetchall()
inserts = []
for row in movie_ids:
movie_id = row[0]
name = cover_url = movie_url = None
genre_ids = []
for entry in c.execute('SELECT id, name, cover_url, movie_url, genres FROM movies WHERE movie_id = ?', (movie_id,)):
if not name:
name = entry[1]
if name != entry[1]:
print "Row %s has variant name (%s not %s)" % (entry[0], entry[1], name)
cover_url = cover_url or entry[2]
movie_url = movie_url or entry[3]
genre_ids.append("%05i" % int(entry[4]))
# print entry
genres = ','.join(genre_ids)
print (movie_id, name)
inserts.append((name, cover_url, movie_url, genres))
#print inserts
print "... Inserting"
c.executemany('INSERT INTO movies_n (name, cover_url, movie_url, genres) VALUES(?,?,?,?)', inserts)
conn.commit()
conn.close()
|
<commit_before><commit_msg>Document work in fixing movies table<commit_after>
|
#!/usr/bin/python
# we've been outputting stuff to text so now I get to wedge it into a database
# funtimes
# set up the database with `sqlite3 netflix_genres.sqlite < create_tables.sql`
import codecs
import sqlite3
import sys
conn = sqlite3.connect('netflix_genres.sqlite')
c = conn.cursor()
# for row in c.execute('SELECT id, info FROM movies'):
# db_id = row[0]
# info = row[1].split(',')
# movie_id = info[0]
# updates.append((movie_id, db_id))
#
# # print updates
#
# for update in updates:
# print update
# c.execute('UPDATE movies SET movie_id=? WHERE id=?', update)
c.execute('SELECT DISTINCT (movie_id) FROM movies ORDER BY movie_id')
movie_ids = c.fetchall()
inserts = []
for row in movie_ids:
movie_id = row[0]
name = cover_url = movie_url = None
genre_ids = []
for entry in c.execute('SELECT id, name, cover_url, movie_url, genres FROM movies WHERE movie_id = ?', (movie_id,)):
if not name:
name = entry[1]
if name != entry[1]:
print "Row %s has variant name (%s not %s)" % (entry[0], entry[1], name)
cover_url = cover_url or entry[2]
movie_url = movie_url or entry[3]
genre_ids.append("%05i" % int(entry[4]))
# print entry
genres = ','.join(genre_ids)
print (movie_id, name)
inserts.append((name, cover_url, movie_url, genres))
#print inserts
print "... Inserting"
c.executemany('INSERT INTO movies_n (name, cover_url, movie_url, genres) VALUES(?,?,?,?)', inserts)
conn.commit()
conn.close()
|
Document work in fixing movies table#!/usr/bin/python
# we've been outputting stuff to text so now I get to wedge it into a database
# funtimes
# set up the database with `sqlite3 netflix_genres.sqlite < create_tables.sql`
import codecs
import sqlite3
import sys
conn = sqlite3.connect('netflix_genres.sqlite')
c = conn.cursor()
# for row in c.execute('SELECT id, info FROM movies'):
# db_id = row[0]
# info = row[1].split(',')
# movie_id = info[0]
# updates.append((movie_id, db_id))
#
# # print updates
#
# for update in updates:
# print update
# c.execute('UPDATE movies SET movie_id=? WHERE id=?', update)
c.execute('SELECT DISTINCT (movie_id) FROM movies ORDER BY movie_id')
movie_ids = c.fetchall()
inserts = []
for row in movie_ids:
movie_id = row[0]
name = cover_url = movie_url = None
genre_ids = []
for entry in c.execute('SELECT id, name, cover_url, movie_url, genres FROM movies WHERE movie_id = ?', (movie_id,)):
if not name:
name = entry[1]
if name != entry[1]:
print "Row %s has variant name (%s not %s)" % (entry[0], entry[1], name)
cover_url = cover_url or entry[2]
movie_url = movie_url or entry[3]
genre_ids.append("%05i" % int(entry[4]))
# print entry
genres = ','.join(genre_ids)
print (movie_id, name)
inserts.append((name, cover_url, movie_url, genres))
#print inserts
print "... Inserting"
c.executemany('INSERT INTO movies_n (name, cover_url, movie_url, genres) VALUES(?,?,?,?)', inserts)
conn.commit()
conn.close()
|
<commit_before><commit_msg>Document work in fixing movies table<commit_after>#!/usr/bin/python
# we've been outputting stuff to text so now I get to wedge it into a database
# funtimes
# set up the database with `sqlite3 netflix_genres.sqlite < create_tables.sql`
import codecs
import sqlite3
import sys
conn = sqlite3.connect('netflix_genres.sqlite')
c = conn.cursor()
# for row in c.execute('SELECT id, info FROM movies'):
# db_id = row[0]
# info = row[1].split(',')
# movie_id = info[0]
# updates.append((movie_id, db_id))
#
# # print updates
#
# for update in updates:
# print update
# c.execute('UPDATE movies SET movie_id=? WHERE id=?', update)
c.execute('SELECT DISTINCT (movie_id) FROM movies ORDER BY movie_id')
movie_ids = c.fetchall()
inserts = []
for row in movie_ids:
movie_id = row[0]
name = cover_url = movie_url = None
genre_ids = []
for entry in c.execute('SELECT id, name, cover_url, movie_url, genres FROM movies WHERE movie_id = ?', (movie_id,)):
if not name:
name = entry[1]
if name != entry[1]:
print "Row %s has variant name (%s not %s)" % (entry[0], entry[1], name)
cover_url = cover_url or entry[2]
movie_url = movie_url or entry[3]
genre_ids.append("%05i" % int(entry[4]))
# print entry
genres = ','.join(genre_ids)
print (movie_id, name)
inserts.append((name, cover_url, movie_url, genres))
#print inserts
print "... Inserting"
c.executemany('INSERT INTO movies_n (name, cover_url, movie_url, genres) VALUES(?,?,?,?)', inserts)
conn.commit()
conn.close()
|
|
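The script above is consistently Python 2 (`print` statements under `#!/usr/bin/python`). Two details matter for anyone porting it: the bare prints need parentheses, and `print (movie_id, name)` already has them but prints a tuple in Python 2 versus two space-separated values in Python 3. A sketch of the affected lines under Python 3, not part of the commit:
print("Row %s has variant name (%s not %s)" % (entry[0], entry[1], name))
print((movie_id, name))   # keep the tuple output the Python 2 version produced
print("... Inserting")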
836eb50eae358cd077b58ba0d5c884f8c83919dc
|
main/readability_graph_brown.py
|
main/readability_graph_brown.py
|
from feature import simple
import graph
from nltk.corpus import brown
def calc_readability(corpus):
texts = []
results = []
for fileid in corpus.fileids():
sentlist = brown.sents(fileids=[fileid])
text = ' '.join([ ' '.join(ss) for ss in sentlist ])
texts.append(text)
for text in texts:
results.append(simple.get_text_stats(text)['read'])
return results
if __name__ == '__main__':
result = calc_readability(brown)
values = [ float(v['ari']) for v in result ]
graph.hist('data/ari_hist', values, 'ARI', 'Frequency',
'Frequency of ARI values')
values = [ float(v['flesch_reading_ease']) for v in result ]
graph.hist('data/flesch_reading_ease_hist', values, 'Flesch Reading Ease', 'Frequency',
'Frequency of Flesch Reading Ease values')
values = [ float(v['flesch_kincaid_grade_level']) for v in result ]
graph.hist('data/flesch_kincaid_grade_level_hist', values, 'Flesch Kincaid Grade Level', 'Frequency',
'Frequency of Flesch Kincaid Grade Level values')
values = [ float(v['gunning_fog_index']) for v in result ]
graph.hist('data/gunning_fog_index_hist', values, 'Gunning Fog Index', 'Frequency',
'Frequency of Gunning Fog Index values')
values = [ float(v['smog_index']) for v in result ]
graph.hist('data/smog_index_hist', values, 'Smog Index', 'Frequency',
'Frequency of Smog Index values')
values = [ float(v['coleman_liau_index']) for v in result ]
graph.hist('data/coleman_liau_index_hist', values, 'Coleman Liau Index', 'Frequency',
'Frequency of Coleman Liau Index values')
values = [ float(v['lix']) for v in result ]
graph.hist('data/lix_hist', values, 'LIX', 'Frequency',
'Frequency of LIX values')
values = [ float(v['rix']) for v in result ]
graph.hist('data/rix_hist', values, 'RIX', 'Frequency',
'Frequency of RIX values')
indices = ['ari', 'flesch_reading_ease', 'flesch_kincaid_grade_level', 'gunning_fog_index', 'smog_index',
'coleman_liau_index', 'lix', 'rix']
seen = []
for i in indices:
for j in indices:
if i == j:
continue
key = tuple(sorted([i, j]))
if key in seen:
continue
seen.append(key)
x = [ float(v[i]) for v in result ]
y = [ float(v[j]) for v in result ]
graph.scatter('data/%s-%s' % (i, j), x, y, i, j)
|
Add readability grapher for brown corpus
|
Add readability grapher for brown corpus
|
Python
|
mit
|
worldwise001/stylometry
|
Add readability grapher for brown corpus
|
from feature import simple
import graph
from nltk.corpus import brown
def calc_readability(corpus):
texts = []
results = []
for fileid in corpus.fileids():
sentlist = brown.sents(fileids=[fileid])
text = ' '.join([ ' '.join(ss) for ss in sentlist ])
texts.append(text)
for text in texts:
results.append(simple.get_text_stats(text)['read'])
return results
if __name__ == '__main__':
result = calc_readability(brown)
values = [ float(v['ari']) for v in result ]
graph.hist('data/ari_hist', values, 'ARI', 'Frequency',
'Frequency of ARI values')
values = [ float(v['flesch_reading_ease']) for v in result ]
graph.hist('data/flesch_reading_ease_hist', values, 'Flesch Reading Ease', 'Frequency',
'Frequency of Flesch Reading Ease values')
values = [ float(v['flesch_kincaid_grade_level']) for v in result ]
graph.hist('data/flesch_kincaid_grade_level_hist', values, 'Flesch Kincaid Grade Level', 'Frequency',
'Frequency of Flesch Kincaid Grade Level values')
values = [ float(v['gunning_fog_index']) for v in result ]
graph.hist('data/gunning_fog_index_hist', values, 'Gunning Fog Index', 'Frequency',
'Frequency of Gunning Fog Index values')
values = [ float(v['smog_index']) for v in result ]
graph.hist('data/smog_index_hist', values, 'Smog Index', 'Frequency',
'Frequency of Smog Index values')
values = [ float(v['coleman_liau_index']) for v in result ]
graph.hist('data/coleman_liau_index_hist', values, 'Coleman Liau Index', 'Frequency',
'Frequency of Coleman Liau Index values')
values = [ float(v['lix']) for v in result ]
graph.hist('data/lix_hist', values, 'LIX', 'Frequency',
'Frequency of LIX values')
values = [ float(v['rix']) for v in result ]
graph.hist('data/rix_hist', values, 'RIX', 'Frequency',
'Frequency of RIX values')
indices = ['ari', 'flesch_reading_ease', 'flesch_kincaid_grade_level', 'gunning_fog_index', 'smog_index',
'coleman_liau_index', 'lix', 'rix']
seen = []
for i in indices:
for j in indices:
if i == j:
continue
key = tuple(sorted([i, j]))
if key in seen:
continue
seen.append(key)
x = [ float(v[i]) for v in result ]
y = [ float(v[j]) for v in result ]
graph.scatter('data/%s-%s' % (i, j), x, y, i, j)
|
<commit_before><commit_msg>Add readability grapher for brown corpus<commit_after>
|
from feature import simple
import graph
from nltk.corpus import brown
def calc_readability(corpus):
texts = []
results = []
for fileid in corpus.fileids():
sentlist = brown.sents(fileids=[fileid])
text = ' '.join([ ' '.join(ss) for ss in sentlist ])
texts.append(text)
for text in texts:
results.append(simple.get_text_stats(text)['read'])
return results
if __name__ == '__main__':
result = calc_readability(brown)
values = [ float(v['ari']) for v in result ]
graph.hist('data/ari_hist', values, 'ARI', 'Frequency',
'Frequency of ARI values')
values = [ float(v['flesch_reading_ease']) for v in result ]
graph.hist('data/flesch_reading_ease_hist', values, 'Flesch Reading Ease', 'Frequency',
'Frequency of Flesch Reading Ease values')
values = [ float(v['flesch_kincaid_grade_level']) for v in result ]
graph.hist('data/flesch_kincaid_grade_level_hist', values, 'Flesch Kincaid Grade Level', 'Frequency',
'Frequency of Flesch Kincaid Grade Level values')
values = [ float(v['gunning_fog_index']) for v in result ]
graph.hist('data/gunning_fog_index_hist', values, 'Gunning Fog Index', 'Frequency',
'Frequency of Gunning Fog Index values')
values = [ float(v['smog_index']) for v in result ]
graph.hist('data/smog_index_hist', values, 'Smog Index', 'Frequency',
'Frequency of Smog Index values')
values = [ float(v['coleman_liau_index']) for v in result ]
graph.hist('data/coleman_liau_index_hist', values, 'Coleman Liau Index', 'Frequency',
'Frequency of Coleman Liau Index values')
values = [ float(v['lix']) for v in result ]
graph.hist('data/lix_hist', values, 'LIX', 'Frequency',
'Frequency of LIX values')
values = [ float(v['rix']) for v in result ]
graph.hist('data/rix_hist', values, 'RIX', 'Frequency',
'Frequency of RIX values')
indices = ['ari', 'flesch_reading_ease', 'flesch_kincaid_grade_level', 'gunning_fog_index', 'smog_index',
'coleman_liau_index', 'lix', 'rix']
seen = []
for i in indices:
for j in indices:
if i == j:
continue
key = tuple(sorted([i, j]))
if key in seen:
continue
seen.append(key)
x = [ float(v[i]) for v in result ]
y = [ float(v[j]) for v in result ]
graph.scatter('data/%s-%s' % (i, j), x, y, i, j)
|
Add readability grapher for brown corpusfrom feature import simple
import graph
from nltk.corpus import brown
def calc_readability(corpus):
texts = []
results = []
for fileid in corpus.fileids():
sentlist = brown.sents(fileids=[fileid])
text = ' '.join([ ' '.join(ss) for ss in sentlist ])
texts.append(text)
for text in texts:
results.append(simple.get_text_stats(text)['read'])
return results
if __name__ == '__main__':
result = calc_readability(brown)
values = [ float(v['ari']) for v in result ]
graph.hist('data/ari_hist', values, 'ARI', 'Frequency',
'Frequency of ARI values')
values = [ float(v['flesch_reading_ease']) for v in result ]
graph.hist('data/flesch_reading_ease_hist', values, 'Flesch Reading Ease', 'Frequency',
'Frequency of Flesch Reading Ease values')
values = [ float(v['flesch_kincaid_grade_level']) for v in result ]
graph.hist('data/flesch_kincaid_grade_level_hist', values, 'Flesch Kincaid Grade Level', 'Frequency',
'Frequency of Flesch Kincaid Grade Level values')
values = [ float(v['gunning_fog_index']) for v in result ]
graph.hist('data/gunning_fog_index_hist', values, 'Gunning Fog Index', 'Frequency',
'Frequency of Gunning Fog Index values')
values = [ float(v['smog_index']) for v in result ]
graph.hist('data/smog_index_hist', values, 'Smog Index', 'Frequency',
'Frequency of Smog Index values')
values = [ float(v['coleman_liau_index']) for v in result ]
graph.hist('data/coleman_liau_index_hist', values, 'Coleman Liau Index', 'Frequency',
'Frequency of Coleman Liau Index values')
values = [ float(v['lix']) for v in result ]
graph.hist('data/lix_hist', values, 'LIX', 'Frequency',
'Frequency of LIX values')
values = [ float(v['rix']) for v in result ]
graph.hist('data/rix_hist', values, 'RIX', 'Frequency',
'Frequency of RIX values')
indices = ['ari', 'flesch_reading_ease', 'flesch_kincaid_grade_level', 'gunning_fog_index', 'smog_index',
'coleman_liau_index', 'lix', 'rix']
seen = []
for i in indices:
for j in indices:
if i == j:
continue
key = tuple(sorted([i, j]))
if key in seen:
continue
seen.append(key)
x = [ float(v[i]) for v in result ]
y = [ float(v[j]) for v in result ]
graph.scatter('data/%s-%s' % (i, j), x, y, i, j)
|
<commit_before><commit_msg>Add readability grapher for brown corpus<commit_after>from feature import simple
import graph
from nltk.corpus import brown
def calc_readability(corpus):
texts = []
results = []
for fileid in corpus.fileids():
sentlist = brown.sents(fileids=[fileid])
text = ' '.join([ ' '.join(ss) for ss in sentlist ])
texts.append(text)
for text in texts:
results.append(simple.get_text_stats(text)['read'])
return results
if __name__ == '__main__':
result = calc_readability(brown)
values = [ float(v['ari']) for v in result ]
graph.hist('data/ari_hist', values, 'ARI', 'Frequency',
'Frequency of ARI values')
values = [ float(v['flesch_reading_ease']) for v in result ]
graph.hist('data/flesch_reading_ease_hist', values, 'Flesch Reading Ease', 'Frequency',
'Frequency of Flesch Reading Ease values')
values = [ float(v['flesch_kincaid_grade_level']) for v in result ]
graph.hist('data/flesch_kincaid_grade_level_hist', values, 'Flesch Kincaid Grade Level', 'Frequency',
'Frequency of Flesch Kincaid Grade Level values')
values = [ float(v['gunning_fog_index']) for v in result ]
graph.hist('data/gunning_fog_index_hist', values, 'Gunning Fog Index', 'Frequency',
'Frequency of Gunning Fog Index values')
values = [ float(v['smog_index']) for v in result ]
graph.hist('data/smog_index_hist', values, 'Smog Index', 'Frequency',
'Frequency of Smog Index values')
values = [ float(v['coleman_liau_index']) for v in result ]
graph.hist('data/coleman_liau_index_hist', values, 'Coleman Liau Index', 'Frequency',
'Frequency of Coleman Liau Index values')
values = [ float(v['lix']) for v in result ]
graph.hist('data/lix_hist', values, 'LIX', 'Frequency',
'Frequency of LIX values')
values = [ float(v['rix']) for v in result ]
graph.hist('data/rix_hist', values, 'RIX', 'Frequency',
'Frequency of RIX values')
indices = ['ari', 'flesch_reading_ease', 'flesch_kincaid_grade_level', 'gunning_fog_index', 'smog_index',
'coleman_liau_index', 'lix', 'rix']
seen = []
for i in indices:
for j in indices:
if i == j:
continue
key = tuple(sorted([i, j]))
if key in seen:
continue
seen.append(key)
x = [ float(v[i]) for v in result ]
y = [ float(v[j]) for v in result ]
graph.scatter('data/%s-%s' % (i, j), x, y, i, j)
|
|
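The eight near-identical `graph.hist` calls above can be collapsed into a data-driven loop, and the `seen`-list dedup for scatter plots is equivalent to `itertools.combinations(indices, 2)`. A sketch assuming the same `graph` helper module and the `result` list computed by the script:
import itertools
HISTOGRAMS = [
    ('ari', 'ARI'),
    ('flesch_reading_ease', 'Flesch Reading Ease'),
    ('flesch_kincaid_grade_level', 'Flesch Kincaid Grade Level'),
    ('gunning_fog_index', 'Gunning Fog Index'),
    ('smog_index', 'Smog Index'),
    ('coleman_liau_index', 'Coleman Liau Index'),
    ('lix', 'LIX'),
    ('rix', 'RIX'),
]
for key, label in HISTOGRAMS:
    values = [float(v[key]) for v in result]
    graph.hist('data/%s_hist' % key, values, label, 'Frequency',
               'Frequency of %s values' % label)
for i, j in itertools.combinations([k for k, _ in HISTOGRAMS], 2):
    x = [float(v[i]) for v in result]
    y = [float(v[j]) for v in result]
    graph.scatter('data/%s-%s' % (i, j), x, y, i, j)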
5aa9fd6b9c30fc4861596c2efed456e01ab49f22
|
python/decrypt_img.py
|
python/decrypt_img.py
|
import requests
import json
# Decrypt a previously encrypted image using the passphrase 'superpass'
# https://pixlab.io/#/cmd?id=encrypt && https://pixlab.io/#/cmd?id=decrypt
# Password used for decryption
pwd = 'superpass'
req = requests.get('https://api.pixlab.io/decrypt',params={'img':'https://pixlab.xyz/wxfnq5886bad496f95.png','pwd':pwd,'key':'My_PixLab_Key'})
reply = req.json()
if reply['status'] != 200:
print (reply['error'])
else:
print ("Link to the decrypted picture: "+ reply['link'])
|
Decrypt a previously encrypted image
|
Decrypt a previously encrypted image
|
Python
|
bsd-2-clause
|
symisc/pixlab,symisc/pixlab,symisc/pixlab
|
Decrypt a previously encrypted image
|
import requests
import json
# Decrypt a previously encrypted image using the passphrase 'superpass'
# https://pixlab.io/#/cmd?id=encrypt && https://pixlab.io/#/cmd?id=decrypt
# Password used for decryption
pwd = 'superpass'
req = requests.get('https://api.pixlab.io/decrypt',params={'img':'https://pixlab.xyz/wxfnq5886bad496f95.png','pwd':pwd,'key':'My_PixLab_Key'})
reply = req.json()
if reply['status'] != 200:
print (reply['error'])
else:
print ("Link to the decrypted picture: "+ reply['link'])
|
<commit_before><commit_msg>Decrypt a previously encrypted image<commit_after>
|
import requests
import json
# Decrypt a previously encrypted image using the passphrase 'superpass'
# https://pixlab.io/#/cmd?id=encrypt && https://pixlab.io/#/cmd?id=decrypt
# Password used for decryption
pwd = 'superpass'
req = requests.get('https://api.pixlab.io/decrypt',params={'img':'https://pixlab.xyz/wxfnq5886bad496f95.png','pwd':pwd,'key':'My_PixLab_Key'})
reply = req.json()
if reply['status'] != 200:
print (reply['error'])
else:
print ("Link to the decrypted picture: "+ reply['link'])
|
Decrypt a previously encrypted imageimport requests
import json
# Decrypt a previously encrypted image using the passphrase 'superpass'
# https://pixlab.io/#/cmd?id=encrypt && https://pixlab.io/#/cmd?id=decrypt
# Password used for decryption
pwd = 'superpass'
req = requests.get('https://api.pixlab.io/decrypt',params={'img':'https://pixlab.xyz/wxfnq5886bad496f95.png','pwd':pwd,'key':'My_PixLab_Key'})
reply = req.json()
if reply['status'] != 200:
print (reply['error'])
else:
print ("Link to the decrypted picture: "+ reply['link'])
|
<commit_before><commit_msg>Decrypt a previously encrypted image<commit_after>import requests
import json
# Decrypt a previously encrypted image using the passphrase 'superpass'
# https://pixlab.io/#/cmd?id=encrypt && https://pixlab.io/#/cmd?id=decrypt
# Password used for decryption
pwd = 'superpass'
req = requests.get('https://api.pixlab.io/decrypt',params={'img':'https://pixlab.xyz/wxfnq5886bad496f95.png','pwd':pwd,'key':'My_PixLab_Key'})
reply = req.json()
if reply['status'] != 200:
print (reply['error'])
else:
print ("Link to the decrypted picture: "+ reply['link'])
|
|
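The decrypt snippet above has no transport-level error handling: a hung connection blocks forever, and a non-JSON error page raises an unrelated exception at req.json(). A minimal hardened variant, assuming the same endpoint and parameters as the record (the API key is a placeholder), could look like this:

import requests

# Same call as the record, plus a timeout and an HTTP status check.
params = {
    'img': 'https://pixlab.xyz/wxfnq5886bad496f95.png',
    'pwd': 'superpass',          # passphrase used for decryption
    'key': 'My_PixLab_Key',      # placeholder API key
}
req = requests.get('https://api.pixlab.io/decrypt', params=params, timeout=15)
req.raise_for_status()           # network/HTTP-level failures raise here
reply = req.json()
if reply['status'] != 200:       # PixLab also reports errors in the body
    print(reply['error'])
else:
    print("Link to the decrypted picture: " + reply['link'])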
182afef414d6ae9cd1ebd7d38be56cd77aaad663
|
tests/test_Levitt1971_Fig4.py
|
tests/test_Levitt1971_Fig4.py
|
from UpDownMethods import CORRECT, INCORRECT
import UpDownMethods as ud
import numpy as np
import matplotlib.pyplot as plt
import unittest
#
# Simulation parameters
#
responses = [CORRECT, CORRECT, INCORRECT, INCORRECT, INCORRECT, INCORRECT,
CORRECT, CORRECT, INCORRECT, INCORRECT, INCORRECT, CORRECT,
CORRECT, CORRECT, CORRECT, CORRECT, INCORRECT, INCORRECT,
INCORRECT, CORRECT, CORRECT, INCORRECT, INCORRECT, INCORRECT]
initalValue = 0.0
stepSize = 1.0
down = 1
up = 1
#
# Test code
#
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.results = ud.initiate_procedure()
nextValue, self.results = ud.append_result(self.results, responses[0],
down, up, stepSize,
initalValue)
for resp in responses[1:]:
nextValue, self.results = ud.append_result(self.results, resp,
down, up, stepSize,
nextValue)
def test_initiateResults(self):
self.results = ud.initiate_procedure()
self.assertIs(len(self.results), 0)
def test_calculateMidpoints(self):
mids = ud.midpoints(self.results)
mids = mids["Midpoint"]
mids = mids[[1, 3, 5]].values # Runs 2, 4, 6
self.assertIsNone(np.testing.assert_array_equal(mids, [0.0, 1.5, -0.5]))
def test_plotResults(self):
ud.plot_results(self.results)
plt.savefig('test-fig4.png', bbox_inches='tight')
def test_runs(self):
runs = ud.runs(self.results)
self.assertIsNone(np.testing.assert_array_equal(
runs["Start"].values, [1, 3, 7, 9, 12, 17, 20, 22]))
self.assertIsNone(np.testing.assert_array_equal(
runs["Finish"].values, [3, 7, 9, 12, 17, 20, 22, 24]))
if __name__ == '__main__':
# unittest.main()
suite = unittest.TestLoader().loadTestsFromTestCase(TestSequenceFunctions)
unittest.TextTestRunner(verbosity=2).run(suite)
|
Include Levitt figure 4 test
|
Include Levitt figure 4 test
|
Python
|
mit
|
codles/UpDownMethods
|
Include Levitt figure 4 test
|
from UpDownMethods import CORRECT, INCORRECT
import UpDownMethods as ud
import numpy as np
import matplotlib.pyplot as plt
import unittest
#
# Simulation parameters
#
responses = [CORRECT, CORRECT, INCORRECT, INCORRECT, INCORRECT, INCORRECT,
CORRECT, CORRECT, INCORRECT, INCORRECT, INCORRECT, CORRECT,
CORRECT, CORRECT, CORRECT, CORRECT, INCORRECT, INCORRECT,
INCORRECT, CORRECT, CORRECT, INCORRECT, INCORRECT, INCORRECT]
initalValue = 0.0
stepSize = 1.0
down = 1
up = 1
#
# Test code
#
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.results = ud.initiate_procedure()
nextValue, self.results = ud.append_result(self.results, responses[0],
down, up, stepSize,
initalValue)
for resp in responses[1:]:
nextValue, self.results = ud.append_result(self.results, resp,
down, up, stepSize,
nextValue)
def test_initiateResults(self):
self.results = ud.initiate_procedure()
self.assertIs(len(self.results), 0)
def test_calculateMidpoints(self):
mids = ud.midpoints(self.results)
mids = mids["Midpoint"]
mids = mids[[1, 3, 5]].values # Runs 2, 4, 6
self.assertIsNone(np.testing.assert_array_equal(mids, [0.0, 1.5, -0.5]))
def test_plotResults(self):
ud.plot_results(self.results)
plt.savefig('test-fig4.png', bbox_inches='tight')
def test_runs(self):
runs = ud.runs(self.results)
self.assertIsNone(np.testing.assert_array_equal(
runs["Start"].values, [1, 3, 7, 9, 12, 17, 20, 22]))
self.assertIsNone(np.testing.assert_array_equal(
runs["Finish"].values, [3, 7, 9, 12, 17, 20, 22, 24]))
if __name__ == '__main__':
# unittest.main()
suite = unittest.TestLoader().loadTestsFromTestCase(TestSequenceFunctions)
unittest.TextTestRunner(verbosity=2).run(suite)
|
<commit_before><commit_msg>Include Levitt figure 4 test<commit_after>
|
from UpDownMethods import CORRECT, INCORRECT
import UpDownMethods as ud
import numpy as np
import matplotlib.pyplot as plt
import unittest
#
# Simulation parameters
#
responses = [CORRECT, CORRECT, INCORRECT, INCORRECT, INCORRECT, INCORRECT,
CORRECT, CORRECT, INCORRECT, INCORRECT, INCORRECT, CORRECT,
CORRECT, CORRECT, CORRECT, CORRECT, INCORRECT, INCORRECT,
INCORRECT, CORRECT, CORRECT, INCORRECT, INCORRECT, INCORRECT]
initalValue = 0.0
stepSize = 1.0
down = 1
up = 1
#
# Test code
#
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.results = ud.initiate_procedure()
nextValue, self.results = ud.append_result(self.results, responses[0],
down, up, stepSize,
initalValue)
for resp in responses[1:]:
nextValue, self.results = ud.append_result(self.results, resp,
down, up, stepSize,
nextValue)
def test_initiateResults(self):
self.results = ud.initiate_procedure()
self.assertIs(len(self.results), 0)
def test_calculateMidpoints(self):
mids = ud.midpoints(self.results)
mids = mids["Midpoint"]
mids = mids[[1, 3, 5]].values # Runs 2, 4, 6
self.assertIsNone(np.testing.assert_array_equal(mids, [0.0, 1.5, -0.5]))
def test_plotResults(self):
ud.plot_results(self.results)
plt.savefig('test-fig4.png', bbox_inches='tight')
def test_runs(self):
runs = ud.runs(self.results)
self.assertIsNone(np.testing.assert_array_equal(
runs["Start"].values, [1, 3, 7, 9, 12, 17, 20, 22]))
self.assertIsNone(np.testing.assert_array_equal(
runs["Finish"].values, [3, 7, 9, 12, 17, 20, 22, 24]))
if __name__ == '__main__':
# unittest.main()
suite = unittest.TestLoader().loadTestsFromTestCase(TestSequenceFunctions)
unittest.TextTestRunner(verbosity=2).run(suite)
|
Include Levitt figure 4 testfrom UpDownMethods import CORRECT, INCORRECT
import UpDownMethods as ud
import numpy as np
import matplotlib.pyplot as plt
import unittest
#
# Simulation parameters
#
responses = [CORRECT, CORRECT, INCORRECT, INCORRECT, INCORRECT, INCORRECT,
CORRECT, CORRECT, INCORRECT, INCORRECT, INCORRECT, CORRECT,
CORRECT, CORRECT, CORRECT, CORRECT, INCORRECT, INCORRECT,
INCORRECT, CORRECT, CORRECT, INCORRECT, INCORRECT, INCORRECT]
initalValue = 0.0
stepSize = 1.0
down = 1
up = 1
#
# Test code
#
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.results = ud.initiate_procedure()
nextValue, self.results = ud.append_result(self.results, responses[0],
down, up, stepSize,
initalValue)
for resp in responses[1:]:
nextValue, self.results = ud.append_result(self.results, resp,
down, up, stepSize,
nextValue)
def test_initiateResults(self):
self.results = ud.initiate_procedure()
self.assertIs(len(self.results), 0)
def test_calculateMidpoints(self):
mids = ud.midpoints(self.results)
mids = mids["Midpoint"]
mids = mids[[1, 3, 5]].values # Runs 2, 4, 6
self.assertIsNone(np.testing.assert_array_equal(mids, [0.0, 1.5, -0.5]))
def test_plotResults(self):
ud.plot_results(self.results)
plt.savefig('test-fig4.png', bbox_inches='tight')
def test_runs(self):
runs = ud.runs(self.results)
self.assertIsNone(np.testing.assert_array_equal(
runs["Start"].values, [1, 3, 7, 9, 12, 17, 20, 22]))
self.assertIsNone(np.testing.assert_array_equal(
runs["Finish"].values, [3, 7, 9, 12, 17, 20, 22, 24]))
if __name__ == '__main__':
# unittest.main()
suite = unittest.TestLoader().loadTestsFromTestCase(TestSequenceFunctions)
unittest.TextTestRunner(verbosity=2).run(suite)
|
<commit_before><commit_msg>Include Levitt figure 4 test<commit_after>from UpDownMethods import CORRECT, INCORRECT
import UpDownMethods as ud
import numpy as np
import matplotlib.pyplot as plt
import unittest
#
# Simulation parameters
#
responses = [CORRECT, CORRECT, INCORRECT, INCORRECT, INCORRECT, INCORRECT,
CORRECT, CORRECT, INCORRECT, INCORRECT, INCORRECT, CORRECT,
CORRECT, CORRECT, CORRECT, CORRECT, INCORRECT, INCORRECT,
INCORRECT, CORRECT, CORRECT, INCORRECT, INCORRECT, INCORRECT]
initalValue = 0.0
stepSize = 1.0
down = 1
up = 1
#
# Test code
#
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.results = ud.initiate_procedure()
nextValue, self.results = ud.append_result(self.results, responses[0],
down, up, stepSize,
initalValue)
for resp in responses[1:]:
nextValue, self.results = ud.append_result(self.results, resp,
down, up, stepSize,
nextValue)
def test_initiateResults(self):
self.results = ud.initiate_procedure()
self.assertIs(len(self.results), 0)
def test_calculateMidpoints(self):
mids = ud.midpoints(self.results)
mids = mids["Midpoint"]
mids = mids[[1, 3, 5]].values # Runs 2, 4, 6
self.assertIsNone(np.testing.assert_array_equal(mids, [0.0, 1.5, -0.5]))
def test_plotResults(self):
ud.plot_results(self.results)
plt.savefig('test-fig4.png', bbox_inches='tight')
def test_runs(self):
runs = ud.runs(self.results)
self.assertIsNone(np.testing.assert_array_equal(
runs["Start"].values, [1, 3, 7, 9, 12, 17, 20, 22]))
self.assertIsNone(np.testing.assert_array_equal(
runs["Finish"].values, [3, 7, 9, 12, 17, 20, 22, 24]))
if __name__ == '__main__':
# unittest.main()
suite = unittest.TestLoader().loadTestsFromTestCase(TestSequenceFunctions)
unittest.TextTestRunner(verbosity=2).run(suite)
|
|
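The midpoints [0.0, 1.5, -0.5] asserted in the test fall out of simulating the 1-up/1-down staircase by hand. A standalone sketch, independent of UpDownMethods and assuming that CORRECT steps the level down, INCORRECT steps it up, and a run's midpoint is the mean of its extreme levels, reproduces them:

# Assumptions: CORRECT steps the level down by stepSize, INCORRECT steps it
# up, and a run's midpoint is (min + max) / 2 over the levels it presented.
C, I = 'correct', 'incorrect'
responses = [C, C, I, I, I, I, C, C, I, I, I, C,
             C, C, C, C, I, I, I, C, C, I, I, I]
levels, value = [], 0.0
for resp in responses:
    levels.append(value)
    value += -1.0 if resp == C else 1.0

# Runs 2, 4 and 6 from the test span trials 3-7, 9-12 and 17-20 (1-based).
for start, finish in [(3, 7), (9, 12), (17, 20)]:
    run = levels[start - 1:finish]
    print((min(run) + max(run)) / 2.0)   # -> 0.0, 1.5, -0.5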
f2b334c0238e6b3bc37169a7e5681451da9ce424
|
Python/155_MinStack.py
|
Python/155_MinStack.py
|
class MinStack(object):
def __init__(self):
"""
initialize your data structure here.
"""
self._s = []
self.t = len(self._s)-1
self.curMin = 2147483647
def push(self, x):
"""
:type x: int
:rtype: nothing
"""
self.curMin = x if x < self.curMin else self.curMin
if self.t < len(self._s)-1 and len(self._s)>0:
self._s[self.t+1] = [x,self.curMin]
else:
self._s.append([x,self.curMin])
self.t += 1
def pop(self):
"""
:rtype: nothing
"""
p = self._s[self.t][0]
self.t -= 1
self.curMin = 2147483647 if self.t == -1 else self._s[self.t][1]
# print "t=",self.t,"self.curMin=",self.curMin
return p
def top(self):
"""
:rtype: int
"""
return self._s[self.t][0]
def getMin(self):
"""
:rtype: int
"""
return self._s[self.t][1]
def getStack(self):
return self._s[:self.t+1]
if __name__ == '__main__':
stack = MinStack()
stack.push(2147483646)
stack.push(2147483645)
stack.push(2147483647)
stack.pop()
stack.pop()
stack.pop()
stack.push(2147483647)
stack.push(-2147483648)
stack.pop()
print stack.getMin()
|
Add solution for 155 min stack. (tooooooo slow)
|
Add solution for 155 min stack. (tooooooo slow)
|
Python
|
mit
|
comicxmz001/LeetCode,comicxmz001/LeetCode
|
Add solution for 155 min stack. (tooooooo slow)
|
class MinStack(object):
def __init__(self):
"""
initialize your data structure here.
"""
self._s = []
self.t = len(self._s)-1
self.curMin = 2147483647
def push(self, x):
"""
:type x: int
:rtype: nothing
"""
self.curMin = x if x < self.curMin else self.curMin
if self.t < len(self._s)-1 and len(self._s)>0:
self._s[self.t+1] = [x,self.curMin]
else:
self._s.append([x,self.curMin])
self.t += 1
def pop(self):
"""
:rtype: nothing
"""
p = self._s[self.t][0]
self.t -= 1
self.curMin = 2147483647 if self.t == -1 else self._s[self.t][1]
# print "t=",self.t,"self.curMin=",self.curMin
return p
def top(self):
"""
:rtype: int
"""
return self._s[self.t][0]
def getMin(self):
"""
:rtype: int
"""
return self._s[self.t][1]
def getStack(self):
return self._s[:self.t+1]
if __name__ == '__main__':
stack = MinStack()
stack.push(2147483646)
stack.push(2147483645)
stack.push(2147483647)
stack.pop()
stack.pop()
stack.pop()
stack.push(2147483647)
stack.push(-2147483648)
stack.pop()
print stack.getMin()
|
<commit_before><commit_msg>Add solution for 155 min stack. (tooooooo slow)<commit_after>
|
class MinStack(object):
def __init__(self):
"""
initialize your data structure here.
"""
self._s = []
self.t = len(self._s)-1
self.curMin = 2147483647
def push(self, x):
"""
:type x: int
:rtype: nothing
"""
self.curMin = x if x < self.curMin else self.curMin
if self.t < len(self._s)-1 and len(self._s)>0:
self._s[self.t+1] = [x,self.curMin]
else:
self._s.append([x,self.curMin])
self.t += 1
def pop(self):
"""
:rtype: nothing
"""
p = self._s[self.t][0]
self.t -= 1
self.curMin = 2147483647 if self.t == -1 else self._s[self.t][1]
# print "t=",self.t,"self.curMin=",self.curMin
return p
def top(self):
"""
:rtype: int
"""
return self._s[self.t][0]
def getMin(self):
"""
:rtype: int
"""
return self._s[self.t][1]
def getStack(self):
return self._s[:self.t+1]
if __name__ == '__main__':
stack = MinStack()
stack.push(2147483646)
stack.push(2147483645)
stack.push(2147483647)
stack.pop()
stack.pop()
stack.pop()
stack.push(2147483647)
stack.push(-2147483648)
stack.pop()
print stack.getMin()
|
Add solution for 155 min stack. (tooooooo slow)class MinStack(object):
def __init__(self):
"""
initialize your data structure here.
"""
self._s = []
self.t = len(self._s)-1
self.curMin = 2147483647
def push(self, x):
"""
:type x: int
:rtype: nothing
"""
self.curMin = x if x < self.curMin else self.curMin
if self.t < len(self._s)-1 and len(self._s)>0:
self._s[self.t+1] = [x,self.curMin]
else:
self._s.append([x,self.curMin])
self.t += 1
def pop(self):
"""
:rtype: nothing
"""
p = self._s[self.t][0]
self.t -= 1
self.curMin = 2147483647 if self.t == -1 else self._s[self.t][1]
# print "t=",self.t,"self.curMin=",self.curMin
return p
def top(self):
"""
:rtype: int
"""
return self._s[self.t][0]
def getMin(self):
"""
:rtype: int
"""
return self._s[self.t][1]
def getStack(self):
return self._s[:self.t+1]
if __name__ == '__main__':
stack = MinStack()
stack.push(2147483646)
stack.push(2147483645)
stack.push(2147483647)
stack.pop()
stack.pop()
stack.pop()
stack.push(2147483647)
stack.push(-2147483648)
stack.pop()
print stack.getMin()
|
<commit_before><commit_msg>Add solution for 155 min stack. (tooooooo slow)<commit_after>class MinStack(object):
def __init__(self):
"""
initialize your data structure here.
"""
self._s = []
self.t = len(self._s)-1
self.curMin = 2147483647
def push(self, x):
"""
:type x: int
:rtype: nothing
"""
self.curMin = x if x < self.curMin else self.curMin
if self.t < len(self._s)-1 and len(self._s)>0:
self._s[self.t+1] = [x,self.curMin]
else:
self._s.append([x,self.curMin])
self.t += 1
def pop(self):
"""
:rtype: nothing
"""
p = self._s[self.t][0]
self.t -= 1
self.curMin = 2147483647 if self.t == -1 else self._s[self.t][1]
# print "t=",self.t,"self.curMin=",self.curMin
return p
def top(self):
"""
:rtype: int
"""
return self._s[self.t][0]
def getMin(self):
"""
:rtype: int
"""
return self._s[self.t][1]
def getStack(self):
return self._s[:self.t+1]
if __name__ == '__main__':
stack = MinStack()
stack.push(2147483646)
stack.push(2147483645)
stack.push(2147483647)
stack.pop()
stack.pop()
stack.pop()
stack.push(2147483647)
stack.push(-2147483648)
stack.pop()
print stack.getMin()
|
|
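The pair-per-element approach in this record is already O(1) per operation, so the "(tooooooo slow)" verdict is more likely interpreter overhead from the manual top-index bookkeeping than algorithmic cost. A leaner sketch of the same idea lets the list manage its own top:

class MinStack(object):
    """Same value/min pairs as the record, using plain append and pop."""
    def __init__(self):
        self._s = []  # each entry is (value, min_of_stack_up_to_here)

    def push(self, x):
        cur_min = min(x, self._s[-1][1]) if self._s else x
        self._s.append((x, cur_min))

    def pop(self):
        return self._s.pop()[0]

    def top(self):
        return self._s[-1][0]

    def getMin(self):
        return self._s[-1][1]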
d989180c24ef7bd114f4138935a35b73e05036c4
|
senlin/tests/tempest/api/policies/test_policy_type.py
|
senlin/tests/tempest/api/policies/test_policy_type.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin.tests.tempest.api import base
class TestPolicyType(base.BaseSenlinTest):
@decorators.idempotent_id('72cc0347-3eab-4cf6-b1ee-531b11f20550')
def test_policy_type_list(self):
res = self.client.list_objs('policy-types')
# Verify resp of policy type list API
self.assertEqual(200, res['status'])
self.assertIsNotNone(res['body'])
policy_types = res['body']
for policy_type in policy_types:
self.assertIn('name', policy_type)
@decorators.idempotent_id('57791ed7-7f57-4369-ba6e-7e039169ebdc')
def test_policy_type_show(self):
res = self.client.get_obj('policy-types', 'senlin.policy.deletion-1.0')
# Verify resp of policy type show API
self.assertEqual(200, res['status'])
self.assertIsNotNone(res['body'])
policy_type = res['body']
for key in ['name', 'schema']:
self.assertIn(key, policy_type)
self.assertEqual('senlin.policy.deletion-1.0', policy_type['name'])
|
Add API tests for policy type list/show
|
Add API tests for policy type list/show
Add API tests for policy type list/show
Change-Id: I0d14727264491bbb6a9a668246fc267ab2245611
|
Python
|
apache-2.0
|
openstack/senlin,stackforge/senlin,openstack/senlin,stackforge/senlin,openstack/senlin
|
Add API tests for policy type list/show
Add API tests for policy type list/show
Change-Id: I0d14727264491bbb6a9a668246fc267ab2245611
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin.tests.tempest.api import base
class TestPolicyType(base.BaseSenlinTest):
@decorators.idempotent_id('72cc0347-3eab-4cf6-b1ee-531b11f20550')
def test_policy_type_list(self):
res = self.client.list_objs('policy-types')
# Verify resp of policy type list API
self.assertEqual(200, res['status'])
self.assertIsNotNone(res['body'])
policy_types = res['body']
for policy_type in policy_types:
self.assertIn('name', policy_type)
@decorators.idempotent_id('57791ed7-7f57-4369-ba6e-7e039169ebdc')
def test_policy_type_show(self):
res = self.client.get_obj('policy-types', 'senlin.policy.deletion-1.0')
# Verify resp of policy type show API
self.assertEqual(200, res['status'])
self.assertIsNotNone(res['body'])
policy_type = res['body']
for key in ['name', 'schema']:
self.assertIn(key, policy_type)
self.assertEqual('senlin.policy.deletion-1.0', policy_type['name'])
|
<commit_before><commit_msg>Add API tests for policy type list/show
Add API tests for policy type list/show
Change-Id: I0d14727264491bbb6a9a668246fc267ab2245611<commit_after>
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin.tests.tempest.api import base
class TestPolicyType(base.BaseSenlinTest):
@decorators.idempotent_id('72cc0347-3eab-4cf6-b1ee-531b11f20550')
def test_policy_type_list(self):
res = self.client.list_objs('policy-types')
# Verify resp of policy type list API
self.assertEqual(200, res['status'])
self.assertIsNotNone(res['body'])
policy_types = res['body']
for policy_type in policy_types:
self.assertIn('name', policy_type)
@decorators.idempotent_id('57791ed7-7f57-4369-ba6e-7e039169ebdc')
def test_policy_type_show(self):
res = self.client.get_obj('policy-types', 'senlin.policy.deletion-1.0')
# Verify resp of policy type show API
self.assertEqual(200, res['status'])
self.assertIsNotNone(res['body'])
policy_type = res['body']
for key in ['name', 'schema']:
self.assertIn(key, policy_type)
self.assertEqual('senlin.policy.deletion-1.0', policy_type['name'])
|
Add API tests for policy type list/show
Add API tests for policy type list/show
Change-Id: I0d14727264491bbb6a9a668246fc267ab2245611# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin.tests.tempest.api import base
class TestPolicyType(base.BaseSenlinTest):
@decorators.idempotent_id('72cc0347-3eab-4cf6-b1ee-531b11f20550')
def test_policy_type_list(self):
res = self.client.list_objs('policy-types')
# Verify resp of policy type list API
self.assertEqual(200, res['status'])
self.assertIsNotNone(res['body'])
policy_types = res['body']
for policy_type in policy_types:
self.assertIn('name', policy_type)
@decorators.idempotent_id('57791ed7-7f57-4369-ba6e-7e039169ebdc')
def test_policy_type_show(self):
res = self.client.get_obj('policy-types', 'senlin.policy.deletion-1.0')
# Verify resp of policy type show API
self.assertEqual(200, res['status'])
self.assertIsNotNone(res['body'])
policy_type = res['body']
for key in ['name', 'schema']:
self.assertIn(key, policy_type)
self.assertEqual('senlin.policy.deletion-1.0', policy_type['name'])
|
<commit_before><commit_msg>Add API tests for policy type list/show
Add API tests for policy type list/show
Change-Id: I0d14727264491bbb6a9a668246fc267ab2245611<commit_after># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin.tests.tempest.api import base
class TestPolicyType(base.BaseSenlinTest):
@decorators.idempotent_id('72cc0347-3eab-4cf6-b1ee-531b11f20550')
def test_policy_type_list(self):
res = self.client.list_objs('policy-types')
# Verify resp of policy type list API
self.assertEqual(200, res['status'])
self.assertIsNotNone(res['body'])
policy_types = res['body']
for policy_type in policy_types:
self.assertIn('name', policy_type)
@decorators.idempotent_id('57791ed7-7f57-4369-ba6e-7e039169ebdc')
def test_policy_type_show(self):
res = self.client.get_obj('policy-types', 'senlin.policy.deletion-1.0')
# Verify resp of policy type show API
self.assertEqual(200, res['status'])
self.assertIsNotNone(res['body'])
policy_type = res['body']
for key in ['name', 'schema']:
self.assertIn(key, policy_type)
self.assertEqual('senlin.policy.deletion-1.0', policy_type['name'])
|
|
642f513a181f8d4b14b6ecf784002d3010d4e98c
|
create_master.py
|
create_master.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Program: Create Master
Programmer: Michael Fryar, Training Analytics Associate, EPoD
Date created: February 16, 2018
Purpose: Create a master file of engagement data and append previous engagement
records to this file. Going forward, new data will be appended to this file.
"""
# Standard library imports
import csv # For reading data in comma separated value format
import os # For manipulating paths and changing directory
from os import walk # For getting list of existing files
from datetime import datetime # For timestamping csv files
def create_master(course):
"""Creates a master file and appends previously downloaded data.
Args:
course (str): Three letter course code. Known values are:
AGG - Aggregating Evidence
COM - Commissioning Evidence
CBA - Cost-Benefit Analysis
DES - Descriptive Evidence
IMP - Impact Evaluations
SYS - Systematic Approaches to Policy Decisions
"""
    # Change to directory where engagement files are saved.
home_dir = os.path.expanduser('~')
archive_path = (
'EPoD/Dropbox (CID)/Training Assessment and Research' +
'/BCURE Learner Engagement Reports/{}'.format(course)
)
archive_dir = os.path.join(home_dir, archive_path)
os.chdir(archive_dir)
# Get list of existing files
for (dirpath, dirnames, filenames) in walk(archive_dir):
files = [f for f in filenames if not f[0] == '.']
break
# Create master file with column headers
headers = ['user_id', 'username', 'name', 'engagements.problems_attempted',
'engagements.problems_completed', 'engagements.videos_viewed',
'engagements.discussion_contributions', 'download_datetime_UTC']
mastername = '{}_engagement_master.csv'.format(course)
with open(mastername, 'w', newline='') as masterfile:
writer = csv.writer(masterfile)
writer.writerow(headers)
# Append data from existing files to new master file
for file in files:
timestamp = file[15:34].replace('_', ' ').replace('.', ':')
with open(file, newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
engagement_data = list(reader)
with open(mastername, 'a', newline='') as masterfile:
writer = csv.writer(masterfile)
for row in engagement_data[1:]:
row.append(timestamp)
writer.writerow(row)
if __name__ == '__main__':
courses = ["AGG", "CBA", "COM", "DES", "IMP", "SYS"]
for course in courses:
create_master(course)
|
Create master engagement data file and append old engagement records
|
Create master engagement data file and append old engagement records
Going forward, learner engagement downloads will be appended to a master
file. This commit adds a script I wrote to create a master file for each
course and append existing engagement records.
|
Python
|
mit
|
mefryar/epodx-dashboards,mefryar/epodx-dashboards
|
Create master engagement data file and append old engagement records
Going forward, learner engagement downloads will be appended to a master
file. This commit adds a script I wrote to create a master file for each
course and append existing engagement records.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Program: Create Master
Programmer: Michael Fryar, Training Analytics Associate, EPoD
Date created: February 16, 2018
Purpose: Create a master file of engagement data and append previous engagement
records to this file. Going forward, new data will be appended to this file.
"""
# Standard library imports
import csv # For reading data in comma separated value format
import os # For manipulating paths and changing directory
from os import walk # For getting list of existing files
from datetime import datetime # For timestamping csv files
def create_master(course):
"""Creates a master file and appends previously downloaded data.
Args:
course (str): Three letter course code. Known values are:
AGG - Aggregating Evidence
COM - Commissioning Evidence
CBA - Cost-Benefit Analysis
DES - Descriptive Evidence
IMP - Impact Evaluations
SYS - Systematic Approaches to Policy Decisions
"""
    # Change to directory where engagement files are saved.
home_dir = os.path.expanduser('~')
archive_path = (
'EPoD/Dropbox (CID)/Training Assessment and Research' +
'/BCURE Learner Engagement Reports/{}'.format(course)
)
archive_dir = os.path.join(home_dir, archive_path)
os.chdir(archive_dir)
# Get list of existing files
for (dirpath, dirnames, filenames) in walk(archive_dir):
files = [f for f in filenames if not f[0] == '.']
break
# Create master file with column headers
headers = ['user_id', 'username', 'name', 'engagements.problems_attempted',
'engagements.problems_completed', 'engagements.videos_viewed',
'engagements.discussion_contributions', 'download_datetime_UTC']
mastername = '{}_engagement_master.csv'.format(course)
with open(mastername, 'w', newline='') as masterfile:
writer = csv.writer(masterfile)
writer.writerow(headers)
# Append data from existing files to new master file
for file in files:
timestamp = file[15:34].replace('_', ' ').replace('.', ':')
with open(file, newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
engagement_data = list(reader)
with open(mastername, 'a', newline='') as masterfile:
writer = csv.writer(masterfile)
for row in engagement_data[1:]:
row.append(timestamp)
writer.writerow(row)
if __name__ == '__main__':
courses = ["AGG", "CBA", "COM", "DES", "IMP", "SYS"]
for course in courses:
create_master(course)
|
<commit_before><commit_msg>Create master engagement data file and append old engagement records
Going forward, learner engagement downloads will be appended to a master
file. This commit adds a script I wrote to create a master file for each
course and append existing engagement records.<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Program: Create Master
Programmer: Michael Fryar, Training Analytics Associate, EPoD
Date created: February 16, 2018
Purpose: Create a master file of engagement data and append previous engagement
records to this file. Going forward, new data will be appended to this file.
"""
# Standard library imports
import csv # For reading data in comma separated value format
import os # For manipulating paths and changing directory
from os import walk # For getting list of existing files
from datetime import datetime # For timestamping csv files
def create_master(course):
"""Creates a master file and appends previously downloaded data.
Args:
course (str): Three letter course code. Known values are:
AGG - Aggregating Evidence
COM - Commissioning Evidence
CBA - Cost-Benefit Analysis
DES - Descriptive Evidence
IMP - Impact Evaluations
SYS - Systematic Approaches to Policy Decisions
"""
    # Change to directory where engagement files are saved.
home_dir = os.path.expanduser('~')
archive_path = (
'EPoD/Dropbox (CID)/Training Assessment and Research' +
'/BCURE Learner Engagement Reports/{}'.format(course)
)
archive_dir = os.path.join(home_dir, archive_path)
os.chdir(archive_dir)
# Get list of existing files
for (dirpath, dirnames, filenames) in walk(archive_dir):
files = [f for f in filenames if not f[0] == '.']
break
# Create master file with column headers
headers = ['user_id', 'username', 'name', 'engagements.problems_attempted',
'engagements.problems_completed', 'engagements.videos_viewed',
'engagements.discussion_contributions', 'download_datetime_UTC']
mastername = '{}_engagement_master.csv'.format(course)
with open(mastername, 'w', newline='') as masterfile:
writer = csv.writer(masterfile)
writer.writerow(headers)
# Append data from existing files to new master file
for file in files:
timestamp = file[15:34].replace('_', ' ').replace('.', ':')
with open(file, newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
engagement_data = list(reader)
with open(mastername, 'a', newline='') as masterfile:
writer = csv.writer(masterfile)
for row in engagement_data[1:]:
row.append(timestamp)
writer.writerow(row)
if __name__ == '__main__':
courses = ["AGG", "CBA", "COM", "DES", "IMP", "SYS"]
for course in courses:
create_master(course)
|
Create master engagement data file and append old engagement records
Going forward, learner engagement downloads will be appended to a master
file. This commit adds a script I wrote to create a master file for each
course and append existing engagement records.#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Program: Create Master
Programmer: Michael Fryar, Training Analytics Associate, EPoD
Date created: February 16, 2018
Purpose: Create a master file of engagement data and append previous engagement
records to this file. Going forward, new data will be appended to this file.
"""
# Standard library imports
import csv # For reading data in comma separated value format
import os # For manipulating paths and changing directory
from os import walk # For getting list of existing files
from datetime import datetime # For timestamping csv files
def create_master(course):
"""Creates a master file and appends previously downloaded data.
Args:
course (str): Three letter course code. Known values are:
AGG - Aggregating Evidence
COM - Commissioning Evidence
CBA - Cost-Benefit Analysis
DES - Descriptive Evidence
IMP - Impact Evaluations
SYS - Systematic Approaches to Policy Decisions
"""
    # Change to directory where engagement files are saved.
home_dir = os.path.expanduser('~')
archive_path = (
'EPoD/Dropbox (CID)/Training Assessment and Research' +
'/BCURE Learner Engagement Reports/{}'.format(course)
)
archive_dir = os.path.join(home_dir, archive_path)
os.chdir(archive_dir)
# Get list of existing files
for (dirpath, dirnames, filenames) in walk(archive_dir):
files = [f for f in filenames if not f[0] == '.']
break
# Create master file with column headers
headers = ['user_id', 'username', 'name', 'engagements.problems_attempted',
'engagements.problems_completed', 'engagements.videos_viewed',
'engagements.discussion_contributions', 'download_datetime_UTC']
mastername = '{}_engagement_master.csv'.format(course)
with open(mastername, 'w', newline='') as masterfile:
writer = csv.writer(masterfile)
writer.writerow(headers)
# Append data from existing files to new master file
for file in files:
timestamp = file[15:34].replace('_', ' ').replace('.', ':')
with open(file, newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
engagement_data = list(reader)
with open(mastername, 'a', newline='') as masterfile:
writer = csv.writer(masterfile)
for row in engagement_data[1:]:
row.append(timestamp)
writer.writerow(row)
if __name__ == '__main__':
courses = ["AGG", "CBA", "COM", "DES", "IMP", "SYS"]
for course in courses:
create_master(course)
|
<commit_before><commit_msg>Create master engagement data file and append old engagement records
Going forward, learner engagement downloads will be appended to a master
file. This commit adds a script I wrote to create a master file for each
course and append existing engagement records.<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Program: Create Master
Programmer: Michael Fryar, Training Analytics Associate, EPoD
Date created: February 16, 2018
Purpose: Create a master file of engagement data and append previous engagement
records to this file. Going forward, new data will be appended to this file.
"""
# Standard library imports
import csv # For reading data in comma separated value format
import os # For manipulating paths and changing directory
from os import walk # For getting list of existing files
from datetime import datetime # For timestamping csv files
def create_master(course):
"""Creates a master file and appends previously downloaded data.
Args:
course (str): Three letter course code. Known values are:
AGG - Aggregating Evidence
COM - Commissioning Evidence
CBA - Cost-Benefit Analysis
DES - Descriptive Evidence
IMP - Impact Evaluations
SYS - Systematic Approaches to Policy Decisions
"""
    # Change to directory where engagement files are saved.
home_dir = os.path.expanduser('~')
archive_path = (
'EPoD/Dropbox (CID)/Training Assessment and Research' +
'/BCURE Learner Engagement Reports/{}'.format(course)
)
archive_dir = os.path.join(home_dir, archive_path)
os.chdir(archive_dir)
# Get list of existing files
for (dirpath, dirnames, filenames) in walk(archive_dir):
files = [f for f in filenames if not f[0] == '.']
break
# Create master file with column headers
headers = ['user_id', 'username', 'name', 'engagements.problems_attempted',
'engagements.problems_completed', 'engagements.videos_viewed',
'engagements.discussion_contributions', 'download_datetime_UTC']
mastername = '{}_engagement_master.csv'.format(course)
with open(mastername, 'w', newline='') as masterfile:
writer = csv.writer(masterfile)
writer.writerow(headers)
# Append data from existing files to new master file
for file in files:
timestamp = file[15:34].replace('_', ' ').replace('.', ':')
with open(file, newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
engagement_data = list(reader)
with open(mastername, 'a', newline='') as masterfile:
writer = csv.writer(masterfile)
for row in engagement_data[1:]:
row.append(timestamp)
writer.writerow(row)
if __name__ == '__main__':
courses = ["AGG", "CBA", "COM", "DES", "IMP", "SYS"]
for course in courses:
create_master(course)
|
|
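The slice file[15:34] in create_master assumes a rigid filename layout; the slice-and-replace implies raw embedded timestamps shaped like 2018-02-16_13.45.07. Under that assumption, a regex search is position-independent and fails loudly on unexpected names instead of yielding garbage:

import re

# Assumption: filenames embed a raw timestamp like 2018-02-16_13.45.07,
# which is what file[15:34].replace('_', ' ').replace('.', ':') implies.
TS = re.compile(r'(\d{4}-\d{2}-\d{2})_(\d{2})\.(\d{2})\.(\d{2})')

def extract_timestamp(filename):
    m = TS.search(filename)
    if m is None:
        raise ValueError('no timestamp found in %r' % filename)
    date, hh, mm, ss = m.groups()
    return '%s %s:%s:%s' % (date, hh, mm, ss)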
254ca39eb2c70d1468854de18954121c9b399810
|
data_prep.py
|
data_prep.py
|
__author__ = 'jacob'
import tarfile
import glob
import os
atlas_filenames = glob.iglob(os.path.join("data", "atlas", "*.tgz"))
cms_filenames = glob.iglob(os.path.join("data", "cms", "*.tgz"))
for fname in atlas_filenames:
tar = tarfile.open(fname, "r:gz")
tar.extractall(path=os.path.join("data", "atlas"))
tar.close()
for fname in cms_filenames:
tar = tarfile.open(fname, "r:gz")
tar.extractall(path=os.path.join("data", "cms"))
tar.close()
|
Add simple data extraction for the ATLAS and CMS compressed files
|
Add simple data extraction for the ATLAS and CMS compressed files
|
Python
|
mit
|
jacobbieker/ATLAS-Luminosity
|
Add simple data extraction for the ATLAS and CMS compressed files
|
__author__ = 'jacob'
import tarfile
import glob
import os
atlas_filenames = glob.iglob(os.path.join("data", "atlas", "*.tgz"))
cms_filenames = glob.iglob(os.path.join("data", "cms", "*.tgz"))
for fname in atlas_filenames:
tar = tarfile.open(fname, "r:gz")
tar.extractall(path=os.path.join("data", "atlas"))
tar.close()
for fname in cms_filenames:
tar = tarfile.open(fname, "r:gz")
tar.extractall(path=os.path.join("data", "cms"))
tar.close()
|
<commit_before><commit_msg>Add simple data extraction for the ATLAS and CMS compressed files<commit_after>
|
__author__ = 'jacob'
import tarfile
import glob
import os
atlas_filenames = glob.iglob(os.path.join("data", "atlas", "*.tgz"))
cms_filenames = glob.iglob(os.path.join("data", "cms", "*.tgz"))
for fname in atlas_filenames:
tar = tarfile.open(fname, "r:gz")
tar.extractall(path=os.path.join("data", "atlas"))
tar.close()
for fname in cms_filenames:
tar = tarfile.open(fname, "r:gz")
tar.extractall(path=os.path.join("data", "cms"))
tar.close()
|
Add simple data extraction for the ATLAS and CMS compressed files__author__ = 'jacob'
import tarfile
import glob
import os
atlas_filenames = glob.iglob(os.path.join("data", "atlas", "*.tgz"))
cms_filenames = glob.iglob(os.path.join("data", "cms", "*.tgz"))
for fname in atlas_filenames:
tar = tarfile.open(fname, "r:gz")
tar.extractall(path=os.path.join("data", "atlas"))
tar.close()
for fname in cms_filenames:
tar = tarfile.open(fname, "r:gz")
tar.extractall(path=os.path.join("data", "cms"))
tar.close()
|
<commit_before><commit_msg>Add simple data extraction for the ATLAS and CMS compressed files<commit_after>__author__ = 'jacob'
import tarfile
import glob
import os
atlas_filenames = glob.iglob(os.path.join("data", "atlas", "*.tgz"))
cms_filenames = glob.iglob(os.path.join("data", "cms", "*.tgz"))
for fname in atlas_filenames:
tar = tarfile.open(fname, "r:gz")
tar.extractall(path=os.path.join("data", "atlas"))
tar.close()
for fname in cms_filenames:
tar = tarfile.open(fname, "r:gz")
tar.extractall(path=os.path.join("data", "cms"))
tar.close()
|
|
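tarfile.open returns a context manager, so the open/extract/close triplet in this record can be collapsed, and a single loop covers both experiments. A sketch assuming the same data/<experiment>/*.tgz layout:

import glob
import os
import tarfile

# Same behaviour as the record; the with-block closes each archive even
# if extraction raises partway through.
for experiment in ("atlas", "cms"):
    target = os.path.join("data", experiment)
    for fname in glob.iglob(os.path.join(target, "*.tgz")):
        with tarfile.open(fname, "r:gz") as tar:
            tar.extractall(path=target)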
ca892587c46070c7e59ee7e893992785ac64965b
|
kufpybio/restapi.py
|
kufpybio/restapi.py
|
import os
import urllib.request
class RESTAPI(object):
"""A general class that handles the local file access or the
retrival of tha file.
"""
def _get_data(self, path_template, url_template, entity_id):
file_path = self._file_path(path_template, entity_id)
if not os.path.exists(file_path):
self._retrive_data(url_template, entity_id, file_path)
return(open(file_path).read())
def _retrive_data(self, url_template, entity_id, file_path):
data = urllib.request.urlopen(
self._base_url + url_template % (entity_id)).read()
data_fh = open(file_path, "wb")
data_fh.write(data)
data_fh.close()
def _file_path(self, path_template, entity_id):
return(path_template % (self._download_folder, entity_id))
def _rest_url(self, url_template, entity_id):
return(url_template % (self._base_url, entity_id))
|
Add a general REST API class
|
Add a general REST API class
|
Python
|
isc
|
konrad/kufpybio
|
Add a general REST API class
|
import os
import urllib.request
class RESTAPI(object):
"""A general class that handles the local file access or the
    retrieval of the file.
"""
def _get_data(self, path_template, url_template, entity_id):
file_path = self._file_path(path_template, entity_id)
if not os.path.exists(file_path):
self._retrive_data(url_template, entity_id, file_path)
return(open(file_path).read())
def _retrive_data(self, url_template, entity_id, file_path):
data = urllib.request.urlopen(
self._base_url + url_template % (entity_id)).read()
data_fh = open(file_path, "wb")
data_fh.write(data)
data_fh.close()
def _file_path(self, path_template, entity_id):
return(path_template % (self._download_folder, entity_id))
def _rest_url(self, url_template, entity_id):
return(url_template % (self._base_url, entity_id))
|
<commit_before><commit_msg>Add a general REST API class<commit_after>
|
import os
import urllib.request
class RESTAPI(object):
"""A general class that handles the local file access or the
    retrieval of the file.
"""
def _get_data(self, path_template, url_template, entity_id):
file_path = self._file_path(path_template, entity_id)
if not os.path.exists(file_path):
self._retrive_data(url_template, entity_id, file_path)
return(open(file_path).read())
def _retrive_data(self, url_template, entity_id, file_path):
data = urllib.request.urlopen(
self._base_url + url_template % (entity_id)).read()
data_fh = open(file_path, "wb")
data_fh.write(data)
data_fh.close()
def _file_path(self, path_template, entity_id):
return(path_template % (self._download_folder, entity_id))
def _rest_url(self, url_template, entity_id):
return(url_template % (self._base_url, entity_id))
|
Add a general REST API classimport os
import urllib.request
class RESTAPI(object):
"""A general class that handles the local file access or the
    retrieval of the file.
"""
def _get_data(self, path_template, url_template, entity_id):
file_path = self._file_path(path_template, entity_id)
if not os.path.exists(file_path):
self._retrive_data(url_template, entity_id, file_path)
return(open(file_path).read())
def _retrive_data(self, url_template, entity_id, file_path):
data = urllib.request.urlopen(
self._base_url + url_template % (entity_id)).read()
data_fh = open(file_path, "wb")
data_fh.write(data)
data_fh.close()
def _file_path(self, path_template, entity_id):
return(path_template % (self._download_folder, entity_id))
def _rest_url(self, url_template, entity_id):
return(url_template % (self._base_url, entity_id))
|
<commit_before><commit_msg>Add a general REST API class<commit_after>import os
import urllib.request
class RESTAPI(object):
"""A general class that handles the local file access or the
    retrieval of the file.
"""
def _get_data(self, path_template, url_template, entity_id):
file_path = self._file_path(path_template, entity_id)
if not os.path.exists(file_path):
self._retrive_data(url_template, entity_id, file_path)
return(open(file_path).read())
def _retrive_data(self, url_template, entity_id, file_path):
data = urllib.request.urlopen(
self._base_url + url_template % (entity_id)).read()
data_fh = open(file_path, "wb")
data_fh.write(data)
data_fh.close()
def _file_path(self, path_template, entity_id):
return(path_template % (self._download_folder, entity_id))
def _rest_url(self, url_template, entity_id):
return(url_template % (self._base_url, entity_id))
|
|
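RESTAPI is written as a mixin: _get_data and _rest_url read self._base_url and self._download_folder, which no method in the record defines. A hypothetical subclass (the base URL and both templates are invented for illustration) makes that contract explicit:

# Hypothetical subclass; the base URL and the two templates are made up.
class GeneAPI(RESTAPI):
    def __init__(self, download_folder):
        self._base_url = "https://rest.example.org"
        self._download_folder = download_folder

    def gene(self, gene_id):
        # Cached on disk after the first call, then served locally.
        return self._get_data("%s/gene_%s.json", "/gene/%s", gene_id)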
3c1cbe7d701e1c465bdb70a570b905a44e3df73f
|
tests/view/results/test_finders.py
|
tests/view/results/test_finders.py
|
# Case Conductor is a Test Case Management system.
# Copyright (C) 2011-12 Mozilla
#
# This file is part of Case Conductor.
#
# Case Conductor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Case Conductor is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Case Conductor. If not, see <http://www.gnu.org/licenses/>.
"""
Tests for results finder.
"""
from django.core.urlresolvers import reverse
from tests import case
class CaseColumnTest(case.DBTestCase):
"""Tests for results finder CaseColumn."""
@property
def column(self):
"""The Column class under test."""
from cc.view.results.finders import CaseColumn
return CaseColumn
def test_goto_url(self):
"""goto_url returns results list url for given RCV."""
c = self.column(
None,
None,
self.model.RunCaseVersion.objects.all(),
"results_results",
)
rcv = self.F.RunCaseVersionFactory.create()
url = c.goto_url(rcv)
self.assertEqual(
url, reverse("results_results", kwargs={"rcv_id": rcv.id}))
def test_no_goto_url(self):
"""goto_url still returns None if no url name given."""
c = self.column(
None,
None,
self.model.RunCaseVersion.objects.all(),
)
rcv = self.F.RunCaseVersionFactory.create()
self.assertIsNone(c.goto_url(rcv))
|
Add tests for results finder.
|
Add tests for results finder.
|
Python
|
bsd-2-clause
|
mozilla/moztrap,mccarrmb/moztrap,bobsilverberg/moztrap,shinglyu/moztrap,shinglyu/moztrap,mccarrmb/moztrap,shinglyu/moztrap,mozilla/moztrap,bobsilverberg/moztrap,mozilla/moztrap,shinglyu/moztrap,mccarrmb/moztrap,bobsilverberg/moztrap,mccarrmb/moztrap,mccarrmb/moztrap,mozilla/moztrap,mozilla/moztrap,bobsilverberg/moztrap,shinglyu/moztrap
|
Add tests for results finder.
|
# Case Conductor is a Test Case Management system.
# Copyright (C) 2011-12 Mozilla
#
# This file is part of Case Conductor.
#
# Case Conductor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Case Conductor is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Case Conductor. If not, see <http://www.gnu.org/licenses/>.
"""
Tests for results finder.
"""
from django.core.urlresolvers import reverse
from tests import case
class CaseColumnTest(case.DBTestCase):
"""Tests for results finder CaseColumn."""
@property
def column(self):
"""The Column class under test."""
from cc.view.results.finders import CaseColumn
return CaseColumn
def test_goto_url(self):
"""goto_url returns results list url for given RCV."""
c = self.column(
None,
None,
self.model.RunCaseVersion.objects.all(),
"results_results",
)
rcv = self.F.RunCaseVersionFactory.create()
url = c.goto_url(rcv)
self.assertEqual(
url, reverse("results_results", kwargs={"rcv_id": rcv.id}))
def test_no_goto_url(self):
"""goto_url still returns None if no url name given."""
c = self.column(
None,
None,
self.model.RunCaseVersion.objects.all(),
)
rcv = self.F.RunCaseVersionFactory.create()
self.assertIsNone(c.goto_url(rcv))
|
<commit_before><commit_msg>Add tests for results finder.<commit_after>
|
# Case Conductor is a Test Case Management system.
# Copyright (C) 2011-12 Mozilla
#
# This file is part of Case Conductor.
#
# Case Conductor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Case Conductor is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Case Conductor. If not, see <http://www.gnu.org/licenses/>.
"""
Tests for results finder.
"""
from django.core.urlresolvers import reverse
from tests import case
class CaseColumnTest(case.DBTestCase):
"""Tests for results finder CaseColumn."""
@property
def column(self):
"""The Column class under test."""
from cc.view.results.finders import CaseColumn
return CaseColumn
def test_goto_url(self):
"""goto_url returns results list url for given RCV."""
c = self.column(
None,
None,
self.model.RunCaseVersion.objects.all(),
"results_results",
)
rcv = self.F.RunCaseVersionFactory.create()
url = c.goto_url(rcv)
self.assertEqual(
url, reverse("results_results", kwargs={"rcv_id": rcv.id}))
def test_no_goto_url(self):
"""goto_url still returns None if no url name given."""
c = self.column(
None,
None,
self.model.RunCaseVersion.objects.all(),
)
rcv = self.F.RunCaseVersionFactory.create()
self.assertIsNone(c.goto_url(rcv))
|
Add tests for results finder.# Case Conductor is a Test Case Management system.
# Copyright (C) 2011-12 Mozilla
#
# This file is part of Case Conductor.
#
# Case Conductor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Case Conductor is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Case Conductor. If not, see <http://www.gnu.org/licenses/>.
"""
Tests for results finder.
"""
from django.core.urlresolvers import reverse
from tests import case
class CaseColumnTest(case.DBTestCase):
"""Tests for results finder CaseColumn."""
@property
def column(self):
"""The Column class under test."""
from cc.view.results.finders import CaseColumn
return CaseColumn
def test_goto_url(self):
"""goto_url returns results list url for given RCV."""
c = self.column(
None,
None,
self.model.RunCaseVersion.objects.all(),
"results_results",
)
rcv = self.F.RunCaseVersionFactory.create()
url = c.goto_url(rcv)
self.assertEqual(
url, reverse("results_results", kwargs={"rcv_id": rcv.id}))
def test_no_goto_url(self):
"""goto_url still returns None if no url name given."""
c = self.column(
None,
None,
self.model.RunCaseVersion.objects.all(),
)
rcv = self.F.RunCaseVersionFactory.create()
self.assertIsNone(c.goto_url(rcv))
|
<commit_before><commit_msg>Add tests for results finder.<commit_after># Case Conductor is a Test Case Management system.
# Copyright (C) 2011-12 Mozilla
#
# This file is part of Case Conductor.
#
# Case Conductor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Case Conductor is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Case Conductor. If not, see <http://www.gnu.org/licenses/>.
"""
Tests for results finder.
"""
from django.core.urlresolvers import reverse
from tests import case
class CaseColumnTest(case.DBTestCase):
"""Tests for results finder CaseColumn."""
@property
def column(self):
"""The Column class under test."""
from cc.view.results.finders import CaseColumn
return CaseColumn
def test_goto_url(self):
"""goto_url returns results list url for given RCV."""
c = self.column(
None,
None,
self.model.RunCaseVersion.objects.all(),
"results_results",
)
rcv = self.F.RunCaseVersionFactory.create()
url = c.goto_url(rcv)
self.assertEqual(
url, reverse("results_results", kwargs={"rcv_id": rcv.id}))
def test_no_goto_url(self):
"""goto_url still returns None if no url name given."""
c = self.column(
None,
None,
self.model.RunCaseVersion.objects.all(),
)
rcv = self.F.RunCaseVersionFactory.create()
self.assertIsNone(c.goto_url(rcv))
|
|
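The two tests pin down goto_url's contract: with a fourth constructor argument the column reverses that URL name with the row's id, and without one it returns None. A plausible shape consistent with those assertions (inferred from the tests only, not the actual moztrap source; attribute names are guesses) is:

# Inferred from the tests; 'self.goto' stands in for whatever attribute
# stores the optional fourth constructor argument.
def goto_url(self, obj):
    if self.goto is None:          # no url name given at construction
        return None
    return reverse(self.goto, kwargs={"rcv_id": obj.id})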
fd47b9235a95146fc0ccbaf10f4b5c2c217fe401
|
libsrc/test/TestXdmfPythonArray.py
|
libsrc/test/TestXdmfPythonArray.py
|
import Xdmf
from Xdmf import *
if __name__ == '__main__':
array = Xdmf.XdmfArray()
array.SetNumberType(Xdmf.XDMF_INT64_TYPE)
assert(array.GetNumberType() == Xdmf.XDMF_INT64_TYPE)
array.SetShapeFromString("3 3")
assert(array.GetShapeAsString() == "3 3")
assert(array.GetNumberOfElements() == 9)
toWrite = [0,1,2,3,4,500,5000,500000,1000000000000]
i = 0;
for element in toWrite:
array.SetValueFromInt64(i,element)
i += 1
i=0
for element in toWrite:
assert(array.GetValueAsInt64(i) == element)
i += 1
assert (array.GetMaxAsInt64() == 1000000000000)
assert (array.GetMinAsInt64() == 0)
|
Add Xdmf Python Test that writes values to an XdmfArray
|
ENH: Add Xdmf Python Test that writes values to an XdmfArray
|
Python
|
bsd-3-clause
|
cjh1/Xdmf2,cjh1/Xdmf2,cjh1/Xdmf2
|
ENH: Add Xdmf Python Test that writes values to an XdmfArray
|
import Xdmf
from Xdmf import *
if __name__ == '__main__':
array = Xdmf.XdmfArray()
array.SetNumberType(Xdmf.XDMF_INT64_TYPE)
assert(array.GetNumberType() == Xdmf.XDMF_INT64_TYPE)
array.SetShapeFromString("3 3")
assert(array.GetShapeAsString() == "3 3")
assert(array.GetNumberOfElements() == 9)
toWrite = [0,1,2,3,4,500,5000,500000,1000000000000]
i = 0;
for element in toWrite:
array.SetValueFromInt64(i,element)
i += 1
i=0
for element in toWrite:
assert(array.GetValueAsInt64(i) == element)
i += 1
assert (array.GetMaxAsInt64() == 1000000000000)
assert (array.GetMinAsInt64() == 0)
|
<commit_before><commit_msg>ENH: Add Xdmf Python Test that writes values to an XdmfArray<commit_after>
|
import Xdmf
from Xdmf import *
if __name__ == '__main__':
array = Xdmf.XdmfArray()
array.SetNumberType(Xdmf.XDMF_INT64_TYPE)
assert(array.GetNumberType() == Xdmf.XDMF_INT64_TYPE)
array.SetShapeFromString("3 3")
assert(array.GetShapeAsString() == "3 3")
assert(array.GetNumberOfElements() == 9)
toWrite = [0,1,2,3,4,500,5000,500000,1000000000000]
i = 0;
for element in toWrite:
array.SetValueFromInt64(i,element)
i += 1
i=0
for element in toWrite:
assert(array.GetValueAsInt64(i) == element)
i += 1
assert (array.GetMaxAsInt64() == 1000000000000)
assert (array.GetMinAsInt64() == 0)
|
ENH: Add Xdmf Python Test that writes values to an XdmfArrayimport Xdmf
from Xdmf import *
if __name__ == '__main__':
array = Xdmf.XdmfArray()
array.SetNumberType(Xdmf.XDMF_INT64_TYPE)
assert(array.GetNumberType() == Xdmf.XDMF_INT64_TYPE)
array.SetShapeFromString("3 3")
assert(array.GetShapeAsString() == "3 3")
assert(array.GetNumberOfElements() == 9)
toWrite = [0,1,2,3,4,500,5000,500000,1000000000000]
i = 0;
for element in toWrite:
array.SetValueFromInt64(i,element)
i += 1
i=0
for element in toWrite:
assert(array.GetValueAsInt64(i) == element)
i += 1
assert (array.GetMaxAsInt64() == 1000000000000)
assert (array.GetMinAsInt64() == 0)
|
<commit_before><commit_msg>ENH: Add Xdmf Python Test that writes values to an XdmfArray<commit_after>import Xdmf
from Xdmf import *
if __name__ == '__main__':
array = Xdmf.XdmfArray()
array.SetNumberType(Xdmf.XDMF_INT64_TYPE)
assert(array.GetNumberType() == Xdmf.XDMF_INT64_TYPE)
array.SetShapeFromString("3 3")
assert(array.GetShapeAsString() == "3 3")
assert(array.GetNumberOfElements() == 9)
toWrite = [0,1,2,3,4,500,5000,500000,1000000000000]
i = 0;
for element in toWrite:
array.SetValueFromInt64(i,element)
i += 1
i=0
for element in toWrite:
assert(array.GetValueAsInt64(i) == element)
i += 1
assert (array.GetMaxAsInt64() == 1000000000000)
assert (array.GetMinAsInt64() == 0)
|
|
35ff343d39dec11909bfffee86b4d467178f5fe8
|
acq4/util/threadrun.py
|
acq4/util/threadrun.py
|
try:
import queue
except ImportError:
import Queue as queue
from .future import Future
from . import Qt
def runInThread(thread, func, *args, **kwds):
"""Run a function in another thread and return the result.
The remote thread must be running a Qt event loop.
"""
return ThreadCallFuture(thread, func, *args, **kwds)()
def runInGuiThread(func, *args, **kwds):
"""Run a function the main GUI thread and return the result.
"""
return ThreadCallFuture(None, func, *args, **kwds)()
class ThreadCallFuture(Future):
sigRequestCall = Qt.Signal()
def __init__(self, thread, func, *args, **kwds):
Future.__init__(self)
self.func = func
self.args = args
self.kwds = kwds
self.exc = None
if thread is None:
thread = Qt.QApplication.instance().thread()
self.moveToThread(thread)
self.sigRequestCall.connect(self._callRequested)
self.sigRequestCall.emit()
def _callRequested(self):
try:
self.ret = self.func(*self.args, **self.kwds)
self._taskDone()
except Exception as exc:
self.exc = exc
self._taskDone(interrupted=True, error=str(exc))
def __call__(self):
self.wait()
if self.exc is not None:
raise self.exc
else:
return self.ret
|
Add utility for calling functions across threads directly
|
Add utility for calling functions across threads directly
|
Python
|
mit
|
acq4/acq4,pbmanis/acq4,pbmanis/acq4,pbmanis/acq4,campagnola/acq4,pbmanis/acq4,acq4/acq4,campagnola/acq4,campagnola/acq4,acq4/acq4,campagnola/acq4,acq4/acq4
|
Add utility for calling functions across threads directly
|
try:
import queue
except ImportError:
import Queue as queue
from .future import Future
from . import Qt
def runInThread(thread, func, *args, **kwds):
"""Run a function in another thread and return the result.
The remote thread must be running a Qt event loop.
"""
return ThreadCallFuture(thread, func, *args, **kwds)()
def runInGuiThread(func, *args, **kwds):
"""Run a function the main GUI thread and return the result.
"""
return ThreadCallFuture(None, func, *args, **kwds)()
class ThreadCallFuture(Future):
sigRequestCall = Qt.Signal()
def __init__(self, thread, func, *args, **kwds):
Future.__init__(self)
self.func = func
self.args = args
self.kwds = kwds
self.exc = None
if thread is None:
thread = Qt.QApplication.instance().thread()
self.moveToThread(thread)
self.sigRequestCall.connect(self._callRequested)
self.sigRequestCall.emit()
def _callRequested(self):
try:
self.ret = self.func(*self.args, **self.kwds)
self._taskDone()
except Exception as exc:
self.exc = exc
self._taskDone(interrupted=True, error=str(exc))
def __call__(self):
self.wait()
if self.exc is not None:
raise self.exc
else:
return self.ret
|
<commit_before><commit_msg>Add utility for calling functions across threads directly<commit_after>
|
try:
import queue
except ImportError:
import Queue as queue
from .future import Future
from . import Qt
def runInThread(thread, func, *args, **kwds):
"""Run a function in another thread and return the result.
The remote thread must be running a Qt event loop.
"""
return ThreadCallFuture(thread, func, *args, **kwds)()
def runInGuiThread(func, *args, **kwds):
"""Run a function the main GUI thread and return the result.
"""
return ThreadCallFuture(None, func, *args, **kwds)()
class ThreadCallFuture(Future):
sigRequestCall = Qt.Signal()
def __init__(self, thread, func, *args, **kwds):
Future.__init__(self)
self.func = func
self.args = args
self.kwds = kwds
self.exc = None
if thread is None:
thread = Qt.QApplication.instance().thread()
self.moveToThread(thread)
self.sigRequestCall.connect(self._callRequested)
self.sigRequestCall.emit()
def _callRequested(self):
try:
self.ret = self.func(*self.args, **self.kwds)
self._taskDone()
except Exception as exc:
self.exc = exc
self._taskDone(interrupted=True, error=str(exc))
def __call__(self):
self.wait()
if self.exc is not None:
raise self.exc
else:
return self.ret
|
Add utility for calling functions across threads directlytry:
import queue
except ImportError:
import Queue as queue
from .future import Future
from . import Qt
def runInThread(thread, func, *args, **kwds):
"""Run a function in another thread and return the result.
The remote thread must be running a Qt event loop.
"""
return ThreadCallFuture(thread, func, *args, **kwds)()
def runInGuiThread(func, *args, **kwds):
"""Run a function the main GUI thread and return the result.
"""
return ThreadCallFuture(None, func, *args, **kwds)()
class ThreadCallFuture(Future):
sigRequestCall = Qt.Signal()
def __init__(self, thread, func, *args, **kwds):
Future.__init__(self)
self.func = func
self.args = args
self.kwds = kwds
self.exc = None
if thread is None:
thread = Qt.QApplication.instance().thread()
self.moveToThread(thread)
self.sigRequestCall.connect(self._callRequested)
self.sigRequestCall.emit()
def _callRequested(self):
try:
self.ret = self.func(*self.args, **self.kwds)
self._taskDone()
except Exception as exc:
self.exc = exc
self._taskDone(interrupted=True, error=str(exc))
def __call__(self):
self.wait()
if self.exc is not None:
raise self.exc
else:
return self.ret
|
<commit_before><commit_msg>Add utility for calling functions across threads directly<commit_after>try:
import queue
except ImportError:
import Queue as queue
from .future import Future
from . import Qt
def runInThread(thread, func, *args, **kwds):
"""Run a function in another thread and return the result.
The remote thread must be running a Qt event loop.
"""
return ThreadCallFuture(thread, func, *args, **kwds)()
def runInGuiThread(func, *args, **kwds):
"""Run a function the main GUI thread and return the result.
"""
return ThreadCallFuture(None, func, *args, **kwds)()
class ThreadCallFuture(Future):
sigRequestCall = Qt.Signal()
def __init__(self, thread, func, *args, **kwds):
Future.__init__(self)
self.func = func
self.args = args
self.kwds = kwds
self.exc = None
if thread is None:
thread = Qt.QApplication.instance().thread()
self.moveToThread(thread)
self.sigRequestCall.connect(self._callRequested)
self.sigRequestCall.emit()
def _callRequested(self):
try:
self.ret = self.func(*self.args, **self.kwds)
self._taskDone()
except Exception as exc:
self.exc = exc
self._taskDone(interrupted=True, error=str(exc))
def __call__(self):
self.wait()
if self.exc is not None:
raise self.exc
else:
return self.ret
|
|
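A hypothetical usage sketch for the module above (the label widget and the worker setup are illustrative, not part of the API): a worker thread proxies GUI-only calls to the Qt main thread and blocks for the result.
from acq4.util.threadrun import runInGuiThread

def refresh_label(label):
    # Qt widgets must only be touched from the GUI thread, so both the
    # read and the write are marshalled there; an exception raised in
    # the GUI thread is re-raised here in the worker.
    text = runInGuiThread(label.text)
    runInGuiThread(label.setText, text + ' (updated)')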
b55918ee959d8b168b940f44236b9bbab6daed1c
|
src/hack4lt/models.py
|
src/hack4lt/models.py
|
import re
from django.conf import settings
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, UserManager
from django.core import validators
from django.core.mail import send_mail
from django.db import models
from django.template.loader import render_to_string
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
class Hacker(AbstractBaseUser, PermissionsMixin):
username = models.CharField(_('username'), max_length=30, unique=True,
help_text=_('Required. 30 characters or fewer. Letters, numbers and '
'@/./+/-/_ characters'),
validators=[
validators.RegexValidator(re.compile('^[\w.@+-]+$'), _('Enter a valid username.'), 'invalid')
])
first_name = models.CharField(_('first name'), max_length=30, blank=True)
last_name = models.CharField(_('last name'), max_length=30, blank=True)
email = models.EmailField(_('Email address'), max_length=254, unique=True)
email_verified = models.BooleanField(_('Was email approved'), default=False)
is_staff = models.BooleanField(_('staff status'), default=False,
help_text=_('Designates whether the user can log into this admin '
'site.'))
is_active = models.BooleanField(_('active'), default=False,
help_text=_('Designates whether this user should be treated as '
'active. Unselect this instead of deleting accounts.'))
date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
    repository = models.URLField(_('GitHub or Bitbucket page.'))  # code repository
    stackoverflow_user = models.URLField(_('Stack Overflow user.'))  # Stack Overflow profile
description = models.TextField()
objects = UserManager()
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email']
def get_full_name(self):
"""
Returns the first_name plus the last_name, with a space in between.
"""
full_name = '%s %s' % (self.first_name, self.last_name)
return full_name.strip()
def get_short_name(self):
"Returns the short name for the user."
return self.first_name
def activate(self, domain='demo.damis.lt'):
if not self.is_active:
self.is_active = True
self.save()
receiver = self.email
subject = _('{0} account activated').format(domain)
body = render_to_string('accounts/mail/account_activated.html', {
'domain': domain,
'username': self.username,
})
sender = settings.DEFAULT_FROM_EMAIL
send_mail(subject, body, sender, [receiver])
return True
return False
|
Add CustomUser user model named Hacker.
|
Add CustomUser user model named Hacker.
|
Python
|
bsd-3-clause
|
niekas/Hack4LT
|
Add CustomUser user model named Hacker.
|
import re
from django.conf import settings
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, UserManager
from django.core import validators
from django.core.mail import send_mail
from django.db import models
from django.template.loader import render_to_string
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
class Hacker(AbstractBaseUser, PermissionsMixin):
username = models.CharField(_('username'), max_length=30, unique=True,
help_text=_('Required. 30 characters or fewer. Letters, numbers and '
'@/./+/-/_ characters'),
validators=[
validators.RegexValidator(re.compile('^[\w.@+-]+$'), _('Enter a valid username.'), 'invalid')
])
first_name = models.CharField(_('first name'), max_length=30, blank=True)
last_name = models.CharField(_('last name'), max_length=30, blank=True)
email = models.EmailField(_('Email address'), max_length=254, unique=True)
email_verified = models.BooleanField(_('Was email approved'), default=False)
is_staff = models.BooleanField(_('staff status'), default=False,
help_text=_('Designates whether the user can log into this admin '
'site.'))
is_active = models.BooleanField(_('active'), default=False,
help_text=_('Designates whether this user should be treated as '
'active. Unselect this instead of deleting accounts.'))
date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
    repository = models.URLField(_('GitHub or Bitbucket page.'))  # code repository
    stackoverflow_user = models.URLField(_('Stack Overflow user.'))  # Stack Overflow profile
description = models.TextField()
objects = UserManager()
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email']
def get_full_name(self):
"""
Returns the first_name plus the last_name, with a space in between.
"""
full_name = '%s %s' % (self.first_name, self.last_name)
return full_name.strip()
def get_short_name(self):
"Returns the short name for the user."
return self.first_name
def activate(self, domain='demo.damis.lt'):
if not self.is_active:
self.is_active = True
self.save()
receiver = self.email
subject = _('{0} account activated').format(domain)
body = render_to_string('accounts/mail/account_activated.html', {
'domain': domain,
'username': self.username,
})
sender = settings.DEFAULT_FROM_EMAIL
send_mail(subject, body, sender, [receiver])
return True
return False
|
<commit_before><commit_msg>Add CustomUser user model named Hacker.<commit_after>
|
import re
from django.conf import settings
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, UserManager
from django.core import validators
from django.core.mail import send_mail
from django.db import models
from django.template.loader import render_to_string
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
class Hacker(AbstractBaseUser, PermissionsMixin):
username = models.CharField(_('username'), max_length=30, unique=True,
help_text=_('Required. 30 characters or fewer. Letters, numbers and '
'@/./+/-/_ characters'),
validators=[
validators.RegexValidator(re.compile('^[\w.@+-]+$'), _('Enter a valid username.'), 'invalid')
])
first_name = models.CharField(_('first name'), max_length=30, blank=True)
last_name = models.CharField(_('last name'), max_length=30, blank=True)
email = models.EmailField(_('Email address'), max_length=254, unique=True)
email_verified = models.BooleanField(_('Was email approved'), default=False)
is_staff = models.BooleanField(_('staff status'), default=False,
help_text=_('Designates whether the user can log into this admin '
'site.'))
is_active = models.BooleanField(_('active'), default=False,
help_text=_('Designates whether this user should be treated as '
'active. Unselect this instead of deleting accounts.'))
date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
    repository = models.URLField(_('GitHub or Bitbucket page.'))  # code repository
    stackoverflow_user = models.URLField(_('Stack Overflow user.'))  # Stack Overflow profile
description = models.TextField()
objects = UserManager()
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email']
def get_full_name(self):
"""
Returns the first_name plus the last_name, with a space in between.
"""
full_name = '%s %s' % (self.first_name, self.last_name)
return full_name.strip()
def get_short_name(self):
"Returns the short name for the user."
return self.first_name
def activate(self, domain='demo.damis.lt'):
if not self.is_active:
self.is_active = True
self.save()
receiver = self.email
subject = _('{0} account activated').format(domain)
body = render_to_string('accounts/mail/account_activated.html', {
'domain': domain,
'username': self.username,
})
sender = settings.DEFAULT_FROM_EMAIL
send_mail(subject, body, sender, [receiver])
return True
return False
|
Add CustomUser user model named Hacker.import re
from django.conf import settings
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, UserManager
from django.core import validators
from django.core.mail import send_mail
from django.db import models
from django.template.loader import render_to_string
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
class Hacker(AbstractBaseUser, PermissionsMixin):
username = models.CharField(_('username'), max_length=30, unique=True,
help_text=_('Required. 30 characters or fewer. Letters, numbers and '
'@/./+/-/_ characters'),
validators=[
validators.RegexValidator(re.compile('^[\w.@+-]+$'), _('Enter a valid username.'), 'invalid')
])
first_name = models.CharField(_('first name'), max_length=30, blank=True)
last_name = models.CharField(_('last name'), max_length=30, blank=True)
email = models.EmailField(_('Email address'), max_length=254, unique=True)
email_verified = models.BooleanField(_('Was email approved'), default=False)
is_staff = models.BooleanField(_('staff status'), default=False,
help_text=_('Designates whether the user can log into this admin '
'site.'))
is_active = models.BooleanField(_('active'), default=False,
help_text=_('Designates whether this user should be treated as '
'active. Unselect this instead of deleting accounts.'))
date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
    repository = models.URLField(_('GitHub or Bitbucket page.'))  # code repository
    stackoverflow_user = models.URLField(_('Stack Overflow user.'))  # Stack Overflow profile
description = models.TextField()
objects = UserManager()
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email']
def get_full_name(self):
"""
Returns the first_name plus the last_name, with a space in between.
"""
full_name = '%s %s' % (self.first_name, self.last_name)
return full_name.strip()
def get_short_name(self):
"Returns the short name for the user."
return self.first_name
def activate(self, domain='demo.damis.lt'):
if not self.is_active:
self.is_active = True
self.save()
receiver = self.email
subject = _('{0} account activated').format(domain)
body = render_to_string('accounts/mail/account_activated.html', {
'domain': domain,
'username': self.username,
})
sender = settings.DEFAULT_FROM_EMAIL
send_mail(subject, body, sender, [receiver])
return True
return False
|
<commit_before><commit_msg>Add CustomUser user model named Hacker.<commit_after>import re
from django.conf import settings
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, UserManager
from django.core import validators
from django.core.mail import send_mail
from django.db import models
from django.template.loader import render_to_string
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
class Hacker(AbstractBaseUser, PermissionsMixin):
username = models.CharField(_('username'), max_length=30, unique=True,
help_text=_('Required. 30 characters or fewer. Letters, numbers and '
'@/./+/-/_ characters'),
validators=[
validators.RegexValidator(re.compile('^[\w.@+-]+$'), _('Enter a valid username.'), 'invalid')
])
first_name = models.CharField(_('first name'), max_length=30, blank=True)
last_name = models.CharField(_('last name'), max_length=30, blank=True)
email = models.EmailField(_('Email address'), max_length=254, unique=True)
email_verified = models.BooleanField(_('Was email approved'), default=False)
is_staff = models.BooleanField(_('staff status'), default=False,
help_text=_('Designates whether the user can log into this admin '
'site.'))
is_active = models.BooleanField(_('active'), default=False,
help_text=_('Designates whether this user should be treated as '
'active. Unselect this instead of deleting accounts.'))
date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
    repository = models.URLField(_('GitHub or Bitbucket page.'))  # code repository
    stackoverflow_user = models.URLField(_('Stack Overflow user.'))  # Stack Overflow profile
description = models.TextField()
objects = UserManager()
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email']
def get_full_name(self):
"""
Returns the first_name plus the last_name, with a space in between.
"""
full_name = '%s %s' % (self.first_name, self.last_name)
return full_name.strip()
def get_short_name(self):
"Returns the short name for the user."
return self.first_name
def activate(self, domain='demo.damis.lt'):
if not self.is_active:
self.is_active = True
self.save()
receiver = self.email
subject = _('{0} account activated').format(domain)
body = render_to_string('accounts/mail/account_activated.html', {
'domain': domain,
'username': self.username,
})
sender = settings.DEFAULT_FROM_EMAIL
send_mail(subject, body, sender, [receiver])
return True
return False
|
|
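Illustrative only (field values and the domain are made up, and Django must be configured with this class as AUTH_USER_MODEL): creating a Hacker and firing the activation mail.
from hack4lt.models import Hacker

hacker = Hacker.objects.create_user(
    username='ada', email='ada@example.com', password='s3cret')
hacker.repository = 'https://github.com/ada'
hacker.stackoverflow_user = 'https://stackoverflow.com/users/1/ada'
hacker.description = 'Pythonista'
hacker.save()
hacker.activate(domain='hack4.lt')  # flips is_active and emails the user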
c9d352b6dbb5d094e97d578d6dd3696c31d02217
|
hyper/http20/error_code_registry.py
|
hyper/http20/error_code_registry.py
|
# -*- coding: utf-8 -*-
"""
hyper/http20/error_code_registry
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Global error code registry containing the established HTTP/2 error codes.
The registry is based on a 32-bit space so we use the error code to index into
the array.
The current registry is available at:
https://tools.ietf.org/html/draft-ietf-httpbis-http2-17#section-11.4
"""
NO_ERROR = {'Name': 'NO_ERROR',
'Description': 'Graceful shutdown'}
PROTOCOL_ERROR = {'Name': 'PROTOCOL_ERROR',
'Description': 'Protocol error detected'}
INTERNAL_ERROR = {'Name': 'INTERNAL_ERROR',
'Description': 'Implementation fault'}
FLOW_CONTROL_ERROR = {'Name': 'FLOW_CONTROL_ERROR',
'Description': 'Flow control limits exceeded'}
SETTINGS_TIMEOUT = {'Name': 'SETTINGS_TIMEOUT',
'Description': 'Settings not acknowledged'}
STREAM_CLOSED = {'Name': 'STREAM_CLOSED',
'Description': 'Frame received for closed stream'}
FRAME_SIZE_ERROR = {'Name': 'FRAME_SIZE_ERROR',
'Description': 'Frame size incorrect'}
REFUSED_STREAM = {'Name': 'REFUSED_STREAM',
'Description': 'Stream not processed'}
CANCEL = {'Name': 'CANCEL',
'Description': 'Stream cancelled'}
COMPRESSION_ERROR = {'Name': 'COMPRESSION_ERROR',
'Description': 'Compression state not updated'}
CONNECT_ERROR = {'Name': 'CONNECT_ERROR',
'Description':
'TCP connection error for CONNECT method'}
ENHANCE_YOUR_CALM = {'Name': 'ENHANCE_YOUR_CALM',
'Description': 'Processing capacity exceeded'}
INADEQUATE_SECURITY = {'Name': 'INADEQUATE_SECURITY',
'Description':
'Negotiated TLS parameters not acceptable'}
HTTP_1_1_REQUIRED = {'Name': 'HTTP_1_1_REQUIRED',
'Description': 'Use HTTP/1.1 for the request'}
H2_ERROR_CODE_REGISTRY = [NO_ERROR, PROTOCOL_ERROR, INTERNAL_ERROR,
FLOW_CONTROL_ERROR, SETTINGS_TIMEOUT, STREAM_CLOSED,
FRAME_SIZE_ERROR, REFUSED_STREAM, CANCEL,
COMPRESSION_ERROR, CONNECT_ERROR, ENHANCE_YOUR_CALM,
INADEQUATE_SECURITY, HTTP_1_1_REQUIRED]
|
Add HTTP2 error code registry
|
Add HTTP2 error code registry
|
Python
|
mit
|
irvind/hyper,lawnmowerlatte/hyper,irvind/hyper,Lukasa/hyper,jdecuyper/hyper,fredthomsen/hyper,jdecuyper/hyper,lawnmowerlatte/hyper,Lukasa/hyper,plucury/hyper,masaori335/hyper,masaori335/hyper,plucury/hyper,fredthomsen/hyper
|
Add HTTP2 error code registry
|
# -*- coding: utf-8 -*-
"""
hyper/http20/error_code_registry
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Global error code registry containing the established HTTP/2 error codes.
The registry is based on a 32-bit space so we use the error code to index into
the array.
The current registry is available at:
https://tools.ietf.org/html/draft-ietf-httpbis-http2-17#section-11.4
"""
NO_ERROR = {'Name': 'NO_ERROR',
'Description': 'Graceful shutdown'}
PROTOCOL_ERROR = {'Name': 'PROTOCOL_ERROR',
'Description': 'Protocol error detected'}
INTERNAL_ERROR = {'Name': 'INTERNAL_ERROR',
'Description': 'Implementation fault'}
FLOW_CONTROL_ERROR = {'Name': 'FLOW_CONTROL_ERROR',
'Description': 'Flow control limits exceeded'}
SETTINGS_TIMEOUT = {'Name': 'SETTINGS_TIMEOUT',
'Description': 'Settings not acknowledged'}
STREAM_CLOSED = {'Name': 'STREAM_CLOSED',
'Description': 'Frame received for closed stream'}
FRAME_SIZE_ERROR = {'Name': 'FRAME_SIZE_ERROR',
'Description': 'Frame size incorrect'}
REFUSED_STREAM = {'Name': 'REFUSED_STREAM',
'Description': 'Stream not processed'}
CANCEL = {'Name': 'CANCEL',
'Description': 'Stream cancelled'}
COMPRESSION_ERROR = {'Name': 'COMPRESSION_ERROR',
'Description': 'Compression state not updated'}
CONNECT_ERROR = {'Name': 'CONNECT_ERROR',
'Description':
'TCP connection error for CONNECT method'}
ENHANCE_YOUR_CALM = {'Name': 'ENHANCE_YOUR_CALM',
'Description': 'Processing capacity exceeded'}
INADEQUATE_SECURITY = {'Name': 'INADEQUATE_SECURITY',
'Description':
'Negotiated TLS parameters not acceptable'}
HTTP_1_1_REQUIRED = {'Name': 'HTTP_1_1_REQUIRED',
'Description': 'Use HTTP/1.1 for the request'}
H2_ERROR_CODE_REGISTRY = [NO_ERROR, PROTOCOL_ERROR, INTERNAL_ERROR,
FLOW_CONTROL_ERROR, SETTINGS_TIMEOUT, STREAM_CLOSED,
FRAME_SIZE_ERROR, REFUSED_STREAM, CANCEL,
COMPRESSION_ERROR, CONNECT_ERROR, ENHANCE_YOUR_CALM,
INADEQUATE_SECURITY, HTTP_1_1_REQUIRED]
|
<commit_before><commit_msg>Add HTTP2 error code registry<commit_after>
|
# -*- coding: utf-8 -*-
"""
hyper/http20/error_code_registry
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Global error code registry containing the established HTTP/2 error codes.
The registry is based on a 32-bit space so we use the error code to index into
the array.
The current registry is available at:
https://tools.ietf.org/html/draft-ietf-httpbis-http2-17#section-11.4
"""
NO_ERROR = {'Name': 'NO_ERROR',
'Description': 'Graceful shutdown'}
PROTOCOL_ERROR = {'Name': 'PROTOCOL_ERROR',
'Description': 'Protocol error detected'}
INTERNAL_ERROR = {'Name': 'INTERNAL_ERROR',
'Description': 'Implementation fault'}
FLOW_CONTROL_ERROR = {'Name': 'FLOW_CONTROL_ERROR',
'Description': 'Flow control limits exceeded'}
SETTINGS_TIMEOUT = {'Name': 'SETTINGS_TIMEOUT',
'Description': 'Settings not acknowledged'}
STREAM_CLOSED = {'Name': 'STREAM_CLOSED',
'Description': 'Frame received for closed stream'}
FRAME_SIZE_ERROR = {'Name': 'FRAME_SIZE_ERROR',
'Description': 'Frame size incorrect'}
REFUSED_STREAM = {'Name': 'REFUSED_STREAM',
'Description': 'Stream not processed'}
CANCEL = {'Name': 'CANCEL',
'Description': 'Stream cancelled'}
COMPRESSION_ERROR = {'Name': 'COMPRESSION_ERROR',
'Description': 'Compression state not updated'}
CONNECT_ERROR = {'Name': 'CONNECT_ERROR',
'Description':
'TCP connection error for CONNECT method'}
ENHANCE_YOUR_CALM = {'Name': 'ENHANCE_YOUR_CALM',
'Description': 'Processing capacity exceeded'}
INADEQUATE_SECURITY = {'Name': 'INADEQUATE_SECURITY',
'Description':
'Negotiated TLS parameters not acceptable'}
HTTP_1_1_REQUIRED = {'Name': 'HTTP_1_1_REQUIRED',
'Description': 'Use HTTP/1.1 for the request'}
H2_ERROR_CODE_REGISTRY = [NO_ERROR, PROTOCOL_ERROR, INTERNAL_ERROR,
FLOW_CONTROL_ERROR, SETTINGS_TIMEOUT, STREAM_CLOSED,
FRAME_SIZE_ERROR, REFUSED_STREAM, CANCEL,
COMPRESSION_ERROR, CONNECT_ERROR, ENHANCE_YOUR_CALM,
INADEQUATE_SECURITY, HTTP_1_1_REQUIRED]
|
Add HTTP2 error code registry# -*- coding: utf-8 -*-
"""
hyper/http20/error_code_registry
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Global error code registry containing the established HTTP/2 error codes.
The registry is based on a 32-bit space so we use the error code to index into
the array.
The current registry is available at:
https://tools.ietf.org/html/draft-ietf-httpbis-http2-17#section-11.4
"""
NO_ERROR = {'Name': 'NO_ERROR',
'Description': 'Graceful shutdown'}
PROTOCOL_ERROR = {'Name': 'PROTOCOL_ERROR',
'Description': 'Protocol error detected'}
INTERNAL_ERROR = {'Name': 'INTERNAL_ERROR',
'Description': 'Implementation fault'}
FLOW_CONTROL_ERROR = {'Name': 'FLOW_CONTROL_ERROR',
'Description': 'Flow control limits exceeded'}
SETTINGS_TIMEOUT = {'Name': 'SETTINGS_TIMEOUT',
'Description': 'Settings not acknowledged'}
STREAM_CLOSED = {'Name': 'STREAM_CLOSED',
'Description': 'Frame received for closed stream'}
FRAME_SIZE_ERROR = {'Name': 'FRAME_SIZE_ERROR',
'Description': 'Frame size incorrect'}
REFUSED_STREAM = {'Name': 'REFUSED_STREAM',
'Description': 'Stream not processed'}
CANCEL = {'Name': 'CANCEL',
'Description': 'Stream cancelled'}
COMPRESSION_ERROR = {'Name': 'COMPRESSION_ERROR',
'Description': 'Compression state not updated'}
CONNECT_ERROR = {'Name': 'CONNECT_ERROR',
'Description':
'TCP connection error for CONNECT method'}
ENHANCE_YOUR_CALM = {'Name': 'ENHANCE_YOUR_CALM',
'Description': 'Processing capacity exceeded'}
INADEQUATE_SECURITY = {'Name': 'INADEQUATE_SECURITY',
'Description':
'Negotiated TLS parameters not acceptable'}
HTTP_1_1_REQUIRED = {'Name': 'HTTP_1_1_REQUIRED',
'Description': 'Use HTTP/1.1 for the request'}
H2_ERROR_CODE_REGISTRY = [NO_ERROR, PROTOCOL_ERROR, INTERNAL_ERROR,
FLOW_CONTROL_ERROR, SETTINGS_TIMEOUT, STREAM_CLOSED,
FRAME_SIZE_ERROR, REFUSED_STREAM, CANCEL,
COMPRESSION_ERROR, CONNECT_ERROR, ENHANCE_YOUR_CALM,
INADEQUATE_SECURITY, HTTP_1_1_REQUIRED]
|
<commit_before><commit_msg>Add HTTP2 error code registry<commit_after># -*- coding: utf-8 -*-
"""
hyper/http20/error_code_registry
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Global error code registry containing the established HTTP/2 error codes.
The registry is based on a 32-bit space so we use the error code to index into
the array.
The current registry is available at:
https://tools.ietf.org/html/draft-ietf-httpbis-http2-17#section-11.4
"""
NO_ERROR = {'Name': 'NO_ERROR',
'Description': 'Graceful shutdown'}
PROTOCOL_ERROR = {'Name': 'PROTOCOL_ERROR',
'Description': 'Protocol error detected'}
INTERNAL_ERROR = {'Name': 'INTERNAL_ERROR',
'Description': 'Implementation fault'}
FLOW_CONTROL_ERROR = {'Name': 'FLOW_CONTROL_ERROR',
'Description': 'Flow control limits exceeded'}
SETTINGS_TIMEOUT = {'Name': 'SETTINGS_TIMEOUT',
'Description': 'Settings not acknowledged'}
STREAM_CLOSED = {'Name': 'STREAM_CLOSED',
'Description': 'Frame received for closed stream'}
FRAME_SIZE_ERROR = {'Name': 'FRAME_SIZE_ERROR',
'Description': 'Frame size incorrect'}
REFUSED_STREAM = {'Name': 'REFUSED_STREAM',
'Description': 'Stream not processed'}
CANCEL = {'Name': 'CANCEL',
'Description': 'Stream cancelled'}
COMPRESSION_ERROR = {'Name': 'COMPRESSION_ERROR',
'Description': 'Compression state not updated'}
CONNECT_ERROR = {'Name': 'CONNECT_ERROR',
'Description':
'TCP connection error for CONNECT method'}
ENHANCE_YOUR_CALM = {'Name': 'ENHANCE_YOUR_CALM',
'Description': 'Processing capacity exceeded'}
INADEQUATE_SECURITY = {'Name': 'INADEQUATE_SECURITY',
'Description':
'Negotiated TLS parameters not acceptable'}
HTTP_1_1_REQUIRED = {'Name': 'HTTP_1_1_REQUIRED',
'Description': 'Use HTTP/1.1 for the request'}
H2_ERROR_CODE_REGISTRY = [NO_ERROR, PROTOCOL_ERROR, INTERNAL_ERROR,
FLOW_CONTROL_ERROR, SETTINGS_TIMEOUT, STREAM_CLOSED,
FRAME_SIZE_ERROR, REFUSED_STREAM, CANCEL,
COMPRESSION_ERROR, CONNECT_ERROR, ENHANCE_YOUR_CALM,
INADEQUATE_SECURITY, HTTP_1_1_REQUIRED]
|
|
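A minimal lookup sketch: since the registry list above is indexed by the error code itself, translating a code from a RST_STREAM or GOAWAY frame is a bounds-checked list access (the fallback string is our own addition, not part of the registry).
from hyper.http20.error_code_registry import H2_ERROR_CODE_REGISTRY

def describe_error(code):
    if 0 <= code < len(H2_ERROR_CODE_REGISTRY):
        entry = H2_ERROR_CODE_REGISTRY[code]
        return '%s: %s' % (entry['Name'], entry['Description'])
    return 'UNKNOWN (0x%x)' % code

print(describe_error(0x7))  # REFUSED_STREAM: Stream not processed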
0979da2fa3fdb59029c1e1e9f12a84388ab4f711
|
templatetags/uuid.py
|
templatetags/uuid.py
|
# -*- coding: utf-8 -*-
#
# File: uuid.py
# Author: Kenson Man <kenosn.idv.hk@gmail.com>
# Desc: The template-tag used to generate a uuid into output.
# Sample:
# {%uuid%} # Generate a UUID and output
# {%uuid as abc%} # Generate a UUID and store into abc
#
from django.template import Library, Node, TemplateSyntaxError
from uuid import uuid4
register = Library()
@register.simple_tag(name='uuid')
def do_uuid():
"""
Desc: The template-tag used to generate a uuid into output.
Sample:
{%uuid%} # Generate a UUID and output
{%uuid as abc%} # Generate a UUID and store into abc
"""
return uuid4().hex
|
Add the UUID template-tag that generates a UUID
|
Add the UUID template-tag that generates a UUID
|
Python
|
apache-2.0
|
kensonman/webframe,kensonman/webframe,kensonman/webframe
|
Add the UUID template-tag that generates a UUID
|
# -*- coding: utf-8 -*-
#
# File: uuid.py
# Author: Kenson Man <kenosn.idv.hk@gmail.com>
# Desc: The template-tag used to generate a uuid into output.
# Sample:
# {%uuid%} # Generate a UUID and output
# {%uuid as abc%} # Generate a UUID and store into abc
#
from django.template import Library, Node, TemplateSyntaxError
from uuid import uuid4
register = Library()
@register.simple_tag(name='uuid')
def do_uuid():
"""
Desc: The template-tag used to generate a uuid into output.
Sample:
{%uuid%} # Generate a UUID and output
{%uuid as abc%} # Generate a UUID and store into abc
"""
return uuid4().hex
|
<commit_before><commit_msg>Add the UUID template-tag that generates a UUID<commit_after>
|
# -*- coding: utf-8 -*-
#
# File: uuid.py
# Author: Kenson Man <kenosn.idv.hk@gmail.com>
# Desc: The template-tag used to generate a uuid into output.
# Sample:
# {%uuid%} # Generate a UUID and output
# {%uuid as abc%} # Generate a UUID and store into abc
#
from django.template import Library, Node, TemplateSyntaxError
from uuid import uuid4
register = Library()
@register.simple_tag(name='uuid')
def do_uuid():
"""
Desc: The template-tag used to generate a uuid into output.
Sample:
{%uuid%} # Generate a UUID and output
{%uuid as abc%} # Generate a UUID and store into abc
"""
return uuid4().hex
|
Add the UUID template-tag that generates a UUID# -*- coding: utf-8 -*-
#
# File: uuid.py
# Author: Kenson Man <kenosn.idv.hk@gmail.com>
# Desc: The template-tag used to generate a uuid into output.
# Sample:
# {%uuid%} # Generate a UUID and output
# {%uuid as abc%} # Generate a UUID and store into abc
#
from django.template import Library, Node, TemplateSyntaxError
from uuid import uuid4
register = Library()
@register.simple_tag(name='uuid')
def do_uuid():
"""
Desc: The template-tag used to generate a uuid into output.
Sample:
{%uuid%} # Generate a UUID and output
{%uuid as abc%} # Generate a UUID and store into abc
"""
return uuid4().hex
|
<commit_before><commit_msg>Add the UUID template-tag that generates a UUID<commit_after># -*- coding: utf-8 -*-
#
# File: uuid.py
# Author: Kenson Man <kenosn.idv.hk@gmail.com>
# Desc: The template-tag used to generate a uuid into output.
# Sample:
# {%uuid%} # Generate a UUID and output
# {%uuid as abc%} # Generate a UUID and store into abc
#
from django.template import Library, Node, TemplateSyntaxError
from uuid import uuid4
register = Library()
@register.simple_tag(name='uuid')
def do_uuid():
"""
Desc: The template-tag used to generate a uuid into output.
Sample:
{%uuid%} # Generate a UUID and output
{%uuid as abc%} # Generate a UUID and store into abc
"""
return uuid4().hex
|
|
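A hypothetical rendering sketch (the template string is illustrative; Django settings must be configured and the app owning this templatetags package must be in INSTALLED_APPS):
from django.template import Context, Template

tmpl = Template('{% load uuid %}<div id="{% uuid %}"></div>')
print(tmpl.render(Context({})))  # e.g. <div id="0f3a...9c"></div>, new hex each call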
eb98ff894b15068291c22fcfb87356e4af193646
|
alembic/versions/fa7e12c88c5_add_filter_field_to_users.py
|
alembic/versions/fa7e12c88c5_add_filter_field_to_users.py
|
"""Add filter field to users.
Revision ID: fa7e12c88c5
Revises: 2325abc7de81
Create Date: 2015-10-13 21:06:15.875359
"""
# revision identifiers, used by Alembic.
revision = 'fa7e12c88c5'
down_revision = '2325abc7de81'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('search_filters', postgresql.ARRAY(sa.Unicode(length=50)), nullable=False, server_default='{}'))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'search_filters')
### end Alembic commands ###
|
Add filter field to users (migration).
|
Add filter field to users (migration).
|
Python
|
agpl-3.0
|
MSPARP/newparp,MSPARP/newparp,MSPARP/newparp
|
Add filter field to users (migration).
|
"""Add filter field to users.
Revision ID: fa7e12c88c5
Revises: 2325abc7de81
Create Date: 2015-10-13 21:06:15.875359
"""
# revision identifiers, used by Alembic.
revision = 'fa7e12c88c5'
down_revision = '2325abc7de81'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('search_filters', postgresql.ARRAY(sa.Unicode(length=50)), nullable=False, server_default='{}'))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'search_filters')
### end Alembic commands ###
|
<commit_before><commit_msg>Add filter field to users (migration).<commit_after>
|
"""Add filter field to users.
Revision ID: fa7e12c88c5
Revises: 2325abc7de81
Create Date: 2015-10-13 21:06:15.875359
"""
# revision identifiers, used by Alembic.
revision = 'fa7e12c88c5'
down_revision = '2325abc7de81'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('search_filters', postgresql.ARRAY(sa.Unicode(length=50)), nullable=False, server_default='{}'))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'search_filters')
### end Alembic commands ###
|
Add filter field to users (migration)."""Add filter field to users.
Revision ID: fa7e12c88c5
Revises: 2325abc7de81
Create Date: 2015-10-13 21:06:15.875359
"""
# revision identifiers, used by Alembic.
revision = 'fa7e12c88c5'
down_revision = '2325abc7de81'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('search_filters', postgresql.ARRAY(sa.Unicode(length=50)), nullable=False, server_default='{}'))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'search_filters')
### end Alembic commands ###
|
<commit_before><commit_msg>Add filter field to users (migration).<commit_after>"""Add filter field to users.
Revision ID: fa7e12c88c5
Revises: 2325abc7de81
Create Date: 2015-10-13 21:06:15.875359
"""
# revision identifiers, used by Alembic.
revision = 'fa7e12c88c5'
down_revision = '2325abc7de81'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('search_filters', postgresql.ARRAY(sa.Unicode(length=50)), nullable=False, server_default='{}'))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'search_filters')
### end Alembic commands ###
|
|
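Illustrative only: the table definition below mirrors what the migration creates so the new Postgres ARRAY column can be queried with SQLAlchemy's array operators; nothing here is taken from the application's real models.
from sqlalchemy import Column, Integer, MetaData, Table, Unicode, select
from sqlalchemy.dialects.postgresql import ARRAY

metadata = MetaData()
users = Table(
    'users', metadata,
    Column('id', Integer, primary_key=True),
    Column('search_filters', ARRAY(Unicode(50)), nullable=False),
)

# Rows whose search_filters array contains the given term (Postgres @>).
stmt = select([users]).where(users.c.search_filters.contains(['fantasy']))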
eff1cc5dabd366ca4cbf6e8c97b97727327f29d5
|
test/option--.py
|
test/option--.py
|
#!/usr/bin/env python
__revision__ = "test/option-n.py __REVISION__ __DATE__ __DEVELOPER__"
import TestCmd
import os.path
import string
import sys
test = TestCmd.TestCmd(program = 'scons.py',
workdir = '',
interpreter = 'python')
test.write('build.py', r"""
import sys
file = open(sys.argv[1], 'w')
file.write("build.py: %s\n" % sys.argv[1])
file.close()
""")
test.write('SConstruct', """
MyBuild = Builder(name = "MyBuild",
action = "python build.py %(target)s")
env = Environment(BUILDERS = [MyBuild])
env.MyBuild(target = '-f1.out', source = 'f1.in')
env.MyBuild(target = '-f2.out', source = 'f2.in')
""")
expect = "python build.py -f1.out\npython build.py -f2.out\n"
test.run(chdir = '.', arguments = '-- -f1.out -f2.out')
test.fail_test(test.stdout() != expect)
test.fail_test(test.stderr() != "")
test.fail_test(not os.path.exists(test.workpath('-f1.out')))
test.fail_test(not os.path.exists(test.workpath('-f2.out')))
test.pass_test()
|
Add a test for -- terminating option processing.
|
Add a test for -- terminating option processing.
|
Python
|
mit
|
azatoth/scons,azatoth/scons,azatoth/scons,azatoth/scons,azatoth/scons
|
Add a test for -- terminating option processing.
|
#!/usr/bin/env python
__revision__ = "test/option-n.py __REVISION__ __DATE__ __DEVELOPER__"
import TestCmd
import os.path
import string
import sys
test = TestCmd.TestCmd(program = 'scons.py',
workdir = '',
interpreter = 'python')
test.write('build.py', r"""
import sys
file = open(sys.argv[1], 'w')
file.write("build.py: %s\n" % sys.argv[1])
file.close()
""")
test.write('SConstruct', """
MyBuild = Builder(name = "MyBuild",
action = "python build.py %(target)s")
env = Environment(BUILDERS = [MyBuild])
env.MyBuild(target = '-f1.out', source = 'f1.in')
env.MyBuild(target = '-f2.out', source = 'f2.in')
""")
expect = "python build.py -f1.out\npython build.py -f2.out\n"
test.run(chdir = '.', arguments = '-- -f1.out -f2.out')
test.fail_test(test.stdout() != expect)
test.fail_test(test.stderr() != "")
test.fail_test(not os.path.exists(test.workpath('-f1.out')))
test.fail_test(not os.path.exists(test.workpath('-f2.out')))
test.pass_test()
|
<commit_before><commit_msg>Add a test for -- terminating option processing.<commit_after>
|
#!/usr/bin/env python
__revision__ = "test/option-n.py __REVISION__ __DATE__ __DEVELOPER__"
import TestCmd
import os.path
import string
import sys
test = TestCmd.TestCmd(program = 'scons.py',
workdir = '',
interpreter = 'python')
test.write('build.py', r"""
import sys
file = open(sys.argv[1], 'w')
file.write("build.py: %s\n" % sys.argv[1])
file.close()
""")
test.write('SConstruct', """
MyBuild = Builder(name = "MyBuild",
action = "python build.py %(target)s")
env = Environment(BUILDERS = [MyBuild])
env.MyBuild(target = '-f1.out', source = 'f1.in')
env.MyBuild(target = '-f2.out', source = 'f2.in')
""")
expect = "python build.py -f1.out\npython build.py -f2.out\n"
test.run(chdir = '.', arguments = '-- -f1.out -f2.out')
test.fail_test(test.stdout() != expect)
test.fail_test(test.stderr() != "")
test.fail_test(not os.path.exists(test.workpath('-f1.out')))
test.fail_test(not os.path.exists(test.workpath('-f2.out')))
test.pass_test()
|
Add a test for -- terminating option processing.#!/usr/bin/env python
__revision__ = "test/option-n.py __REVISION__ __DATE__ __DEVELOPER__"
import TestCmd
import os.path
import string
import sys
test = TestCmd.TestCmd(program = 'scons.py',
workdir = '',
interpreter = 'python')
test.write('build.py', r"""
import sys
file = open(sys.argv[1], 'w')
file.write("build.py: %s\n" % sys.argv[1])
file.close()
""")
test.write('SConstruct', """
MyBuild = Builder(name = "MyBuild",
action = "python build.py %(target)s")
env = Environment(BUILDERS = [MyBuild])
env.MyBuild(target = '-f1.out', source = 'f1.in')
env.MyBuild(target = '-f2.out', source = 'f2.in')
""")
expect = "python build.py -f1.out\npython build.py -f2.out\n"
test.run(chdir = '.', arguments = '-- -f1.out -f2.out')
test.fail_test(test.stdout() != expect)
test.fail_test(test.stderr() != "")
test.fail_test(not os.path.exists(test.workpath('-f1.out')))
test.fail_test(not os.path.exists(test.workpath('-f2.out')))
test.pass_test()
|
<commit_before><commit_msg>Add a test for -- terminating option processing.<commit_after>#!/usr/bin/env python
__revision__ = "test/option-n.py __REVISION__ __DATE__ __DEVELOPER__"
import TestCmd
import os.path
import string
import sys
test = TestCmd.TestCmd(program = 'scons.py',
workdir = '',
interpreter = 'python')
test.write('build.py', r"""
import sys
file = open(sys.argv[1], 'w')
file.write("build.py: %s\n" % sys.argv[1])
file.close()
""")
test.write('SConstruct', """
MyBuild = Builder(name = "MyBuild",
action = "python build.py %(target)s")
env = Environment(BUILDERS = [MyBuild])
env.MyBuild(target = '-f1.out', source = 'f1.in')
env.MyBuild(target = '-f2.out', source = 'f2.in')
""")
expect = "python build.py -f1.out\npython build.py -f2.out\n"
test.run(chdir = '.', arguments = '-- -f1.out -f2.out')
test.fail_test(test.stdout() != expect)
test.fail_test(test.stderr() != "")
test.fail_test(not os.path.exists(test.workpath('-f1.out')))
test.fail_test(not os.path.exists(test.workpath('-f2.out')))
test.pass_test()
|
|
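A standalone sketch of the behaviour under test (independent of SCons itself): everything after a literal "--" is treated as a target rather than an option, which is how names like "-f1.out" become buildable.
def split_args(argv):
    options, targets, seen_ddash = [], [], False
    for arg in argv:
        if seen_ddash:
            targets.append(arg)   # past '--': always a target
        elif arg == '--':
            seen_ddash = True     # stop option processing here
        elif arg.startswith('-'):
            options.append(arg)
        else:
            targets.append(arg)
    return options, targets

assert split_args(['--', '-f1.out', '-f2.out']) == ([], ['-f1.out', '-f2.out'])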
53edb67dd3a7a88d9133a85cd2fe1a9d22a3a436
|
doc/examples/plot_piecewise_affine.py
|
doc/examples/plot_piecewise_affine.py
|
"""
===============================
Piecewise Affine Transformation
===============================
This example shows how to use the Piecewise Affine Transformation.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage.transform import PiecewiseAffineTransform, warp
from skimage import data
image = data.lena()
rows, cols = image.shape[0], image.shape[1]
src_cols = np.linspace(0, cols, 20)
src_rows = np.linspace(0, rows, 20)
src_rows, src_cols = np.meshgrid(src_rows, src_cols)
src = np.dstack([src_cols.flat, src_rows.flat])[0]
# add sinusoidal oscillation to row coordinates
dst_rows = src[:, 1] - np.sin(np.linspace(0, 3 * np.pi, src.shape[0])) * 50
dst_cols = src[:, 0]
dst_rows *= 1.5
dst_rows -= 1.5 * 50
dst = np.vstack([dst_cols, dst_rows]).T
tform = PiecewiseAffineTransform()
tform.estimate(src, dst)
output_shape = (image.shape[0] - 1.5 * 50, image.shape[1])
out = warp(image, tform, output_shape=output_shape)
plt.imshow(out)
plt.show()
|
Add example script for piecewise affine transform
|
Add example script for piecewise affine transform
|
Python
|
bsd-3-clause
|
jwiggins/scikit-image,jwiggins/scikit-image,ofgulban/scikit-image,SamHames/scikit-image,chintak/scikit-image,rjeli/scikit-image,newville/scikit-image,ofgulban/scikit-image,juliusbierk/scikit-image,oew1v07/scikit-image,almarklein/scikit-image,chriscrosscutler/scikit-image,pratapvardhan/scikit-image,chintak/scikit-image,juliusbierk/scikit-image,emon10005/scikit-image,rjeli/scikit-image,bsipocz/scikit-image,bennlich/scikit-image,dpshelio/scikit-image,michaelaye/scikit-image,WarrenWeckesser/scikits-image,michaelpacer/scikit-image,Hiyorimi/scikit-image,vighneshbirodkar/scikit-image,pratapvardhan/scikit-image,rjeli/scikit-image,ajaybhat/scikit-image,WarrenWeckesser/scikits-image,blink1073/scikit-image,almarklein/scikit-image,michaelpacer/scikit-image,GaZ3ll3/scikit-image,robintw/scikit-image,bennlich/scikit-image,chintak/scikit-image,Midafi/scikit-image,Hiyorimi/scikit-image,vighneshbirodkar/scikit-image,keflavich/scikit-image,paalge/scikit-image,ajaybhat/scikit-image,ClinicalGraphics/scikit-image,ClinicalGraphics/scikit-image,keflavich/scikit-image,almarklein/scikit-image,GaZ3ll3/scikit-image,Britefury/scikit-image,SamHames/scikit-image,Britefury/scikit-image,Midafi/scikit-image,michaelaye/scikit-image,paalge/scikit-image,ofgulban/scikit-image,bsipocz/scikit-image,youprofit/scikit-image,chintak/scikit-image,blink1073/scikit-image,oew1v07/scikit-image,warmspringwinds/scikit-image,SamHames/scikit-image,newville/scikit-image,vighneshbirodkar/scikit-image,almarklein/scikit-image,robintw/scikit-image,SamHames/scikit-image,youprofit/scikit-image,chriscrosscutler/scikit-image,dpshelio/scikit-image,paalge/scikit-image,warmspringwinds/scikit-image,emon10005/scikit-image
|
Add example script for piecewise affine transform
|
"""
===============================
Piecewise Affine Transformation
===============================
This example shows how to use the Piecewise Affine Transformation.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage.transform import PiecewiseAffineTransform, warp
from skimage import data
image = data.lena()
rows, cols = image.shape[0], image.shape[1]
src_cols = np.linspace(0, cols, 20)
src_rows = np.linspace(0, rows, 20)
src_rows, src_cols = np.meshgrid(src_rows, src_cols)
src = np.dstack([src_cols.flat, src_rows.flat])[0]
# add sinusoidal oscillation to row coordinates
dst_rows = src[:, 1] - np.sin(np.linspace(0, 3 * np.pi, src.shape[0])) * 50
dst_cols = src[:, 0]
dst_rows *= 1.5
dst_rows -= 1.5 * 50
dst = np.vstack([dst_cols, dst_rows]).T
tform = PiecewiseAffineTransform()
tform.estimate(src, dst)
output_shape = (image.shape[0] - 1.5 * 50, image.shape[1])
out = warp(image, tform, output_shape=output_shape)
plt.imshow(out)
plt.show()
|
<commit_before><commit_msg>Add example script for piecewise affine transform<commit_after>
|
"""
===============================
Piecewise Affine Transformation
===============================
This example shows how to use the Piecewise Affine Transformation.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage.transform import PiecewiseAffineTransform, warp
from skimage import data
image = data.lena()
rows, cols = image.shape[0], image.shape[1]
src_cols = np.linspace(0, cols, 20)
src_rows = np.linspace(0, rows, 20)
src_rows, src_cols = np.meshgrid(src_rows, src_cols)
src = np.dstack([src_cols.flat, src_rows.flat])[0]
# add sinusoidal oscillation to row coordinates
dst_rows = src[:, 1] - np.sin(np.linspace(0, 3 * np.pi, src.shape[0])) * 50
dst_cols = src[:, 0]
dst_rows *= 1.5
dst_rows -= 1.5 * 50
dst = np.vstack([dst_cols, dst_rows]).T
tform = PiecewiseAffineTransform()
tform.estimate(src, dst)
output_shape = (image.shape[0] - 1.5 * 50, image.shape[1])
out = warp(image, tform, output_shape=output_shape)
plt.imshow(out)
plt.show()
|
Add example script for piecewise affine transform"""
===============================
Piecewise Affine Transformation
===============================
This example shows how to use the Piecewise Affine Transformation.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage.transform import PiecewiseAffineTransform, warp
from skimage import data
image = data.lena()
rows, cols = image.shape[0], image.shape[1]
src_cols = np.linspace(0, cols, 20)
src_rows = np.linspace(0, rows, 20)
src_rows, src_cols = np.meshgrid(src_rows, src_cols)
src = np.dstack([src_cols.flat, src_rows.flat])[0]
# add sinusoidal oscillation to row coordinates
dst_rows = src[:, 1] - np.sin(np.linspace(0, 3 * np.pi, src.shape[0])) * 50
dst_cols = src[:, 0]
dst_rows *= 1.5
dst_rows -= 1.5 * 50
dst = np.vstack([dst_cols, dst_rows]).T
tform = PiecewiseAffineTransform()
tform.estimate(src, dst)
output_shape = (image.shape[0] - 1.5 * 50, image.shape[1])
out = warp(image, tform, output_shape=output_shape)
plt.imshow(out)
plt.show()
|
<commit_before><commit_msg>Add example script for piecewise affine transform<commit_after>"""
===============================
Piecewise Affine Transformation
===============================
This example shows how to use the Piecewise Affine Transformation.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage.transform import PiecewiseAffineTransform, warp
from skimage import data
image = data.lena()
rows, cols = image.shape[0], image.shape[1]
src_cols = np.linspace(0, cols, 20)
src_rows = np.linspace(0, rows, 20)
src_rows, src_cols = np.meshgrid(src_rows, src_cols)
src = np.dstack([src_cols.flat, src_rows.flat])[0]
# add sinusoidal oscillation to row coordinates
dst_rows = src[:, 1] - np.sin(np.linspace(0, 3 * np.pi, src.shape[0])) * 50
dst_cols = src[:, 0]
dst_rows *= 1.5
dst_rows -= 1.5 * 50
dst = np.vstack([dst_cols, dst_rows]).T
tform = PiecewiseAffineTransform()
tform.estimate(src, dst)
output_shape = (image.shape[0] - 1.5 * 50, image.shape[1])
out = warp(image, tform, output_shape=output_shape)
plt.imshow(out)
plt.show()
|
|
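A small follow-on sanity check (our own construction, not part of the example): after estimate() the transform is callable on (col, row) points, so individual coordinates can be verified directly.
import numpy as np
from skimage.transform import PiecewiseAffineTransform

src = np.array([[0, 0], [0, 10], [10, 0], [10, 10]], dtype=float)
dst = src + [2, 5]  # a pure translation keeps the expected output obvious
tform = PiecewiseAffineTransform()
tform.estimate(src, dst)
print(tform(np.array([[5.0, 5.0]])))  # approximately [[7., 10.]]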
e85dd9773b6c76a0b437bee182d2aafedd3ead8d
|
cenaming/us_states.py
|
cenaming/us_states.py
|
STATE_NAMES = (
'Alabama',
'Alaska',
'Arizona',
'Arkansas',
'California',
'Colorado',
'Connecticut',
'Delaware',
'Florida',
'Georgia',
'Hawaii',
'Idaho',
'Illinois',
'Indiana',
'Iowa',
'Kansas',
'Kentucky',
'Louisiana',
'Maine',
'Maryland',
'Massachusetts',
'Michigan',
'Minnesota',
'Mississippi',
'Missouri',
'Montana',
'Nebraska',
'Nevada',
'New Hampshire',
'New Jersey',
'New Mexico',
'New York',
'North Carolina',
'North Dakota',
'Ohio',
'Oklahoma',
'Oregon',
'Pennsylvania',
'Rhode Island',
'South Carolina',
'South Dakota',
'Tennessee',
'Texas',
'Utah',
'Vermont',
'Virginia',
'Washington',
'West Virginia',
'Wisconsin',
'Wyoming'
)
|
Add list of US state names.
|
Add list of US state names.
|
Python
|
mit
|
portfoliome/cenaming
|
Add list of US state names.
|
STATE_NAMES = (
'Alabama',
'Alaska',
'Arizona',
'Arkansas',
'California',
'Colorado',
'Connecticut',
'Delaware',
'Florida',
'Georgia',
'Hawaii',
'Idaho',
'Illinois',
'Indiana',
'Iowa',
'Kansas',
'Kentucky',
'Louisiana',
'Maine',
'Maryland',
'Massachusetts',
'Michigan',
'Minnesota',
'Mississippi',
'Missouri',
'Montana',
'Nebraska',
'Nevada',
'New Hampshire',
'New Jersey',
'New Mexico',
'New York',
'North Carolina',
'North Dakota',
'Ohio',
'Oklahoma',
'Oregon',
'Pennsylvania',
'Rhode Island',
'South Carolina',
'South Dakota',
'Tennessee',
'Texas',
'Utah',
'Vermont',
'Virginia',
'Washington',
'West Virginia',
'Wisconsin',
'Wyoming'
)
|
<commit_before><commit_msg>Add list of US state names.<commit_after>
|
STATE_NAMES = (
'Alabama',
'Alaska',
'Arizona',
'Arkansas',
'California',
'Colorado',
'Connecticut',
'Delaware',
'Florida',
'Georgia',
'Hawaii',
'Idaho',
'Illinois',
'Indiana',
'Iowa',
'Kansas',
'Kentucky',
'Louisiana',
'Maine',
'Maryland',
'Massachusetts',
'Michigan',
'Minnesota',
'Mississippi',
'Missouri',
'Montana',
'Nebraska',
'Nevada',
'New Hampshire',
'New Jersey',
'New Mexico',
'New York',
'North Carolina',
'North Dakota',
'Ohio',
'Oklahoma',
'Oregon',
'Pennsylvania',
'Rhode Island',
'South Carolina',
'South Dakota',
'Tennessee',
'Texas',
'Utah',
'Vermont',
'Virginia',
'Washington',
'West Virginia',
'Wisconsin',
'Wyoming'
)
|
Add list of US state names.
STATE_NAMES = (
'Alabama',
'Alaska',
'Arizona',
'Arkansas',
'California',
'Colorado',
'Connecticut',
'Delaware',
'Florida',
'Georgia',
'Hawaii',
'Idaho',
'Illinois',
'Indiana',
'Iowa',
'Kansas',
'Kentucky',
'Louisiana',
'Maine',
'Maryland',
'Massachusetts',
'Michigan',
'Minnesota',
'Mississippi',
'Missouri',
'Montana',
'Nebraska',
'Nevada',
'New Hampshire',
'New Jersey',
'New Mexico',
'New York',
'North Carolina',
'North Dakota',
'Ohio',
'Oklahoma',
'Oregon',
'Pennsylvania',
'Rhode Island',
'South Carolina',
'South Dakota',
'Tennessee',
'Texas',
'Utah',
'Vermont',
'Virginia',
'Washington',
'West Virginia',
'Wisconsin',
'Wyoming'
)
|
<commit_before><commit_msg>Add list of US state names.<commit_after>
STATE_NAMES = (
'Alabama',
'Alaska',
'Arizona',
'Arkansas',
'California',
'Colorado',
'Connecticut',
'Delaware',
'Florida',
'Georgia',
'Hawaii',
'Idaho',
'Illinois',
'Indiana',
'Iowa',
'Kansas',
'Kentucky',
'Louisiana',
'Maine',
'Maryland',
'Massachusetts',
'Michigan',
'Minnesota',
'Mississippi',
'Missouri',
'Montana',
'Nebraska',
'Nevada',
'New Hampshire',
'New Jersey',
'New Mexico',
'New York',
'North Carolina',
'North Dakota',
'Ohio',
'Oklahoma',
'Oregon',
'Pennsylvania',
'Rhode Island',
'South Carolina',
'South Dakota',
'Tennessee',
'Texas',
'Utah',
'Vermont',
'Virginia',
'Washington',
'West Virginia',
'Wisconsin',
'Wyoming'
)
|
|
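Trivial usage sketch; nothing beyond the constant itself is assumed:
from cenaming.us_states import STATE_NAMES

assert len(STATE_NAMES) == 50
assert 'Texas' in STATE_NAMES
STATE_CHOICES = [(name, name) for name in STATE_NAMES]  # e.g. for form fields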
03edf22323d78989210a4cbefda7597158d33b42
|
fire_rs/planning/front_observation.py
|
fire_rs/planning/front_observation.py
|
import logging
import numpy as np
import matplotlib.pyplot as plt
from fire_rs.geodata import environment
import fire_rs.firemodel.propagation as propagation
import fire_rs.display
import fire_rs.planning.observation_path_search
def burn(area_bounds, ignition_point, wind):
"""Burn some area from an ignition point with given wind conditions"""
env = propagation.Environment(area_bounds, wind_speed=wind[0], wind_dir=wind[1])
ignition_map = propagation.propagate(env, *ignition_point)
return ignition_map
def main():
area = [[530000.0, 535000.0], [6230000.0, 6235000.0]]
ignition_point = (100, 100)
area_wind = (10, np.pi)
ignition_times = burn(area, ignition_point, area_wind)
world = environment.World()
some_area = world.get_elevation(area)
x = np.arange(some_area.data.shape[0])
x = (x * some_area.cell_width) + some_area.x_offset
y = np.arange(some_area.data.shape[1])
y = (y * some_area.cell_height) + some_area.y_offset
X, Y = np.meshgrid(x, y)
figure, axis = fire_rs.display.get_default_figure_and_axis()
fire_rs.display.plot_firefront_contour(axis, X, Y, ignition_times.data['ignition']/60, nfronts=50)
print("")
for low, high in [[60, 61], [70, 71], [100, 101]]:
ignition_array = ignition_times.data['ignition']/60
low_bound = ignition_array>low
up_bound = ignition_array<high
valid_points = low_bound & up_bound
selected_points = []
for hor in range(len(x)):
for ver in range(len(y)):
if valid_points[hor, ver]:
selected_points.append([hor, ver])
selected_points = np.array(selected_points)
try:
fire_rs.planning.observation_path_search.process_points(selected_points, (10,10), eps=5)
except ValueError:
logging.exception("")
if __name__ == '__main__':
main()
|
Test the regression path planning on simulated fire fronts
|
Test the regression path planning on simulated fire fronts
|
Python
|
bsd-2-clause
|
fire-rs-laas/fire-rs-saop,fire-rs-laas/fire-rs-saop,fire-rs-laas/fire-rs-saop,fire-rs-laas/fire-rs-saop
|
Test the regression path planning on simulated fire fronts
|
import logging
import numpy as np
import matplotlib.pyplot as plt
from fire_rs.geodata import environment
import fire_rs.firemodel.propagation as propagation
import fire_rs.display
import fire_rs.planning.observation_path_search
def burn(area_bounds, ignition_point, wind):
"""Burn some area from an ignition point with given wind conditions"""
env = propagation.Environment(area_bounds, wind_speed=wind[0], wind_dir=wind[1])
ignition_map = propagation.propagate(env, *ignition_point)
return ignition_map
def main():
area = [[530000.0, 535000.0], [6230000.0, 6235000.0]]
ignition_point = (100, 100)
area_wind = (10, np.pi)
ignition_times = burn(area, ignition_point, area_wind)
world = environment.World()
some_area = world.get_elevation(area)
x = np.arange(some_area.data.shape[0])
x = (x * some_area.cell_width) + some_area.x_offset
y = np.arange(some_area.data.shape[1])
y = (y * some_area.cell_height) + some_area.y_offset
X, Y = np.meshgrid(x, y)
figure, axis = fire_rs.display.get_default_figure_and_axis()
fire_rs.display.plot_firefront_contour(axis, X, Y, ignition_times.data['ignition']/60, nfronts=50)
print("")
for low, high in [[60, 61], [70, 71], [100, 101]]:
ignition_array = ignition_times.data['ignition']/60
low_bound = ignition_array>low
up_bound = ignition_array<high
valid_points = low_bound & up_bound
selected_points = []
for hor in range(len(x)):
for ver in range(len(y)):
if valid_points[hor, ver]:
selected_points.append([hor, ver])
selected_points = np.array(selected_points)
try:
fire_rs.planning.observation_path_search.process_points(selected_points, (10,10), eps=5)
except ValueError:
logging.exception("")
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Test the regression path planning on simulated fire fronts<commit_after>
|
import logging
import numpy as np
import matplotlib.pyplot as plt
from fire_rs.geodata import environment
import fire_rs.firemodel.propagation as propagation
import fire_rs.display
import fire_rs.planning.observation_path_search
def burn(area_bounds, ignition_point, wind):
"""Burn some area from an ignition point with given wind conditions"""
env = propagation.Environment(area_bounds, wind_speed=wind[0], wind_dir=wind[1])
ignition_map = propagation.propagate(env, *ignition_point)
return ignition_map
def main():
area = [[530000.0, 535000.0], [6230000.0, 6235000.0]]
ignition_point = (100, 100)
area_wind = (10, np.pi)
ignition_times = burn(area, ignition_point, area_wind)
world = environment.World()
some_area = world.get_elevation(area)
x = np.arange(some_area.data.shape[0])
x = (x * some_area.cell_width) + some_area.x_offset
y = np.arange(some_area.data.shape[1])
y = (y * some_area.cell_height) + some_area.y_offset
X, Y = np.meshgrid(x, y)
figure, axis = fire_rs.display.get_default_figure_and_axis()
fire_rs.display.plot_firefront_contour(axis, X, Y, ignition_times.data['ignition']/60, nfronts=50)
print("")
for low, high in [[60, 61], [70, 71], [100, 101]]:
ignition_array = ignition_times.data['ignition']/60
low_bound = ignition_array>low
up_bound = ignition_array<high
valid_points = low_bound & up_bound
selected_points = []
for hor in range(len(x)):
for ver in range(len(y)):
if valid_points[hor, ver]:
selected_points.append([hor, ver])
selected_points = np.array(selected_points)
try:
fire_rs.planning.observation_path_search.process_points(selected_points, (10,10), eps=5)
except ValueError:
logging.exception("")
if __name__ == '__main__':
main()
|
Test the regression path planning on simulated fire frontsimport logging
import numpy as np
import matplotlib.pyplot as plt
from fire_rs.geodata import environment
import fire_rs.firemodel.propagation as propagation
import fire_rs.display
import fire_rs.planning.observation_path_search
def burn(area_bounds, ignition_point, wind):
"""Burn some area from an ignition point with given wind conditions"""
env = propagation.Environment(area_bounds, wind_speed=wind[0], wind_dir=wind[1])
ignition_map = propagation.propagate(env, *ignition_point)
return ignition_map
def main():
area = [[530000.0, 535000.0], [6230000.0, 6235000.0]]
ignition_point = (100, 100)
area_wind = (10, np.pi)
ignition_times = burn(area, ignition_point, area_wind)
world = environment.World()
some_area = world.get_elevation(area)
x = np.arange(some_area.data.shape[0])
x = (x * some_area.cell_width) + some_area.x_offset
y = np.arange(some_area.data.shape[1])
y = (y * some_area.cell_height) + some_area.y_offset
X, Y = np.meshgrid(x, y)
figure, axis = fire_rs.display.get_default_figure_and_axis()
fire_rs.display.plot_firefront_contour(axis, X, Y, ignition_times.data['ignition']/60, nfronts=50)
print("")
for low, high in [[60, 61], [70, 71], [100, 101]]:
ignition_array = ignition_times.data['ignition']/60
low_bound = ignition_array>low
up_bound = ignition_array<high
valid_points = low_bound & up_bound
selected_points = []
for hor in range(len(x)):
for ver in range(len(y)):
if valid_points[hor, ver]:
selected_points.append([hor, ver])
selected_points = np.array(selected_points)
try:
fire_rs.planning.observation_path_search.process_points(selected_points, (10,10), eps=5)
except ValueError:
logging.exception("")
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Test the regression path planning on simulated fire fronts<commit_after>import logging
import numpy as np
import matplotlib.pyplot as plt
from fire_rs.geodata import environment
import fire_rs.firemodel.propagation as propagation
import fire_rs.display
import fire_rs.planning.observation_path_search
def burn(area_bounds, ignition_point, wind):
"""Burn some area from an ignition point with given wind conditions"""
env = propagation.Environment(area_bounds, wind_speed=wind[0], wind_dir=wind[1])
ignition_map = propagation.propagate(env, *ignition_point)
return ignition_map
def main():
area = [[530000.0, 535000.0], [6230000.0, 6235000.0]]
ignition_point = (100, 100)
area_wind = (10, np.pi)
ignition_times = burn(area, ignition_point, area_wind)
world = environment.World()
some_area = world.get_elevation(area)
x = np.arange(some_area.data.shape[0])
x = (x * some_area.cell_width) + some_area.x_offset
y = np.arange(some_area.data.shape[1])
y = (y * some_area.cell_height) + some_area.y_offset
X, Y = np.meshgrid(x, y)
figure, axis = fire_rs.display.get_default_figure_and_axis()
fire_rs.display.plot_firefront_contour(axis, X, Y, ignition_times.data['ignition']/60, nfronts=50)
print("")
for low, high in [[60, 61], [70, 71], [100, 101]]:
ignition_array = ignition_times.data['ignition']/60
low_bound = ignition_array>low
up_bound = ignition_array<high
valid_points = low_bound & up_bound
selected_points = []
for hor in range(len(x)):
for ver in range(len(y)):
if valid_points[hor, ver]:
selected_points.append([hor, ver])
selected_points = np.array(selected_points)
try:
fire_rs.planning.observation_path_search.process_points(selected_points, (10,10), eps=5)
except ValueError:
logging.exception("")
if __name__ == '__main__':
main()
|
|
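The nested hor/ver scan in the script above is a plain boolean-mask lookup; numpy can do the same selection in one call. A minimal sketch of that pattern — the grid values below are invented for illustration, not taken from the simulation:

import numpy as np

# Hypothetical 4x4 grid of ignition times in minutes; any 2-D array works.
ignition_min = np.array([[10., 20., 30., 40.],
                         [50., 60.5, 70.5, 80.],
                         [55., 61., 65., 90.],
                         [59., 62., 99., 100.]])

low, high = 60, 71
mask = (ignition_min > low) & (ignition_min < high)

# np.argwhere replaces the nested hor/ver loops: one (row, col) pair per cell.
selected_points = np.argwhere(mask)
print(selected_points)  # indices of cells igniting strictly between 60 and 71 min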
6e5460b61f0b12e913b12d609c87df3f8e3e8afb
|
froide/foirequest/migrations/0041_auto_20191024_2025.py
|
froide/foirequest/migrations/0041_auto_20191024_2025.py
|
# Generated by Django 2.2.2 on 2019-10-24 18:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('foirequest', '0040_auto_20190718_1815'),
]
operations = [
migrations.AlterField(
model_name='foimessage',
name='kind',
field=models.CharField(choices=[('email', 'Email'), ('post', 'Postal mail'), ('fax', 'Fax'), ('upload', 'Upload'), ('phone', 'Phone call'), ('visit', 'Personal visit')], default='email', max_length=10),
),
]
|
Add upload to foimessage kind choices
|
Add upload to foimessage kind choices
|
Python
|
mit
|
fin/froide,fin/froide,stefanw/froide,stefanw/froide,stefanw/froide,stefanw/froide,fin/froide,fin/froide,stefanw/froide
|
Add upload to foimessage kind choices
|
# Generated by Django 2.2.2 on 2019-10-24 18:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('foirequest', '0040_auto_20190718_1815'),
]
operations = [
migrations.AlterField(
model_name='foimessage',
name='kind',
field=models.CharField(choices=[('email', 'Email'), ('post', 'Postal mail'), ('fax', 'Fax'), ('upload', 'Upload'), ('phone', 'Phone call'), ('visit', 'Personal visit')], default='email', max_length=10),
),
]
|
<commit_before><commit_msg>Add upload to foimessage kind choices<commit_after>
|
# Generated by Django 2.2.2 on 2019-10-24 18:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('foirequest', '0040_auto_20190718_1815'),
]
operations = [
migrations.AlterField(
model_name='foimessage',
name='kind',
field=models.CharField(choices=[('email', 'Email'), ('post', 'Postal mail'), ('fax', 'Fax'), ('upload', 'Upload'), ('phone', 'Phone call'), ('visit', 'Personal visit')], default='email', max_length=10),
),
]
|
Add upload to foimessage kind choices# Generated by Django 2.2.2 on 2019-10-24 18:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('foirequest', '0040_auto_20190718_1815'),
]
operations = [
migrations.AlterField(
model_name='foimessage',
name='kind',
field=models.CharField(choices=[('email', 'Email'), ('post', 'Postal mail'), ('fax', 'Fax'), ('upload', 'Upload'), ('phone', 'Phone call'), ('visit', 'Personal visit')], default='email', max_length=10),
),
]
|
<commit_before><commit_msg>Add upload to foimessage kind choices<commit_after># Generated by Django 2.2.2 on 2019-10-24 18:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('foirequest', '0040_auto_20190718_1815'),
]
operations = [
migrations.AlterField(
model_name='foimessage',
name='kind',
field=models.CharField(choices=[('email', 'Email'), ('post', 'Postal mail'), ('fax', 'Fax'), ('upload', 'Upload'), ('phone', 'Phone call'), ('visit', 'Personal visit')], default='email', max_length=10),
),
]
|
|
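For context, the field this migration alters looks roughly like the sketch below; only the field definition is taken from the migration, while the model body and the KIND_CHOICES name are assumptions. Since Django enforces choices during validation rather than in the database, adding a choice is typically a no-op at the SQL level:

from django.db import models

class FoiMessage(models.Model):
    KIND_CHOICES = [
        ('email', 'Email'),
        ('post', 'Postal mail'),
        ('fax', 'Fax'),
        ('upload', 'Upload'),   # the newly added kind
        ('phone', 'Phone call'),
        ('visit', 'Personal visit'),
    ]
    # choices are checked by Django form/model validation, not by a DB constraint
    kind = models.CharField(max_length=10, choices=KIND_CHOICES, default='email')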
9f05f7f9367c89d5fd2073adb3969f2b010bdf1f
|
tools/id4glossary.py
|
tools/id4glossary.py
|
#!/usr/bin/env python
"""Pandoc filter to convert add ids to glossary entries.
Usage:
pandoc source.md --filter=id4glossary.py --output=output.html
"""
import pandocfilters as pf
def normalize_keyword(keyword):
"""Normalize keyword for became id
- Replace white space with '-'
- Convert to lowercase"""
return keyword.lower().replace(' ', '-')
def keyword2html(keyword_node):
"""Return HTML version of keyword with id."""
keyword = ' '.join([word['c'] for word in keyword_node if word['t'] == 'Str'])
id = normalize_keyword(keyword)
return [{"t": "Span",
"c": [[id, [],[]],
keyword_node]}]
def id4glossary(key, value, format, meta):
"""Add id to keywords at glossary."""
if "subtitle" in meta and \
''.join([string['c'] for string in meta["subtitle"]['c']]) == 'Reference':
if key == "DefinitionList":
for definition in value:
definition[0] = keyword2html(definition[0])
return {"t": key,
"c": value}
if __name__ == '__main__':
pf.toJSONFilter(id4glossary)
|
Add id to glossary entry
|
Add id to glossary entry
This is needed to enable users to jump to glossary entries.
|
Python
|
mit
|
RobIsaTeam/courses,easyreporting/fmri_reporting,easyreporting/fmri_reporting,easyreporting/fmri_reporting,RobIsaTeam/courses,easyreporting/fmri_reporting,easyreporting/fmri_reporting,easyreporting/fmri_reporting
|
Add id to glossary entry
This is needed to enable users to jump to glossary entries.
|
#!/usr/bin/env python
"""Pandoc filter to convert add ids to glossary entries.
Usage:
pandoc source.md --filter=id4glossary.py --output=output.html
"""
import pandocfilters as pf
def normalize_keyword(keyword):
"""Normalize keyword for became id
- Replace white space with '-'
- Convert to lowercase"""
return keyword.lower().replace(' ', '-')
def keyword2html(keyword_node):
"""Return HTML version of keyword with id."""
keyword = ' '.join([word['c'] for word in keyword_node if word['t'] == 'Str'])
id = normalize_keyword(keyword)
return [{"t": "Span",
"c": [[id, [],[]],
keyword_node]}]
def id4glossary(key, value, format, meta):
"""Add id to keywords at glossary."""
if "subtitle" in meta and \
''.join([string['c'] for string in meta["subtitle"]['c']]) == 'Reference':
if key == "DefinitionList":
for definition in value:
definition[0] = keyword2html(definition[0])
return {"t": key,
"c": value}
if __name__ == '__main__':
pf.toJSONFilter(id4glossary)
|
<commit_before><commit_msg>Add id to glossary entry
This is needed to enable users to jump to glossary entries.<commit_after>
|
#!/usr/bin/env python
"""Pandoc filter to convert add ids to glossary entries.
Usage:
pandoc source.md --filter=id4glossary.py --output=output.html
"""
import pandocfilters as pf
def normalize_keyword(keyword):
"""Normalize keyword for became id
- Replace white space with '-'
- Convert to lowercase"""
return keyword.lower().replace(' ', '-')
def keyword2html(keyword_node):
"""Return HTML version of keyword with id."""
keyword = ' '.join([word['c'] for word in keyword_node if word['t'] == 'Str'])
id = normalize_keyword(keyword)
return [{"t": "Span",
"c": [[id, [],[]],
keyword_node]}]
def id4glossary(key, value, format, meta):
"""Add id to keywords at glossary."""
if "subtitle" in meta and \
''.join([string['c'] for string in meta["subtitle"]['c']]) == 'Reference':
if key == "DefinitionList":
for definition in value:
definition[0] = keyword2html(definition[0])
return {"t": key,
"c": value}
if __name__ == '__main__':
pf.toJSONFilter(id4glossary)
|
Add id to glossary entry
This is needed to enable users to jump to glossary entries.#!/usr/bin/env python
"""Pandoc filter to convert add ids to glossary entries.
Usage:
pandoc source.md --filter=id4glossary.py --output=output.html
"""
import pandocfilters as pf
def normalize_keyword(keyword):
"""Normalize keyword for became id
- Replace white space with '-'
- Convert to lowercase"""
return keyword.lower().replace(' ', '-')
def keyword2html(keyword_node):
"""Return HTML version of keyword with id."""
keyword = ' '.join([word['c'] for word in keyword_node if word['t'] == 'Str'])
id = normalize_keyword(keyword)
return [{"t": "Span",
"c": [[id, [],[]],
keyword_node]}]
def id4glossary(key, value, format, meta):
"""Add id to keywords at glossary."""
if "subtitle" in meta and \
''.join([string['c'] for string in meta["subtitle"]['c']]) == 'Reference':
if key == "DefinitionList":
for definition in value:
definition[0] = keyword2html(definition[0])
return {"t": key,
"c": value}
if __name__ == '__main__':
pf.toJSONFilter(id4glossary)
|
<commit_before><commit_msg>Add id to glossary entry
This is needed to enable users to jump to glossary entries.<commit_after>#!/usr/bin/env python
"""Pandoc filter to convert add ids to glossary entries.
Usage:
pandoc source.md --filter=id4glossary.py --output=output.html
"""
import pandocfilters as pf
def normalize_keyword(keyword):
"""Normalize keyword for became id
- Replace white space with '-'
- Convert to lowercase"""
return keyword.lower().replace(' ', '-')
def keyword2html(keyword_node):
"""Return HTML version of keyword with id."""
keyword = ' '.join([word['c'] for word in keyword_node if word['t'] == 'Str'])
id = normalize_keyword(keyword)
return [{"t": "Span",
"c": [[id, [],[]],
keyword_node]}]
def id4glossary(key, value, format, meta):
"""Add id to keywords at glossary."""
if "subtitle" in meta and \
''.join([string['c'] for string in meta["subtitle"]['c']]) == 'Reference':
if key == "DefinitionList":
for definition in value:
definition[0] = keyword2html(definition[0])
return {"t": key,
"c": value}
if __name__ == '__main__':
pf.toJSONFilter(id4glossary)
|
|
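normalize_keyword above only lowercases and hyphenates, so the ids it produces are easy to predict; a quick self-contained check (the keywords are invented examples):

def normalize_keyword(keyword):
    return keyword.lower().replace(' ', '-')

assert normalize_keyword('Functional MRI') == 'functional-mri'
assert normalize_keyword('BOLD signal') == 'bold-signal'
# A glossary link can then target e.g. reference.html#functional-mri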
655a18258d13c00e39e1f0cbf9369215d5d084ae
|
parse_setup_data.py
|
parse_setup_data.py
|
#!/usr/bin/env python
# Copyright (C) 2014 Instituto Nokia de Tecnologia - INdT
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from __future__ import print_function
import re
import argparse
from binascii import crc_hqx
from pprint import pprint
import struct
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Parse nRF8001 setup data.")
parser.add_argument("report", type=file,
help="report generated by nRFgoStudio (ublue_setup.gen.out.txt)")
args = parser.parse_args()
setup_data = None
crc = 0xffff
for line in args.report.readlines():
if line.strip() == "[Setup Data]":
setup_data = {}
if not line.strip() or setup_data is None:
continue
if not re.match("^[0-9A-F-]+$", line.strip()):
continue
data = line.strip().split("-")
assert int(data[0], 16) == len(data[1:])
# Opcode: Setup (0x06)
assert data[1] == '06'
target = int(data[2], 16)
if not setup_data.get(target):
setup_data[target] = ""
setup_data[target] += "".join(data[3:])
if target == 0xf0:
# Remove existing CRC when calculating new CRC
data = data[:-2]
crc = crc_hqx("".join(data).decode("hex"), crc)
# Check CRC
expected_crc = struct.unpack(">H",
"".join(setup_data[0xf0].decode("hex")[-2:]))[0]
assert crc == expected_crc
print("Setup Data:")
pprint(setup_data)
|
Add initial script for parsing Setup Data packets
|
Add initial script for parsing Setup Data packets
|
Python
|
mit
|
lizardo/nrf8001,lizardo/nrf8001,lizardo/nrf8001
|
Add initial script for parsing Setup Data packets
|
#!/usr/bin/env python
# Copyright (C) 2014 Instituto Nokia de Tecnologia - INdT
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from __future__ import print_function
import re
import argparse
from binascii import crc_hqx
from pprint import pprint
import struct
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Parse nRF8001 setup data.")
parser.add_argument("report", type=file,
help="report generated by nRFgoStudio (ublue_setup.gen.out.txt)")
args = parser.parse_args()
setup_data = None
crc = 0xffff
for line in args.report.readlines():
if line.strip() == "[Setup Data]":
setup_data = {}
if not line.strip() or setup_data is None:
continue
if not re.match("^[0-9A-F-]+$", line.strip()):
continue
data = line.strip().split("-")
assert int(data[0], 16) == len(data[1:])
# Opcode: Setup (0x06)
assert data[1] == '06'
target = int(data[2], 16)
if not setup_data.get(target):
setup_data[target] = ""
setup_data[target] += "".join(data[3:])
if target == 0xf0:
# Remove existing CRC when calculating new CRC
data = data[:-2]
crc = crc_hqx("".join(data).decode("hex"), crc)
# Check CRC
expected_crc = struct.unpack(">H",
"".join(setup_data[0xf0].decode("hex")[-2:]))[0]
assert crc == expected_crc
print("Setup Data:")
pprint(setup_data)
|
<commit_before><commit_msg>Add initial script for parsing Setup Data packets<commit_after>
|
#!/usr/bin/env python
# Copyright (C) 2014 Instituto Nokia de Tecnologia - INdT
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from __future__ import print_function
import re
import argparse
from binascii import crc_hqx
from pprint import pprint
import struct
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Parse nRF8001 setup data.")
parser.add_argument("report", type=file,
help="report generated by nRFgoStudio (ublue_setup.gen.out.txt)")
args = parser.parse_args()
setup_data = None
crc = 0xffff
for line in args.report.readlines():
if line.strip() == "[Setup Data]":
setup_data = {}
if not line.strip() or setup_data is None:
continue
if not re.match("^[0-9A-F-]+$", line.strip()):
continue
data = line.strip().split("-")
assert int(data[0], 16) == len(data[1:])
# Opcode: Setup (0x06)
assert data[1] == '06'
target = int(data[2], 16)
if not setup_data.get(target):
setup_data[target] = ""
setup_data[target] += "".join(data[3:])
if target == 0xf0:
# Remove existing CRC when calculating new CRC
data = data[:-2]
crc = crc_hqx("".join(data).decode("hex"), crc)
# Check CRC
expected_crc = struct.unpack(">H",
"".join(setup_data[0xf0].decode("hex")[-2:]))[0]
assert crc == expected_crc
print("Setup Data:")
pprint(setup_data)
|
Add initial script for parsing Setup Data packets#!/usr/bin/env python
# Copyright (C) 2014 Instituto Nokia de Tecnologia - INdT
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from __future__ import print_function
import re
import argparse
from binascii import crc_hqx
from pprint import pprint
import struct
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Parse nRF8001 setup data.")
parser.add_argument("report", type=file,
help="report generated by nRFgoStudio (ublue_setup.gen.out.txt)")
args = parser.parse_args()
setup_data = None
crc = 0xffff
for line in args.report.readlines():
if line.strip() == "[Setup Data]":
setup_data = {}
if not line.strip() or setup_data is None:
continue
if not re.match("^[0-9A-F-]+$", line.strip()):
continue
data = line.strip().split("-")
assert int(data[0], 16) == len(data[1:])
# Opcode: Setup (0x06)
assert data[1] == '06'
target = int(data[2], 16)
if not setup_data.get(target):
setup_data[target] = ""
setup_data[target] += "".join(data[3:])
if target == 0xf0:
# Remove existing CRC when calculating new CRC
data = data[:-2]
crc = crc_hqx("".join(data).decode("hex"), crc)
# Check CRC
expected_crc = struct.unpack(">H",
"".join(setup_data[0xf0].decode("hex")[-2:]))[0]
assert crc == expected_crc
print("Setup Data:")
pprint(setup_data)
|
<commit_before><commit_msg>Add initial script for parsing Setup Data packets<commit_after>#!/usr/bin/env python
# Copyright (C) 2014 Instituto Nokia de Tecnologia - INdT
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from __future__ import print_function
import re
import argparse
from binascii import crc_hqx
from pprint import pprint
import struct
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Parse nRF8001 setup data.")
parser.add_argument("report", type=file,
help="report generated by nRFgoStudio (ublue_setup.gen.out.txt)")
args = parser.parse_args()
setup_data = None
crc = 0xffff
for line in args.report.readlines():
if line.strip() == "[Setup Data]":
setup_data = {}
if not line.strip() or setup_data is None:
continue
if not re.match("^[0-9A-F-]+$", line.strip()):
continue
data = line.strip().split("-")
assert int(data[0], 16) == len(data[1:])
# Opcode: Setup (0x06)
assert data[1] == '06'
target = int(data[2], 16)
if not setup_data.get(target):
setup_data[target] = ""
setup_data[target] += "".join(data[3:])
if target == 0xf0:
# Remove existing CRC when calculating new CRC
data = data[:-2]
crc = crc_hqx("".join(data).decode("hex"), crc)
# Check CRC
expected_crc = struct.unpack(">H",
"".join(setup_data[0xf0].decode("hex")[-2:]))[0]
assert crc == expected_crc
print("Setup Data:")
pprint(setup_data)
|
|
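The checksum above is CRC-CCITT as implemented by binascii.crc_hqx, seeded with 0xffff and updated packet by packet. A minimal Python 3 sketch of the same running-CRC pattern — the packet hex strings are invented, not real nRF8001 setup data:

from binascii import crc_hqx, unhexlify

crc = 0xffff  # same seed the parser starts from
for packet_hex in ('0106f001aa', '0206f002bb'):  # invented setup packets
    crc = crc_hqx(unhexlify(packet_hex), crc)
print(hex(crc))  # running CRC-CCITT over the concatenated packet bytes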
6ce12955861c1f586d69dedffc014a1d5bab5432
|
util/item_name_gen.py
|
util/item_name_gen.py
|
'''Script to help generate item names.'''
def int_to_str(num, alphabet):
'''Convert integer to string.'''
# http://stackoverflow.com/a/1119769/1524507
if (num == 0):
return alphabet[0]
arr = []
base = len(alphabet)
while num:
rem = num % base
num = num // base
arr.append(alphabet[rem])
arr.reverse()
return ''.join(arr)
def main():
start_num = 0
end_num = 868128192
pics_per_item = 100
alphabet = '0123456789abcdefghijklmnopqrstuvwxyz'
counter = start_num
while True:
lower = counter
upper = min(counter + pics_per_item - 1, end_num)
print('image:{0}:{1}'.format(
int_to_str(lower, alphabet),
int_to_str(upper, alphabet)
))
counter += pics_per_item
if counter > end_num:
break
if __name__ == '__main__':
main()
|
Add item name gen script
|
Add item name gen script
|
Python
|
unlicense
|
ArchiveTeam/twitpic-grab2,ArchiveTeam/twitpic-grab2
|
Add item name gen script
|
'''Script to help generate item names.'''
def int_to_str(num, alphabet):
'''Convert integer to string.'''
# http://stackoverflow.com/a/1119769/1524507
if (num == 0):
return alphabet[0]
arr = []
base = len(alphabet)
while num:
rem = num % base
num = num // base
arr.append(alphabet[rem])
arr.reverse()
return ''.join(arr)
def main():
start_num = 0
end_num = 868128192
pics_per_item = 100
alphabet = '0123456789abcdefghijklmnopqrstuvwxyz'
counter = start_num
while True:
lower = counter
upper = min(counter + pics_per_item - 1, end_num)
print('image:{0}:{1}'.format(
int_to_str(lower, alphabet),
int_to_str(upper, alphabet)
))
counter += pics_per_item
if counter > end_num:
break
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add item name gen script<commit_after>
|
'''Script to help generate item names.'''
def int_to_str(num, alphabet):
'''Convert integer to string.'''
# http://stackoverflow.com/a/1119769/1524507
if (num == 0):
return alphabet[0]
arr = []
base = len(alphabet)
while num:
rem = num % base
num = num // base
arr.append(alphabet[rem])
arr.reverse()
return ''.join(arr)
def main():
start_num = 0
end_num = 868128192
pics_per_item = 100
alphabet = '0123456789abcdefghijklmnopqrstuvwxyz'
counter = start_num
while True:
lower = counter
upper = min(counter + pics_per_item - 1, end_num)
print('image:{0}:{1}'.format(
int_to_str(lower, alphabet),
int_to_str(upper, alphabet)
))
counter += pics_per_item
if counter > end_num:
break
if __name__ == '__main__':
main()
|
Add item name gen script'''Script to help generate item names.'''
def int_to_str(num, alphabet):
'''Convert integer to string.'''
# http://stackoverflow.com/a/1119769/1524507
if (num == 0):
return alphabet[0]
arr = []
base = len(alphabet)
while num:
rem = num % base
num = num // base
arr.append(alphabet[rem])
arr.reverse()
return ''.join(arr)
def main():
start_num = 0
end_num = 868128192
pics_per_item = 100
alphabet = '0123456789abcdefghijklmnopqrstuvwxyz'
counter = start_num
while True:
lower = counter
upper = min(counter + pics_per_item - 1, end_num)
print('image:{0}:{1}'.format(
int_to_str(lower, alphabet),
int_to_str(upper, alphabet)
))
counter += pics_per_item
if counter > end_num:
break
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add item name gen script<commit_after>'''Script to help generate item names.'''
def int_to_str(num, alphabet):
'''Convert integer to string.'''
# http://stackoverflow.com/a/1119769/1524507
if (num == 0):
return alphabet[0]
arr = []
base = len(alphabet)
while num:
rem = num % base
num = num // base
arr.append(alphabet[rem])
arr.reverse()
return ''.join(arr)
def main():
start_num = 0
end_num = 868128192
pics_per_item = 100
alphabet = '0123456789abcdefghijklmnopqrstuvwxyz'
counter = start_num
while True:
lower = counter
upper = min(counter + pics_per_item - 1, end_num)
print('image:{0}:{1}'.format(
int_to_str(lower, alphabet),
int_to_str(upper, alphabet)
))
counter += pics_per_item
if counter > end_num:
break
if __name__ == '__main__':
main()
|
|
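With the 36-character alphabet, int_to_str is ordinary base-36 encoding. An equivalent self-contained rewrite using divmod, plus the value it yields for the script's end_num:

alphabet = '0123456789abcdefghijklmnopqrstuvwxyz'

def int_to_str(num, alphabet):
    if num == 0:
        return alphabet[0]
    digits = []
    base = len(alphabet)
    while num:
        num, rem = divmod(num, base)
        digits.append(alphabet[rem])
    return ''.join(reversed(digits))

print(int_to_str(868128192, alphabet))  # 'ecv000' -- the range's end_num in base 36
print(int_to_str(0, alphabet))          # '0'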
65f8b529af8672ac074d4c45684d8e0f7baaab2c
|
ideascube/conf/kb_jor_croixrouge.py
|
ideascube/conf/kb_jor_croixrouge.py
|
# -*- coding: utf-8 -*-
"""KoomBook conf"""
from .kb import * # noqa
LANGUAGE_CODE = 'ar'
IDEASCUBE_NAME = 'Red Cross'
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'mediacenter',
},
{
'id': 'wikipedia',
'languages': ['ar', 'en']
},
{
'id': 'khanacademy',
},
{
'id': 'wiktionary',
'languages': ['en', 'ar']
},
{
'id': 'wikisource',
'languages': ['ar', 'en']
},
{
'id': 'vikidia',
'languages': ['en']
},
{
'id': 'gutenberg',
'lang': 'en',
},
{
'id': 'ted',
'sessions': [
('tedbusiness.en', 'Business'),
('teddesign.en', 'Design'),
('tedentertainment.en', 'Entertainment'),
('tedglobalissues.en', 'Global Issues'),
('tedscience.en', 'Science'),
('tedtechnology.en', 'Technology'),
]
},
{
'id': 'wikiquote',
'languages': ['ar', 'en']
},
{
'id': 'wikibooks',
'languages': ['ar', 'en']
},
{
'id': 'wikiversity',
'languages': ['en', 'ar']
},
{
'id': 'bil-tunisia',
'languages': ['ar']
},
]
|
Add conf for Red Cross Jordan
|
Add conf for Red Cross Jordan
|
Python
|
agpl-3.0
|
ideascube/ideascube,ideascube/ideascube,ideascube/ideascube,ideascube/ideascube
|
Add conf for Red Cross Jordan
|
# -*- coding: utf-8 -*-
"""KoomBook conf"""
from .kb import * # noqa
LANGUAGE_CODE = 'ar'
IDEASCUBE_NAME = 'Red Cross'
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'mediacenter',
},
{
'id': 'wikipedia',
'languages': ['ar', 'en']
},
{
'id': 'khanacademy',
},
{
'id': 'wiktionary',
'languages': ['en', 'ar']
},
{
'id': 'wikisource',
'languages': ['ar', 'en']
},
{
'id': 'vikidia',
'languages': ['en']
},
{
'id': 'gutenberg',
'lang': 'en',
},
{
'id': 'ted',
'sessions': [
('tedbusiness.en', 'Business'),
('teddesign.en', 'Design'),
('tedentertainment.en', 'Entertainment'),
('tedglobalissues.en', 'Global Issues'),
('tedscience.en', 'Science'),
('tedtechnology.en', 'Technology'),
]
},
{
'id': 'wikiquote',
'languages': ['ar', 'en']
},
{
'id': 'wikibooks',
'languages': ['ar', 'en']
},
{
'id': 'wikiversity',
'languages': ['en', 'ar']
},
{
'id': 'bil-tunisia',
'languages': ['ar']
},
]
|
<commit_before><commit_msg>Add conf for Red Cross Jordan<commit_after>
|
# -*- coding: utf-8 -*-
"""KoomBook conf"""
from .kb import * # noqa
LANGUAGE_CODE = 'ar'
IDEASCUBE_NAME = 'Red Cross'
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'mediacenter',
},
{
'id': 'wikipedia',
'languages': ['ar', 'en']
},
{
'id': 'khanacademy',
},
{
'id': 'wiktionary',
'languages': ['en', 'ar']
},
{
'id': 'wikisource',
'languages': ['ar', 'en']
},
{
'id': 'vikidia',
'languages': ['en']
},
{
'id': 'gutenberg',
'lang': 'en',
},
{
'id': 'ted',
'sessions': [
('tedbusiness.en', 'Business'),
('teddesign.en', 'Design'),
('tedentertainment.en', 'Entertainment'),
('tedglobalissues.en', 'Global Issues'),
('tedscience.en', 'Science'),
('tedtechnology.en', 'Technology'),
]
},
{
'id': 'wikiquote',
'languages': ['ar', 'en']
},
{
'id': 'wikibooks',
'languages': ['ar', 'en']
},
{
'id': 'wikiversity',
'languages': ['en', 'ar']
},
{
'id': 'bil-tunisia',
'languages': ['ar']
},
]
|
Add conf for Red Cross Jordan# -*- coding: utf-8 -*-
"""KoomBook conf"""
from .kb import * # noqa
LANGUAGE_CODE = 'ar'
IDEASCUBE_NAME = 'Red Cross'
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'mediacenter',
},
{
'id': 'wikipedia',
'languages': ['ar', 'en']
},
{
'id': 'khanacademy',
},
{
'id': 'wiktionary',
'languages': ['en', 'ar']
},
{
'id': 'wikisource',
'languages': ['ar', 'en']
},
{
'id': 'vikidia',
'languages': ['en']
},
{
'id': 'gutenberg',
'lang': 'en',
},
{
'id': 'ted',
'sessions': [
('tedbusiness.en', 'Business'),
('teddesign.en', 'Design'),
('tedentertainment.en', 'Entertainment'),
('tedglobalissues.en', 'Global Issues'),
('tedscience.en', 'Science'),
('tedtechnology.en', 'Technology'),
]
},
{
'id': 'wikiquote',
'languages': ['ar', 'en']
},
{
'id': 'wikibooks',
'languages': ['ar', 'en']
},
{
'id': 'wikiversity',
'languages': ['en', 'ar']
},
{
'id': 'bil-tunisia',
'languages': ['ar']
},
]
|
<commit_before><commit_msg>Add conf for Red Cross Jordan<commit_after># -*- coding: utf-8 -*-
"""KoomBook conf"""
from .kb import * # noqa
LANGUAGE_CODE = 'ar'
IDEASCUBE_NAME = 'Red Cross'
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'mediacenter',
},
{
'id': 'wikipedia',
'languages': ['ar', 'en']
},
{
'id': 'khanacademy',
},
{
'id': 'wiktionary',
'languages': ['en', 'ar']
},
{
'id': 'wikisource',
'languages': ['ar', 'en']
},
{
'id': 'vikidia',
'languages': ['en']
},
{
'id': 'gutenberg',
'lang': 'en',
},
{
'id': 'ted',
'sessions': [
('tedbusiness.en', 'Business'),
('teddesign.en', 'Design'),
('tedentertainment.en', 'Entertainment'),
('tedglobalissues.en', 'Global Issues'),
('tedscience.en', 'Science'),
('tedtechnology.en', 'Technology'),
]
},
{
'id': 'wikiquote',
'languages': ['ar', 'en']
},
{
'id': 'wikibooks',
'languages': ['ar', 'en']
},
{
'id': 'wikiversity',
'languages': ['en', 'ar']
},
{
'id': 'bil-tunisia',
'languages': ['ar']
},
]
|
|
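The conf above follows the repo's usual layering: star-import the shared KoomBook defaults, then override a handful of names per deployment. A minimal sketch of that pattern — the module and card names below are placeholders, not the real ideascube settings:

# shared_defaults.py (stands in for ideascube.conf.kb)
LANGUAGE_CODE = 'en'
STAFF_HOME_CARDS = [{'id': 'settings'}]

# site_conf.py -- one small module per deployment
from shared_defaults import *  # noqa: start from the shared defaults
LANGUAGE_CODE = 'ar'                              # per-site override
HOME_CARDS = STAFF_HOME_CARDS + [{'id': 'blog'}]  # extend, don't replace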
6d255990983405fe7eb1322c9d02414c859d1e96
|
tests/app/test_openid_listener.py
|
tests/app/test_openid_listener.py
|
# -*- coding: utf-8 -*-
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mock import patch, Mock
from parameterized import parameterized
from synapse.app.federation_reader import FederationReaderServer
from tests.unittest import HomeserverTestCase
@patch("synapse.app.homeserver.KeyApiV2Resource", new=Mock())
class FederationReaderOpenIDListenerTests(HomeserverTestCase):
def make_homeserver(self, reactor, clock):
hs = self.setup_test_homeserver(
http_client=None, homeserverToUse=FederationReaderServer,
)
return hs
@parameterized.expand([
(["federation"], "auth_fail"),
([], "no_resource"),
(["openid", "federation"], "auth_fail"),
(["openid"], "auth_fail"),
])
def test_openid_listener(self, names, expectation):
"""
Test different openid listener configurations.
401 is success here since it means we hit the handler and auth failed.
"""
config = {
"port": 8080,
"bind_addresses": ["0.0.0.0"],
"resources": [{"names": names}],
}
# Listen with the config
self.hs._listen_http(config)
# Grab the resource from the site that was told to listen
site = self.reactor.tcpServers[0][1]
try:
self.resource = (
site.resource.children[b"_matrix"].children[b"federation"].children[b"v1"]
)
except KeyError:
if expectation == "no_resource":
return
raise
request, channel = self.make_request("GET", "/_matrix/federation/v1/openid/userinfo")
self.render(request)
self.assertEqual(channel.code, 401)
|
Add tests for the openid listener for FederationReaderServer
|
Add tests for the openid listener for FederationReaderServer
Check all possible variants of openid and federation listener on/off
possibilities.
Signed-off-by: Jason Robinson <73c4b53c33b2f03b4e26d9578bea93e0ed8e16b0@matrix.org>
|
Python
|
apache-2.0
|
matrix-org/synapse,matrix-org/synapse,matrix-org/synapse,matrix-org/synapse,matrix-org/synapse,matrix-org/synapse
|
Add tests for the openid listener for FederationReaderServer
Check all possible variants of openid and federation listener on/off
possibilities.
Signed-off-by: Jason Robinson <73c4b53c33b2f03b4e26d9578bea93e0ed8e16b0@matrix.org>
|
# -*- coding: utf-8 -*-
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mock import patch, Mock
from parameterized import parameterized
from synapse.app.federation_reader import FederationReaderServer
from tests.unittest import HomeserverTestCase
@patch("synapse.app.homeserver.KeyApiV2Resource", new=Mock())
class FederationReaderOpenIDListenerTests(HomeserverTestCase):
def make_homeserver(self, reactor, clock):
hs = self.setup_test_homeserver(
http_client=None, homeserverToUse=FederationReaderServer,
)
return hs
@parameterized.expand([
(["federation"], "auth_fail"),
([], "no_resource"),
(["openid", "federation"], "auth_fail"),
(["openid"], "auth_fail"),
])
def test_openid_listener(self, names, expectation):
"""
Test different openid listener configurations.
401 is success here since it means we hit the handler and auth failed.
"""
config = {
"port": 8080,
"bind_addresses": ["0.0.0.0"],
"resources": [{"names": names}],
}
# Listen with the config
self.hs._listen_http(config)
# Grab the resource from the site that was told to listen
site = self.reactor.tcpServers[0][1]
try:
self.resource = (
site.resource.children[b"_matrix"].children[b"federation"].children[b"v1"]
)
except KeyError:
if expectation == "no_resource":
return
raise
request, channel = self.make_request("GET", "/_matrix/federation/v1/openid/userinfo")
self.render(request)
self.assertEqual(channel.code, 401)
|
<commit_before><commit_msg>Add tests for the openid listener for FederationReaderServer
Check all possible variants of openid and federation listener on/off
possibilities.
Signed-off-by: Jason Robinson <73c4b53c33b2f03b4e26d9578bea93e0ed8e16b0@matrix.org><commit_after>
|
# -*- coding: utf-8 -*-
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mock import patch, Mock
from parameterized import parameterized
from synapse.app.federation_reader import FederationReaderServer
from tests.unittest import HomeserverTestCase
@patch("synapse.app.homeserver.KeyApiV2Resource", new=Mock())
class FederationReaderOpenIDListenerTests(HomeserverTestCase):
def make_homeserver(self, reactor, clock):
hs = self.setup_test_homeserver(
http_client=None, homeserverToUse=FederationReaderServer,
)
return hs
@parameterized.expand([
(["federation"], "auth_fail"),
([], "no_resource"),
(["openid", "federation"], "auth_fail"),
(["openid"], "auth_fail"),
])
def test_openid_listener(self, names, expectation):
"""
Test different openid listener configurations.
401 is success here since it means we hit the handler and auth failed.
"""
config = {
"port": 8080,
"bind_addresses": ["0.0.0.0"],
"resources": [{"names": names}],
}
# Listen with the config
self.hs._listen_http(config)
# Grab the resource from the site that was told to listen
site = self.reactor.tcpServers[0][1]
try:
self.resource = (
site.resource.children[b"_matrix"].children[b"federation"].children[b"v1"]
)
except KeyError:
if expectation == "no_resource":
return
raise
request, channel = self.make_request("GET", "/_matrix/federation/v1/openid/userinfo")
self.render(request)
self.assertEqual(channel.code, 401)
|
Add tests for the openid listener for FederationReaderServer
Check all possible variants of openid and federation listener on/off
possibilities.
Signed-off-by: Jason Robinson <73c4b53c33b2f03b4e26d9578bea93e0ed8e16b0@matrix.org># -*- coding: utf-8 -*-
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mock import patch, Mock
from parameterized import parameterized
from synapse.app.federation_reader import FederationReaderServer
from tests.unittest import HomeserverTestCase
@patch("synapse.app.homeserver.KeyApiV2Resource", new=Mock())
class FederationReaderOpenIDListenerTests(HomeserverTestCase):
def make_homeserver(self, reactor, clock):
hs = self.setup_test_homeserver(
http_client=None, homeserverToUse=FederationReaderServer,
)
return hs
@parameterized.expand([
(["federation"], "auth_fail"),
([], "no_resource"),
(["openid", "federation"], "auth_fail"),
(["openid"], "auth_fail"),
])
def test_openid_listener(self, names, expectation):
"""
Test different openid listener configurations.
401 is success here since it means we hit the handler and auth failed.
"""
config = {
"port": 8080,
"bind_addresses": ["0.0.0.0"],
"resources": [{"names": names}],
}
# Listen with the config
self.hs._listen_http(config)
# Grab the resource from the site that was told to listen
site = self.reactor.tcpServers[0][1]
try:
self.resource = (
site.resource.children[b"_matrix"].children[b"federation"].children[b"v1"]
)
except KeyError:
if expectation == "no_resource":
return
raise
request, channel = self.make_request("GET", "/_matrix/federation/v1/openid/userinfo")
self.render(request)
self.assertEqual(channel.code, 401)
|
<commit_before><commit_msg>Add tests for the openid listener for FederationReaderServer
Check all possible variants of openid and federation listener on/off
possibilities.
Signed-off-by: Jason Robinson <73c4b53c33b2f03b4e26d9578bea93e0ed8e16b0@matrix.org><commit_after># -*- coding: utf-8 -*-
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mock import patch, Mock
from parameterized import parameterized
from synapse.app.federation_reader import FederationReaderServer
from tests.unittest import HomeserverTestCase
@patch("synapse.app.homeserver.KeyApiV2Resource", new=Mock())
class FederationReaderOpenIDListenerTests(HomeserverTestCase):
def make_homeserver(self, reactor, clock):
hs = self.setup_test_homeserver(
http_client=None, homeserverToUse=FederationReaderServer,
)
return hs
@parameterized.expand([
(["federation"], "auth_fail"),
([], "no_resource"),
(["openid", "federation"], "auth_fail"),
(["openid"], "auth_fail"),
])
def test_openid_listener(self, names, expectation):
"""
Test different openid listener configurations.
401 is success here since it means we hit the handler and auth failed.
"""
config = {
"port": 8080,
"bind_addresses": ["0.0.0.0"],
"resources": [{"names": names}],
}
# Listen with the config
self.hs._listen_http(config)
# Grab the resource from the site that was told to listen
site = self.reactor.tcpServers[0][1]
try:
self.resource = (
site.resource.children[b"_matrix"].children[b"federation"].children[b"v1"]
)
except KeyError:
if expectation == "no_resource":
return
raise
request, channel = self.make_request("GET", "/_matrix/federation/v1/openid/userinfo")
self.render(request)
self.assertEqual(channel.code, 401)
|
|
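parameterized.expand turns each tuple into its own generated test method, which is how the four listener configurations above run as four separate tests. A tiny standalone illustration:

import unittest
from parameterized import parameterized

class ListenerNamesDemo(unittest.TestCase):
    @parameterized.expand([
        (['federation'], True),
        ([], False),
        (['openid', 'federation'], True),
    ])
    def test_contains_federation(self, names, expected):
        self.assertEqual('federation' in names, expected)

# unittest discovery sees three methods: test_contains_federation_0, _1, _2.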
a24d0fce91f3f7a261a410d859ecd4250276fa28
|
python/smqtk/tests/utils/test_image_utils.py
|
python/smqtk/tests/utils/test_image_utils.py
|
import os
import unittest
import nose.tools as ntools
from smqtk.representation.data_element.file_element import DataFileElement
from smqtk.tests import TEST_DATA_DIR
from smqtk.utils.image_utils import is_loadable_image, is_valid_element
class TestIsLoadableImage(unittest.TestCase):
def setUp(self):
self.good_image = DataFileElement(os.path.join(TEST_DATA_DIR,
'Lenna.png'))
self.non_image = DataFileElement(os.path.join(TEST_DATA_DIR,
'test_file.dat'))
@ntools.raises(AttributeError)
def test_non_data_element_raises_exception(self):
# should throw:
# AttributeError: 'bool' object has no attribute 'get_bytes'
is_loadable_image(False)
def test_unloadable_image_returns_false(self):
assert is_loadable_image(self.non_image) == False
def test_unloadable_image_logs_warning(self):
pass
def test_loadable_image_returns_true(self):
assert is_loadable_image(self.good_image) == True
class TestIsValidElement(unittest.TestCase):
def setUp(self):
self.good_image = DataFileElement(os.path.join(TEST_DATA_DIR,
'Lenna.png'))
self.non_image = DataFileElement(os.path.join(TEST_DATA_DIR,
'test_file.dat'))
def test_non_data_element(self):
assert is_valid_element(False) == False
def test_invalid_content_type(self):
# test it logs to debug
assert is_valid_element(self.good_image, valid_content_types=[]) == False
def test_valid_content_type(self):
assert is_valid_element(self.good_image,
valid_content_types=['image/png']) == True
def test_invalid_image_returns_false(self):
assert is_valid_element(self.non_image, check_image=True) == False
|
Add unit tests for image_utils
|
Add unit tests for image_utils
|
Python
|
bsd-3-clause
|
Purg/SMQTK,Purg/SMQTK,Purg/SMQTK,Purg/SMQTK,Purg/SMQTK,Purg/SMQTK,Purg/SMQTK,Purg/SMQTK,Purg/SMQTK
|
Add unit tests for image_utils
|
import os
import unittest
import nose.tools as ntools
from smqtk.representation.data_element.file_element import DataFileElement
from smqtk.tests import TEST_DATA_DIR
from smqtk.utils.image_utils import is_loadable_image, is_valid_element
class TestIsLoadableImage(unittest.TestCase):
def setUp(self):
self.good_image = DataFileElement(os.path.join(TEST_DATA_DIR,
'Lenna.png'))
self.non_image = DataFileElement(os.path.join(TEST_DATA_DIR,
'test_file.dat'))
@ntools.raises(AttributeError)
def test_non_data_element_raises_exception(self):
# should throw:
# AttributeError: 'bool' object has no attribute 'get_bytes'
is_loadable_image(False)
def test_unloadable_image_returns_false(self):
assert is_loadable_image(self.non_image) == False
def test_unloadable_image_logs_warning(self):
pass
def test_loadable_image_returns_true(self):
assert is_loadable_image(self.good_image) == True
class TestIsValidElement(unittest.TestCase):
def setUp(self):
self.good_image = DataFileElement(os.path.join(TEST_DATA_DIR,
'Lenna.png'))
self.non_image = DataFileElement(os.path.join(TEST_DATA_DIR,
'test_file.dat'))
def test_non_data_element(self):
assert is_valid_element(False) == False
def test_invalid_content_type(self):
# test it logs to debug
assert is_valid_element(self.good_image, valid_content_types=[]) == False
def test_valid_content_type(self):
assert is_valid_element(self.good_image,
valid_content_types=['image/png']) == True
def test_invalid_image_returns_false(self):
assert is_valid_element(self.non_image, check_image=True) == False
|
<commit_before><commit_msg>Add unit tests for image_utils<commit_after>
|
import os
import unittest
import nose.tools as ntools
from smqtk.representation.data_element.file_element import DataFileElement
from smqtk.tests import TEST_DATA_DIR
from smqtk.utils.image_utils import is_loadable_image, is_valid_element
class TestIsLoadableImage(unittest.TestCase):
def setUp(self):
self.good_image = DataFileElement(os.path.join(TEST_DATA_DIR,
'Lenna.png'))
self.non_image = DataFileElement(os.path.join(TEST_DATA_DIR,
'test_file.dat'))
@ntools.raises(AttributeError)
def test_non_data_element_raises_exception(self):
# should throw:
# AttributeError: 'bool' object has no attribute 'get_bytes'
is_loadable_image(False)
def test_unloadable_image_returns_false(self):
assert is_loadable_image(self.non_image) == False
def test_unloadable_image_logs_warning(self):
pass
def test_loadable_image_returns_true(self):
assert is_loadable_image(self.good_image) == True
class TestIsValidElement(unittest.TestCase):
def setUp(self):
self.good_image = DataFileElement(os.path.join(TEST_DATA_DIR,
'Lenna.png'))
self.non_image = DataFileElement(os.path.join(TEST_DATA_DIR,
'test_file.dat'))
def test_non_data_element(self):
assert is_valid_element(False) == False
def test_invalid_content_type(self):
# test it logs to debug
assert is_valid_element(self.good_image, valid_content_types=[]) == False
def test_valid_content_type(self):
assert is_valid_element(self.good_image,
valid_content_types=['image/png']) == True
def test_invalid_image_returns_false(self):
assert is_valid_element(self.non_image, check_image=True) == False
|
Add unit tests for image_utilsimport os
import unittest
import nose.tools as ntools
from smqtk.representation.data_element.file_element import DataFileElement
from smqtk.tests import TEST_DATA_DIR
from smqtk.utils.image_utils import is_loadable_image, is_valid_element
class TestIsLoadableImage(unittest.TestCase):
def setUp(self):
self.good_image = DataFileElement(os.path.join(TEST_DATA_DIR,
'Lenna.png'))
self.non_image = DataFileElement(os.path.join(TEST_DATA_DIR,
'test_file.dat'))
@ntools.raises(AttributeError)
def test_non_data_element_raises_exception(self):
# should throw:
# AttributeError: 'bool' object has no attribute 'get_bytes'
is_loadable_image(False)
def test_unloadable_image_returns_false(self):
assert is_loadable_image(self.non_image) == False
def test_unloadable_image_logs_warning(self):
pass
def test_loadable_image_returns_true(self):
assert is_loadable_image(self.good_image) == True
class TestIsValidElement(unittest.TestCase):
def setUp(self):
self.good_image = DataFileElement(os.path.join(TEST_DATA_DIR,
'Lenna.png'))
self.non_image = DataFileElement(os.path.join(TEST_DATA_DIR,
'test_file.dat'))
def test_non_data_element(self):
assert is_valid_element(False) == False
def test_invalid_content_type(self):
# test it logs to debug
assert is_valid_element(self.good_image, valid_content_types=[]) == False
def test_valid_content_type(self):
assert is_valid_element(self.good_image,
valid_content_types=['image/png']) == True
def test_invalid_image_returns_false(self):
assert is_valid_element(self.non_image, check_image=True) == False
|
<commit_before><commit_msg>Add unit tests for image_utils<commit_after>import os
import unittest
import nose.tools as ntools
from smqtk.representation.data_element.file_element import DataFileElement
from smqtk.tests import TEST_DATA_DIR
from smqtk.utils.image_utils import is_loadable_image, is_valid_element
class TestIsLoadableImage(unittest.TestCase):
def setUp(self):
self.good_image = DataFileElement(os.path.join(TEST_DATA_DIR,
'Lenna.png'))
self.non_image = DataFileElement(os.path.join(TEST_DATA_DIR,
'test_file.dat'))
@ntools.raises(AttributeError)
def test_non_data_element_raises_exception(self):
# should throw:
# AttributeError: 'bool' object has no attribute 'get_bytes'
is_loadable_image(False)
def test_unloadable_image_returns_false(self):
assert is_loadable_image(self.non_image) == False
def test_unloadable_image_logs_warning(self):
pass
def test_loadable_image_returns_true(self):
assert is_loadable_image(self.good_image) == True
class TestIsValidElement(unittest.TestCase):
def setUp(self):
self.good_image = DataFileElement(os.path.join(TEST_DATA_DIR,
'Lenna.png'))
self.non_image = DataFileElement(os.path.join(TEST_DATA_DIR,
'test_file.dat'))
def test_non_data_element(self):
assert is_valid_element(False) == False
def test_invalid_content_type(self):
# test it logs to debug
assert is_valid_element(self.good_image, valid_content_types=[]) == False
def test_valid_content_type(self):
assert is_valid_element(self.good_image,
valid_content_types=['image/png']) == True
def test_invalid_image_returns_false(self):
assert is_valid_element(self.non_image, check_image=True) == False
|
|
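The @ntools.raises decorator used above makes a test pass only if its body raises the named exception — nose's counterpart to assertRaises. A minimal sketch mirroring the bool/get_bytes case:

import nose.tools as ntools

@ntools.raises(AttributeError)
def test_bool_has_no_get_bytes():
    # passes only because the body raises AttributeError, as in the test above
    False.get_bytes()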
4a89c8c136f4af1ec699a53211a9423efe33c3be
|
libbmc/papers/tearpages.py
|
libbmc/papers/tearpages.py
|
"""
This file contains the necessary functions to determine whether we should tear
the first page from a PDF file, and actually tear it.
"""
import tearpages
# List of bad publishers which add an extra useless first page, which can be
# torn. Please submit a PR to include new ones which I may not be aware of!
BAD_PUBLISHERS = [
"IOP"
]
def tearpage_needed(bibtex):
"""
    Check whether a given paper needs the first page to be torn or not.
:params bibtex: The bibtex entry associated to the paper, to guess \
whether tearing is needed.
    :returns: A boolean indicating whether the first page should be torn or not.
"""
# For each bad publisher, look for it in the publisher bibtex entry
has_bad_publisher = [p in bibtex.get("publisher", [])
for p in BAD_PUBLISHERS]
# Return True iff there is at least one bad publisher
return (True in has_bad_publisher)
def tearpage(filename, bibtex=None):
"""
Tear the first page of the file if needed.
:params filename: Path to the file to handle.
:params bibtex: BibTeX dict associated to this file, as the one given by \
``bibtexparser``.
    :returns: A boolean indicating whether the file has been torn or not. \
Side effect is tearing the first page from the file.
"""
if bibtex is not None and tearpage_needed(bibtex):
# If tearing is needed, do it and return True
tearpages.tearpage(filename)
return True
# Else, simply return False
return False
|
Add some functions to tear first pages from a PDF
|
Add some functions to tear first pages from a PDF
|
Python
|
mit
|
Phyks/libbmc
|
Add some functions to tear first pages from a PDF
|
"""
This file contains the necessary functions to determine whether we should tear
the first page from a PDF file, and actually tear it.
"""
import tearpages
# List of bad publishers which add an extra useless first page, which can be
# torn. Please submit a PR to include new ones which I may not be aware of!
BAD_PUBLISHERS = [
"IOP"
]
def tearpage_needed(bibtex):
"""
    Check whether a given paper needs the first page to be torn or not.
:params bibtex: The bibtex entry associated to the paper, to guess \
whether tearing is needed.
    :returns: A boolean indicating whether the first page should be torn or not.
"""
# For each bad publisher, look for it in the publisher bibtex entry
has_bad_publisher = [p in bibtex.get("publisher", [])
for p in BAD_PUBLISHERS]
# Return True iff there is at least one bad publisher
return (True in has_bad_publisher)
def tearpage(filename, bibtex=None):
"""
Tear the first page of the file if needed.
:params filename: Path to the file to handle.
:params bibtex: BibTeX dict associated to this file, as the one given by \
``bibtexparser``.
    :returns: A boolean indicating whether the file has been torn or not. \
Side effect is tearing the first page from the file.
"""
if bibtex is not None and tearpage_needed(bibtex):
# If tearing is needed, do it and return True
tearpages.tearpage(filename)
return True
# Else, simply return False
return False
|
<commit_before><commit_msg>Add some functions to tear first pages from a PDF<commit_after>
|
"""
This file contains the necessary functions to determine whether we should tear
the first page from a PDF file, and actually tear it.
"""
import tearpages
# List of bad publishers which add an extra useless first page, which can be
# torn. Please submit a PR to include new ones which I may not be aware of!
BAD_PUBLISHERS = [
"IOP"
]
def tearpage_needed(bibtex):
"""
    Check whether a given paper needs the first page to be torn or not.
:params bibtex: The bibtex entry associated to the paper, to guess \
whether tearing is needed.
    :returns: A boolean indicating whether the first page should be torn or not.
"""
# For each bad publisher, look for it in the publisher bibtex entry
has_bad_publisher = [p in bibtex.get("publisher", [])
for p in BAD_PUBLISHERS]
# Return True iff there is at least one bad publisher
return (True in has_bad_publisher)
def tearpage(filename, bibtex=None):
"""
Tear the first page of the file if needed.
:params filename: Path to the file to handle.
:params bibtex: BibTeX dict associated to this file, as the one given by \
``bibtexparser``.
    :returns: A boolean indicating whether the file has been torn or not. \
Side effect is tearing the first page from the file.
"""
if bibtex is not None and tearpage_needed(bibtex):
# If tearing is needed, do it and return True
tearpages.tearpage(filename)
return True
# Else, simply return False
return False
|
Add some functions to tear first pages from a PDF"""
This file contains the necessary functions to determine whether we should tear
the first page from a PDF file, and actually tear it.
"""
import tearpages
# List of bad publishers which add an extra useless first page, which can be
# torn. Please submit a PR to include new ones which I may not be aware of!
BAD_PUBLISHERS = [
"IOP"
]
def tearpage_needed(bibtex):
"""
    Check whether a given paper needs the first page to be torn or not.
:params bibtex: The bibtex entry associated to the paper, to guess \
whether tearing is needed.
    :returns: A boolean indicating whether the first page should be torn or not.
"""
# For each bad publisher, look for it in the publisher bibtex entry
has_bad_publisher = [p in bibtex.get("publisher", [])
for p in BAD_PUBLISHERS]
# Return True iff there is at least one bad publisher
return (True in has_bad_publisher)
def tearpage(filename, bibtex=None):
"""
Tear the first page of the file if needed.
:params filename: Path to the file to handle.
:params bibtex: BibTeX dict associated to this file, as the one given by \
``bibtexparser``.
    :returns: A boolean indicating whether the file has been torn or not. \
Side effect is tearing the first page from the file.
"""
if bibtex is not None and tearpage_needed(bibtex):
# If tearing is needed, do it and return True
tearpages.tearpage(filename)
return True
# Else, simply return False
return False
|
<commit_before><commit_msg>Add some functions to tear first pages from a PDF<commit_after>"""
This file contains the necessary functions to determine whether we should tear
the first page from a PDF file, and actually tear it.
"""
import tearpages
# List of bad publishers which add an extra useless first page, which can be
# torn. Please submit a PR to include new ones which I may not be aware of!
BAD_PUBLISHERS = [
"IOP"
]
def tearpage_needed(bibtex):
"""
    Check whether a given paper needs the first page to be torn or not.
:params bibtex: The bibtex entry associated to the paper, to guess \
whether tearing is needed.
    :returns: A boolean indicating whether the first page should be torn or not.
"""
# For each bad publisher, look for it in the publisher bibtex entry
has_bad_publisher = [p in bibtex.get("publisher", [])
for p in BAD_PUBLISHERS]
# Return True iff there is at least one bad publisher
return (True in has_bad_publisher)
def tearpage(filename, bibtex=None):
"""
Tear the first page of the file if needed.
:params filename: Path to the file to handle.
    :params bibtex: BibTeX dict associated with this file, as the one given by \
            ``bibtexparser``.
    :returns: A boolean indicating whether the first page has been torn or not. \
Side effect is tearing the first page from the file.
"""
if bibtex is not None and tearpage_needed(bibtex):
# If tearing is needed, do it and return True
tearpages.tearpage(filename)
return True
# Else, simply return False
return False
|
|
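For readers skimming the record above, a minimal usage sketch of the tearpage() helper follows. The file path and bibtex entry are hypothetical, and it assumes the tearpages dependency imported by the module is installed; nothing here is part of the recorded commit.
# Hypothetical usage of the helpers above; the path and entry are
# invented for illustration only.
entry = {"publisher": "IOP Publishing", "title": "An example paper"}
if tearpage("paper.pdf", bibtex=entry):
    # "IOP" matches as a substring of the publisher field, so the
    # first page is removed in place.
    print("First page torn from paper.pdf")
else:
    print("Publisher not in BAD_PUBLISHERS; file left untouched")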
69fe4ba3cc0338b4cd962e0571b9ae1d54e139ee
|
website/addons/base/serializer.py
|
website/addons/base/serializer.py
|
import abc
from website.util import web_url_for
class AddonSerializer(object):
__metaclass__ = abc.ABCMeta
def __init__(self, addon_node_settings, user):
self.addon_node_settings = addon_node_settings
self.user = user
@abc.abstractproperty
def serialized_urls(self):
pass
@abc.abstractproperty
def has_valid_credentials(self):
pass
@abc.abstractproperty
def node_has_auth(self):
pass
@abc.abstractproperty
def user_has_auth(self):
pass
@abc.abstractproperty
def user_is_owner(self):
pass
@abc.abstractproperty
def credentials_owner(self):
pass
@property
def serialized_settings(self):
node_has_auth = self.node_has_auth
result = {
'nodeHasAuth': node_has_auth,
'userHasAuth': self.user_has_auth,
'userIsOwner': self.user_is_owner,
'validCredentials': self.has_valid_credentials,
'urls': self.serialized_urls,
}
if node_has_auth:
owner = self.credentials_owner
if owner:
result['urls']['owner'] = web_url_for('profile_view_id',
uid=owner._primary_key)
result['ownerName'] = owner.fullname
return result
class StorageAddonSerializer(AddonSerializer):
__metaclass__ = abc.ABCMeta
@abc.abstractproperty
def serialized_folder(self):
pass
@property
def serialized_settings(self):
result = super(StorageAddonSerializer, self).serialized_settings
result['folder'] = self.serialized_folder
return result
class CitationsAddonSerializer(AddonSerializer):
__metaclass__ = abc.ABCMeta
|
Add base class for serializing addons.
|
Add base class for serializing addons.
|
Python
|
apache-2.0
|
SSJohns/osf.io,chrisseto/osf.io,aaxelb/osf.io,Nesiehr/osf.io,petermalcolm/osf.io,kwierman/osf.io,MerlinZhang/osf.io,ZobairAlijan/osf.io,SSJohns/osf.io,caseyrygt/osf.io,DanielSBrown/osf.io,baylee-d/osf.io,jmcarp/osf.io,ticklemepierce/osf.io,CenterForOpenScience/osf.io,njantrania/osf.io,asanfilippo7/osf.io,HarryRybacki/osf.io,jeffreyliu3230/osf.io,HarryRybacki/osf.io,cldershem/osf.io,icereval/osf.io,abought/osf.io,lamdnhan/osf.io,KAsante95/osf.io,danielneis/osf.io,mluke93/osf.io,zkraime/osf.io,bdyetton/prettychart,Johnetordoff/osf.io,monikagrabowska/osf.io,barbour-em/osf.io,zamattiac/osf.io,brandonPurvis/osf.io,monikagrabowska/osf.io,DanielSBrown/osf.io,felliott/osf.io,samchrisinger/osf.io,rdhyee/osf.io,jnayak1/osf.io,cosenal/osf.io,CenterForOpenScience/osf.io,samchrisinger/osf.io,chrisseto/osf.io,MerlinZhang/osf.io,zamattiac/osf.io,RomanZWang/osf.io,lyndsysimon/osf.io,njantrania/osf.io,mluke93/osf.io,felliott/osf.io,mluo613/osf.io,kushG/osf.io,hmoco/osf.io,sbt9uc/osf.io,mfraezz/osf.io,kushG/osf.io,zamattiac/osf.io,reinaH/osf.io,bdyetton/prettychart,ZobairAlijan/osf.io,HalcyonChimera/osf.io,acshi/osf.io,emetsger/osf.io,saradbowman/osf.io,abought/osf.io,chennan47/osf.io,GageGaskins/osf.io,wearpants/osf.io,KAsante95/osf.io,kch8qx/osf.io,cwisecarver/osf.io,monikagrabowska/osf.io,emetsger/osf.io,kch8qx/osf.io,hmoco/osf.io,reinaH/osf.io,cldershem/osf.io,RomanZWang/osf.io,HalcyonChimera/osf.io,Johnetordoff/osf.io,ticklemepierce/osf.io,dplorimer/osf,mluo613/osf.io,GageGaskins/osf.io,cslzchen/osf.io,lamdnhan/osf.io,GageGaskins/osf.io,billyhunt/osf.io,GaryKriebel/osf.io,GaryKriebel/osf.io,samanehsan/osf.io,wearpants/osf.io,petermalcolm/osf.io,adlius/osf.io,asanfilippo7/osf.io,KAsante95/osf.io,HarryRybacki/osf.io,cosenal/osf.io,acshi/osf.io,felliott/osf.io,caseyrollins/osf.io,billyhunt/osf.io,TomBaxter/osf.io,rdhyee/osf.io,samchrisinger/osf.io,zkraime/osf.io,caneruguz/osf.io,KAsante95/osf.io,sloria/osf.io,lyndsysimon/osf.io,lyndsysimon/osf.io,petermalcolm/osf.io,samanehsan/osf.io,zachjanicki/osf.io,caseyrollins/osf.io,himanshuo/osf.io,barbour-em/osf.io,fabianvf/osf.io,chennan47/osf.io,cslzchen/osf.io,chennan47/osf.io,TomHeatwole/osf.io,brianjgeiger/osf.io,alexschiller/osf.io,acshi/osf.io,caseyrollins/osf.io,DanielSBrown/osf.io,adlius/osf.io,njantrania/osf.io,fabianvf/osf.io,mluke93/osf.io,sloria/osf.io,caneruguz/osf.io,petermalcolm/osf.io,GageGaskins/osf.io,adlius/osf.io,barbour-em/osf.io,amyshi188/osf.io,binoculars/osf.io,Nesiehr/osf.io,cldershem/osf.io,mfraezz/osf.io,cosenal/osf.io,jolene-esposito/osf.io,caseyrygt/osf.io,wearpants/osf.io,doublebits/osf.io,bdyetton/prettychart,RomanZWang/osf.io,sbt9uc/osf.io,MerlinZhang/osf.io,caseyrygt/osf.io,CenterForOpenScience/osf.io,pattisdr/osf.io,hmoco/osf.io,ticklemepierce/osf.io,haoyuchen1992/osf.io,acshi/osf.io,baylee-d/osf.io,brianjgeiger/osf.io,samanehsan/osf.io,alexschiller/osf.io,doublebits/osf.io,doublebits/osf.io,doublebits/osf.io,caseyrygt/osf.io,mfraezz/osf.io,revanthkolli/osf.io,cwisecarver/osf.io,emetsger/osf.io,jinluyuan/osf.io,jmcarp/osf.io,reinaH/osf.io,rdhyee/osf.io,jolene-esposito/osf.io,saradbowman/osf.io,barbour-em/osf.io,lamdnhan/osf.io,GaryKriebel/osf.io,GaryKriebel/osf.io,KAsante95/osf.io,amyshi188/osf.io,lyndsysimon/osf.io,binoculars/osf.io,caneruguz/osf.io,brandonPurvis/osf.io,cwisecarver/osf.io,ticklemepierce/osf.io,ckc6cz/osf.io,kushG/osf.io,jmcarp/osf.io,reinaH/osf.io,haoyuchen1992/osf.io,jnayak1/osf.io,TomBaxter/osf.io,mluo613/osf.io,emetsger/osf.io,leb2dg/osf.io,alexschiller/osf.io,kch8qx/osf.io,bdyetton/prettychart,jolene-esposito
/osf.io,RomanZWang/osf.io,kwierman/osf.io,jeffreyliu3230/osf.io,mattclark/osf.io,cosenal/osf.io,wearpants/osf.io,jeffreyliu3230/osf.io,billyhunt/osf.io,asanfilippo7/osf.io,leb2dg/osf.io,ckc6cz/osf.io,revanthkolli/osf.io,ZobairAlijan/osf.io,cwisecarver/osf.io,chrisseto/osf.io,fabianvf/osf.io,zkraime/osf.io,Johnetordoff/osf.io,erinspace/osf.io,dplorimer/osf,ckc6cz/osf.io,revanthkolli/osf.io,brandonPurvis/osf.io,arpitar/osf.io,mattclark/osf.io,pattisdr/osf.io,aaxelb/osf.io,Ghalko/osf.io,danielneis/osf.io,SSJohns/osf.io,jnayak1/osf.io,lamdnhan/osf.io,TomBaxter/osf.io,caneruguz/osf.io,billyhunt/osf.io,felliott/osf.io,Ghalko/osf.io,arpitar/osf.io,GageGaskins/osf.io,arpitar/osf.io,pattisdr/osf.io,himanshuo/osf.io,zachjanicki/osf.io,revanthkolli/osf.io,abought/osf.io,mluke93/osf.io,himanshuo/osf.io,laurenrevere/osf.io,TomHeatwole/osf.io,leb2dg/osf.io,mluo613/osf.io,CenterForOpenScience/osf.io,adlius/osf.io,billyhunt/osf.io,himanshuo/osf.io,abought/osf.io,haoyuchen1992/osf.io,TomHeatwole/osf.io,icereval/osf.io,Ghalko/osf.io,mattclark/osf.io,SSJohns/osf.io,crcresearch/osf.io,alexschiller/osf.io,kushG/osf.io,alexschiller/osf.io,zkraime/osf.io,kwierman/osf.io,jinluyuan/osf.io,laurenrevere/osf.io,MerlinZhang/osf.io,acshi/osf.io,leb2dg/osf.io,brandonPurvis/osf.io,jeffreyliu3230/osf.io,hmoco/osf.io,amyshi188/osf.io,aaxelb/osf.io,HalcyonChimera/osf.io,brianjgeiger/osf.io,erinspace/osf.io,jmcarp/osf.io,jolene-esposito/osf.io,jinluyuan/osf.io,asanfilippo7/osf.io,Johnetordoff/osf.io,brandonPurvis/osf.io,cldershem/osf.io,samchrisinger/osf.io,rdhyee/osf.io,monikagrabowska/osf.io,HarryRybacki/osf.io,zachjanicki/osf.io,TomHeatwole/osf.io,danielneis/osf.io,binoculars/osf.io,monikagrabowska/osf.io,ckc6cz/osf.io,jnayak1/osf.io,crcresearch/osf.io,jinluyuan/osf.io,crcresearch/osf.io,HalcyonChimera/osf.io,kch8qx/osf.io,njantrania/osf.io,icereval/osf.io,kwierman/osf.io,RomanZWang/osf.io,zachjanicki/osf.io,sbt9uc/osf.io,fabianvf/osf.io,Nesiehr/osf.io,samanehsan/osf.io,kch8qx/osf.io,baylee-d/osf.io,cslzchen/osf.io,cslzchen/osf.io,doublebits/osf.io,danielneis/osf.io,sloria/osf.io,mluo613/osf.io,Nesiehr/osf.io,erinspace/osf.io,dplorimer/osf,arpitar/osf.io,amyshi188/osf.io,haoyuchen1992/osf.io,sbt9uc/osf.io,brianjgeiger/osf.io,Ghalko/osf.io,zamattiac/osf.io,DanielSBrown/osf.io,mfraezz/osf.io,laurenrevere/osf.io,aaxelb/osf.io,ZobairAlijan/osf.io,chrisseto/osf.io,dplorimer/osf
|
Add base class for serializing addons.
|
import abc
from website.util import web_url_for
class AddonSerializer(object):
__metaclass__ = abc.ABCMeta
def __init__(self, addon_node_settings, user):
self.addon_node_settings = addon_node_settings
self.user = user
@abc.abstractproperty
def serialized_urls(self):
pass
@abc.abstractproperty
def has_valid_credentials(self):
pass
@abc.abstractproperty
def node_has_auth(self):
pass
@abc.abstractproperty
def user_has_auth(self):
pass
@abc.abstractproperty
def user_is_owner(self):
pass
@abc.abstractproperty
def credentials_owner(self):
pass
@property
def serialized_settings(self):
node_has_auth = self.node_has_auth
result = {
'nodeHasAuth': node_has_auth,
'userHasAuth': self.user_has_auth,
'userIsOwner': self.user_is_owner,
'validCredentials': self.has_valid_credentials,
'urls': self.serialized_urls,
}
if node_has_auth:
owner = self.credentials_owner
if owner:
result['urls']['owner'] = web_url_for('profile_view_id',
uid=owner._primary_key)
result['ownerName'] = owner.fullname
return result
class StorageAddonSerializer(AddonSerializer):
__metaclass__ = abc.ABCMeta
@abc.abstractproperty
def serialized_folder(self):
pass
@property
def serialized_settings(self):
result = super(StorageAddonSerializer, self).serialized_settings
result['folder'] = self.serialized_folder
return result
class CitationsAddonSerializer(AddonSerializer):
__metaclass__ = abc.ABCMeta
|
<commit_before><commit_msg>Add base class for serializing addons.<commit_after>
|
import abc
from website.util import web_url_for
class AddonSerializer(object):
__metaclass__ = abc.ABCMeta
def __init__(self, addon_node_settings, user):
self.addon_node_settings = addon_node_settings
self.user = user
@abc.abstractproperty
def serialized_urls(self):
pass
@abc.abstractproperty
def has_valid_credentials(self):
pass
@abc.abstractproperty
def node_has_auth(self):
pass
@abc.abstractproperty
def user_has_auth(self):
pass
@abc.abstractproperty
def user_is_owner(self):
pass
@abc.abstractproperty
def credentials_owner(self):
pass
@property
def serialized_settings(self):
node_has_auth = self.node_has_auth
result = {
'nodeHasAuth': node_has_auth,
'userHasAuth': self.user_has_auth,
'userIsOwner': self.user_is_owner,
'validCredentials': self.has_valid_credentials,
'urls': self.serialized_urls,
}
if node_has_auth:
owner = self.credentials_owner
if owner:
result['urls']['owner'] = web_url_for('profile_view_id',
uid=owner._primary_key)
result['ownerName'] = owner.fullname
return result
class StorageAddonSerializer(AddonSerializer):
__metaclass__ = abc.ABCMeta
@abc.abstractproperty
def serialized_folder(self):
pass
@property
def serialized_settings(self):
result = super(StorageAddonSerializer, self).serialized_settings
result['folder'] = self.serialized_folder
return result
class CitationsAddonSerializer(AddonSerializer):
__metaclass__ = abc.ABCMeta
|
Add base class for serializing addons.import abc
from website.util import web_url_for
class AddonSerializer(object):
__metaclass__ = abc.ABCMeta
def __init__(self, addon_node_settings, user):
self.addon_node_settings = addon_node_settings
self.user = user
@abc.abstractproperty
def serialized_urls(self):
pass
@abc.abstractproperty
def has_valid_credentials(self):
pass
@abc.abstractproperty
def node_has_auth(self):
pass
@abc.abstractproperty
def user_has_auth(self):
pass
@abc.abstractproperty
def user_is_owner(self):
pass
@abc.abstractproperty
def credentials_owner(self):
pass
@property
def serialized_settings(self):
node_has_auth = self.node_has_auth
result = {
'nodeHasAuth': node_has_auth,
'userHasAuth': self.user_has_auth,
'userIsOwner': self.user_is_owner,
'validCredentials': self.has_valid_credentials,
'urls': self.serialized_urls,
}
if node_has_auth:
owner = self.credentials_owner
if owner:
result['urls']['owner'] = web_url_for('profile_view_id',
uid=owner._primary_key)
result['ownerName'] = owner.fullname
return result
class StorageAddonSerializer(AddonSerializer):
__metaclass__ = abc.ABCMeta
@abc.abstractproperty
def serialized_folder(self):
pass
@property
def serialized_settings(self):
result = super(StorageAddonSerializer, self).serialized_settings
result['folder'] = self.serialized_folder
return result
class CitationsAddonSerializer(AddonSerializer):
__metaclass__ = abc.ABCMeta
|
<commit_before><commit_msg>Add base class for serializing addons.<commit_after>import abc
from website.util import web_url_for
class AddonSerializer(object):
__metaclass__ = abc.ABCMeta
def __init__(self, addon_node_settings, user):
self.addon_node_settings = addon_node_settings
self.user = user
@abc.abstractproperty
def serialized_urls(self):
pass
@abc.abstractproperty
def has_valid_credentials(self):
pass
@abc.abstractproperty
def node_has_auth(self):
pass
@abc.abstractproperty
def user_has_auth(self):
pass
@abc.abstractproperty
def user_is_owner(self):
pass
@abc.abstractproperty
def credentials_owner(self):
pass
@property
def serialized_settings(self):
node_has_auth = self.node_has_auth
result = {
'nodeHasAuth': node_has_auth,
'userHasAuth': self.user_has_auth,
'userIsOwner': self.user_is_owner,
'validCredentials': self.has_valid_credentials,
'urls': self.serialized_urls,
}
if node_has_auth:
owner = self.credentials_owner
if owner:
result['urls']['owner'] = web_url_for('profile_view_id',
uid=owner._primary_key)
result['ownerName'] = owner.fullname
return result
class StorageAddonSerializer(AddonSerializer):
__metaclass__ = abc.ABCMeta
@abc.abstractproperty
def serialized_folder(self):
pass
@property
def serialized_settings(self):
result = super(StorageAddonSerializer, self).serialized_settings
result['folder'] = self.serialized_folder
return result
class CitationsAddonSerializer(AddonSerializer):
__metaclass__ = abc.ABCMeta
|
|
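To make the abstract contract above concrete, here is a minimal hypothetical subclass. The addon, the attribute names probed on the settings object, and the URL are invented for illustration; only the property names come from the recorded base class.
# Hypothetical concrete serializer; has_auth on the settings object and
# the endpoint path are assumptions, not the real OSF API.
class ExampleAddonSerializer(AddonSerializer):

    @property
    def serialized_urls(self):
        return {'auth': '/api/v1/example/auth/'}  # invented endpoint

    @property
    def has_valid_credentials(self):
        return True  # a real addon would validate against its service

    @property
    def node_has_auth(self):
        return getattr(self.addon_node_settings, 'has_auth', False)

    @property
    def user_has_auth(self):
        return self.user is not None

    @property
    def user_is_owner(self):
        return self.node_has_auth and self.user_has_auth

    @property
    def credentials_owner(self):
        return None  # no owner profile link in this sketch
With every abstract property overridden, ExampleAddonSerializer(settings, user).serialized_settings returns the dict assembled by the base class, including the owner URL block only when node_has_auth is true.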
0d897c469d168d0362e71c26d039163fd0d3bdd2
|
zerver/management/commands/client-activity.py
|
zerver/management/commands/client-activity.py
|
from __future__ import absolute_import
from django.core.management.base import BaseCommand
from django.db.models import Count
from zerver.models import UserActivity, UserProfile, Realm, \
get_realm, get_user_profile_by_email
import datetime
class Command(BaseCommand):
help = """Report rough client activity globally, for a realm, or for a user
Usage examples:
python manage.py client-activity
python manage.py client-activity zulip.com
python manage.py client-activity jesstess@zulip.com"""
def compute_activity(self, user_activity_objects):
# Report data from the past week.
#
# This is a rough report of client activity because we inconsistently
# register activity from various clients; think of it as telling you
# approximately how many people from a group have used a particular
# client recently. For example, this might be useful to get a sense of
# how popular different versions of a desktop client are.
#
# Importantly, this does NOT tell you anything about the relative
# volumes of requests from clients.
threshold = datetime.datetime.now() - datetime.timedelta(days=7)
client_counts = user_activity_objects.filter(
last_visit__gt=threshold).values("client__name").annotate(
count=Count('client__name'))
total = 0
counts = []
for client_type in client_counts:
count = client_type["count"]
client = client_type["client__name"]
total += count
counts.append((count, client))
counts.sort()
for count in counts:
print "%25s %15d" % (count[1], count[0])
print "Total:", total
def handle(self, *args, **options):
if len(args) == 0:
# Report global activity.
self.compute_activity(UserActivity.objects.all())
elif len(args) == 1:
try:
# Report activity for a user.
user_profile = get_user_profile_by_email(args[0])
self.compute_activity(UserActivity.objects.filter(
user_profile=user_profile))
except UserProfile.DoesNotExist:
try:
# Report activity for a realm.
realm = get_realm(args[0])
self.compute_activity(UserActivity.objects.filter(
user_profile__realm=realm))
except Realm.DoesNotExist:
print "Unknown user or domain %s" % (args[0],)
exit(1)
|
Add a utility to report some rough recent client activity metrics.
|
Add a utility to report some rough recent client activity metrics.
(imported from commit 27b4a70871939b2728fcbe0ce5824326ff4decc2)
|
Python
|
apache-2.0
|
pradiptad/zulip,esander91/zulip,JPJPJPOPOP/zulip,hustlzp/zulip,MayB/zulip,AZtheAsian/zulip,ApsOps/zulip,MayB/zulip,amallia/zulip,bowlofstew/zulip,ufosky-server/zulip,showell/zulip,vabs22/zulip,qq1012803704/zulip,mohsenSy/zulip,he15his/zulip,atomic-labs/zulip,andersk/zulip,johnnygaddarr/zulip,tiansiyuan/zulip,AZtheAsian/zulip,Cheppers/zulip,themass/zulip,synicalsyntax/zulip,MariaFaBella85/zulip,ApsOps/zulip,xuxiao/zulip,udxxabp/zulip,grave-w-grave/zulip,itnihao/zulip,guiquanz/zulip,sup95/zulip,amyliu345/zulip,zwily/zulip,alliejones/zulip,themass/zulip,rishig/zulip,moria/zulip,shrikrishnaholla/zulip,punchagan/zulip,eastlhu/zulip,jackrzhang/zulip,eastlhu/zulip,dotcool/zulip,dotcool/zulip,arpith/zulip,bitemyapp/zulip,bssrdf/zulip,Diptanshu8/zulip,ApsOps/zulip,tbutter/zulip,dxq-git/zulip,j831/zulip,fw1121/zulip,PaulPetring/zulip,adnanh/zulip,sonali0901/zulip,aliceriot/zulip,xuanhan863/zulip,peguin40/zulip,ryansnowboarder/zulip,umkay/zulip,zulip/zulip,hj3938/zulip,itnihao/zulip,zwily/zulip,DazWorrall/zulip,avastu/zulip,seapasulli/zulip,dwrpayne/zulip,levixie/zulip,Suninus/zulip,vaidap/zulip,praveenaki/zulip,ikasumiwt/zulip,ipernet/zulip,susansls/zulip,proliming/zulip,cosmicAsymmetry/zulip,arpitpanwar/zulip,LeeRisk/zulip,reyha/zulip,ipernet/zulip,SmartPeople/zulip,codeKonami/zulip,littledogboy/zulip,arpitpanwar/zulip,stamhe/zulip,sharmaeklavya2/zulip,zhaoweigg/zulip,zorojean/zulip,atomic-labs/zulip,eastlhu/zulip,kaiyuanheshang/zulip,dotcool/zulip,mdavid/zulip,AZtheAsian/zulip,bowlofstew/zulip,saitodisse/zulip,jphilipsen05/zulip,hackerkid/zulip,tdr130/zulip,pradiptad/zulip,sonali0901/zulip,jackrzhang/zulip,wavelets/zulip,voidException/zulip,johnnygaddarr/zulip,tbutter/zulip,firstblade/zulip,levixie/zulip,souravbadami/zulip,adnanh/zulip,j831/zulip,wweiradio/zulip,verma-varsha/zulip,rht/zulip,m1ssou/zulip,Jianchun1/zulip,udxxabp/zulip,saitodisse/zulip,Batterfii/zulip,jrowan/zulip,nicholasbs/zulip,ipernet/zulip,Cheppers/zulip,dxq-git/zulip,PhilSk/zulip,amallia/zulip,aakash-cr7/zulip,jrowan/zulip,wweiradio/zulip,kou/zulip,zofuthan/zulip,Gabriel0402/zulip,arpith/zulip,hayderimran7/zulip,aliceriot/zulip,dhcrzf/zulip,Batterfii/zulip,glovebx/zulip,rishig/zulip,yuvipanda/zulip,Jianchun1/zulip,Batterfii/zulip,kou/zulip,zorojean/zulip,stamhe/zulip,mansilladev/zulip,TigorC/zulip,bssrdf/zulip,wdaher/zulip,RobotCaleb/zulip,nicholasbs/zulip,seapasulli/zulip,JanzTam/zulip,udxxabp/zulip,developerfm/zulip,levixie/zulip,wavelets/zulip,dwrpayne/zulip,praveenaki/zulip,natanovia/zulip,zachallaun/zulip,hafeez3000/zulip,Drooids/zulip,umkay/zulip,kokoar/zulip,xuxiao/zulip,wdaher/zulip,susansls/zulip,punchagan/zulip,so0k/zulip,noroot/zulip,codeKonami/zulip,adnanh/zulip,guiquanz/zulip,Juanvulcano/zulip,dwrpayne/zulip,jimmy54/zulip,Suninus/zulip,ApsOps/zulip,mdavid/zulip,jeffcao/zulip,akuseru/zulip,cosmicAsymmetry/zulip,amallia/zulip,dhcrzf/zulip,umkay/zulip,souravbadami/zulip,yocome/zulip,umkay/zulip,so0k/zulip,schatt/zulip,Suninus/zulip,JPJPJPOPOP/zulip,dattatreya303/zulip,zofuthan/zulip,babbage/zulip,bastianh/zulip,karamcnair/zulip,timabbott/zulip,xuxiao/zulip,peguin40/zulip,andersk/zulip,luyifan/zulip,guiquanz/zulip,yocome/zulip,praveenaki/zulip,amyliu345/zulip,rishig/zulip,kaiyuanheshang/zulip,hustlzp/zulip,isht3/zulip,saitodisse/zulip,dxq-git/zulip,gigawhitlocks/zulip,hengqujushi/zulip,johnnygaddarr/zulip,ufosky-server/zulip,suxinde2009/zulip,grave-w-grave/zulip,thomasboyt/zulip,vaidap/zulip,bssrdf/zulip,adnanh/zulip,timabbott/zulip,schatt/zulip,aps-sids/zulip,aps-sids/zulip,Qgap/zulip,huangkebo/zulip,aps-sids/zulip,tho
masboyt/zulip,RobotCaleb/zulip,natanovia/zulip,shrikrishnaholla/zulip,LeeRisk/zulip,shaunstanislaus/zulip,showell/zulip,qq1012803704/zulip,zulip/zulip,bluesea/zulip,dawran6/zulip,brainwane/zulip,aakash-cr7/zulip,natanovia/zulip,zwily/zulip,nicholasbs/zulip,tiansiyuan/zulip,huangkebo/zulip,tommyip/zulip,gkotian/zulip,SmartPeople/zulip,eastlhu/zulip,j831/zulip,Diptanshu8/zulip,swinghu/zulip,aakash-cr7/zulip,bluesea/zulip,wavelets/zulip,atomic-labs/zulip,hackerkid/zulip,Vallher/zulip,bluesea/zulip,hayderimran7/zulip,vaidap/zulip,dattatreya303/zulip,jonesgithub/zulip,Qgap/zulip,itnihao/zulip,MariaFaBella85/zulip,rht/zulip,rht/zulip,ufosky-server/zulip,ericzhou2008/zulip,schatt/zulip,easyfmxu/zulip,KJin99/zulip,zulip/zulip,samatdav/zulip,ryansnowboarder/zulip,DazWorrall/zulip,hj3938/zulip,DazWorrall/zulip,seapasulli/zulip,he15his/zulip,stamhe/zulip,hustlzp/zulip,krtkmj/zulip,itnihao/zulip,gkotian/zulip,bitemyapp/zulip,littledogboy/zulip,akuseru/zulip,zhaoweigg/zulip,technicalpickles/zulip,JanzTam/zulip,vakila/zulip,Cheppers/zulip,MariaFaBella85/zulip,zwily/zulip,vakila/zulip,kaiyuanheshang/zulip,arpitpanwar/zulip,LeeRisk/zulip,susansls/zulip,calvinleenyc/zulip,technicalpickles/zulip,joshisa/zulip,yuvipanda/zulip,jessedhillon/zulip,PaulPetring/zulip,guiquanz/zulip,eeshangarg/zulip,jonesgithub/zulip,cosmicAsymmetry/zulip,ryanbackman/zulip,verma-varsha/zulip,zacps/zulip,LAndreas/zulip,deer-hope/zulip,Gabriel0402/zulip,zachallaun/zulip,wweiradio/zulip,SmartPeople/zulip,zachallaun/zulip,bitemyapp/zulip,LeeRisk/zulip,glovebx/zulip,amyliu345/zulip,Gabriel0402/zulip,ikasumiwt/zulip,developerfm/zulip,TigorC/zulip,Diptanshu8/zulip,yocome/zulip,zorojean/zulip,sup95/zulip,vikas-parashar/zulip,bluesea/zulip,moria/zulip,bssrdf/zulip,luyifan/zulip,ahmadassaf/zulip,jphilipsen05/zulip,seapasulli/zulip,paxapy/zulip,pradiptad/zulip,noroot/zulip,vikas-parashar/zulip,zorojean/zulip,jainayush975/zulip,zwily/zulip,ipernet/zulip,yuvipanda/zulip,KJin99/zulip,luyifan/zulip,xuxiao/zulip,schatt/zulip,peiwei/zulip,xuanhan863/zulip,luyifan/zulip,tiansiyuan/zulip,wweiradio/zulip,stamhe/zulip,kokoar/zulip,dxq-git/zulip,hayderimran7/zulip,eeshangarg/zulip,jrowan/zulip,christi3k/zulip,jimmy54/zulip,eastlhu/zulip,bowlofstew/zulip,xuanhan863/zulip,itnihao/zulip,avastu/zulip,ikasumiwt/zulip,verma-varsha/zulip,easyfmxu/zulip,TigorC/zulip,susansls/zulip,lfranchi/zulip,johnny9/zulip,atomic-labs/zulip,TigorC/zulip,sonali0901/zulip,mohsenSy/zulip,jerryge/zulip,grave-w-grave/zulip,seapasulli/zulip,suxinde2009/zulip,Galexrt/zulip,hj3938/zulip,ashwinirudrappa/zulip,ashwinirudrappa/zulip,noroot/zulip,saitodisse/zulip,mohsenSy/zulip,alliejones/zulip,wangdeshui/zulip,glovebx/zulip,udxxabp/zulip,rht/zulip,jessedhillon/zulip,dawran6/zulip,DazWorrall/zulip,KingxBanana/zulip,DazWorrall/zulip,zulip/zulip,sonali0901/zulip,wavelets/zulip,niftynei/zulip,amanharitsh123/zulip,kaiyuanheshang/zulip,EasonYi/zulip,amyliu345/zulip,natanovia/zulip,zulip/zulip,jonesgithub/zulip,verma-varsha/zulip,huangkebo/zulip,glovebx/zulip,umkay/zulip,seapasulli/zulip,voidException/zulip,reyha/zulip,vabs22/zulip,showell/zulip,proliming/zulip,deer-hope/zulip,johnnygaddarr/zulip,verma-varsha/zulip,jerryge/zulip,stamhe/zulip,jerryge/zulip,shubhamdhama/zulip,armooo/zulip,gkotian/zulip,Galexrt/zulip,ashwinirudrappa/zulip,vabs22/zulip,kokoar/zulip,dwrpayne/zulip,willingc/zulip,rishig/zulip,dotcool/zulip,Drooids/zulip,zachallaun/zulip,ericzhou2008/zulip,esander91/zulip,bastianh/zulip,vakila/zulip,arpitpanwar/zulip,saitodisse/zulip,dnmfarrell/zulip,ashwinirudrappa/zulip,paxapy/zul
ip,samatdav/zulip,avastu/zulip,proliming/zulip,peiwei/zulip,brainwane/zulip,littledogboy/zulip,blaze225/zulip,mahim97/zulip,ryanbackman/zulip,DazWorrall/zulip,KJin99/zulip,Qgap/zulip,levixie/zulip,brockwhittaker/zulip,calvinleenyc/zulip,jimmy54/zulip,eeshangarg/zulip,Juanvulcano/zulip,jainayush975/zulip,joshisa/zulip,Juanvulcano/zulip,jphilipsen05/zulip,ryansnowboarder/zulip,easyfmxu/zulip,andersk/zulip,swinghu/zulip,rishig/zulip,bssrdf/zulip,babbage/zulip,dawran6/zulip,dhcrzf/zulip,tommyip/zulip,peguin40/zulip,atomic-labs/zulip,gigawhitlocks/zulip,Suninus/zulip,bastianh/zulip,shaunstanislaus/zulip,levixie/zulip,codeKonami/zulip,alliejones/zulip,christi3k/zulip,EasonYi/zulip,zorojean/zulip,hayderimran7/zulip,joyhchen/zulip,pradiptad/zulip,isht3/zulip,joyhchen/zulip,jainayush975/zulip,moria/zulip,noroot/zulip,eastlhu/zulip,akuseru/zulip,wavelets/zulip,MayB/zulip,so0k/zulip,tdr130/zulip,AZtheAsian/zulip,easyfmxu/zulip,fw1121/zulip,kokoar/zulip,christi3k/zulip,bitemyapp/zulip,vaidap/zulip,saitodisse/zulip,gkotian/zulip,ryansnowboarder/zulip,guiquanz/zulip,LAndreas/zulip,LAndreas/zulip,jackrzhang/zulip,TigorC/zulip,lfranchi/zulip,firstblade/zulip,swinghu/zulip,zorojean/zulip,Suninus/zulip,JanzTam/zulip,karamcnair/zulip,yuvipanda/zulip,paxapy/zulip,xuxiao/zulip,zacps/zulip,gigawhitlocks/zulip,punchagan/zulip,gkotian/zulip,voidException/zulip,Cheppers/zulip,karamcnair/zulip,kaiyuanheshang/zulip,hayderimran7/zulip,zwily/zulip,MayB/zulip,ahmadassaf/zulip,thomasboyt/zulip,ashwinirudrappa/zulip,jessedhillon/zulip,deer-hope/zulip,lfranchi/zulip,xuanhan863/zulip,schatt/zulip,tiansiyuan/zulip,mdavid/zulip,jonesgithub/zulip,blaze225/zulip,jonesgithub/zulip,he15his/zulip,krtkmj/zulip,hackerkid/zulip,m1ssou/zulip,wangdeshui/zulip,glovebx/zulip,Diptanshu8/zulip,ericzhou2008/zulip,KJin99/zulip,andersk/zulip,natanovia/zulip,pradiptad/zulip,sharmaeklavya2/zulip,firstblade/zulip,littledogboy/zulip,tbutter/zulip,KingxBanana/zulip,fw1121/zulip,glovebx/zulip,hengqujushi/zulip,jeffcao/zulip,levixie/zulip,luyifan/zulip,isht3/zulip,aakash-cr7/zulip,EasonYi/zulip,synicalsyntax/zulip,moria/zulip,willingc/zulip,joyhchen/zulip,huangkebo/zulip,brainwane/zulip,swinghu/zulip,reyha/zulip,dotcool/zulip,huangkebo/zulip,babbage/zulip,so0k/zulip,peiwei/zulip,babbage/zulip,Cheppers/zulip,yocome/zulip,Jianchun1/zulip,synicalsyntax/zulip,wweiradio/zulip,kou/zulip,eeshangarg/zulip,sup95/zulip,rishig/zulip,Gabriel0402/zulip,shrikrishnaholla/zulip,kou/zulip,ryansnowboarder/zulip,wangdeshui/zulip,Frouk/zulip,armooo/zulip,EasonYi/zulip,wangdeshui/zulip,Frouk/zulip,mohsenSy/zulip,ahmadassaf/zulip,karamcnair/zulip,LAndreas/zulip,developerfm/zulip,jessedhillon/zulip,lfranchi/zulip,gigawhitlocks/zulip,dattatreya303/zulip,jessedhillon/zulip,esander91/zulip,mohsenSy/zulip,jphilipsen05/zulip,gigawhitlocks/zulip,ufosky-server/zulip,hayderimran7/zulip,bitemyapp/zulip,niftynei/zulip,JanzTam/zulip,technicalpickles/zulip,armooo/zulip,blaze225/zulip,calvinleenyc/zulip,themass/zulip,samatdav/zulip,thomasboyt/zulip,arpith/zulip,eastlhu/zulip,brockwhittaker/zulip,babbage/zulip,akuseru/zulip,aps-sids/zulip,developerfm/zulip,aliceriot/zulip,blaze225/zulip,hengqujushi/zulip,xuxiao/zulip,samatdav/zulip,sharmaeklavya2/zulip,Juanvulcano/zulip,KJin99/zulip,mdavid/zulip,JanzTam/zulip,praveenaki/zulip,zhaoweigg/zulip,Frouk/zulip,yocome/zulip,Qgap/zulip,brockwhittaker/zulip,verma-varsha/zulip,hafeez3000/zulip,eeshangarg/zulip,armooo/zulip,showell/zulip,shaunstanislaus/zulip,JPJPJPOPOP/zulip,Drooids/zulip,johnnygaddarr/zulip,lfranchi/zulip,timabbott/zulip,adnanh/
zulip,zacps/zulip,yocome/zulip,showell/zulip,paxapy/zulip,sonali0901/zulip,luyifan/zulip,jimmy54/zulip,moria/zulip,Vallher/zulip,guiquanz/zulip,joshisa/zulip,gkotian/zulip,zachallaun/zulip,easyfmxu/zulip,themass/zulip,Gabriel0402/zulip,dnmfarrell/zulip,wdaher/zulip,m1ssou/zulip,vikas-parashar/zulip,andersk/zulip,RobotCaleb/zulip,hafeez3000/zulip,paxapy/zulip,zacps/zulip,hustlzp/zulip,krtkmj/zulip,bluesea/zulip,andersk/zulip,wweiradio/zulip,adnanh/zulip,Galexrt/zulip,DazWorrall/zulip,shaunstanislaus/zulip,JPJPJPOPOP/zulip,dnmfarrell/zulip,kokoar/zulip,zhaoweigg/zulip,aliceriot/zulip,synicalsyntax/zulip,suxinde2009/zulip,niftynei/zulip,m1ssou/zulip,KingxBanana/zulip,krtkmj/zulip,wdaher/zulip,Vallher/zulip,vakila/zulip,dawran6/zulip,souravbadami/zulip,zachallaun/zulip,bluesea/zulip,MayB/zulip,jphilipsen05/zulip,mansilladev/zulip,krtkmj/zulip,ipernet/zulip,akuseru/zulip,karamcnair/zulip,christi3k/zulip,JanzTam/zulip,developerfm/zulip,ryanbackman/zulip,punchagan/zulip,vabs22/zulip,tbutter/zulip,willingc/zulip,aakash-cr7/zulip,ikasumiwt/zulip,Suninus/zulip,littledogboy/zulip,firstblade/zulip,udxxabp/zulip,bowlofstew/zulip,hackerkid/zulip,Jianchun1/zulip,esander91/zulip,timabbott/zulip,moria/zulip,wdaher/zulip,EasonYi/zulip,tdr130/zulip,kokoar/zulip,Drooids/zulip,tommyip/zulip,Qgap/zulip,mdavid/zulip,qq1012803704/zulip,avastu/zulip,zhaoweigg/zulip,gkotian/zulip,jeffcao/zulip,mahim97/zulip,PhilSk/zulip,hengqujushi/zulip,tbutter/zulip,Galexrt/zulip,he15his/zulip,m1ssou/zulip,PaulPetring/zulip,esander91/zulip,AZtheAsian/zulip,amallia/zulip,shubhamdhama/zulip,fw1121/zulip,zachallaun/zulip,jackrzhang/zulip,ericzhou2008/zulip,dwrpayne/zulip,itnihao/zulip,niftynei/zulip,timabbott/zulip,reyha/zulip,wangdeshui/zulip,mahim97/zulip,PhilSk/zulip,cosmicAsymmetry/zulip,bowlofstew/zulip,jessedhillon/zulip,dwrpayne/zulip,brainwane/zulip,deer-hope/zulip,isht3/zulip,dxq-git/zulip,tbutter/zulip,sup95/zulip,joyhchen/zulip,ikasumiwt/zulip,wangdeshui/zulip,LAndreas/zulip,Drooids/zulip,mahim97/zulip,punchagan/zulip,johnny9/zulip,kou/zulip,nicholasbs/zulip,jonesgithub/zulip,amallia/zulip,swinghu/zulip,brockwhittaker/zulip,voidException/zulip,arpith/zulip,arpith/zulip,littledogboy/zulip,wangdeshui/zulip,babbage/zulip,jrowan/zulip,LeeRisk/zulip,amanharitsh123/zulip,joshisa/zulip,ipernet/zulip,samatdav/zulip,isht3/zulip,jrowan/zulip,ahmadassaf/zulip,qq1012803704/zulip,jessedhillon/zulip,rht/zulip,Batterfii/zulip,shubhamdhama/zulip,cosmicAsymmetry/zulip,PhilSk/zulip,atomic-labs/zulip,peiwei/zulip,ApsOps/zulip,Galexrt/zulip,aps-sids/zulip,punchagan/zulip,hackerkid/zulip,LeeRisk/zulip,joyhchen/zulip,EasonYi/zulip,SmartPeople/zulip,mansilladev/zulip,ashwinirudrappa/zulip,tommyip/zulip,Frouk/zulip,peguin40/zulip,technicalpickles/zulip,huangkebo/zulip,grave-w-grave/zulip,AZtheAsian/zulip,proliming/zulip,shrikrishnaholla/zulip,voidException/zulip,bastianh/zulip,xuanhan863/zulip,thomasboyt/zulip,shrikrishnaholla/zulip,MariaFaBella85/zulip,punchagan/zulip,developerfm/zulip,showell/zulip,christi3k/zulip,MayB/zulip,Frouk/zulip,developerfm/zulip,vikas-parashar/zulip,technicalpickles/zulip,niftynei/zulip,KJin99/zulip,deer-hope/zulip,Galexrt/zulip,natanovia/zulip,mdavid/zulip,bitemyapp/zulip,ikasumiwt/zulip,PaulPetring/zulip,m1ssou/zulip,tiansiyuan/zulip,joyhchen/zulip,johnny9/zulip,jeffcao/zulip,alliejones/zulip,zhaoweigg/zulip,dattatreya303/zulip,amallia/zulip,yuvipanda/zulip,esander91/zulip,arpitpanwar/zulip,yuvipanda/zulip,PhilSk/zulip,deer-hope/zulip,hj3938/zulip,gigawhitlocks/zulip,ericzhou2008/zulip,lfranchi/zulip,LAndreas/zulip,c
osmicAsymmetry/zulip,bssrdf/zulip,Qgap/zulip,vabs22/zulip,hj3938/zulip,mahim97/zulip,kaiyuanheshang/zulip,Juanvulcano/zulip,peguin40/zulip,sup95/zulip,udxxabp/zulip,rishig/zulip,bastianh/zulip,so0k/zulip,proliming/zulip,easyfmxu/zulip,qq1012803704/zulip,ericzhou2008/zulip,synicalsyntax/zulip,qq1012803704/zulip,joshisa/zulip,sharmaeklavya2/zulip,wdaher/zulip,zhaoweigg/zulip,dnmfarrell/zulip,hustlzp/zulip,ryanbackman/zulip,lfranchi/zulip,zulip/zulip,ryansnowboarder/zulip,RobotCaleb/zulip,jerryge/zulip,sonali0901/zulip,jimmy54/zulip,Batterfii/zulip,wweiradio/zulip,he15his/zulip,so0k/zulip,thomasboyt/zulip,tiansiyuan/zulip,calvinleenyc/zulip,jackrzhang/zulip,Juanvulcano/zulip,udxxabp/zulip,ryanbackman/zulip,johnnygaddarr/zulip,christi3k/zulip,voidException/zulip,fw1121/zulip,arpith/zulip,vakila/zulip,dhcrzf/zulip,so0k/zulip,SmartPeople/zulip,joshisa/zulip,johnny9/zulip,zacps/zulip,vikas-parashar/zulip,Cheppers/zulip,zacps/zulip,samatdav/zulip,codeKonami/zulip,amanharitsh123/zulip,synicalsyntax/zulip,hafeez3000/zulip,xuanhan863/zulip,bastianh/zulip,karamcnair/zulip,MariaFaBella85/zulip,he15his/zulip,dattatreya303/zulip,shaunstanislaus/zulip,Cheppers/zulip,akuseru/zulip,xuxiao/zulip,KJin99/zulip,arpitpanwar/zulip,dnmfarrell/zulip,mohsenSy/zulip,susansls/zulip,MayB/zulip,shubhamdhama/zulip,bastianh/zulip,jeffcao/zulip,Frouk/zulip,pradiptad/zulip,proliming/zulip,jerryge/zulip,avastu/zulip,alliejones/zulip,kou/zulip,dawran6/zulip,sharmaeklavya2/zulip,johnny9/zulip,tbutter/zulip,tdr130/zulip,stamhe/zulip,tdr130/zulip,hengqujushi/zulip,krtkmj/zulip,suxinde2009/zulip,hj3938/zulip,blaze225/zulip,jimmy54/zulip,shaunstanislaus/zulip,guiquanz/zulip,timabbott/zulip,shaunstanislaus/zulip,amyliu345/zulip,Drooids/zulip,JPJPJPOPOP/zulip,deer-hope/zulip,jainayush975/zulip,tdr130/zulip,alliejones/zulip,praveenaki/zulip,wavelets/zulip,gigawhitlocks/zulip,akuseru/zulip,zofuthan/zulip,dawran6/zulip,dhcrzf/zulip,Gabriel0402/zulip,willingc/zulip,avastu/zulip,dattatreya303/zulip,amallia/zulip,fw1121/zulip,zofuthan/zulip,eeshangarg/zulip,Vallher/zulip,mansilladev/zulip,SmartPeople/zulip,noroot/zulip,jerryge/zulip,brainwane/zulip,glovebx/zulip,atomic-labs/zulip,amanharitsh123/zulip,jonesgithub/zulip,dnmfarrell/zulip,hackerkid/zulip,sharmaeklavya2/zulip,brockwhittaker/zulip,Jianchun1/zulip,nicholasbs/zulip,codeKonami/zulip,kaiyuanheshang/zulip,saitodisse/zulip,stamhe/zulip,vakila/zulip,jeffcao/zulip,tiansiyuan/zulip,ApsOps/zulip,dwrpayne/zulip,wdaher/zulip,hustlzp/zulip,shubhamdhama/zulip,noroot/zulip,pradiptad/zulip,zwily/zulip,umkay/zulip,esander91/zulip,peiwei/zulip,swinghu/zulip,susansls/zulip,ufosky-server/zulip,noroot/zulip,willingc/zulip,vabs22/zulip,blaze225/zulip,showell/zulip,shrikrishnaholla/zulip,Vallher/zulip,MariaFaBella85/zulip,PaulPetring/zulip,ryanbackman/zulip,Batterfii/zulip,tdr130/zulip,hustlzp/zulip,tommyip/zulip,xuanhan863/zulip,peguin40/zulip,jainayush975/zulip,themass/zulip,mdavid/zulip,aliceriot/zulip,hengqujushi/zulip,fw1121/zulip,sup95/zulip,ikasumiwt/zulip,he15his/zulip,alliejones/zulip,amanharitsh123/zulip,RobotCaleb/zulip,vikas-parashar/zulip,ahmadassaf/zulip,synicalsyntax/zulip,dotcool/zulip,ericzhou2008/zulip,PhilSk/zulip,dxq-git/zulip,amanharitsh123/zulip,bluesea/zulip,firstblade/zulip,calvinleenyc/zulip,paxapy/zulip,andersk/zulip,praveenaki/zulip,zofuthan/zulip,EasonYi/zulip,joshisa/zulip,easyfmxu/zulip,levixie/zulip,shubhamdhama/zulip,armooo/zulip,bowlofstew/zulip,ufosky-server/zulip,johnny9/zulip,arpitpanwar/zulip,Diptanshu8/zulip,technicalpickles/zulip,swinghu/zulip,schatt/zulip,natano
via/zulip,johnny9/zulip,mahim97/zulip,itnihao/zulip,shrikrishnaholla/zulip,yocome/zulip,zofuthan/zulip,codeKonami/zulip,shubhamdhama/zulip,souravbadami/zulip,karamcnair/zulip,jphilipsen05/zulip,rht/zulip,hackerkid/zulip,RobotCaleb/zulip,jimmy54/zulip,codeKonami/zulip,aakash-cr7/zulip,adnanh/zulip,thomasboyt/zulip,jeffcao/zulip,grave-w-grave/zulip,Batterfii/zulip,j831/zulip,proliming/zulip,LeeRisk/zulip,qq1012803704/zulip,dnmfarrell/zulip,schatt/zulip,jackrzhang/zulip,ashwinirudrappa/zulip,dhcrzf/zulip,jainayush975/zulip,yuvipanda/zulip,TigorC/zulip,aps-sids/zulip,nicholasbs/zulip,RobotCaleb/zulip,littledogboy/zulip,armooo/zulip,johnnygaddarr/zulip,MariaFaBella85/zulip,Galexrt/zulip,willingc/zulip,hafeez3000/zulip,huangkebo/zulip,voidException/zulip,reyha/zulip,technicalpickles/zulip,themass/zulip,vakila/zulip,LAndreas/zulip,zorojean/zulip,firstblade/zulip,brainwane/zulip,Vallher/zulip,KingxBanana/zulip,reyha/zulip,kokoar/zulip,j831/zulip,jrowan/zulip,krtkmj/zulip,JPJPJPOPOP/zulip,hafeez3000/zulip,themass/zulip,grave-w-grave/zulip,niftynei/zulip,kou/zulip,hj3938/zulip,dxq-git/zulip,moria/zulip,ahmadassaf/zulip,zulip/zulip,aps-sids/zulip,armooo/zulip,calvinleenyc/zulip,mansilladev/zulip,brainwane/zulip,willingc/zulip,wavelets/zulip,ApsOps/zulip,suxinde2009/zulip,aliceriot/zulip,peiwei/zulip,vaidap/zulip,zofuthan/zulip,suxinde2009/zulip,rht/zulip,suxinde2009/zulip,amyliu345/zulip,KingxBanana/zulip,Qgap/zulip,tommyip/zulip,souravbadami/zulip,ipernet/zulip,m1ssou/zulip,Vallher/zulip,mansilladev/zulip,vaidap/zulip,ryansnowboarder/zulip,Suninus/zulip,aliceriot/zulip,dotcool/zulip,umkay/zulip,brockwhittaker/zulip,peiwei/zulip,ufosky-server/zulip,JanzTam/zulip,praveenaki/zulip,ahmadassaf/zulip,hafeez3000/zulip,souravbadami/zulip,bowlofstew/zulip,luyifan/zulip,Jianchun1/zulip,bssrdf/zulip,KingxBanana/zulip,avastu/zulip,PaulPetring/zulip,eeshangarg/zulip,babbage/zulip,Diptanshu8/zulip,timabbott/zulip,tommyip/zulip,mansilladev/zulip,Drooids/zulip,bitemyapp/zulip,Gabriel0402/zulip,PaulPetring/zulip,jackrzhang/zulip,isht3/zulip,Frouk/zulip,nicholasbs/zulip,j831/zulip,firstblade/zulip,jerryge/zulip,hayderimran7/zulip,dhcrzf/zulip,seapasulli/zulip,hengqujushi/zulip
|
Add a utility to report some rough recent client activity metrics.
(imported from commit 27b4a70871939b2728fcbe0ce5824326ff4decc2)
|
from __future__ import absolute_import
from django.core.management.base import BaseCommand
from django.db.models import Count
from zerver.models import UserActivity, UserProfile, Realm, \
get_realm, get_user_profile_by_email
import datetime
class Command(BaseCommand):
help = """Report rough client activity globally, for a realm, or for a user
Usage examples:
python manage.py client-activity
python manage.py client-activity zulip.com
python manage.py client-activity jesstess@zulip.com"""
def compute_activity(self, user_activity_objects):
# Report data from the past week.
#
# This is a rough report of client activity because we inconsistently
# register activity from various clients; think of it as telling you
# approximately how many people from a group have used a particular
# client recently. For example, this might be useful to get a sense of
# how popular different versions of a desktop client are.
#
# Importantly, this does NOT tell you anything about the relative
# volumes of requests from clients.
threshold = datetime.datetime.now() - datetime.timedelta(days=7)
client_counts = user_activity_objects.filter(
last_visit__gt=threshold).values("client__name").annotate(
count=Count('client__name'))
total = 0
counts = []
for client_type in client_counts:
count = client_type["count"]
client = client_type["client__name"]
total += count
counts.append((count, client))
counts.sort()
for count in counts:
print "%25s %15d" % (count[1], count[0])
print "Total:", total
def handle(self, *args, **options):
if len(args) == 0:
# Report global activity.
self.compute_activity(UserActivity.objects.all())
elif len(args) == 1:
try:
# Report activity for a user.
user_profile = get_user_profile_by_email(args[0])
self.compute_activity(UserActivity.objects.filter(
user_profile=user_profile))
except UserProfile.DoesNotExist:
try:
# Report activity for a realm.
realm = get_realm(args[0])
self.compute_activity(UserActivity.objects.filter(
user_profile__realm=realm))
except Realm.DoesNotExist:
print "Unknown user or domain %s" % (args[0],)
exit(1)
|
<commit_before><commit_msg>Add a utility to report some rough recent client activity metrics.
(imported from commit 27b4a70871939b2728fcbe0ce5824326ff4decc2)<commit_after>
|
from __future__ import absolute_import
from django.core.management.base import BaseCommand
from django.db.models import Count
from zerver.models import UserActivity, UserProfile, Realm, \
get_realm, get_user_profile_by_email
import datetime
class Command(BaseCommand):
help = """Report rough client activity globally, for a realm, or for a user
Usage examples:
python manage.py client-activity
python manage.py client-activity zulip.com
python manage.py client-activity jesstess@zulip.com"""
def compute_activity(self, user_activity_objects):
# Report data from the past week.
#
# This is a rough report of client activity because we inconsistently
# register activity from various clients; think of it as telling you
# approximately how many people from a group have used a particular
# client recently. For example, this might be useful to get a sense of
# how popular different versions of a desktop client are.
#
# Importantly, this does NOT tell you anything about the relative
# volumes of requests from clients.
threshold = datetime.datetime.now() - datetime.timedelta(days=7)
client_counts = user_activity_objects.filter(
last_visit__gt=threshold).values("client__name").annotate(
count=Count('client__name'))
total = 0
counts = []
for client_type in client_counts:
count = client_type["count"]
client = client_type["client__name"]
total += count
counts.append((count, client))
counts.sort()
for count in counts:
print "%25s %15d" % (count[1], count[0])
print "Total:", total
def handle(self, *args, **options):
if len(args) == 0:
# Report global activity.
self.compute_activity(UserActivity.objects.all())
elif len(args) == 1:
try:
# Report activity for a user.
user_profile = get_user_profile_by_email(args[0])
self.compute_activity(UserActivity.objects.filter(
user_profile=user_profile))
except UserProfile.DoesNotExist:
try:
# Report activity for a realm.
realm = get_realm(args[0])
self.compute_activity(UserActivity.objects.filter(
user_profile__realm=realm))
except Realm.DoesNotExist:
print "Unknown user or domain %s" % (args[0],)
exit(1)
|
Add a utility to report some rough recent client activity metrics.
(imported from commit 27b4a70871939b2728fcbe0ce5824326ff4decc2)from __future__ import absolute_import
from django.core.management.base import BaseCommand
from django.db.models import Count
from zerver.models import UserActivity, UserProfile, Realm, \
get_realm, get_user_profile_by_email
import datetime
class Command(BaseCommand):
help = """Report rough client activity globally, for a realm, or for a user
Usage examples:
python manage.py client-activity
python manage.py client-activity zulip.com
python manage.py client-activity jesstess@zulip.com"""
def compute_activity(self, user_activity_objects):
# Report data from the past week.
#
# This is a rough report of client activity because we inconsistently
# register activity from various clients; think of it as telling you
# approximately how many people from a group have used a particular
# client recently. For example, this might be useful to get a sense of
# how popular different versions of a desktop client are.
#
# Importantly, this does NOT tell you anything about the relative
# volumes of requests from clients.
threshold = datetime.datetime.now() - datetime.timedelta(days=7)
client_counts = user_activity_objects.filter(
last_visit__gt=threshold).values("client__name").annotate(
count=Count('client__name'))
total = 0
counts = []
for client_type in client_counts:
count = client_type["count"]
client = client_type["client__name"]
total += count
counts.append((count, client))
counts.sort()
for count in counts:
print "%25s %15d" % (count[1], count[0])
print "Total:", total
def handle(self, *args, **options):
if len(args) == 0:
# Report global activity.
self.compute_activity(UserActivity.objects.all())
elif len(args) == 1:
try:
# Report activity for a user.
user_profile = get_user_profile_by_email(args[0])
self.compute_activity(UserActivity.objects.filter(
user_profile=user_profile))
except UserProfile.DoesNotExist:
try:
# Report activity for a realm.
realm = get_realm(args[0])
self.compute_activity(UserActivity.objects.filter(
user_profile__realm=realm))
except Realm.DoesNotExist:
print "Unknown user or domain %s" % (args[0],)
exit(1)
|
<commit_before><commit_msg>Add a utility to report some rough recent client activity metrics.
(imported from commit 27b4a70871939b2728fcbe0ce5824326ff4decc2)<commit_after>from __future__ import absolute_import
from django.core.management.base import BaseCommand
from django.db.models import Count
from zerver.models import UserActivity, UserProfile, Realm, \
get_realm, get_user_profile_by_email
import datetime
class Command(BaseCommand):
help = """Report rough client activity globally, for a realm, or for a user
Usage examples:
python manage.py client-activity
python manage.py client-activity zulip.com
python manage.py client-activity jesstess@zulip.com"""
def compute_activity(self, user_activity_objects):
# Report data from the past week.
#
# This is a rough report of client activity because we inconsistently
# register activity from various clients; think of it as telling you
# approximately how many people from a group have used a particular
# client recently. For example, this might be useful to get a sense of
# how popular different versions of a desktop client are.
#
# Importantly, this does NOT tell you anything about the relative
# volumes of requests from clients.
threshold = datetime.datetime.now() - datetime.timedelta(days=7)
client_counts = user_activity_objects.filter(
last_visit__gt=threshold).values("client__name").annotate(
count=Count('client__name'))
total = 0
counts = []
for client_type in client_counts:
count = client_type["count"]
client = client_type["client__name"]
total += count
counts.append((count, client))
counts.sort()
for count in counts:
print "%25s %15d" % (count[1], count[0])
print "Total:", total
def handle(self, *args, **options):
if len(args) == 0:
# Report global activity.
self.compute_activity(UserActivity.objects.all())
elif len(args) == 1:
try:
# Report activity for a user.
user_profile = get_user_profile_by_email(args[0])
self.compute_activity(UserActivity.objects.filter(
user_profile=user_profile))
except UserProfile.DoesNotExist:
try:
# Report activity for a realm.
realm = get_realm(args[0])
self.compute_activity(UserActivity.objects.filter(
user_profile__realm=realm))
except Realm.DoesNotExist:
print "Unknown user or domain %s" % (args[0],)
exit(1)
|
|
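The core of the command is the filter/annotate aggregation. A framework-free sketch of the same counting step, over invented (client, last_visit) pairs, might read as follows; only the threshold and output format mirror the command above.
# Standalone sketch of the report's counting logic; the sample records
# are hypothetical.
import datetime
from collections import Counter

now = datetime.datetime.now()
records = [("website", now),
           ("website", now),
           ("desktop app 0.4", now - datetime.timedelta(days=10))]
threshold = now - datetime.timedelta(days=7)
# Keep only activity from the past week, then count per client name.
counts = Counter(name for name, seen in records if seen > threshold)
for name, count in sorted(counts.items(), key=lambda kv: (kv[1], kv[0])):
    print("%25s %15d" % (name, count))
print("Total: %d" % sum(counts.values()))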
582ceb3f429415251c06f576712a3b2ddbf4496a
|
src/ggrc/migrations/versions/20161109010604_4afe69ce3c38_remove_invalid_person_objects.py
|
src/ggrc/migrations/versions/20161109010604_4afe69ce3c38_remove_invalid_person_objects.py
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Remove invalid person objects
Create Date: 2016-11-09 01:06:04.745331
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
from alembic import op
# revision identifiers, used by Alembic.
revision = '4afe69ce3c38'
down_revision = '2105a9db99fc'
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
op.execute(
"""DELETE FROM object_people WHERE personable_type IN (
'InterviewResponse',
'DocumentationResponse'
)"""
)
def downgrade():
"""Nothing to be done about removed data."""
|
Remove bad data from object_people table
|
Remove bad data from object_people table
The InterviewResponse object was not properly removed from all tables
when the model was deleted. Memcache then tried to remove this
non-existent model, which resulted in an exception. This migration makes
sure that such bad data is properly purged from our database.
|
Python
|
apache-2.0
|
AleksNeStu/ggrc-core,VinnieJohns/ggrc-core,andrei-karalionak/ggrc-core,andrei-karalionak/ggrc-core,j0gurt/ggrc-core,selahssea/ggrc-core,plamut/ggrc-core,andrei-karalionak/ggrc-core,andrei-karalionak/ggrc-core,plamut/ggrc-core,VinnieJohns/ggrc-core,j0gurt/ggrc-core,plamut/ggrc-core,VinnieJohns/ggrc-core,plamut/ggrc-core,VinnieJohns/ggrc-core,josthkko/ggrc-core,josthkko/ggrc-core,selahssea/ggrc-core,j0gurt/ggrc-core,josthkko/ggrc-core,josthkko/ggrc-core,j0gurt/ggrc-core,AleksNeStu/ggrc-core,selahssea/ggrc-core,selahssea/ggrc-core,AleksNeStu/ggrc-core,AleksNeStu/ggrc-core
|
Remove bad data from object_people table
The InterviewResponse object was not properly removed from all tables
when the model was deleted. Memcache then tried to remove this
non-existent model, which resulted in an exception. This migration makes
sure that such bad data is properly purged from our database.
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Remove invalid person objects
Create Date: 2016-11-09 01:06:04.745331
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
from alembic import op
# revision identifiers, used by Alembic.
revision = '4afe69ce3c38'
down_revision = '2105a9db99fc'
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
op.execute(
"""DELETE FROM object_people WHERE personable_type IN (
'InterviewResponse',
'DocumentationResponse'
)"""
)
def downgrade():
"""Nothing to be done about removed data."""
|
<commit_before><commit_msg>Remove bad data from object_people table
The InterviewResponse object was not properly removed from all tables
when the model was deleted. Memcache then tried to remove this
non-existent model, which resulted in an exception. This migration makes
sure that such bad data is properly purged from our database.<commit_after>
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Remove invalid person objects
Create Date: 2016-11-09 01:06:04.745331
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
from alembic import op
# revision identifiers, used by Alembic.
revision = '4afe69ce3c38'
down_revision = '2105a9db99fc'
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
op.execute(
"""DELETE FROM object_people WHERE personable_type IN (
'InterviewResponse',
'DocumentationResponse'
)"""
)
def downgrade():
"""Nothing to be done about removed data."""
|
Remove bad data from object_people table
The InterviewResponse object was not properly removed from all tables
when the model was deleted. Memcache then tried to remove this
non-existent model, which resulted in an exception. This migration makes
sure that such bad data is properly purged from our database.# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Remove invalid person objects
Create Date: 2016-11-09 01:06:04.745331
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
from alembic import op
# revision identifiers, used by Alembic.
revision = '4afe69ce3c38'
down_revision = '2105a9db99fc'
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
op.execute(
"""DELETE FROM object_people WHERE personable_type IN (
'InterviewResponse',
'DocumentationResponse'
)"""
)
def downgrade():
"""Nothing to be done about removed data."""
|
<commit_before><commit_msg>Remove bad data from object_people table
The InterviewResponse object was not properly removed from all tables
when the model was deleted. Memcache then tried to remove this
non-existent model, which resulted in an exception. This migration makes
sure that such bad data is properly purged from our database.<commit_after># Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Remove invalid person objects
Create Date: 2016-11-09 01:06:04.745331
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
from alembic import op
# revision identifiers, used by Alembic.
revision = '4afe69ce3c38'
down_revision = '2105a9db99fc'
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
op.execute(
"""DELETE FROM object_people WHERE personable_type IN (
'InterviewResponse',
'DocumentationResponse'
)"""
)
def downgrade():
"""Nothing to be done about removed data."""
|
|
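As a hedged illustration of verifying a data migration like this one, a hypothetical post-upgrade check (not part of the commit) could count the rows the DELETE should have removed. The helper name is invented, and it assumes a pre-2.0 SQLAlchemy connection that accepts raw SQL strings, as was common in this era.
# Hypothetical sanity check, not part of the recorded migration:
# after upgrading to revision 4afe69ce3c38 this should return 0.
def count_orphaned_object_people(connection):
    result = connection.execute(
        """SELECT COUNT(*) FROM object_people
           WHERE personable_type IN ('InterviewResponse',
                                     'DocumentationResponse')"""
    )
    return result.scalar()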
61625103a328f3377afd5565d246d3bc43bee766
|
murano/db/migration/alembic_migrations/versions/016_increase_task_description_text_size.py
|
murano/db/migration/alembic_migrations/versions/016_increase_task_description_text_size.py
|
# Copyright 2016 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Increase the size of the text columns storing object model in the task table
Revision ID: 016
Revises: 015
Create Date: 2016-08-30 10:45:00
"""
# revision identifiers, used by Alembic.
revision = '016'
down_revision = '015'
from alembic import op
import sqlalchemy as sa
import sqlalchemy.dialects.mysql as sa_mysql
MYSQL_ENGINE = 'InnoDB'
MYSQL_CHARSET = 'utf8'
def upgrade():
engine = op.get_bind()
if engine.dialect.dialect_description.startswith('mysql'):
with op.batch_alter_table('task') as batch_op:
batch_op.alter_column('description',
type_=sa_mysql.LONGTEXT())
def downgrade():
engine = op.get_bind()
if engine.dialect.dialect_description.startswith('mysql'):
with op.batch_alter_table('task') as batch_op:
batch_op.alter_column('description',
type_=sa.TEXT())
|
Increase description column length in task table
|
Increase description column length in task table
Previously we changed description column in environment
table from TEXT to LONGTEXT but forgot to do the same
for a similar column in the task table
Change-Id: I3a2bd0204a9fac2d583d8742ca72868bd20c49b4
Closes-Bug: #1616997
|
Python
|
apache-2.0
|
DavidPurcell/murano_temp,DavidPurcell/murano_temp,openstack/murano,DavidPurcell/murano_temp,DavidPurcell/murano_temp,openstack/murano
|
Increase description column length in task table
Previously we changed description column in environment
table from TEXT to LONGTEXT but forgot to do the same
for a similar column in the task table
Change-Id: I3a2bd0204a9fac2d583d8742ca72868bd20c49b4
Closes-Bug: #1616997
|
# Copyright 2016 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Increase the size of the text columns storing object model in the task table
Revision ID: 016
Revises: 015
Create Date: 2016-08-30 10:45:00
"""
# revision identifiers, used by Alembic.
revision = '016'
down_revision = '015'
from alembic import op
import sqlalchemy as sa
import sqlalchemy.dialects.mysql as sa_mysql
MYSQL_ENGINE = 'InnoDB'
MYSQL_CHARSET = 'utf8'
def upgrade():
engine = op.get_bind()
if engine.dialect.dialect_description.startswith('mysql'):
with op.batch_alter_table('task') as batch_op:
batch_op.alter_column('description',
type_=sa_mysql.LONGTEXT())
def downgrade():
engine = op.get_bind()
if engine.dialect.dialect_description.startswith('mysql'):
with op.batch_alter_table('task') as batch_op:
batch_op.alter_column('description',
type_=sa.TEXT())
|
<commit_before><commit_msg>Increase description column length in task table
Previously we changed description column in environment
table from TEXT to LONGTEXT but forgot to do the same
for a similar column in the task table
Change-Id: I3a2bd0204a9fac2d583d8742ca72868bd20c49b4
Closes-Bug: #1616997<commit_after>
|
# Copyright 2016 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Increase the size of the text columns storing object model in the task table
Revision ID: 016
Revises: 015
Create Date: 2016-08-30 10:45:00
"""
# revision identifiers, used by Alembic.
revision = '016'
down_revision = '015'
from alembic import op
import sqlalchemy as sa
import sqlalchemy.dialects.mysql as sa_mysql
MYSQL_ENGINE = 'InnoDB'
MYSQL_CHARSET = 'utf8'
def upgrade():
engine = op.get_bind()
if engine.dialect.dialect_description.startswith('mysql'):
with op.batch_alter_table('task') as batch_op:
batch_op.alter_column('description',
type_=sa_mysql.LONGTEXT())
def downgrade():
engine = op.get_bind()
if engine.dialect.dialect_description.startswith('mysql'):
with op.batch_alter_table('task') as batch_op:
batch_op.alter_column('description',
type_=sa.TEXT())
|
Increase description column length in task table
Previously we changed description column in environment
table from TEXT to LONGTEXT but forgot to do the same
for a similar column in the task table
Change-Id: I3a2bd0204a9fac2d583d8742ca72868bd20c49b4
Closes-Bug: #1616997# Copyright 2016 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Increase the size of the text columns storing object model in the task table
Revision ID: 016
Revises: 015
Create Date: 2016-08-30 10:45:00
"""
# revision identifiers, used by Alembic.
revision = '016'
down_revision = '015'
from alembic import op
import sqlalchemy as sa
import sqlalchemy.dialects.mysql as sa_mysql
MYSQL_ENGINE = 'InnoDB'
MYSQL_CHARSET = 'utf8'
def upgrade():
engine = op.get_bind()
if engine.dialect.dialect_description.startswith('mysql'):
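# MySQL's TEXT column caps out at 64 KB, which can truncate large object models;
# LONGTEXT raises the limit to 4 GB. Other backends' TEXT has no practical cap.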
with op.batch_alter_table('task') as batch_op:
batch_op.alter_column('description',
type_=sa_mysql.LONGTEXT())
def downgrade():
engine = op.get_bind()
if engine.dialect.dialect_description.startswith('mysql'):
with op.batch_alter_table('task') as batch_op:
batch_op.alter_column('description',
type_=sa.TEXT())
|
<commit_before><commit_msg>Increase description column length in task table
Previously we changed description column in environment
table from TEXT to LONGTEXT but forgot to do the same
for similar column in the task table
Change-Id: I3a2bd0204a9fac2d583d8742ca72868bd20c49b4
Closes-Bug: #1616997<commit_after># Copyright 2016 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Increase the size of the text columns storing object model in the task table
Revision ID: 016
Revises: 015
Create Date: 2016-08-30 10:45:00
"""
# revision identifiers, used by Alembic.
revision = '016'
down_revision = '015'
from alembic import op
import sqlalchemy as sa
import sqlalchemy.dialects.mysql as sa_mysql
MYSQL_ENGINE = 'InnoDB'
MYSQL_CHARSET = 'utf8'
def upgrade():
engine = op.get_bind()
if engine.dialect.dialect_description.startswith('mysql'):
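# MySQL's TEXT column caps out at 64 KB, which can truncate large object models;
# LONGTEXT raises the limit to 4 GB. Other backends' TEXT has no practical cap.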
with op.batch_alter_table('task') as batch_op:
batch_op.alter_column('description',
type_=sa_mysql.LONGTEXT())
def downgrade():
engine = op.get_bind()
if engine.dialect.dialect_description.startswith('mysql'):
with op.batch_alter_table('task') as batch_op:
batch_op.alter_column('description',
type_=sa.TEXT())
|
|
8999ac7e9808a47a5f751edb2fed6d421357112c
|
aurorawatchuk/examples/get_status.py
|
aurorawatchuk/examples/get_status.py
|
#!/usr/bin/env python
import aurorawatchuk
import datetime
import logging
import os
import time
logger = logging.getLogger(__name__)
# Set logging level to debug so that HTTP GETs are indicated
logging.basicConfig(level=logging.DEBUG)
# If desired set user agent string. Must be set before first use.
aurorawatchuk.user_agent = 'Python aurorawatchuk module (%s)' % os.path.basename(__file__)
# Creating an AuroraWatchUK object. Its fields are accessors which return the latest status and other information.
aw = aurorawatchuk.AuroraWatchUK()
# Print the current status level, and when it was updated.
print('Current status level: ' + aw.status)
print('Current status updated: ' + aw.status_updated.strftime('%Y-%m-%d %H:%M:%S'))
# Print the color, meaning and description for each status level
print('Status descriptions:')
desc = aw.descriptions
for status_level in desc:
print(' Level: ' + status_level)
print(' Color: ' + desc[status_level]['color'])
print(' Description: ' + desc[status_level]['description'])
print(' Meaning: ' + desc[status_level]['meaning'])
print('----------------------------')
while True:
now = datetime.datetime.utcnow()
print('{now:%Y-%m-%d %H:%M:%S}: {status}'.format(now=now, status=aw.status))
time.sleep(10)
|
Add example to show basic usage
|
Add example to show basic usage
|
Python
|
mit
|
stevemarple/python-aurorawatchuk
|
Add example to show basic usage
|
#!/usr/bin/env python
import aurorawatchuk
import datetime
import logging
import os
import time
logger = logging.getLogger(__name__)
# Set logging level to debug so that HTTP GETs are indicated
logging.basicConfig(level=logging.DEBUG)
# If desired set user agent string. Must be set before first use.
aurorawatchuk.user_agent = 'Python aurorawatchuk module (%s)' % os.path.basename(__file__)
# Creating an AuroraWatchUK object. Its fields are accessors which return the latest status and other information.
aw = aurorawatchuk.AuroraWatchUK()
# Print the current status level, and when it was updated.
print('Current status level: ' + aw.status)
print('Current status updated: ' + aw.status_updated.strftime('%Y-%m-%d %H:%M:%S'))
# Print the color, meaning and description for each status level
print('Status descriptions:')
desc = aw.descriptions
for status_level in desc:
print(' Level: ' + status_level)
print(' Color: ' + desc[status_level]['color'])
print(' Description: ' + desc[status_level]['description'])
print(' Meaning: ' + desc[status_level]['meaning'])
print('----------------------------')
while True:
now = datetime.datetime.utcnow()
print('{now:%Y-%m-%d %H:%M:%S}: {status}'.format(now=now, status=aw.status))
time.sleep(10)
|
<commit_before><commit_msg>Add example to show basic usage<commit_after>
|
#!/usr/bin/env python
import aurorawatchuk
import datetime
import logging
import os
import time
logger = logging.getLogger(__name__)
# Set logging level to debug so that HTTP GETs are indicated
logging.basicConfig(level=logging.DEBUG)
# If desired set user agent string. Must be set before first use.
aurorawatchuk.user_agent = 'Python aurorawatchuk module (%s)' % os.path.basename(__file__)
# Creating an AuroraWatchUK object. Its fields are accessors which return the latest status and other information.
aw = aurorawatchuk.AuroraWatchUK()
# Print the current status level, and when it was updated.
print('Current status level: ' + aw.status)
print('Current status updated: ' + aw.status_updated.strftime('%Y-%m-%d %H:%M:%S'))
# Print the color, meaning and description for each status level
print('Status descriptions:')
desc = aw.descriptions
for status_level in desc:
print(' Level: ' + status_level)
print(' Color: ' + desc[status_level]['color'])
print(' Description: ' + desc[status_level]['description'])
print(' Meaning: ' + desc[status_level]['meaning'])
print('----------------------------')
while True:
now = datetime.datetime.utcnow()
print('{now:%Y-%m-%d %H:%M:%S}: {status}'.format(now=now, status=aw.status))
time.sleep(10)
|
Add example to show basic usage#!/usr/bin/env python
import aurorawatchuk
import datetime
import logging
import os
import time
logger = logging.getLogger(__name__)
# Set logging level to debug so that HTTP GETs are indicated
logging.basicConfig(level=logging.DEBUG)
# If desired set user agent string. Must be set before first use.
aurorawatchuk.user_agent = 'Python aurorawatchuk module (%s)' % os.path.basename(__file__)
# Creating an AuroraWatchUK object. Its fields are accessors which return the latest status and other information.
aw = aurorawatchuk.AuroraWatchUK()
# Print the current status level, and when it was updated.
print('Current status level: ' + aw.status)
print('Current status updated: ' + aw.status_updated.strftime('%Y-%m-%d %H:%M:%S'))
# Print the color, meaning and description for each status level
print('Status descriptions:')
desc = aw.descriptions
for status_level in desc:
print(' Level: ' + status_level)
print(' Color: ' + desc[status_level]['color'])
print(' Description: ' + desc[status_level]['description'])
print(' Meaning: ' + desc[status_level]['meaning'])
print('----------------------------')
while True:
now = datetime.datetime.utcnow()
print('{now:%Y-%m-%d %H:%M:%S}: {status}'.format(now=now, status=aw.status))
time.sleep(10)
|
<commit_before><commit_msg>Add example to show basic usage<commit_after>#!/usr/bin/env python
import aurorawatchuk
import datetime
import logging
import os
import time
logger = logging.getLogger(__name__)
# Set logging level to debug so that HTTP GETs are indicated
logging.basicConfig(level=logging.DEBUG)
# If desired set user agent string. Must be set before first use.
aurorawatchuk.user_agent = 'Python aurorawatchuk module (%s)' % os.path.basename(__file__)
# Creating an AuroraWatchUK object. Its fields are accessors which return the latest status and other information.
aw = aurorawatchuk.AuroraWatchUK()
# Print the current status level, and when it was updated.
print('Current status level: ' + aw.status)
print('Current status updated: ' + aw.status_updated.strftime('%Y-%m-%d %H:%M:%S'))
# Print the color, meaning and description for each status level
print('Status descriptions:')
desc = aw.descriptions
for status_level in desc:
print(' Level: ' + status_level)
print(' Color: ' + desc[status_level]['color'])
print(' Description: ' + desc[status_level]['description'])
print(' Meaning: ' + desc[status_level]['meaning'])
print('----------------------------')
while True:
now = datetime.datetime.utcnow()
print('{now:%Y-%m-%d %H:%M:%S}: {status}'.format(now=now, status=aw.status))
time.sleep(10)
|
|
3ca5a70286974457bce38a52cce81c8e076f0a0c
|
python/hashlib_md5_sha/md5sum_string.py
|
python/hashlib_md5_sha/md5sum_string.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import hashlib
def main():
"""Main function"""
# LONG VERSION
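# update() calls concatenate, so this is equivalent to md5("Hello world!") in one shot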
hash = hashlib.md5()
hash.update("Hello")
hash.update(" world!")
print hash.hexdigest() # str
# SHORT VERSION
print hashlib.md5("Hello world!").hexdigest()
if __name__ == '__main__': main()
|
Add a snippet in 'python/hashlib_md5_sha'.
|
Add a snippet in 'python/hashlib_md5_sha'.
|
Python
|
mit
|
jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets
|
Add a snippet in 'python/hashlib_md5_sha'.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import hashlib
def main():
"""Main function"""
# LONG VERSION
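# update() calls concatenate, so this is equivalent to md5("Hello world!") in one shot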
hash = hashlib.md5()
hash.update("Hello")
hash.update(" world!")
print hash.hexdigest() # str
# SHORT VERSION
print hashlib.md5("Hello world!").hexdigest()
if __name__ == '__main__': main()
|
<commit_before><commit_msg>Add a snippet in 'python/hashlib_md5_sha'.<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import hashlib
def main():
"""Main function"""
# LONG VERSION
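# update() calls concatenate, so this is equivalent to md5("Hello world!") in one shot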
hash = hashlib.md5()
hash.update("Hello")
hash.update(" world!")
print hash.hexdigest() # str
# SHORT VERSION
print hashlib.md5("Hello world!").hexdigest()
if __name__ == '__main__': main()
|
Add a snippet in 'python/hashlib_md5_sha'.#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import hashlib
def main():
"""Main function"""
# LONG VERSION
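# update() calls concatenate, so this is equivalent to md5("Hello world!") in one shot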
hash = hashlib.md5()
hash.update("Hello")
hash.update(" world!")
print hash.hexdigest() # str
# SHORT VERSION
print hashlib.md5("Hello world!").hexdigest()
if __name__ == '__main__': main()
|
<commit_before><commit_msg>Add a snippet in 'python/hashlib_md5_sha'.<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import hashlib
def main():
"""Main function"""
# LONG VERSION
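# update() calls concatenate, so this is equivalent to md5("Hello world!") in one shot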
hash = hashlib.md5()
hash.update("Hello")
hash.update(" world!")
print hash.hexdigest() # str
# SHORT VERSION
print hashlib.md5("Hello world!").hexdigest()
if __name__ == '__main__': main()
|
|
b5d8303f4def7de4814f73d02d5e143cbeb2baa7
|
code/ex3.2-aio_multiple_requests.py
|
code/ex3.2-aio_multiple_requests.py
|
import asyncio
import aiohttp
import time
URLS = [
'http://127.0.0.1:8000',
'http://127.0.0.1:8000',
'http://127.0.0.1:8000',
]
@asyncio.coroutine
def request_greetings():
response_tasks = yield from asyncio.wait([aiohttp.get(url) for url in URLS])
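# asyncio.wait returns a (done, pending) pair of task sets, hence the [0] to reach the finished tasks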
text_tasks = yield from asyncio.wait(
[task.result().text() for task in response_tasks[0]]
)
texts = [task.result() for task in text_tasks[0]]
return '\n'.join(texts)
loop = asyncio.get_event_loop()
t1 = time.time()
greetings = loop.run_until_complete(request_greetings())
print(time.time() - t1, 'seconds passed')
print(greetings)
loop.close()
|
Add ex3.2: multiple aiohttp requests
|
Add ex3.2: multiple aiohttp requests
|
Python
|
mit
|
MA3STR0/PythonAsyncWorkshop
|
Add ex3.2: multiple aiohttp requests
|
import asyncio
import aiohttp
import time
URLS = [
'http://127.0.0.1:8000',
'http://127.0.0.1:8000',
'http://127.0.0.1:8000',
]
@asyncio.coroutine
def request_greetings():
response_tasks = yield from asyncio.wait([aiohttp.get(url) for url in URLS])
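# asyncio.wait returns a (done, pending) pair of task sets, hence the [0] to reach the finished tasks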
text_tasks = yield from asyncio.wait(
[task.result().text() for task in response_tasks[0]]
)
texts = [task.result() for task in text_tasks[0]]
return '\n'.join(texts)
loop = asyncio.get_event_loop()
t1 = time.time()
greetings = loop.run_until_complete(request_greetings())
print(time.time() - t1, 'seconds passed')
print(greetings)
loop.close()
|
<commit_before><commit_msg>Add ex3.2: multiple aiohttp requests<commit_after>
|
import asyncio
import aiohttp
import time
URLS = [
'http://127.0.0.1:8000',
'http://127.0.0.1:8000',
'http://127.0.0.1:8000',
]
@asyncio.coroutine
def request_greetings():
response_tasks = yield from asyncio.wait([aiohttp.get(url) for url in URLS])
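# asyncio.wait returns a (done, pending) pair of task sets, hence the [0] to reach the finished tasks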
text_tasks = yield from asyncio.wait(
[task.result().text() for task in response_tasks[0]]
)
texts = [task.result() for task in text_tasks[0]]
return '\n'.join(texts)
loop = asyncio.get_event_loop()
t1 = time.time()
greetings = loop.run_until_complete(request_greetings())
print(time.time() - t1, 'seconds passed')
print(greetings)
loop.close()
|
Add ex3.2: multiple aiohttp requestsimport asyncio
import aiohttp
import time
URLS = [
'http://127.0.0.1:8000',
'http://127.0.0.1:8000',
'http://127.0.0.1:8000',
]
@asyncio.coroutine
def request_greetings():
response_tasks = yield from asyncio.wait([aiohttp.get(url) for url in URLS])
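# asyncio.wait returns a (done, pending) pair of task sets, hence the [0] to reach the finished tasks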
text_tasks = yield from asyncio.wait(
[task.result().text() for task in response_tasks[0]]
)
texts = [task.result() for task in text_tasks[0]]
return '\n'.join(texts)
loop = asyncio.get_event_loop()
t1 = time.time()
greetings = loop.run_until_complete(request_greetings())
print(time.time() - t1, 'seconds passed')
print(greetings)
loop.close()
|
<commit_before><commit_msg>Add ex3.2: multiple aiohttp requests<commit_after>import asyncio
import aiohttp
import time
URLS = [
'http://127.0.0.1:8000',
'http://127.0.0.1:8000',
'http://127.0.0.1:8000',
]
@asyncio.coroutine
def request_greetings():
response_tasks = yield from asyncio.wait([aiohttp.get(url) for url in URLS])
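# asyncio.wait returns a (done, pending) pair of task sets, hence the [0] to reach the finished tasks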
text_tasks = yield from asyncio.wait(
[task.result().text() for task in response_tasks[0]]
)
texts = [task.result() for task in text_tasks[0]]
return '\n'.join(texts)
loop = asyncio.get_event_loop()
t1 = time.time()
greetings = loop.run_until_complete(request_greetings())
print(time.time() - t1, 'seconds passed')
print(greetings)
loop.close()
|
|
d491c60d089e5c6872cb0e0abf8cdd0e0a4e3bee
|
migrations/0002_auto_20190327_1951.py
|
migrations/0002_auto_20190327_1951.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-03-27 19:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('downloads', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='flavor',
name='download_root',
field=models.FilePathField(allow_files=False, allow_folders=True, max_length=250, path='/Users/marc/Workspaces/Web/django-marcsupdater/marcsupdater/_static', recursive=True, verbose_name='Download root'),
),
]
|
Update migrations for Django 1.11 and Python 3
|
Update migrations for Django 1.11 and Python 3
|
Python
|
mit
|
mback2k/django-app-downloads,mback2k/django-app-downloads
|
Update migrations for Django 1.11 and Python 3
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-03-27 19:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('downloads', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='flavor',
name='download_root',
field=models.FilePathField(allow_files=False, allow_folders=True, max_length=250, path='/Users/marc/Workspaces/Web/django-marcsupdater/marcsupdater/_static', recursive=True, verbose_name='Download root'),
),
]
|
<commit_before><commit_msg>Update migrations for Django 1.11 and Python 3<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-03-27 19:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('downloads', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='flavor',
name='download_root',
field=models.FilePathField(allow_files=False, allow_folders=True, max_length=250, path='/Users/marc/Workspaces/Web/django-marcsupdater/marcsupdater/_static', recursive=True, verbose_name='Download root'),
),
]
|
Update migrations for Django 1.11 and Python 3# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-03-27 19:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('downloads', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='flavor',
name='download_root',
field=models.FilePathField(allow_files=False, allow_folders=True, max_length=250, path='/Users/marc/Workspaces/Web/django-marcsupdater/marcsupdater/_static', recursive=True, verbose_name='Download root'),
),
]
|
<commit_before><commit_msg>Update migrations for Django 1.11 and Python 3<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-03-27 19:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('downloads', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='flavor',
name='download_root',
field=models.FilePathField(allow_files=False, allow_folders=True, max_length=250, path='/Users/marc/Workspaces/Web/django-marcsupdater/marcsupdater/_static', recursive=True, verbose_name='Download root'),
),
]
|
|
586c83a2c72637af611b4a9855e7a057c48163e2
|
scripts/delete_sqs_queues.py
|
scripts/delete_sqs_queues.py
|
import boto3
import csv
from datetime import datetime
from pprint import pprint
import os
client = boto3.client('sqs', region_name=os.getenv('AWS_REGION'))
def _formatted_date_from_timestamp(timestamp):
return datetime.fromtimestamp(
int(timestamp)
).strftime('%Y-%m-%d %H:%M:%S')
def get_queues():
response = client.list_queues()
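# ListQueues returns at most 1,000 queue URLs; the 'QueueUrls' key is absent when the account has none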
queues = response['QueueUrls']
return queues
def get_queue_attributes(queue_name):
response = client.get_queue_attributes(
QueueUrl=queue_name,
AttributeNames=[
'All'
]
)
queue_attributes = response['Attributes']
return queue_attributes
def delete_queue(queue_name):
response = client.delete_queue(
QueueUrl=queue_name
)
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
print('Deleted queue successfully')
else:
print('Error occurred when attempting to delete queue')
pprint(response)
return response
def output_to_csv(queue_attributes):
csv_name = 'queues.csv'
with open(csv_name, 'w') as csvfile:
fieldnames = [
'Queue Name',
'Queue URL',
'Number of Messages',
'Number of Messages Delayed',
'Number of Messages Not Visible',
'Created'
]
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for queue_attr in queue_attributes:
queue_url = client.get_queue_url(
QueueName=queue_attr['QueueArn']
)['QueueUrl']
writer.writerow({
'Queue Name': queue_attr['QueueArn'],
'Queue URL': queue_url,
'Number of Messages': queue_attr['ApproximateNumberOfMessages'],
'Number of Messages Delayed': queue_attr['ApproximateNumberOfMessagesDelayed'],
'Number of Messages Not Visible': queue_attr['ApproximateNumberOfMessagesNotVisible'],
'Created': _formatted_date_from_timestamp(queue_attr['CreatedTimestamp'])
})
return csv_name
def read_from_csv(csv_name):
queue_urls = []
with open(csv_name, 'r') as csvfile:
next(csvfile)
rows = csv.reader(csvfile, delimiter=',')
for row in rows:
queue_urls.append(row[1])
return queue_urls
queues = get_queues()
for queue in queues:
delete_queue(queue)
|
Add a script to delete sqs queues: * Uses boto to retrieve/delete queues * Additional functions to output/read from csv
|
Add a script to delete sqs queues:
* Uses boto to retrieve/delete queues
* Additional functions to output/read from csv
|
Python
|
mit
|
alphagov/notifications-api,alphagov/notifications-api
|
Add a script to delete sqs queues:
* Uses boto to retrieve/delete queues
* Additional functions to output/read from csv
|
import boto3
import csv
from datetime import datetime
from pprint import pprint
import os
client = boto3.client('sqs', region_name=os.getenv('AWS_REGION'))
def _formatted_date_from_timestamp(timestamp):
return datetime.fromtimestamp(
int(timestamp)
).strftime('%Y-%m-%d %H:%M:%S')
def get_queues():
response = client.list_queues()
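# ListQueues returns at most 1,000 queue URLs; the 'QueueUrls' key is absent when the account has none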
queues = response['QueueUrls']
return queues
def get_queue_attributes(queue_name):
response = client.get_queue_attributes(
QueueUrl=queue_name,
AttributeNames=[
'All'
]
)
queue_attributes = response['Attributes']
return queue_attributes
def delete_queue(queue_name):
response = client.delete_queue(
QueueUrl=queue_name
)
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
print('Deleted queue successfully')
else:
print('Error occurred when attempting to delete queue')
pprint(response)
return response
def output_to_csv(queue_attributes):
csv_name = 'queues.csv'
with open(csv_name, 'w') as csvfile:
fieldnames = [
'Queue Name',
'Queue URL',
'Number of Messages',
'Number of Messages Delayed',
'Number of Messages Not Visible',
'Created'
]
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for queue_attr in queue_attributes:
queue_url = client.get_queue_url(
QueueName=queue_attr['QueueArn']
)['QueueUrl']
writer.writerow({
'Queue Name': queue_attr['QueueArn'],
'Queue URL': queue_url,
'Number of Messages': queue_attr['ApproximateNumberOfMessages'],
'Number of Messages Delayed': queue_attr['ApproximateNumberOfMessagesDelayed'],
'Number of Messages Not Visible': queue_attr['ApproximateNumberOfMessagesNotVisible'],
'Created': _formatted_date_from_timestamp(queue_attr['CreatedTimestamp'])
})
return csv_name
def read_from_csv(csv_name):
queue_urls = []
with open(csv_name, 'r') as csvfile:
next(csvfile)
rows = csv.reader(csvfile, delimiter=',')
for row in rows:
queue_urls.append(row[1])
return queue_urls
queues = get_queues()
for queue in queues:
delete_queue(queue)
|
<commit_before><commit_msg>Add a script to delete sqs queues:
* Uses boto to retrieve/delete queues
* Additional functions to output/read from csv<commit_after>
|
import boto3
import csv
from datetime import datetime
from pprint import pprint
import os
client = boto3.client('sqs', region_name=os.getenv('AWS_REGION'))
def _formatted_date_from_timestamp(timestamp):
return datetime.fromtimestamp(
int(timestamp)
).strftime('%Y-%m-%d %H:%M:%S')
def get_queues():
response = client.list_queues()
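# ListQueues returns at most 1,000 queue URLs; the 'QueueUrls' key is absent when the account has none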
queues = response['QueueUrls']
return queues
def get_queue_attributes(queue_name):
response = client.get_queue_attributes(
QueueUrl=queue_name,
AttributeNames=[
'All'
]
)
queue_attributes = response['Attributes']
return queue_attributes
def delete_queue(queue_name):
response = client.delete_queue(
QueueUrl=queue_name
)
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
print('Deleted queue successfully')
else:
print('Error occurred when attempting to delete queue')
pprint(response)
return response
def output_to_csv(queue_attributes):
csv_name = 'queues.csv'
with open(csv_name, 'w') as csvfile:
fieldnames = [
'Queue Name',
'Queue URL',
'Number of Messages',
'Number of Messages Delayed',
'Number of Messages Not Visible',
'Created'
]
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for queue_attr in queue_attributes:
queue_url = client.get_queue_url(
QueueName=queue_attr['QueueArn']
)['QueueUrl']
writer.writerow({
'Queue Name': queue_attr['QueueArn'],
'Queue URL': queue_url,
'Number of Messages': queue_attr['ApproximateNumberOfMessages'],
'Number of Messages Delayed': queue_attr['ApproximateNumberOfMessagesDelayed'],
'Number of Messages Not Visible': queue_attr['ApproximateNumberOfMessagesNotVisible'],
'Created': _formatted_date_from_timestamp(queue_attr['CreatedTimestamp'])
})
return csv_name
def read_from_csv(csv_name):
queue_urls = []
with open(csv_name, 'r') as csvfile:
next(csvfile)
rows = csv.reader(csvfile, delimiter=',')
for row in rows:
queue_urls.append(row[1])
return queue_urls
queues = get_queues()
for queue in queues:
delete_queue(queue)
|
Add a script to delete sqs queues:
* Uses boto to retrieve/delete queues
* Additional functions to output/read from csvimport boto3
import csv
from datetime import datetime
from pprint import pprint
import os
client = boto3.client('sqs', region_name=os.getenv('AWS_REGION'))
def _formatted_date_from_timestamp(timestamp):
return datetime.fromtimestamp(
int(timestamp)
).strftime('%Y-%m-%d %H:%M:%S')
def get_queues():
response = client.list_queues()
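# ListQueues returns at most 1,000 queue URLs; the 'QueueUrls' key is absent when the account has none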
queues = response['QueueUrls']
return queues
def get_queue_attributes(queue_name):
response = client.get_queue_attributes(
QueueUrl=queue_name,
AttributeNames=[
'All'
]
)
queue_attributes = response['Attributes']
return queue_attributes
def delete_queue(queue_name):
response = client.delete_queue(
QueueUrl=queue_name
)
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
print('Deleted queue successfully')
else:
print('Error occurred when attempting to delete queue')
pprint(response)
return response
def output_to_csv(queue_attributes):
csv_name = 'queues.csv'
with open(csv_name, 'w') as csvfile:
fieldnames = [
'Queue Name',
'Queue URL',
'Number of Messages',
'Number of Messages Delayed',
'Number of Messages Not Visible',
'Created'
]
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for queue_attr in queue_attributes:
queue_url = client.get_queue_url(
QueueName=queue_attr['QueueArn']
)['QueueUrl']
writer.writerow({
'Queue Name': queue_attr['QueueArn'],
'Queue URL': queue_url,
'Number of Messages': queue_attr['ApproximateNumberOfMessages'],
'Number of Messages Delayed': queue_attr['ApproximateNumberOfMessagesDelayed'],
'Number of Messages Not Visible': queue_attr['ApproximateNumberOfMessagesNotVisible'],
'Created': _formatted_date_from_timestamp(queue_attr['CreatedTimestamp'])
})
return csv_name
def read_from_csv(csv_name):
queue_urls = []
with open(csv_name, 'r') as csvfile:
next(csvfile)
rows = csv.reader(csvfile, delimiter=',')
for row in rows:
queue_urls.append(row[1])
return queue_urls
queues = get_queues()
for queue in queues:
delete_queue(queue)
|
<commit_before><commit_msg>Add a script to delete sqs queues:
* Uses boto to retrieve/delete queues
* Additional functions to output/read from csv<commit_after>import boto3
import csv
from datetime import datetime
from pprint import pprint
import os
client = boto3.client('sqs', region_name=os.getenv('AWS_REGION'))
def _formatted_date_from_timestamp(timestamp):
return datetime.fromtimestamp(
int(timestamp)
).strftime('%Y-%m-%d %H:%M:%S')
def get_queues():
response = client.list_queues()
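# ListQueues returns at most 1,000 queue URLs; the 'QueueUrls' key is absent when the account has none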
queues = response['QueueUrls']
return queues
def get_queue_attributes(queue_name):
response = client.get_queue_attributes(
QueueUrl=queue_name,
AttributeNames=[
'All'
]
)
queue_attributes = response['Attributes']
return queue_attributes
def delete_queue(queue_name):
response = client.delete_queue(
QueueUrl=queue_name
)
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
print('Deleted queue successfully')
else:
print('Error occurred when attempting to delete queue')
pprint(response)
return response
def output_to_csv(queue_attributes):
csv_name = 'queues.csv'
with open(csv_name, 'w') as csvfile:
fieldnames = [
'Queue Name',
'Queue URL',
'Number of Messages',
'Number of Messages Delayed',
'Number of Messages Not Visible',
'Created'
]
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for queue_attr in queue_attributes:
queue_url = client.get_queue_url(
QueueName=queue_attr['QueueArn']
)['QueueUrl']
writer.writerow({
'Queue Name': queue_attr['QueueArn'],
'Queue URL': queue_url,
'Number of Messages': queue_attr['ApproximateNumberOfMessages'],
'Number of Messages Delayed': queue_attr['ApproximateNumberOfMessagesDelayed'],
'Number of Messages Not Visible': queue_attr['ApproximateNumberOfMessagesNotVisible'],
'Created': _formatted_date_from_timestamp(queue_attr['CreatedTimestamp'])
})
return csv_name
def read_from_csv(csv_name):
queue_urls = []
with open(csv_name, 'r') as csvfile:
next(csvfile)
rows = csv.reader(csvfile, delimiter=',')
for row in rows:
queue_urls.append(row[1])
return queue_urls
queues = get_queues()
for queue in queues:
delete_queue(queue)
|
|
5921c252cc25eca91c637ed69e0a53719274b42c
|
day4-2.py
|
day4-2.py
|
"""Module to find real rooms in data and sum their sector IDs."""
def main():
"""Run the main function."""
with open('data/day4data.txt', 'r') as f:
dataList = f.readlines()
realRoomList = []
roomNames = []
sum = 0
for line in dataList:
data = line.strip("\n").split('-')
name = '-'.join(data[:-1])
id, checkSum = data[-1].strip(']').split('[')
nameCheck = {}
check = [item for sublist in name.split('-') for item in sublist]
for i in check:
if i not in nameCheck.keys():
nameCheck[i] = 1
else:
nameCheck[i] += 1
nameCheck = sorted(nameCheck.iteritems(), key=lambda x: x[0])
nameCheck = sorted(nameCheck, key=lambda x: x[1], reverse=True)
nameCheck = [i[0] for i in nameCheck]
if ''.join(nameCheck[0:len(checkSum)]) == checkSum:
realRoomList.append((name, id))
for room in realRoomList:
encriptedName = room[0]
decriptedName = []
for i in encriptedName:
decriptedName.append(decriptLetter(i, room[1]))
sum += int(room[1])
roomNames.append([''.join(decriptedName), room[1]])
for i in roomNames:
print('{}').format(i)
print(sum)
def decriptLetter(c, shift):
"""Decript a letter using the shift cipher."""
if c == '-':
return ' '
return chr((((ord(c) - 97) + int(shift)) % 26) + 97)
if __name__ == '__main__':
main()
|
Add day 4 part 2.
|
Add day 4 part 2.
|
Python
|
mit
|
SayWhat1/adventofcode2016
|
Add day 4 part 2.
|
"""Module to find real rooms in data and sum their sector IDs."""
def main():
"""Run the main function."""
with open('data/day4data.txt', 'r') as f:
dataList = f.readlines()
realRoomList = []
roomNames = []
sum = 0
for line in dataList:
data = line.strip("\n").split('-')
name = '-'.join(data[:-1])
id, checkSum = data[-1].strip(']').split('[')
nameCheck = {}
check = [item for sublist in name.split('-') for item in sublist]
for i in check:
if i not in nameCheck.keys():
nameCheck[i] = 1
else:
nameCheck[i] += 1
nameCheck = sorted(nameCheck.iteritems(), key=lambda x: x[0])
nameCheck = sorted(nameCheck, key=lambda x: x[1], reverse=True)
nameCheck = [i[0] for i in nameCheck]
if ''.join(nameCheck[0:len(checkSum)]) == checkSum:
realRoomList.append((name, id))
for room in realRoomList:
encriptedName = room[0]
decriptedName = []
for i in encriptedName:
decriptedName.append(decriptLetter(i, room[1]))
sum += int(room[1])
roomNames.append([''.join(decriptedName), room[1]])
for i in roomNames:
print('{}').format(i)
print(sum)
def decriptLetter(c, shift):
"""Decript a letter using the shift cipher."""
if c == '-':
return ' '
return chr((((ord(c) - 97) + int(shift)) % 26) + 97)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add day 4 part 2.<commit_after>
|
"""Module to find real rooms in data and sum their sector IDs."""
def main():
"""Run the main function."""
with open('data/day4data.txt', 'r') as f:
dataList = f.readlines()
realRoomList = []
roomNames = []
sum = 0
for line in dataList:
data = line.strip("\n").split('-')
name = '-'.join(data[:-1])
id, checkSum = data[-1].strip(']').split('[')
nameCheck = {}
check = [item for sublist in name.split('-') for item in sublist]
for i in check:
if i not in nameCheck.keys():
nameCheck[i] = 1
else:
nameCheck[i] += 1
nameCheck = sorted(nameCheck.iteritems(), key=lambda x: x[0])
nameCheck = sorted(nameCheck, key=lambda x: x[1], reverse=True)
nameCheck = [i[0] for i in nameCheck]
if ''.join(nameCheck[0:len(checkSum)]) == checkSum:
realRoomList.append((name, id))
for room in realRoomList:
encriptedName = room[0]
decriptedName = []
for i in encriptedName:
decriptedName.append(decriptLetter(i, room[1]))
sum += int(room[1])
roomNames.append([''.join(decriptedName), room[1]])
for i in roomNames:
print('{}').format(i)
print(sum)
def decriptLetter(c, shift):
"""Decript a letter using the shift cipher."""
if c == '-':
return ' '
return chr((((ord(c) - 97) + int(shift)) % 26) + 97)
if __name__ == '__main__':
main()
|
Add day 4 part 2."""Module to find real rooms in data and sum their sector IDs."""
def main():
"""Run the main function."""
with open('data/day4data.txt', 'r') as f:
dataList = f.readlines()
realRoomList = []
roomNames = []
sum = 0
for line in dataList:
data = line.strip("\n").split('-')
name = '-'.join(data[:-1])
id, checkSum = data[-1].strip(']').split('[')
nameCheck = {}
check = [item for sublist in name.split('-') for item in sublist]
for i in check:
if i not in nameCheck.keys():
nameCheck[i] = 1
else:
nameCheck[i] += 1
nameCheck = sorted(nameCheck.iteritems(), key=lambda x: x[0])
nameCheck = sorted(nameCheck, key=lambda x: x[1], reverse=True)
nameCheck = [i[0] for i in nameCheck]
if ''.join(nameCheck[0:len(checkSum)]) == checkSum:
realRoomList.append((name, id))
for room in realRoomList:
encriptedName = room[0]
decriptedName = []
for i in encriptedName:
decriptedName.append(decriptLetter(i, room[1]))
sum += int(room[1])
roomNames.append([''.join(decriptedName), room[1]])
for i in roomNames:
print('{}').format(i)
print(sum)
def decriptLetter(c, shift):
"""Decript a letter using the shift cipher."""
if c == '-':
return ' '
return chr((((ord(c) - 97) + int(shift)) % 26) + 97)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add day 4 part 2.<commit_after>"""Module to find real rooms in data and sum their sector IDs."""
def main():
"""Run the main function."""
with open('data/day4data.txt', 'r') as f:
dataList = f.readlines()
realRoomList = []
roomNames = []
sum = 0
for line in dataList:
data = line.strip("\n").split('-')
name = '-'.join(data[:-1])
id, checkSum = data[-1].strip(']').split('[')
nameCheck = {}
check = [item for sublist in name.split('-') for item in sublist]
for i in check:
if i not in nameCheck.keys():
nameCheck[i] = 1
else:
nameCheck[i] += 1
nameCheck = sorted(nameCheck.iteritems(), key=lambda x: x[0])
nameCheck = sorted(nameCheck, key=lambda x: x[1], reverse=True)
nameCheck = [i[0] for i in nameCheck]
if ''.join(nameCheck[0:len(checkSum)]) == checkSum:
realRoomList.append((name, id))
for room in realRoomList:
encriptedName = room[0]
decriptedName = []
for i in encriptedName:
decriptedName.append(decriptLetter(i, room[1]))
sum += int(room[1])
roomNames.append([''.join(decriptedName), room[1]])
for i in roomNames:
print('{}').format(i)
print(sum)
def decriptLetter(c, shift):
"""Decript a letter using the shift cipher."""
if c == '-':
return ' '
return chr((((ord(c) - 97) + int(shift)) % 26) + 97)
if __name__ == '__main__':
main()
|
|
ad74be9696d54d1c50f83d54791cff8e00db86da
|
python/takepicture.py
|
python/takepicture.py
|
import os
#from pathlib import Path
#import path
import picamera
from time import sleep
from threading import Thread
def takePictureGlobal(picturetaken_callback, peripheral_id):
camera = picamera.PiCamera()
try:
camera.sharpness = 0
camera.contrast = 0
camera.brightness = 70
camera.saturation = 0
camera.ISO = 0
camera.video_stabilization = False
camera.exposure_compensation = 0
camera.exposure_mode = 'off'
camera.meter_mode = 'average'
camera.awb_mode = 'auto'
camera.image_effect = 'none'
camera.color_effects = None
camera.rotation = 90
camera.hflip = False
camera.vflip = False
camera.crop = (0.0, 0.0, 1.0, 1.0)
camera.resolution = "HD"
PICTURE_PATH = os.path.expanduser("../../akpics")
if not os.path.exists(PICTURE_PATH):
os.makedirs(PICTURE_PATH)
if os.path.exists(PICTURE_PATH):
print('capturing image')
picturefile = PICTURE_PATH+ '/img' + '.jpg';
camera.capture(picturefile)
sleep(3);
if not (picturetaken_callback is None):
picturetaken_callback(picturefile, peripheral_id);
else:
print("picture path does not exist");
finally:
camera.close();
class AKCamera:
def takeOnePicture(self, picturetaken_callback, peripheral_id):
thread = Thread(target = takePictureGlobal, args = (picturetaken_callback, peripheral_id, ))
thread.start()
#thread.join()
#print "thread finished...exiting"
tp = AKCamera();
tp.takeOnePicture(None, None)
tp = None;
|
Add support of event and camera
|
Add support of event and camera
|
Python
|
mit
|
dmayrand/DivAE-client-beta,dmayrand/DivAE-client-beta,dmayrand/Clients-for-actionKATA,dmayrand/Clients-for-actionKATA
|
Add support of event and camera
|
import os
#from pathlib import Path
#import path
import picamera
from time import sleep
from threading import Thread
def takePictureGlobal(picturetaken_callback, peripheral_id):
camera = picamera.PiCamera()
try:
camera.sharpness = 0
camera.contrast = 0
camera.brightness = 70
camera.saturation = 0
camera.ISO = 0
camera.video_stabilization = False
camera.exposure_compensation = 0
camera.exposure_mode = 'off'
camera.meter_mode = 'average'
camera.awb_mode = 'auto'
camera.image_effect = 'none'
camera.color_effects = None
camera.rotation = 90
camera.hflip = False
camera.vflip = False
camera.crop = (0.0, 0.0, 1.0, 1.0)
camera.resolution = "HD"
PICTURE_PATH = os.path.expanduser("../../akpics")
if not os.path.exists(PICTURE_PATH):
os.makedirs(PICTURE_PATH)
if os.path.exists(PICTURE_PATH):
print('capturing image')
picturefile = PICTURE_PATH+ '/img' + '.jpg';
camera.capture(picturefile)
sleep(3);
if not (picturetaken_callback is None):
picturetaken_callback(picturefile, peripheral_id);
else:
print("picture path does not exist");
finally:
camera.close();
class AKCamera:
def takeOnePicture(self, picturetaken_callback, peripheral_id):
thread = Thread(target = takePictureGlobal, args = (picturetaken_callback, peripheral_id, ))
thread.start()
#thread.join()
#print "thread finished...exiting"
tp = AKCamera();
tp.takeOnePicture(None, None)
tp = None;
|
<commit_before><commit_msg>Add support of event and camera<commit_after>
|
import os
#from pathlib import Path
#import path
import picamera
from time import sleep
from threading import Thread
def takePictureGlobal(picturetaken_callback, peripheral_id):
camera = picamera.PiCamera()
try:
camera.sharpness = 0
camera.contrast = 0
camera.brightness = 70
camera.saturation = 0
camera.ISO = 0
camera.video_stabilization = False
camera.exposure_compensation = 0
camera.exposure_mode = 'off'
camera.meter_mode = 'average'
camera.awb_mode = 'auto'
camera.image_effect = 'none'
camera.color_effects = None
camera.rotation = 90
camera.hflip = False
camera.vflip = False
camera.crop = (0.0, 0.0, 1.0, 1.0)
camera.resolution = "HD"
PICTURE_PATH = os.path.expanduser("../../akpics")
if not os.path.exists(PICTURE_PATH):
os.makedirs(PICTURE_PATH)
if os.path.exists(PICTURE_PATH):
print('capturing image')
picturefile = PICTURE_PATH+ '/img' + '.jpg';
camera.capture(picturefile)
sleep(3);
if not (picturetaken_callback is None):
picturetaken_callback(picturefile, peripheral_id);
else:
print("picture path does not exist");
finally:
camera.close();
class AKCamera:
def takeOnePicture(self, picturetaken_callback, peripheral_id):
thread = Thread(target = takePictureGlobal, args = (picturetaken_callback, peripheral_id, ))
thread.start()
#thread.join()
#print "thread finished...exiting"
tp = AKCamera();
tp.takeOnePicture(None, None)
tp = None;
|
Add support of event and cameraimport os
#from pathlib import Path
#import path
import picamera
from time import sleep
from threading import Thread
def takePictureGlobal(picturetaken_callback, peripheral_id):
camera = picamera.PiCamera()
try:
camera.sharpness = 0
camera.contrast = 0
camera.brightness = 70
camera.saturation = 0
camera.ISO = 0
camera.video_stabilization = False
camera.exposure_compensation = 0
camera.exposure_mode = 'off'
camera.meter_mode = 'average'
camera.awb_mode = 'auto'
camera.image_effect = 'none'
camera.color_effects = None
camera.rotation = 90
camera.hflip = False
camera.vflip = False
camera.crop = (0.0, 0.0, 1.0, 1.0)
camera.resolution = "HD"
PICTURE_PATH = os.path.expanduser("../../akpics")
if not os.path.exists(PICTURE_PATH):
os.makedirs(PICTURE_PATH)
if os.path.exists(PICTURE_PATH):
print('capturing image')
picturefile = PICTURE_PATH+ '/img' + '.jpg';
camera.capture(picturefile)
sleep(3);
if not (picturetaken_callback is None):
picturetaken_callback(picturefile, peripheral_id);
else:
print("picture path does not exist");
finally:
camera.close();
class AKCamera:
def takeOnePicture(self, picturetaken_callback, peripheral_id):
thread = Thread(target = takePictureGlobal, args = (picturetaken_callback, peripheral_id, ))
thread.start()
#thread.join()
#print "thread finished...exiting"
tp = AKCamera();
tp.takeOnePicture(None, None)
tp = None;
|
<commit_before><commit_msg>Add support of event and camera<commit_after>import os
#from pathlib import Path
#import path
import picamera
from time import sleep
from threading import Thread
def takePictureGlobal(picturetaken_callback, peripheral_id):
camera = picamera.PiCamera()
try:
camera.sharpness = 0
camera.contrast = 0
camera.brightness = 70
camera.saturation = 0
camera.ISO = 0
camera.video_stabilization = False
camera.exposure_compensation = 0
camera.exposure_mode = 'off'
camera.meter_mode = 'average'
camera.awb_mode = 'auto'
camera.image_effect = 'none'
camera.color_effects = None
camera.rotation = 90
camera.hflip = False
camera.vflip = False
camera.crop = (0.0, 0.0, 1.0, 1.0)
camera.resolution = "HD"
PICTURE_PATH = os.path.expanduser("../../akpics")
if not os.path.exists(PICTURE_PATH):
os.makedirs(PICTURE_PATH)
if os.path.exists(PICTURE_PATH):
print('capturing image')
picturefile = PICTURE_PATH+ '/img' + '.jpg';
camera.capture(picturefile)
sleep(3);
if not (picturetaken_callback is None):
picturetaken_callback(picturefile, peripheral_id);
else:
print("picture path does not exist");
finally:
camera.close();
class AKCamera:
def takeOnePicture(self, picturetaken_callback, peripheral_id):
thread = Thread(target = takePictureGlobal, args = (picturetaken_callback, peripheral_id, ))
thread.start()
#thread.join()
#print "thread finished...exiting"
tp = AKCamera();
tp.takeOnePicture(None, None)
tp = None;
|
|
8eaad95972f5ae67d658ad017de466815376f9f0
|
remove_nth_node_from_end_of_list.py
|
remove_nth_node_from_end_of_list.py
|
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
# @return a ListNode
def removeNthFromEnd(self, head, n):
if n < 0 or head == None:
return head
d = {}
current = head
l = 0
while current:
d[l] = current
current = current.next
l += 1
if n == 0:
d[l - 1].next = None
return head
elif n > l:
return head
elif n == l:
if l == 1:
return None
else:
return d[1]
elif n < l:
idx = l - n
d[idx - 1].next = d[idx].next
return head
|
Remove Nth Node From End of List 58ms should be more optimal
|
Remove Nth Node From End of List 58ms
should be more optimal
|
Python
|
mit
|
zhiyelee/leetcode,zhiyelee/leetcode,zhiyelee/leetcode
|
Remove Nth Node From End of List 58ms
should be more optimal
|
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
# @return a ListNode
def removeNthFromEnd(self, head, n):
if n < 0 or head == None:
return head
d = {}
current = head
l = 0
while current:
d[l] = current
current = current.next
l += 1
if n == 0:
d[l - 1].next = None
return head
elif n > l:
return head
elif n == l:
if l == 1:
return None
else:
return d[1]
elif n < l:
idx = l - n
d[idx - 1].next = d[idx].next
return head
|
<commit_before><commit_msg>Remove Nth Node From End of List 58ms
should be more optimal<commit_after>
|
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
# @return a ListNode
def removeNthFromEnd(self, head, n):
if n < 0 or head == None:
return head
d = {}
current = head
l = 0
while current:
d[l] = current
current = current.next
l += 1
if n == 0:
d[l - 1].next = None
return head
elif n > l:
return head
elif n == l:
if l == 1:
return None
else:
return d[1]
elif n < l:
idx = l - n
d[idx - 1].next = d[idx].next
return head
|
Remove Nth Node From End of List 58ms
should be more optimal# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
# @return a ListNode
def removeNthFromEnd(self, head, n):
if n < 0 or head == None:
return head
d = {}
current = head
l = 0
while current:
d[l] = current
current = current.next
l += 1
if n == 0:
d[l - 1].next = None
return head
elif n > l:
return head
elif n == l:
if l == 1:
return None
else:
return d[1]
elif n < l:
idx = l - n
d[idx - 1].next = d[idx].next
return head
|
<commit_before><commit_msg>Remove Nth Node From End of List 58ms
should be more optimal<commit_after># Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
# @return a ListNode
def removeNthFromEnd(self, head, n):
if n < 0 or head == None:
return head
d = {}
current = head
l = 0
while current:
d[l] = current
current = current.next
l += 1
if n == 0:
d[l - 1].next = None
return head
elif n > l:
return head
elif n == l:
if l == 1:
return None
else:
return d[1]
elif n < l:
idx = l - n
d[idx - 1].next = d[idx].next
return head
|
|
c935f85d62d0375d07685eeff14859e5a5a9b161
|
tests/test_spotifile.py
|
tests/test_spotifile.py
|
import unittest
import os
from subprocess import check_call
from sh import ls
mountpoint = '/tmp/spotifile_test_mount'
class SpotifileTestClass(unittest.TestCase):
@classmethod
def setUpClass(cls):
if not os.path.exists(mountpoint):
os.mkdir(mountpoint)
@classmethod
def tearDownClass(cls):
if os.path.exists(mountpoint):
os.rmdir(mountpoint)
def setUp(self):
check_call(['./spotifile', mountpoint])
def tearDown(self):
check_call(['fusermount', '-u', mountpoint])
def test_ls(self):
assert 'connection' in ls(mountpoint)
|
Add simple test to check that we populate the root as expected
|
tests: Add simple test to check that we populate the root as expected
Signed-off-by: Anton Lofgren <64e2cb85477d4abaea4b354af986fde12a3e64f3@op5.com>
|
Python
|
bsd-3-clause
|
catharsis/spotifile,raoulh/spotifile,chelmertz/spotifile,raoulh/spotifile,raoulh/spotifile,chelmertz/spotifile,chelmertz/spotifile,catharsis/spotifile,catharsis/spotifile
|
tests: Add simple test to check that we populate the root as expected
Signed-off-by: Anton Lofgren <64e2cb85477d4abaea4b354af986fde12a3e64f3@op5.com>
|
import unittest
import os
from subprocess import check_call
from sh import ls
mountpoint = '/tmp/spotifile_test_mount'
class SpotifileTestClass(unittest.TestCase):
@classmethod
def setUpClass(cls):
if not os.path.exists(mountpoint):
os.mkdir(mountpoint)
@classmethod
def tearDownClass(cls):
if os.path.exists(mountpoint):
os.rmdir(mountpoint)
def setUp(self):
check_call(['./spotifile', mountpoint])
def tearDown(self):
check_call(['fusermount', '-u', mountpoint])
def test_ls(self):
assert 'connection' in ls(mountpoint)
|
<commit_before><commit_msg>tests: Add simple test to check that we populate the root as expected
Signed-off-by: Anton Lofgren <64e2cb85477d4abaea4b354af986fde12a3e64f3@op5.com><commit_after>
|
import unittest
import os
from subprocess import check_call
from sh import ls
mountpoint = '/tmp/spotifile_test_mount'
class SpotifileTestClass(unittest.TestCase):
@classmethod
def setUpClass(cls):
if not os.path.exists(mountpoint):
os.mkdir(mountpoint)
@classmethod
def tearDownClass(cls):
if os.path.exists(mountpoint):
os.rmdir(mountpoint)
def setUp(self):
check_call(['./spotifile', mountpoint])
def tearDown(self):
check_call(['fusermount', '-u', mountpoint])
def test_ls(self):
assert 'connection' in ls(mountpoint)
|
tests: Add simple test to check that we populate the root as expected
Signed-off-by: Anton Lofgren <64e2cb85477d4abaea4b354af986fde12a3e64f3@op5.com>import unittest
import os
from subprocess import check_call
from sh import ls
mountpoint = '/tmp/spotifile_test_mount'
class SpotifileTestClass(unittest.TestCase):
@classmethod
def setUpClass(cls):
if not os.path.exists(mountpoint):
os.mkdir(mountpoint)
@classmethod
def tearDownClass(cls):
if os.path.exists(mountpoint):
os.rmdir(mountpoint)
def setUp(self):
check_call(['./spotifile', mountpoint])
def tearDown(self):
check_call(['fusermount', '-u', mountpoint])
def test_ls(self):
assert 'connection' in ls(mountpoint)
|
<commit_before><commit_msg>tests: Add simple test to check that we populate the root as expected
Signed-off-by: Anton Lofgren <64e2cb85477d4abaea4b354af986fde12a3e64f3@op5.com><commit_after>import unittest
import os
from subprocess import check_call
from sh import ls
mountpoint = '/tmp/spotifile_test_mount'
class SpotifileTestClass(unittest.TestCase):
@classmethod
def setUpClass(cls):
if not os.path.exists(mountpoint):
os.mkdir(mountpoint)
@classmethod
def tearDownClass(cls):
if os.path.exists(mountpoint):
os.rmdir(mountpoint)
def setUp(self):
check_call(['./spotifile', mountpoint])
def tearDown(self):
check_call(['fusermount', '-u', mountpoint])
def test_ls(self):
assert 'connection' in ls(mountpoint)
|
|
959e30bed3dcaee03df929f8ec2848d07c745dc9
|
tests/webcam_read_qr.py
|
tests/webcam_read_qr.py
|
#!/usr/bin/env python
"""
This module sets up a video stream from internal or connected webcam using Gstreamer.
You can then take snapshots.
import qrtools
qr = qrtools.QR()
qr.decode("cam.jpg")
print qr.data
"""
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('Gst', '1.0')
from gi.repository import Gtk as gtk
from gi.repository import Gdk
from gi.repository import Gst as gst
from gi.repository import GdkPixbuf
from avocado import Test
from os.path import exists, relpath
import qrtools
import time
#import pyqrcode
class WebcamReadQR(Test):
def setUp(self):
# if not exists('/dev/video0'):
# self.skip("No webcam detected: /dev/video0 cannot be found");
self.device = '/dev/video0'
Gdk.threads_init()
gtk.main()
self.take_snapshot()
def test(self):
self.create_video_pipeline()
def create_video_pipeline(self):
gst.init([])
#v4l2src
self.video_player = gst.parse_launch("videotestsrc ! jpegenc ! filesink location=cam.jpg")
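# videotestsrc emits a synthetic test pattern; swap in v4l2src to capture from the actual webcam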
self.video_player.set_state(gst.State.PLAYING)
bus = self.video_player.get_bus()
bus.add_signal_watch()
bus.connect("message", self.on_message)
bus.enable_sync_message_emission()
bus.connect("sync-message::element", self.on_sync_message)
def on_message(self, bus, message):
t = message.type
if t == gst.MessageType.EOS:
self.exit()
elif t == gst.MessageType.ERROR:
self.exit()
self.fail("Error {0}".format(message.parse_error()))
def on_sync_message(self, bus, message):
if message.structure is None:
return
message_name = message.structure.get_name()
def exit(self):
self.video_player.set_state(gst.State.NULL)
gtk.main_quit()
def take_snapshot(self):
#TODO:fill this in
pass
|
Put gst code into Avocado test format. Needs to be edited to take a snapshot and read the qr code.
|
Put gst code into Avocado test format. Needs to be edited to take a snapshot and read the qr code.
|
Python
|
mit
|
daveol/Fedora-Test-Laptop,daveol/Fedora-Test-Laptop
|
Put gst code into Avocado test format. Needs to be edited to take a snapshot and read the qr code.
|
#!/usr/bin/env python
"""
This module sets up a video stream from an internal or connected webcam using Gstreamer.
You can then take snapshots.
import qrtools
qr = qrtools.QR()
qr.decode("cam.jpg")
print qr.data
"""
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('Gst', '1.0')
from gi.repository import Gtk as gtk
from gi.repository import Gdk
from gi.repository import Gst as gst
from gi.repository import GdkPixbuf
from avocado import Test
from os.path import exists, relpath
import qrtools
import time
#import pyqrcode
class WebcamReadQR(Test):
def setUp(self):
# if not exists('/dev/video0'):
# self.skip("No webcam detected: /dev/video0 cannot be found");
self.device = '/dev/video0'
Gdk.threads_init()
gtk.main()
self.take_snapshot()
def test(self):
self.create_video_pipeline()
def create_video_pipeline(self):
gst.init([])
#v4l2src
self.video_player = gst.parse_launch("videotestsrc ! jpegenc ! filesink location=cam.jpg")
self.video_player.set_state(gst.State.PLAYING)
bus = self.video_player.get_bus()
bus.add_signal_watch()
bus.connect("message", self.on_message)
bus.enable_sync_message_emission()
bus.connect("sync-message::element", self.on_sync_message)
def on_message(self, bus, message):
t = message.type
if t == gst.MessageType.EOS:
self.exit()
elif t == gst.MessageType.ERROR:
self.exit()
self.fail("Error {0}".format(message.parse_error()))
def on_sync_message(self, bus, message):
if message.structure is None:
return
message_name = message.structure.get_name()
def exit(self):
self.video_player.set_state(gst.State.NULL)
gtk.main_quit()
def take_snapshot(self):
#TODO:fill this in
pass
|
<commit_before><commit_msg>Put gst code into Avocado test format. Needs to be edited to take a snapshot and read the qr code.<commit_after>
|
#!/usr/bin/env python
"""
This module sets up a video stream from an internal or connected webcam using Gstreamer.
You can then take snapshots.
import qrtools
qr = qrtools.QR()
qr.decode("cam.jpg")
print qr.data
"""
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('Gst', '1.0')
from gi.repository import Gtk as gtk
from gi.repository import Gdk
from gi.repository import Gst as gst
from gi.repository import GdkPixbuf
from avocado import Test
from os.path import exists, relpath
import qrtools
import time
#import pyqrcode
class WebcamReadQR(Test):
def setUp(self):
# if not exists('/dev/video0'):
# self.skip("No webcam detected: /dev/video0 cannot be found");
self.device = '/dev/video0'
Gdk.threads_init()
gtk.main()
self.take_snapshot()
def test(self):
self.create_video_pipeline()
def create_video_pipeline(self):
gst.init([])
#v4l2src
self.video_player = gst.parse_launch("videotestsrc ! jpegenc ! filesink location=cam.jpg")
self.video_player.set_state(gst.State.PLAYING)
bus = self.video_player.get_bus()
bus.add_signal_watch()
bus.connect("message", self.on_message)
bus.enable_sync_message_emission()
bus.connect("sync-message::element", self.on_sync_message)
def on_message(self, bus, message):
t = message.type
if t == gst.MessageType.EOS:
self.exit()
elif t == gst.MessageType.ERROR:
self.exit()
self.fail("Error {0}".format(message.parse_error()))
def on_sync_message(self, bus, message):
if message.structure is None:
return
message_name = message.structure.get_name()
def exit(self):
self.video_player.set_state(gst.State.NULL)
gtk.main_quit()
def take_snapshot(self):
#TODO:fill this in
pass
|
Put gst code into Avocado test format. Needs to be edited to take a snapshot and read the qr code.#!/usr/bin/env python
"""
This module sets up a video stream from an internal or connected webcam using Gstreamer.
You can then take snapshots.
import qrtools
qr = qrtools.QR()
qr.decode("cam.jpg")
print qr.data
"""
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('Gst', '1.0')
from gi.repository import Gtk as gtk
from gi.repository import Gdk
from gi.repository import Gst as gst
from gi.repository import GdkPixbuf
from avocado import Test
from os.path import exists, relpath
import qrtools
import time
#import pyqrcode
class WebcamReadQR(Test):
def setUp(self):
# if not exists('/dev/video0'):
# self.skip("No webcam detected: /dev/video0 cannot be found");
self.device = '/dev/video0'
Gdk.threads_init()
gtk.main()
self.take_snapshot()
def test(self):
self.create_video_pipeline()
def create_video_pipeline(self):
gst.init([])
#v4l2src
self.video_player = gst.parse_launch("videotestsrc ! jpegenc ! filesink location=cam.jpg")
self.video_player.set_state(gst.State.PLAYING)
bus = self.video_player.get_bus()
bus.add_signal_watch()
bus.connect("message", self.on_message)
bus.enable_sync_message_emission()
bus.connect("sync-message::element", self.on_sync_message)
def on_message(self, bus, message):
t = message.type
if t == gst.MessageType.EOS:
self.exit()
elif t == gst.MessageType.ERROR:
self.exit()
self.fail("Error {0}".format(message.parse_error()))
def on_sync_message(self, bus, message):
if message.structure is None:
return
message_name = message.structure.get_name()
def exit(self):
self.video_player.set_state(gst.State.NULL)
gtk.main_quit()
def take_snapshot(self):
#TODO:fill this in
pass
|
<commit_before><commit_msg>Put gst code into Avocado test format. Needs to be edited to take a snapshot and read the qr code.<commit_after>#!/usr/bin/env python
"""
This module sets up a video stream from an internal or connected webcam using Gstreamer.
You can then take snapshots.
import qrtools
qr = qrtools.QR()
qr.decode("cam.jpg")
print qr.data
"""
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('Gst', '1.0')
from gi.repository import Gtk as gtk
from gi.repository import Gdk
from gi.repository import Gst as gst
from gi.repository import GdkPixbuf
from avocado import Test
from os.path import exists, relpath
import qrtools
import time
#import pyqrcode
class WebcamReadQR(Test):
def setUp(self):
# if not exists('/dev/video0'):
# self.skip("No webcam detected: /dev/video0 cannot be found");
self.device = '/dev/video0'
Gdk.threads_init()
gtk.main()
self.take_snapshot()
def test(self):
self.create_video_pipeline()
def create_video_pipeline(self):
gst.init([])
#v4l2src
self.video_player = gst.parse_launch("videotestsrc ! jpegenc ! filesink location=cam.jpg")
self.video_player.set_state(gst.State.PLAYING)
bus = self.video_player.get_bus()
bus.add_signal_watch()
bus.connect("message", self.on_message)
bus.enable_sync_message_emission()
bus.connect("sync-message::element", self.on_sync_message)
def on_message(self, bus, message):
t = message.type
if t == gst.MessageType.EOS:
self.exit()
elif t == gst.MessageType.ERROR:
self.exit()
self.fail("Error {0}".format(message.parse_error()))
def on_sync_message(self, bus, message):
if message.structure is None:
return
message_name = message.structure.get_name()
def exit(self):
self.video_player.set_state(gst.State.NULL)
gtk.main_quit()
def take_snapshot(self):
#TODO:fill this in
pass
|
|
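The webcam entry above stops at the take_snapshot stub, but its docstring already names the intended decode path via qrtools. A minimal sketch of that missing step, assuming the pipeline has written cam.jpg and python-qrtools is installed (read_qr_from_snapshot is a hypothetical helper name):

import qrtools

def read_qr_from_snapshot(path='cam.jpg'):
    qr = qrtools.QR()
    if qr.decode(path):  # decode() returns True when a QR code was found
        return qr.data   # the decoded payload, as shown in the module docstring
    return None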
84cf7b425c58a6c1b9c08fb1605cd3b3dfd24b21
|
tests/test_utils.py
|
tests/test_utils.py
|
from edx_shopify.utils import hmac_is_valid
from django.test import TestCase
class SignatureVerificationTest(TestCase):
def test_hmac_is_valid(self):
correct_hmac = [
('hello', 'world', '8ayXAutfryPKKRpNxG3t3u4qeMza8KQSvtdxTP/7HMQ='),
('bye', 'bye', 'HHfaL+C4HxPTexmlKO9pwEHuAXkErAz85APGPOgvBVU='),
('foo', 'bar', '+TILrwJJFp5zhQzWFW3tAQbiu2rYyrAbe7vr5tEGUxc=')
]
incorrect_hmac = [
('hello', 'world', '+TILrwJJFp5zhQzWFW3tAQbiu2rYyrAbe7vr5tEGUxc='),
('bye', 'bye', '8ayXAutfryPKKRpNxG3t3u4qeMza8KQSvtdxTP/7HMQ='),
('foo', 'bar', 'HHfaL+C4HxPTexmlKO9pwEHuAXkErAz85APGPOgvBVU=')
]
for triplet in correct_hmac:
self.assertTrue(hmac_is_valid(*triplet))
for triplet in incorrect_hmac:
self.assertFalse(hmac_is_valid(*triplet))
|
Add a test for signature checking
|
Add a test for signature checking
|
Python
|
agpl-3.0
|
hastexo/edx-shopify,fghaas/edx-shopify
|
Add a test for signature checking
|
from edx_shopify.utils import hmac_is_valid
from django.test import TestCase
class SignatureVerificationTest(TestCase):
def test_hmac_is_valid(self):
correct_hmac = [
('hello', 'world', '8ayXAutfryPKKRpNxG3t3u4qeMza8KQSvtdxTP/7HMQ='),
('bye', 'bye', 'HHfaL+C4HxPTexmlKO9pwEHuAXkErAz85APGPOgvBVU='),
('foo', 'bar', '+TILrwJJFp5zhQzWFW3tAQbiu2rYyrAbe7vr5tEGUxc=')
]
incorrect_hmac = [
('hello', 'world', '+TILrwJJFp5zhQzWFW3tAQbiu2rYyrAbe7vr5tEGUxc='),
('bye', 'bye', '8ayXAutfryPKKRpNxG3t3u4qeMza8KQSvtdxTP/7HMQ='),
('foo', 'bar', 'HHfaL+C4HxPTexmlKO9pwEHuAXkErAz85APGPOgvBVU=')
]
for triplet in correct_hmac:
self.assertTrue(hmac_is_valid(*triplet))
for triplet in incorrect_hmac:
self.assertFalse(hmac_is_valid(*triplet))
|
<commit_before><commit_msg>Add a test for signature checking<commit_after>
|
from edx_shopify.utils import hmac_is_valid
from django.test import TestCase
class SignatureVerificationTest(TestCase):
def test_hmac_is_valid(self):
correct_hmac = [
('hello', 'world', '8ayXAutfryPKKRpNxG3t3u4qeMza8KQSvtdxTP/7HMQ='),
('bye', 'bye', 'HHfaL+C4HxPTexmlKO9pwEHuAXkErAz85APGPOgvBVU='),
('foo', 'bar', '+TILrwJJFp5zhQzWFW3tAQbiu2rYyrAbe7vr5tEGUxc=')
]
incorrect_hmac = [
('hello', 'world', '+TILrwJJFp5zhQzWFW3tAQbiu2rYyrAbe7vr5tEGUxc='),
('bye', 'bye', '8ayXAutfryPKKRpNxG3t3u4qeMza8KQSvtdxTP/7HMQ='),
('foo', 'bar', 'HHfaL+C4HxPTexmlKO9pwEHuAXkErAz85APGPOgvBVU=')
]
for triplet in correct_hmac:
self.assertTrue(hmac_is_valid(*triplet))
for triplet in incorrect_hmac:
self.assertFalse(hmac_is_valid(*triplet))
|
Add a test for signature checkingfrom edx_shopify.utils import hmac_is_valid
from django.test import TestCase
class SignatureVerificationTest(TestCase):
def test_hmac_is_valid(self):
correct_hmac = [
('hello', 'world', '8ayXAutfryPKKRpNxG3t3u4qeMza8KQSvtdxTP/7HMQ='),
('bye', 'bye', 'HHfaL+C4HxPTexmlKO9pwEHuAXkErAz85APGPOgvBVU='),
('foo', 'bar', '+TILrwJJFp5zhQzWFW3tAQbiu2rYyrAbe7vr5tEGUxc=')
]
incorrect_hmac = [
('hello', 'world', '+TILrwJJFp5zhQzWFW3tAQbiu2rYyrAbe7vr5tEGUxc='),
('bye', 'bye', '8ayXAutfryPKKRpNxG3t3u4qeMza8KQSvtdxTP/7HMQ='),
('foo', 'bar', 'HHfaL+C4HxPTexmlKO9pwEHuAXkErAz85APGPOgvBVU=')
]
for triplet in correct_hmac:
self.assertTrue(hmac_is_valid(*triplet))
for triplet in incorrect_hmac:
self.assertFalse(hmac_is_valid(*triplet))
|
<commit_before><commit_msg>Add a test for signature checking<commit_after>from edx_shopify.utils import hmac_is_valid
from django.test import TestCase
class SignatureVerificationTest(TestCase):
def test_hmac_is_valid(self):
correct_hmac = [
('hello', 'world', '8ayXAutfryPKKRpNxG3t3u4qeMza8KQSvtdxTP/7HMQ='),
('bye', 'bye', 'HHfaL+C4HxPTexmlKO9pwEHuAXkErAz85APGPOgvBVU='),
('foo', 'bar', '+TILrwJJFp5zhQzWFW3tAQbiu2rYyrAbe7vr5tEGUxc=')
]
incorrect_hmac = [
('hello', 'world', '+TILrwJJFp5zhQzWFW3tAQbiu2rYyrAbe7vr5tEGUxc='),
('bye', 'bye', '8ayXAutfryPKKRpNxG3t3u4qeMza8KQSvtdxTP/7HMQ='),
('foo', 'bar', 'HHfaL+C4HxPTexmlKO9pwEHuAXkErAz85APGPOgvBVU=')
]
for triplet in correct_hmac:
self.assertTrue(hmac_is_valid(*triplet))
for triplet in incorrect_hmac:
self.assertFalse(hmac_is_valid(*triplet))
|
|
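The triplets in the test above have the shape (key, body, digest), which pins down the signature of hmac_is_valid even though its implementation is not part of this record. A hedged sketch consistent with those test vectors, assuming the Shopify webhook scheme (HMAC-SHA256 over the raw body, base64-encoded):

import base64
import hashlib
import hmac

def hmac_is_valid(key, body, hmac_to_verify):
    digest = hmac.new(key.encode('utf-8'), body.encode('utf-8'),
                      hashlib.sha256).digest()
    computed = base64.b64encode(digest).decode('utf-8')
    # compare_digest avoids leaking timing information to an attacker
    return hmac.compare_digest(computed, hmac_to_verify)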
8a79cf5235bebd514b8eb0a788e4b491e01feea9
|
verdenskart/src/utils/data_fetcher.py
|
verdenskart/src/utils/data_fetcher.py
|
import logging
import requests
def get_world_topology():
ans = requests.get("https://restcountries.eu/rest/v2/all?fields=name;capital;region;subregion;latlng;alpha3Code;borders")
if ans.status_code != 200:
raise RuntimeError("Unable to get country data")
countries = {country.pop("alpha3Code"): country for country in ans.json()}
logging.info("Done getting raw topological data")
for country in countries:
countries[country]["borders"] = [countries[name] for name in countries[country]["borders"]]
return countries
|
Read country topology from restcountries.eu
|
Read country topology from restcountries.eu
|
Python
|
bsd-2-clause
|
expertanalytics/fagkveld
|
Read country topology from restcountries.eu
|
import logging
import requests
def get_world_topology():
ans = requests.get("https://restcountries.eu/rest/v2/all?fields=name;capital;region;subregion;latlng;alpha3Code;borders")
if ans.status_code != 200:
raise RuntimeError("Unable to get country data")
countries = {country.pop("alpha3Code"): country for country in ans.json()}
logging.info("Done getting raw topological data")
for country in countries:
countries[country]["borders"] = [countries[name] for name in countries[country]["borders"]]
return countries
|
<commit_before><commit_msg>Read country topology from restcountries.eu<commit_after>
|
import logging
import requests
def get_world_topology():
ans = requests.get("https://restcountries.eu/rest/v2/all?fields=name;capital;region;subregion;latlng;alpha3Code;borders")
if ans.status_code != 200:
raise RuntimeError("Unable to get country data")
countries = {country.pop("alpha3Code"): country for country in ans.json()}
logging.info("Done getting raw topological data")
for country in countries:
countries[country]["borders"] = [countries[name] for name in countries[country]["borders"]]
return countries
|
Read country topology from restcountries.euimport logging
import requests
def get_world_topology():
ans = requests.get("https://restcountries.eu/rest/v2/all?fields=name;capital;region;subregion;latlng;alpha3Code;borders")
if ans.status_code != 200:
raise RuntimeError("Unable to get country data")
countries = {country.pop("alpha3Code"): country for country in ans.json()}
logging.info("Done getting raw topological data")
for country in countries:
countries[country]["borders"] = [countries[name] for name in countries[country]["borders"]]
return countries
|
<commit_before><commit_msg>Read country topology from restcountries.eu<commit_after>import logging
import requests
def get_world_topology():
ans = requests.get("https://restcountries.eu/rest/v2/all?fields=name;capital;region;subregion;latlng;alpha3Code;borders")
if ans.status_code != 200:
raise RuntimeError("Unable to get country data")
countries = {country.pop("alpha3Code"): country for country in ans.json()}
logging.info("Done getting raw topological data")
for country in countries:
countries[country]["borders"] = [countries[name] for name in countries[country]["borders"]]
return countries
|
|
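One property of get_world_topology() above is easy to miss: the final loop replaces each ISO-code string in "borders" with the neighbouring country dict itself, so the returned mapping is a cyclic graph. That is convenient for traversal, but json.dumps() on the result raises ValueError (circular reference). A usage sketch under those assumptions:

topology = get_world_topology()
norway = next(c for c in topology.values() if c['name'] == 'Norway')
for neighbour in norway['borders']:
    # each neighbour is a full country dict, not an alpha-3 code
    print(neighbour['name'], neighbour['capital'])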
dc3bc21e8cf7c0a0244dd7d2584a206656bc8444
|
tests/Settings/TestContainerRegistry.py
|
tests/Settings/TestContainerRegistry.py
|
# Copyright (c) 2016 Ultimaker B.V.
# Uranium is released under the terms of the AGPLv3 or higher.
import pytest
import os.path
import UM.Settings
from UM.Resources import Resources
from UM.MimeTypeDatabase import MimeType, MimeTypeDatabase
@pytest.fixture
def container_registry():
Resources.addSearchPath(os.path.dirname(os.path.abspath(__file__)))
UM.Settings.ContainerRegistry._ContainerRegistry__instance = None # Reset the private instance variable every time
return UM.Settings.ContainerRegistry.getInstance()
def test_load(container_registry):
container_registry.load()
definitions = container_registry.findDefinitionContainers({ "id": "single_setting" })
assert len(definitions) == 1
definition = definitions[0]
assert definition.getId() == "single_setting"
definitions = container_registry.findDefinitionContainers({ "author": "Ultimaker" })
assert len(definitions) == 3
ids_found = []
for definition in definitions:
ids_found.append(definition.getId())
assert "metadata" in ids_found
assert "single_setting" in ids_found
assert "inherits" in ids_found
|
Add a test for ContainerRegistry
|
Add a test for ContainerRegistry
|
Python
|
agpl-3.0
|
onitake/Uranium,onitake/Uranium
|
Add a test for ContainerRegistry
|
# Copyright (c) 2016 Ultimaker B.V.
# Uranium is released under the terms of the AGPLv3 or higher.
import pytest
import os.path
import UM.Settings
from UM.Resources import Resources
from UM.MimeTypeDatabase import MimeType, MimeTypeDatabase
@pytest.fixture
def container_registry():
Resources.addSearchPath(os.path.dirname(os.path.abspath(__file__)))
UM.Settings.ContainerRegistry._ContainerRegistry__instance = None # Reset the private instance variable every time
return UM.Settings.ContainerRegistry.getInstance()
def test_load(container_registry):
container_registry.load()
definitions = container_registry.findDefinitionContainers({ "id": "single_setting" })
assert len(definitions) == 1
definition = definitions[0]
assert definition.getId() == "single_setting"
definitions = container_registry.findDefinitionContainers({ "author": "Ultimaker" })
assert len(definitions) == 3
ids_found = []
for definition in definitions:
ids_found.append(definition.getId())
assert "metadata" in ids_found
assert "single_setting" in ids_found
assert "inherits" in ids_found
|
<commit_before><commit_msg>Add a test for ContainerRegistry<commit_after>
|
# Copyright (c) 2016 Ultimaker B.V.
# Uranium is released under the terms of the AGPLv3 or higher.
import pytest
import os.path
import UM.Settings
from UM.Resources import Resources
from UM.MimeTypeDatabase import MimeType, MimeTypeDatabase
@pytest.fixture
def container_registry():
Resources.addSearchPath(os.path.dirname(os.path.abspath(__file__)))
UM.Settings.ContainerRegistry._ContainerRegistry__instance = None # Reset the private instance variable every time
return UM.Settings.ContainerRegistry.getInstance()
def test_load(container_registry):
container_registry.load()
definitions = container_registry.findDefinitionContainers({ "id": "single_setting" })
assert len(definitions) == 1
definition = definitions[0]
assert definition.getId() == "single_setting"
definitions = container_registry.findDefinitionContainers({ "author": "Ultimaker" })
assert len(definitions) == 3
ids_found = []
for definition in definitions:
ids_found.append(definition.getId())
assert "metadata" in ids_found
assert "single_setting" in ids_found
assert "inherits" in ids_found
|
Add a test for ContainerRegistry# Copyright (c) 2016 Ultimaker B.V.
# Uranium is released under the terms of the AGPLv3 or higher.
import pytest
import os.path
import UM.Settings
from UM.Resources import Resources
from UM.MimeTypeDatabase import MimeType, MimeTypeDatabase
@pytest.fixture
def container_registry():
Resources.addSearchPath(os.path.dirname(os.path.abspath(__file__)))
UM.Settings.ContainerRegistry._ContainerRegistry__instance = None # Reset the private instance variable every time
return UM.Settings.ContainerRegistry.getInstance()
def test_load(container_registry):
container_registry.load()
definitions = container_registry.findDefinitionContainers({ "id": "single_setting" })
assert len(definitions) == 1
definition = definitions[0]
assert definition.getId() == "single_setting"
definitions = container_registry.findDefinitionContainers({ "author": "Ultimaker" })
assert len(definitions) == 3
ids_found = []
for definition in definitions:
ids_found.append(definition.getId())
assert "metadata" in ids_found
assert "single_setting" in ids_found
assert "inherits" in ids_found
|
<commit_before><commit_msg>Add a test for ContainerRegistry<commit_after># Copyright (c) 2016 Ultimaker B.V.
# Uranium is released under the terms of the AGPLv3 or higher.
import pytest
import os.path
import UM.Settings
from UM.Resources import Resources
from UM.MimeTypeDatabase import MimeType, MimeTypeDatabase
@pytest.fixture
def container_registry():
Resources.addSearchPath(os.path.dirname(os.path.abspath(__file__)))
UM.Settings.ContainerRegistry._ContainerRegistry__instance = None # Reset the private instance variable every time
return UM.Settings.ContainerRegistry.getInstance()
def test_load(container_registry):
container_registry.load()
definitions = container_registry.findDefinitionContainers({ "id": "single_setting" })
assert len(definitions) == 1
definition = definitions[0]
assert definition.getId() == "single_setting"
definitions = container_registry.findDefinitionContainers({ "author": "Ultimaker" })
assert len(definitions) == 3
ids_found = []
for definition in definitions:
ids_found.append(definition.getId())
assert "metadata" in ids_found
assert "single_setting" in ids_found
assert "inherits" in ids_found
|
|
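The fixture above resets the registry through Python's name mangling: a class attribute written as __instance inside ContainerRegistry is stored under _ContainerRegistry__instance, which is why the test can reach it from outside. A minimal illustration of the mechanism (Demo is a hypothetical class, not part of Uranium):

class Demo:
    __instance = None  # stored on the class as _Demo__instance

    @classmethod
    def getInstance(cls):
        if Demo.__instance is None:
            Demo.__instance = cls()
        return Demo.__instance

first = Demo.getInstance()
Demo._Demo__instance = None              # what the fixture does to ContainerRegistry
assert Demo.getInstance() is not first   # each test gets a fresh singleton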
7fe7b751ef61fffd59a751df99a4bfc6715f10f8
|
globaleaks/tests/test_storm.py
|
globaleaks/tests/test_storm.py
|
from twisted.internet import defer
from twisted.trial import unittest
from storm.twisted.testing import FakeThreadPool
from storm.twisted.transact import transact, Transactor
from storm.locals import *
from storm.databases.sqlite import SQLite
from storm.uri import URI
from globaleaks.db.models import TXModel
from globaleaks import db
class TestModels(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
threadpool = FakeThreadPool()
self.transactor = Transactor(threadpool)
self.database = SQLite(URI('sqlite:///test.db'))
db.database = self.database
db.threadpool = threadpool
db.transactor = self.transactor
@defer.inlineCallbacks
def test_txmodel(self):
class DummyModel(TXModel):
transactor = self.transactor
__storm_table__ = 'test'
createQuery = "CREATE TABLE " + __storm_table__ +\
"(id INTEGER PRIMARY KEY, test INTEGER)"
id = Int(primary=True)
test = Int()
@transact
def find(self):
store = db.getStore()
res = store.find(DummyModel, DummyModel.test == 42).one()
return res
dm = DummyModel()
yield dm.createTable()
yield dm.save()
dm = DummyModel()
dm.test = 42
yield dm.save()
result = yield dm.find()
self.assertEqual(result.test, 42)
|
Add unittest for storm to illustrate its usage. Also useful to understand when and if they break our usage of it.
|
Add unittest for storm to illustrate its usage.
Also useful to understand when and if they break our usage of it.
|
Python
|
agpl-3.0
|
vodkina/GlobaLeaks,vodkina/GlobaLeaks,vodkina/GlobaLeaks,vodkina/GlobaLeaks
|
Add unittest for storm to illustrate its usage.
Also useful to understand when and if they break our usage of it.
|
from twisted.internet import defer
from twisted.trial import unittest
from storm.twisted.testing import FakeThreadPool
from storm.twisted.transact import transact, Transactor
from storm.locals import *
from storm.databases.sqlite import SQLite
from storm.uri import URI
from globaleaks.db.models import TXModel
from globaleaks import db
class TestModels(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
threadpool = FakeThreadPool()
self.transactor = Transactor(threadpool)
self.database = SQLite(URI('sqlite:///test.db'))
db.database = self.database
db.threadpool = threadpool
db.transactor = self.transactor
@defer.inlineCallbacks
def test_txmodel(self):
class DummyModel(TXModel):
transactor = self.transactor
__storm_table__ = 'test'
createQuery = "CREATE TABLE " + __storm_table__ +\
"(id INTEGER PRIMARY KEY, test INTEGER)"
id = Int(primary=True)
test = Int()
@transact
def find(self):
store = db.getStore()
res = store.find(DummyModel, DummyModel.test == 42).one()
return res
dm = DummyModel()
yield dm.createTable()
yield dm.save()
dm = DummyModel()
dm.test = 42
yield dm.save()
result = yield dm.find()
self.assertEqual(result.test, 42)
|
<commit_before><commit_msg>Add unittest for storm to illustrate its usage.
Also useful to understand when and if they break our usage of it.<commit_after>
|
from twisted.internet import defer
from twisted.trial import unittest
from storm.twisted.testing import FakeThreadPool
from storm.twisted.transact import transact, Transactor
from storm.locals import *
from storm.databases.sqlite import SQLite
from storm.uri import URI
from globaleaks.db.models import TXModel
from globaleaks import db
class TestModels(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
threadpool = FakeThreadPool()
self.transactor = Transactor(threadpool)
self.database = SQLite(URI('sqlite:///test.db'))
db.database = self.database
db.threadpool = threadpool
db.transactor = self.transactor
@defer.inlineCallbacks
def test_txmodel(self):
class DummyModel(TXModel):
transactor = self.transactor
__storm_table__ = 'test'
createQuery = "CREATE TABLE " + __storm_table__ +\
"(id INTEGER PRIMARY KEY, test INTEGER)"
id = Int(primary=True)
test = Int()
@transact
def find(self):
store = db.getStore()
res = store.find(DummyModel, DummyModel.test == 42).one()
return res
dm = DummyModel()
yield dm.createTable()
yield dm.save()
dm = DummyModel()
dm.test = 42
yield dm.save()
result = yield dm.find()
self.assertEqual(result.test, 42)
|
Add unittest for storm to illustrate its usage.
Also useful to understand when and if they break our usage of it.from twisted.internet import defer
from twisted.trial import unittest
from storm.twisted.testing import FakeThreadPool
from storm.twisted.transact import transact, Transactor
from storm.locals import *
from storm.databases.sqlite import SQLite
from storm.uri import URI
from globaleaks.db.models import TXModel
from globaleaks import db
class TestModels(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
threadpool = FakeThreadPool()
self.transactor = Transactor(threadpool)
self.database = SQLite(URI('sqlite:///test.db'))
db.database = self.database
db.threadpool = threadpool
db.transactor = self.transactor
@defer.inlineCallbacks
def test_txmodel(self):
class DummyModel(TXModel):
transactor = self.transactor
__storm_table__ = 'test'
createQuery = "CREATE TABLE " + __storm_table__ +\
"(id INTEGER PRIMARY KEY, test INTEGER)"
id = Int(primary=True)
test = Int()
@transact
def find(self):
store = db.getStore()
res = store.find(DummyModel, DummyModel.test == 42).one()
return res
dm = DummyModel()
yield dm.createTable()
yield dm.save()
dm = DummyModel()
dm.test = 42
yield dm.save()
result = yield dm.find()
self.assertEqual(result.test, 42)
|
<commit_before><commit_msg>Add unittest for storm to illustrate its usage.
Also useful to understand when and if they break our usage of it.<commit_after>from twisted.internet import defer
from twisted.trial import unittest
from storm.twisted.testing import FakeThreadPool
from storm.twisted.transact import transact, Transactor
from storm.locals import *
from storm.databases.sqlite import SQLite
from storm.uri import URI
from globaleaks.db.models import TXModel
from globaleaks import db
class TestModels(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
threadpool = FakeThreadPool()
self.transactor = Transactor(threadpool)
self.database = SQLite(URI('sqlite:///test.db'))
db.database = self.database
db.threadpool = threadpool
db.transactor = self.transactor
@defer.inlineCallbacks
def test_txmodel(self):
class DummyModel(TXModel):
transactor = self.transactor
__storm_table__ = 'test'
createQuery = "CREATE TABLE " + __storm_table__ +\
"(id INTEGER PRIMARY KEY, test INTEGER)"
id = Int(primary=True)
test = Int()
@transact
def find(self):
store = db.getStore()
res = store.find(DummyModel, DummyModel.test == 42).one()
return res
dm = DummyModel()
yield dm.createTable()
yield dm.save()
dm = DummyModel()
dm.test = 42
yield dm.save()
result = yield dm.find()
self.assertEqual(result.test, 42)
|
|
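What makes the storm test above workable is the FakeThreadPool: @transact methods normally run on a real thread pool and return Deferreds, but the fake pool executes them synchronously, so inlineCallbacks tests stay deterministic. A hedged sketch of the pattern DummyModel relies on (Counter is hypothetical; the transactor is looked up as an attribute on self, exactly as in the test):

from storm.twisted.testing import FakeThreadPool
from storm.twisted.transact import Transactor, transact

class Counter(object):
    transactor = Transactor(FakeThreadPool())
    value = 0

    @transact
    def bump(self):
        # runs inside a transaction; a real method would open a store here,
        # the way DummyModel.find() calls db.getStore() above
        Counter.value += 1
        return Counter.value

d = Counter().bump()  # a Deferred that has already fired under FakeThreadPool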
b98fee0ccb127dbca6f1c20bb1c1025a20cb1e7c
|
teknologr/members/migrations/0015_auto_20181119_2256.py
|
teknologr/members/migrations/0015_auto_20181119_2256.py
|
# Generated by Django 2.1.2 on 2018-11-19 20:56
from django.db import migrations
def move_phone_numbers(apps, schema_editor):
Member = apps.get_model('members', 'Member')
for member in Member.objects.all():
if member.mobile_phone:
member.phone = member.mobile_phone
member.save()
class Migration(migrations.Migration):
dependencies = [
('members', '0014_auto_20180321_0916'),
]
operations = [
migrations.RunPython(move_phone_numbers),
]
|
Add data migrations for moving phone numbers to phone field
|
Add data migrations for moving phone numbers to phone field
|
Python
|
mit
|
Teknologforeningen/teknologr.io,Teknologforeningen/teknologr.io,Teknologforeningen/teknologr.io,Teknologforeningen/teknologr.io
|
Add data migrations for moving phone numbers to phone field
|
# Generated by Django 2.1.2 on 2018-11-19 20:56
from django.db import migrations
def move_phone_numbers(apps, schema_editor):
Member = apps.get_model('members', 'Member')
for member in Member.objects.all():
if member.mobile_phone:
member.phone = member.mobile_phone
member.save()
class Migration(migrations.Migration):
dependencies = [
('members', '0014_auto_20180321_0916'),
]
operations = [
migrations.RunPython(move_phone_numbers),
]
|
<commit_before><commit_msg>Add data migrations for moving phone numbers to phone field<commit_after>
|
# Generated by Django 2.1.2 on 2018-11-19 20:56
from django.db import migrations
def move_phone_numbers(apps, schema_editor):
Member = apps.get_model('members', 'Member')
for member in Member.objects.all():
if member.mobile_phone:
member.phone = member.mobile_phone
member.save()
class Migration(migrations.Migration):
dependencies = [
('members', '0014_auto_20180321_0916'),
]
operations = [
migrations.RunPython(move_phone_numbers),
]
|
Add data migrations for moving phone numbers to phone field# Generated by Django 2.1.2 on 2018-11-19 20:56
from django.db import migrations
def move_phone_numbers(apps, schema_editor):
Member = apps.get_model('members', 'Member')
for member in Member.objects.all():
if member.mobile_phone:
member.phone = member.mobile_phone
member.save()
class Migration(migrations.Migration):
dependencies = [
('members', '0014_auto_20180321_0916'),
]
operations = [
migrations.RunPython(move_phone_numbers),
]
|
<commit_before><commit_msg>Add data migrations for moving phone numbers to phone field<commit_after># Generated by Django 2.1.2 on 2018-11-19 20:56
from django.db import migrations
def move_phone_numbers(apps, schema_editor):
Member = apps.get_model('members', 'Member')
for member in Member.objects.all():
if member.mobile_phone:
member.phone = member.mobile_phone
member.save()
class Migration(migrations.Migration):
dependencies = [
('members', '0014_auto_20180321_0916'),
]
operations = [
migrations.RunPython(move_phone_numbers),
]
|
|
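As committed, the data migration above is one-way: migrating backwards past 0015 raises IrreversibleError because RunPython got no reverse callable. If reversibility matters, Django's stock no-op is enough; a sketch of the one changed line (everything else in the migration stays as above):

operations = [
    migrations.RunPython(move_phone_numbers, migrations.RunPython.noop),
]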
a7ab2de6b55e02e3946cb16e2fabecad92731ebd
|
src/sample_image.py
|
src/sample_image.py
|
__author__ = 'team-entaku'
import cv2
import sys
if __name__ == "__main__":
file_name = sys.argv[1]
im = cv2.imread(file_name, 0)
cv2.imshow('im', im)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
Add a sample for a reading image
|
Add a sample for a reading image
|
Python
|
mit
|
entaku/kusarigama
|
Add a sample for a reading image
|
__author__ = 'team-entaku'
import cv2
import sys
if __name__ == "__main__":
file_name = sys.argv[1]
im = cv2.imread(file_name, 0)
cv2.imshow('im', im)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
<commit_before><commit_msg>Add a sample for a reading image<commit_after>
|
__author__ = 'team-entaku'
import cv2
import sys
if __name__ == "__main__":
file_name = sys.argv[1]
im = cv2.imread(file_name, 0)
cv2.imshow('im', im)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
Add a sample for a reading image__author__ = 'team-entaku'
import cv2
import sys
if __name__ == "__main__":
file_name = sys.argv[1]
im = cv2.imread(file_name, 0)
cv2.imshow('im', im)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
<commit_before><commit_msg>Add a sample for a reading image<commit_after>__author__ = 'team-entaku'
import cv2
import sys
if __name__ == "__main__":
file_name = sys.argv[1]
im = cv2.imread(file_name, 0)
cv2.imshow('im', im)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
|
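Two details of the snippet above are worth spelling out: the literal 0 passed to cv2.imread is cv2.IMREAD_GRAYSCALE, and imread returns None rather than raising when the path is wrong, so the script would only fail later inside imshow. A hedged sketch of a stricter variant:

import sys
import cv2

if __name__ == "__main__":
    im = cv2.imread(sys.argv[1], cv2.IMREAD_GRAYSCALE)
    if im is None:
        sys.exit("could not read image: %s" % sys.argv[1])
    cv2.imshow('im', im)
    cv2.waitKey(0)
    cv2.destroyAllWindows()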
92a42c693cc5c38c0d02174722a87217fdd2eb3b
|
oweb/libs/shortcuts.py
|
oweb/libs/shortcuts.py
|
# app imports
from oweb.exceptions import OWebDoesNotExist
def get_object_or_404(klass, *args, **kwargs):
"""
Uses get() to return an object, or raises an OWebDoesNotExist exception if
the object does not exist.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the get() query.
Note: Like with get(), a MultipleObjectsReturned will be raised if more than one
object is found.
"""
queryset = _get_queryset(klass)
try:
return queryset.get(*args, **kwargs)
except queryset.model.DoesNotExist:
raise OWebDoesNotExist
def get_list_or_404(klass, *args, **kwargs):
"""
Uses filter() to return a list of objects, or raises an OWebDoesNotExist exception if
the list is empty.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the filter() query.
"""
queryset = _get_queryset(klass)
obj_list = list(queryset.filter(*args, **kwargs))
if not obj_list:
raise OWebDoesNotExist
return obj_list
|
Make our own version of get_object_or_404 and get_list_or_404
|
Make our own version of get_object_or_404 and get_list_or_404
|
Python
|
mit
|
Mischback/django-oweb,Mischback/django-oweb
|
Make our own version of get_object_or_404 and get_list_or_404
|
# app imports
from oweb.exceptions import OWebDoesNotExist
def get_object_or_404(klass, *args, **kwargs):
"""
Uses get() to return an object, or raises an OWebDoesNotExist exception if
the object does not exist.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the get() query.
Note: Like with get(), a MultipleObjectsReturned will be raised if more than one
object is found.
"""
queryset = _get_queryset(klass)
try:
return queryset.get(*args, **kwargs)
except queryset.model.DoesNotExist:
raise OWebDoesNotExist
def get_list_or_404(klass, *args, **kwargs):
"""
Uses filter() to return a list of objects, or raises an OWebDoesNotExist exception if
the list is empty.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the filter() query.
"""
queryset = _get_queryset(klass)
obj_list = list(queryset.filter(*args, **kwargs))
if not obj_list:
raise OWebDoesNotExist
return obj_list
|
<commit_before><commit_msg>Make our own version of get_object_or_404 and get_list_or_404<commit_after>
|
# app imports
from oweb.exceptions import OWebDoesNotExist
def get_object_or_404(klass, *args, **kwargs):
"""
Uses get() to return an object, or raises an OWebDoesNotExist exception if
the object does not exist.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the get() query.
Note: Like with get(), a MultipleObjectsReturned will be raised if more than one
object is found.
"""
queryset = _get_queryset(klass)
try:
return queryset.get(*args, **kwargs)
except queryset.model.DoesNotExist:
raise OWebDoesNotExist
def get_list_or_404(klass, *args, **kwargs):
"""
Uses filter() to return a list of objects, or raises an OWebDoesNotExist exception if
the list is empty.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the filter() query.
"""
queryset = _get_queryset(klass)
obj_list = list(queryset.filter(*args, **kwargs))
if not obj_list:
raise OWebDoesNotExist
return obj_list
|
Make our own version of get_object_or_404 and get_list_or_404# app imports
from oweb.exceptions import OWebDoesNotExist
def get_object_or_404(klass, *args, **kwargs):
"""
Uses get() to return an object, or raises an OWebDoesNotExist exception if
the object does not exist.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the get() query.
Note: Like with get(), a MultipleObjectsReturned will be raised if more than one
object is found.
"""
queryset = _get_queryset(klass)
try:
return queryset.get(*args, **kwargs)
except queryset.model.DoesNotExist:
raise OWebDoesNotExist
def get_list_or_404(klass, *args, **kwargs):
"""
Uses filter() to return a list of objects, or raises an OWebDoesNotExist exception if
the list is empty.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the filter() query.
"""
queryset = _get_queryset(klass)
obj_list = list(queryset.filter(*args, **kwargs))
if not obj_list:
raise OWebDoesNotExist
return obj_list
|
<commit_before><commit_msg>Make our own version of get_object_or_404 and get_list_or_404<commit_after># app imports
from oweb.exceptions import OWebDoesNotExist
def get_object_or_404(klass, *args, **kwargs):
"""
Uses get() to return an object, or raises an OWebDoesNotExist exception if
the object does not exist.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the get() query.
Note: Like with get(), a MultipleObjectsReturned will be raised if more than one
object is found.
"""
queryset = _get_queryset(klass)
try:
return queryset.get(*args, **kwargs)
except queryset.model.DoesNotExist:
raise OWebDoesNotExist
def get_list_or_404(klass, *args, **kwargs):
"""
Uses filter() to return a list of objects, or raises an OWebDoesNotExist exception if
the list is empty.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the filter() query.
"""
queryset = _get_queryset(klass)
obj_list = list(queryset.filter(*args, **kwargs))
if not obj_list:
raise OWebDoesNotExist
return obj_list
|
|
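Note that both helpers above call _get_queryset() without importing or defining it, so the module as committed raises NameError on first use. Django ships the helper privately as django.shortcuts._get_queryset; a self-contained sketch of the same logic, if depending on a private import is unwanted:

from django.db.models.manager import Manager
from django.db.models.query import QuerySet

def _get_queryset(klass):
    # Accept a Model class, a Manager, or a QuerySet and return a QuerySet,
    # mirroring django.shortcuts._get_queryset.
    if isinstance(klass, QuerySet):
        return klass
    if isinstance(klass, Manager):
        return klass.all()
    return klass._default_manager.all()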
e32ace92c94e83b82bddf1ad348621c408b47a24
|
tests/test_overlap.py
|
tests/test_overlap.py
|
import nose
import cle
import os
class MockBackend(cle.backends.Backend):
def __init__(self, requested_base, size, **kwargs):
super(MockBackend, self).__init__('/dev/zero', **kwargs)
self.requested_base = requested_base
self.size = size
def get_max_addr(self):
return self.rebase_addr + self.size
def test_overlap():
filename = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../binaries/tests/i386/manysum')
ld = cle.Loader(filename, auto_load_libs=False)
nose.tools.assert_equal(ld.main_bin.rebase_addr, 0)
nose.tools.assert_equal(ld.main_bin.get_min_addr(), 0x8048000)
obj1 = MockBackend(0x8047000, 0x2000, custom_arch=ld.main_bin.arch)
obj2 = MockBackend(0x8047000, 0x1000, custom_arch=ld.main_bin.arch)
ld.add_object(obj1)
ld.add_object(obj2)
nose.tools.assert_equal(obj2.rebase_addr, 0x8047000)
nose.tools.assert_greater(obj1.rebase_addr, 0x8048000)
if __name__ == '__main__':
test_overlap()
|
Add test case for object overlapping and requested base addresses
|
Add test case for object overlapping and requested base addresses
|
Python
|
bsd-2-clause
|
angr/cle
|
Add test case for object overlapping and requested base addresses
|
import nose
import cle
import os
class MockBackend(cle.backends.Backend):
def __init__(self, requested_base, size, **kwargs):
super(MockBackend, self).__init__('/dev/zero', **kwargs)
self.requested_base = requested_base
self.size = size
def get_max_addr(self):
return self.rebase_addr + self.size
def test_overlap():
filename = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../binaries/tests/i386/manysum')
ld = cle.Loader(filename, auto_load_libs=False)
nose.tools.assert_equal(ld.main_bin.rebase_addr, 0)
nose.tools.assert_equal(ld.main_bin.get_min_addr(), 0x8048000)
obj1 = MockBackend(0x8047000, 0x2000, custom_arch=ld.main_bin.arch)
obj2 = MockBackend(0x8047000, 0x1000, custom_arch=ld.main_bin.arch)
ld.add_object(obj1)
ld.add_object(obj2)
nose.tools.assert_equal(obj2.rebase_addr, 0x8047000)
nose.tools.assert_greater(obj1.rebase_addr, 0x8048000)
if __name__ == '__main__':
test_overlap()
|
<commit_before><commit_msg>Add test case for object overlapping and requested base addresses<commit_after>
|
import nose
import cle
import os
class MockBackend(cle.backends.Backend):
def __init__(self, requested_base, size, **kwargs):
super(MockBackend, self).__init__('/dev/zero', **kwargs)
self.requested_base = requested_base
self.size = size
def get_max_addr(self):
return self.rebase_addr + self.size
def test_overlap():
filename = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../binaries/tests/i386/manysum')
ld = cle.Loader(filename, auto_load_libs=False)
nose.tools.assert_equal(ld.main_bin.rebase_addr, 0)
nose.tools.assert_equal(ld.main_bin.get_min_addr(), 0x8048000)
obj1 = MockBackend(0x8047000, 0x2000, custom_arch=ld.main_bin.arch)
obj2 = MockBackend(0x8047000, 0x1000, custom_arch=ld.main_bin.arch)
ld.add_object(obj1)
ld.add_object(obj2)
nose.tools.assert_equal(obj2.rebase_addr, 0x8047000)
nose.tools.assert_greater(obj1.rebase_addr, 0x8048000)
if __name__ == '__main__':
test_overlap()
|
Add test case for object overlapping and requested base addressesimport nose
import cle
import os
class MockBackend(cle.backends.Backend):
def __init__(self, requested_base, size, **kwargs):
super(MockBackend, self).__init__('/dev/zero', **kwargs)
self.requested_base = requested_base
self.size = size
def get_max_addr(self):
return self.rebase_addr + self.size
def test_overlap():
filename = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../binaries/tests/i386/manysum')
ld = cle.Loader(filename, auto_load_libs=False)
nose.tools.assert_equal(ld.main_bin.rebase_addr, 0)
nose.tools.assert_equal(ld.main_bin.get_min_addr(), 0x8048000)
obj1 = MockBackend(0x8047000, 0x2000, custom_arch=ld.main_bin.arch)
obj2 = MockBackend(0x8047000, 0x1000, custom_arch=ld.main_bin.arch)
ld.add_object(obj1)
ld.add_object(obj2)
nose.tools.assert_equal(obj2.rebase_addr, 0x8047000)
nose.tools.assert_greater(obj1.rebase_addr, 0x8048000)
if __name__ == '__main__':
test_overlap()
|
<commit_before><commit_msg>Add test case for object overlapping and requested base addresses<commit_after>import nose
import cle
import os
class MockBackend(cle.backends.Backend):
def __init__(self, requested_base, size, **kwargs):
super(MockBackend, self).__init__('/dev/zero', **kwargs)
self.requested_base = requested_base
self.size = size
def get_max_addr(self):
return self.rebase_addr + self.size
def test_overlap():
filename = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../binaries/tests/i386/manysum')
ld = cle.Loader(filename, auto_load_libs=False)
nose.tools.assert_equal(ld.main_bin.rebase_addr, 0)
nose.tools.assert_equal(ld.main_bin.get_min_addr(), 0x8048000)
obj1 = MockBackend(0x8047000, 0x2000, custom_arch=ld.main_bin.arch)
obj2 = MockBackend(0x8047000, 0x1000, custom_arch=ld.main_bin.arch)
ld.add_object(obj1)
ld.add_object(obj2)
nose.tools.assert_equal(obj2.rebase_addr, 0x8047000)
nose.tools.assert_greater(obj1.rebase_addr, 0x8048000)
if __name__ == '__main__':
test_overlap()
|
|
1d2dcd5a777119cbfb98274d73ee14c9190f1c24
|
tests/test_scraper.py
|
tests/test_scraper.py
|
# -*- coding: iso-8859-1 -*-
# Copyright (C) 2013 Bastian Kleineidam
from unittest import TestCase
from dosagelib import scraper
class ScraperTester(TestCase):
"""Test scraper module functions."""
def test_get_scraperclasses(self):
for scraperclass in scraper.get_scraperclasses():
scraperclass()
scraperclass(indexes=["bla"])
def test_find_scraperclasses_single(self):
result = scraper.find_scraperclasses("CalvinAndHobbes")
self.assertEqual(len(result), 1)
def test_find_scraperclasses_multi(self):
result = scraper.find_scraperclasses("a", multiple_allowed=True)
self.assertTrue(len(result) > 1)
def test_find_scraperclasses_error(self):
self.assertRaises(ValueError, scraper.find_scraperclasses, "")
|
# -*- coding: iso-8859-1 -*-
# Copyright (C) 2013 Bastian Kleineidam
from unittest import TestCase
from dosagelib import scraper
class ScraperTester(TestCase):
"""Test scraper module functions."""
def test_get_scraperclasses(self):
for scraperclass in scraper.get_scraperclasses():
scraperobj = scraperclass()
scraperobj = scraperclass(indexes=["bla"])
self.assertTrue(scraperobj.url,
"missing url in %s" % scraperobj.getName())
def test_find_scraperclasses_single(self):
result = scraper.find_scraperclasses("CalvinAndHobbes")
self.assertEqual(len(result), 1)
def test_find_scraperclasses_multi(self):
result = scraper.find_scraperclasses("a", multiple_allowed=True)
self.assertTrue(len(result) > 1)
def test_find_scraperclasses_error(self):
self.assertRaises(ValueError, scraper.find_scraperclasses, "")
|
Test for URL in every scraper.
|
Test for URL in every scraper.
|
Python
|
mit
|
peterjanes/dosage,webcomics/dosage,Freestila/dosage,mbrandis/dosage,blade2005/dosage,webcomics/dosage,peterjanes/dosage,mbrandis/dosage,Freestila/dosage,wummel/dosage,wummel/dosage,blade2005/dosage
|
# -*- coding: iso-8859-1 -*-
# Copyright (C) 2013 Bastian Kleineidam
from unittest import TestCase
from dosagelib import scraper
class ScraperTester(TestCase):
"""Test scraper module functions."""
def test_get_scraperclasses(self):
for scraperclass in scraper.get_scraperclasses():
scraperclass()
scraperclass(indexes=["bla"])
def test_find_scraperclasses_single(self):
result = scraper.find_scraperclasses("CalvinAndHobbes")
self.assertEqual(len(result), 1)
def test_find_scraperclasses_multi(self):
result = scraper.find_scraperclasses("a", multiple_allowed=True)
self.assertTrue(len(result) > 1)
def test_find_scraperclasses_error(self):
self.assertRaises(ValueError, scraper.find_scraperclasses, "")
Test for URL in every scraper.
|
# -*- coding: iso-8859-1 -*-
# Copyright (C) 2013 Bastian Kleineidam
from unittest import TestCase
from dosagelib import scraper
class ScraperTester(TestCase):
"""Test scraper module functions."""
def test_get_scraperclasses(self):
for scraperclass in scraper.get_scraperclasses():
scraperobj = scraperclass()
scraperobj = scraperclass(indexes=["bla"])
self.assertTrue(scraperobj.url,
"missing url in %s" % scraperobj.getName())
def test_find_scraperclasses_single(self):
result = scraper.find_scraperclasses("CalvinAndHobbes")
self.assertEqual(len(result), 1)
def test_find_scraperclasses_multi(self):
result = scraper.find_scraperclasses("a", multiple_allowed=True)
self.assertTrue(len(result) > 1)
def test_find_scraperclasses_error(self):
self.assertRaises(ValueError, scraper.find_scraperclasses, "")
|
<commit_before># -*- coding: iso-8859-1 -*-
# Copyright (C) 2013 Bastian Kleineidam
from unittest import TestCase
from dosagelib import scraper
class ScraperTester(TestCase):
"""Test scraper module functions."""
def test_get_scraperclasses(self):
for scraperclass in scraper.get_scraperclasses():
scraperclass()
scraperclass(indexes=["bla"])
def test_find_scraperclasses_single(self):
result = scraper.find_scraperclasses("CalvinAndHobbes")
self.assertEqual(len(result), 1)
def test_find_scraperclasses_multi(self):
result = scraper.find_scraperclasses("a", multiple_allowed=True)
self.assertTrue(len(result) > 1)
def test_find_scraperclasses_error(self):
self.assertRaises(ValueError, scraper.find_scraperclasses, "")
<commit_msg>Test for URL in every scraper.<commit_after>
|
# -*- coding: iso-8859-1 -*-
# Copyright (C) 2013 Bastian Kleineidam
from unittest import TestCase
from dosagelib import scraper
class ScraperTester(TestCase):
"""Test scraper module functions."""
def test_get_scraperclasses(self):
for scraperclass in scraper.get_scraperclasses():
scraperobj = scraperclass()
scraperobj = scraperclass(indexes=["bla"])
self.assertTrue(scraperobj.url,
"missing url in %s" % scraperobj.getName())
def test_find_scraperclasses_single(self):
result = scraper.find_scraperclasses("CalvinAndHobbes")
self.assertEqual(len(result), 1)
def test_find_scraperclasses_multi(self):
result = scraper.find_scraperclasses("a", multiple_allowed=True)
self.assertTrue(len(result) > 1)
def test_find_scraperclasses_error(self):
self.assertRaises(ValueError, scraper.find_scraperclasses, "")
|
# -*- coding: iso-8859-1 -*-
# Copyright (C) 2013 Bastian Kleineidam
from unittest import TestCase
from dosagelib import scraper
class ScraperTester(TestCase):
"""Test scraper module functions."""
def test_get_scraperclasses(self):
for scraperclass in scraper.get_scraperclasses():
scraperclass()
scraperclass(indexes=["bla"])
def test_find_scraperclasses_single(self):
result = scraper.find_scraperclasses("CalvinAndHobbes")
self.assertEqual(len(result), 1)
def test_find_scraperclasses_multi(self):
result = scraper.find_scraperclasses("a", multiple_allowed=True)
self.assertTrue(len(result) > 1)
def test_find_scraperclasses_error(self):
self.assertRaises(ValueError, scraper.find_scraperclasses, "")
Test for URL in every scraper.# -*- coding: iso-8859-1 -*-
# Copyright (C) 2013 Bastian Kleineidam
from unittest import TestCase
from dosagelib import scraper
class ScraperTester(TestCase):
"""Test scraper module functions."""
def test_get_scraperclasses(self):
for scraperclass in scraper.get_scraperclasses():
scraperobj = scraperclass()
scraperobj = scraperclass(indexes=["bla"])
self.assertTrue(scraperobj.url,
"missing url in %s" % scraperobj.getName())
def test_find_scraperclasses_single(self):
result = scraper.find_scraperclasses("CalvinAndHobbes")
self.assertEqual(len(result), 1)
def test_find_scraperclasses_multi(self):
result = scraper.find_scraperclasses("a", multiple_allowed=True)
self.assertTrue(len(result) > 1)
def test_find_scraperclasses_error(self):
self.assertRaises(ValueError, scraper.find_scraperclasses, "")
|
<commit_before># -*- coding: iso-8859-1 -*-
# Copyright (C) 2013 Bastian Kleineidam
from unittest import TestCase
from dosagelib import scraper
class ScraperTester(TestCase):
"""Test scraper module functions."""
def test_get_scraperclasses(self):
for scraperclass in scraper.get_scraperclasses():
scraperclass()
scraperclass(indexes=["bla"])
def test_find_scraperclasses_single(self):
result = scraper.find_scraperclasses("CalvinAndHobbes")
self.assertEqual(len(result), 1)
def test_find_scraperclasses_multi(self):
result = scraper.find_scraperclasses("a", multiple_allowed=True)
self.assertTrue(len(result) > 1)
def test_find_scraperclasses_error(self):
self.assertRaises(ValueError, scraper.find_scraperclasses, "")
<commit_msg>Test for URL in every scraper.<commit_after># -*- coding: iso-8859-1 -*-
# Copyright (C) 2013 Bastian Kleineidam
from unittest import TestCase
from dosagelib import scraper
class ScraperTester(TestCase):
"""Test scraper module functions."""
def test_get_scraperclasses(self):
for scraperclass in scraper.get_scraperclasses():
scraperobj = scraperclass()
scraperobj = scraperclass(indexes=["bla"])
self.assertTrue(scraperobj.url,
"missing url in %s" % scraperobj.getName())
def test_find_scraperclasses_single(self):
result = scraper.find_scraperclasses("CalvinAndHobbes")
self.assertEqual(len(result), 1)
def test_find_scraperclasses_multi(self):
result = scraper.find_scraperclasses("a", multiple_allowed=True)
self.assertTrue(len(result) > 1)
def test_find_scraperclasses_error(self):
self.assertRaises(ValueError, scraper.find_scraperclasses, "")
|
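In the updated test above, the first scraperclass() instance is built and then immediately shadowed by the indexes=["bla"] one, so only the latter's url is asserted. A sketch that checks both constructions, in the same plain-unittest style:

def test_get_scraperclasses(self):
    for scraperclass in scraper.get_scraperclasses():
        for scraperobj in (scraperclass(), scraperclass(indexes=["bla"])):
            self.assertTrue(scraperobj.url,
                            "missing url in %s" % scraperobj.getName())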
cad64c150479adcd9db07a8d3fb65d81f49b6d1e
|
locations/spiders/superonefoods.py
|
locations/spiders/superonefoods.py
|
# -*- coding: utf-8 -*-
import json
import scrapy
import re
from locations.items import GeojsonPointItem
class SuperonefoodsSpider(scrapy.Spider):
name = "superonefoods"
allowed_domains = ["www.superonefoods.com"]
start_urls = (
'https://www.superonefoods.com/store-finder',
)
def parse(self, response):
# retrieve js data variable from script tag
items = response.xpath('//script/text()')[3].re("var stores =(.+?);\n")
# convert data variable from unicode to string
items = [str(x) for x in items]
# convert type string representation of list to type list
data = [items[0]]
# load list into json object for parsing
jsondata = json.loads(data[0])
# loop through json data object and retrieve values; yield the values to GeojsonPointItem
for item in jsondata:
properties = {
'ref': item.get('_id'),
'addr:full': item.get('address'),
'addr:city': item.get('city'),
'addr:state': item.get('state'),
'addr:postcode': item.get('zip'),
}
yield GeojsonPointItem(
ref=item.get('_id'),
lat=float(item.get('latitude')),
lon=float(item.get('longitude')),
addr_full=item.get('address'),
city=item.get('city'),
state=item.get('state'),
postcode=item.get('zip'),
website='https://www.superonefoods.com/store-details/'+item.get('url'),
)
|
Add Spider to Wilco Farm Stores
|
Add Spider to Wilco Farm Stores
|
Python
|
mit
|
iandees/all-the-places,iandees/all-the-places,iandees/all-the-places
|
Add Spider to Wilco Farm Stores
|
# -*- coding: utf-8 -*-
import json
import scrapy
import re
from locations.items import GeojsonPointItem
class SuperonefoodsSpider(scrapy.Spider):
name = "superonefoods"
allowed_domains = ["www.superonefoods.com"]
start_urls = (
'https://www.superonefoods.com/store-finder',
)
def parse(self, response):
# retrieve js data variable from script tag
items = response.xpath('//script/text()')[3].re("var stores =(.+?);\n")
# convert data variable from unicode to string
items = [str(x) for x in items]
# convert type string representation of list to type list
data = [items[0]]
# load list into json object for parsing
jsondata = json.loads(data[0])
# loop through json data object and retrieve values; yield the values to GeojsonPointItem
for item in jsondata:
properties = {
'ref': item.get('_id'),
'addr:full': item.get('address'),
'addr:city': item.get('city'),
'addr:state': item.get('state'),
'addr:postcode': item.get('zip'),
}
yield GeojsonPointItem(
ref=item.get('_id'),
lat=float(item.get('latitude')),
lon=float(item.get('longitude')),
addr_full=item.get('address'),
city=item.get('city'),
state=item.get('state'),
postcode=item.get('zip'),
website='https://www.superonefoods.com/store-details/'+item.get('url'),
)
|
<commit_before><commit_msg>Add Spider to Wilco Farm Stores<commit_after>
|
# -*- coding: utf-8 -*-
import json
import scrapy
import re
from locations.items import GeojsonPointItem
class SuperonefoodsSpider(scrapy.Spider):
name = "superonefoods"
allowed_domains = ["www.superonefoods.com"]
start_urls = (
'https://www.superonefoods.com/store-finder',
)
def parse(self, response):
# retrieve js data variable from script tag
items = response.xpath('//script/text()')[3].re("var stores =(.+?);\n")
# convert data variable from unicode to string
items = [str(x) for x in items]
# convert type string representation of list to type list
data = [items[0]]
# load list into json object for parsing
jsondata = json.loads(data[0])
# loop through json data object and retrieve values; yield the values to GeojsonPointItem
for item in jsondata:
properties = {
'ref': item.get('_id'),
'addr:full': item.get('address'),
'addr:city': item.get('city'),
'addr:state': item.get('state'),
'addr:postcode': item.get('zip'),
}
yield GeojsonPointItem(
ref=item.get('_id'),
lat=float(item.get('latitude')),
lon=float(item.get('longitude')),
addr_full=item.get('address'),
city=item.get('city'),
state=item.get('state'),
postcode=item.get('zip'),
website='https://www.superonefoods.com/store-details/'+item.get('url'),
)
|
Add Spider to Wilco Farm Stores# -*- coding: utf-8 -*-
import json
import scrapy
import re
from locations.items import GeojsonPointItem
class SuperonefoodsSpider(scrapy.Spider):
name = "superonefoods"
allowed_domains = ["www.superonefoods.com"]
start_urls = (
'https://www.superonefoods.com/store-finder',
)
def parse(self, response):
# retrieve js data variable from script tag
items = response.xpath('//script/text()')[3].re("var stores =(.+?);\n")
# convert data variable from unicode to string
items = [str(x) for x in items]
# convert type string representation of list to type list
data = [items[0]]
# load list into json object for parsing
jsondata = json.loads(data[0])
# loop through json data object and retrieve values; yield the values to GeojsonPointItem
for item in jsondata:
properties = {
'ref': item.get('_id'),
'addr:full': item.get('address'),
'addr:city': item.get('city'),
'addr:state': item.get('state'),
'addr:postcode': item.get('zip'),
}
yield GeojsonPointItem(
ref=item.get('_id'),
lat=float(item.get('latitude')),
lon=float(item.get('longitude')),
addr_full=item.get('address'),
city=item.get('city'),
state=item.get('state'),
postcode=item.get('zip'),
website='https://www.superonefoods.com/store-details/'+item.get('url'),
)
|
<commit_before><commit_msg>Add Spider to Wilco Farm Stores<commit_after># -*- coding: utf-8 -*-
import json
import scrapy
import re
from locations.items import GeojsonPointItem
class SuperonefoodsSpider(scrapy.Spider):
name = "superonefoods"
allowed_domains = ["www.superonefoods.com"]
start_urls = (
'https://www.superonefoods.com/store-finder',
)
def parse(self, response):
# retrieve js data variable from script tag
items = response.xpath('//script/text()')[3].re("var stores =(.+?);\n")
# convert data variable from unicode to string
items = [str(x) for x in items]
# convert type string representation of list to type list
data = [items[0]]
# load list into json object for parsing
jsondata = json.loads(data[0])
# loop through json data object and retrieve values; yield the values to GeojsonPointItem
for item in jsondata:
properties = {
'ref': item.get('_id'),
'addr:full': item.get('address'),
'addr:city': item.get('city'),
'addr:state': item.get('state'),
'addr:postcode': item.get('zip'),
}
yield GeojsonPointItem(
ref=item.get('_id'),
lat=float(item.get('latitude')),
lon=float(item.get('longitude')),
addr_full=item.get('address'),
city=item.get('city'),
state=item.get('state'),
postcode=item.get('zip'),
website='https://www.superonefoods.com/store-details/'+item.get('url'),
)
|
|
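The spider above assembles a properties dict and then ignores it, passing each field to GeojsonPointItem again by hand (the re import is likewise unused). A sketch that drives the item from a single mapping instead, assuming the keys match GeojsonPointItem's field names:

properties = {
    'ref': item.get('_id'),
    'lat': float(item.get('latitude')),
    'lon': float(item.get('longitude')),
    'addr_full': item.get('address'),
    'city': item.get('city'),
    'state': item.get('state'),
    'postcode': item.get('zip'),
    'website': 'https://www.superonefoods.com/store-details/' + item.get('url'),
}
yield GeojsonPointItem(**properties)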
4f6488cbb42e552daa67ee04159072fb9669a75e
|
tests/unit/test_packaging.py
|
tests/unit/test_packaging.py
|
import pytest
from pip._vendor.packaging import specifiers
from pip._internal.utils.packaging import check_requires_python
@pytest.mark.parametrize('version_info, requires_python, expected', [
((3, 6, 5), '== 3.6.4', False),
((3, 6, 5), '== 3.6.5', True),
((3, 6, 5), None, True),
])
def test_check_requires_python(version_info, requires_python, expected):
actual = check_requires_python(requires_python, version_info)
assert actual == expected
def test_check_requires_python__invalid():
"""
Test an invalid Requires-Python value.
"""
with pytest.raises(specifiers.InvalidSpecifier):
check_requires_python('invalid', (3, 6, 5))
|
Add some tests for check_requires_python().
|
Add some tests for check_requires_python().
|
Python
|
mit
|
rouge8/pip,rouge8/pip,sbidoul/pip,pradyunsg/pip,pfmoore/pip,xavfernandez/pip,pradyunsg/pip,xavfernandez/pip,pfmoore/pip,pypa/pip,xavfernandez/pip,sbidoul/pip,rouge8/pip,pypa/pip
|
Add some tests for check_requires_python().
|
import pytest
from pip._vendor.packaging import specifiers
from pip._internal.utils.packaging import check_requires_python
@pytest.mark.parametrize('version_info, requires_python, expected', [
((3, 6, 5), '== 3.6.4', False),
((3, 6, 5), '== 3.6.5', True),
((3, 6, 5), None, True),
])
def test_check_requires_python(version_info, requires_python, expected):
actual = check_requires_python(requires_python, version_info)
assert actual == expected
def test_check_requires_python__invalid():
"""
Test an invalid Requires-Python value.
"""
with pytest.raises(specifiers.InvalidSpecifier):
check_requires_python('invalid', (3, 6, 5))
|
<commit_before><commit_msg>Add some tests for check_requires_python().<commit_after>
|
import pytest
from pip._vendor.packaging import specifiers
from pip._internal.utils.packaging import check_requires_python
@pytest.mark.parametrize('version_info, requires_python, expected', [
((3, 6, 5), '== 3.6.4', False),
((3, 6, 5), '== 3.6.5', True),
((3, 6, 5), None, True),
])
def test_check_requires_python(version_info, requires_python, expected):
actual = check_requires_python(requires_python, version_info)
assert actual == expected
def test_check_requires_python__invalid():
"""
Test an invalid Requires-Python value.
"""
with pytest.raises(specifiers.InvalidSpecifier):
check_requires_python('invalid', (3, 6, 5))
|
Add some tests for check_requires_python().import pytest
from pip._vendor.packaging import specifiers
from pip._internal.utils.packaging import check_requires_python
@pytest.mark.parametrize('version_info, requires_python, expected', [
((3, 6, 5), '== 3.6.4', False),
((3, 6, 5), '== 3.6.5', True),
((3, 6, 5), None, True),
])
def test_check_requires_python(version_info, requires_python, expected):
actual = check_requires_python(requires_python, version_info)
assert actual == expected
def test_check_requires_python__invalid():
"""
Test an invalid Requires-Python value.
"""
with pytest.raises(specifiers.InvalidSpecifier):
check_requires_python('invalid', (3, 6, 5))
|
<commit_before><commit_msg>Add some tests for check_requires_python().<commit_after>import pytest
from pip._vendor.packaging import specifiers
from pip._internal.utils.packaging import check_requires_python
@pytest.mark.parametrize('version_info, requires_python, expected', [
((3, 6, 5), '== 3.6.4', False),
((3, 6, 5), '== 3.6.5', True),
((3, 6, 5), None, True),
])
def test_check_requires_python(version_info, requires_python, expected):
actual = check_requires_python(requires_python, version_info)
assert actual == expected
def test_check_requires_python__invalid():
"""
Test an invalid Requires-Python value.
"""
with pytest.raises(specifiers.InvalidSpecifier):
check_requires_python('invalid', (3, 6, 5))
|
|
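For context, a minimal sketch of the function these tests exercise, reconstructed from the assertions alone; the real implementation in pip._internal.utils.packaging may differ:

from pip._vendor.packaging import specifiers

def check_requires_python(requires_python, version_info):
    # No Requires-Python metadata: every interpreter is acceptable.
    if requires_python is None:
        return True
    # SpecifierSet raises InvalidSpecifier on malformed input, matching
    # the expectation in test_check_requires_python__invalid.
    requires = specifiers.SpecifierSet(requires_python)
    version = '.'.join(str(part) for part in version_info)
    return version in requires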
d218b92cb598801a6fbca5421f216860107d7956
|
apps/curia_vista/management/commands/update_departments.py
|
apps/curia_vista/management/commands/update_departments.py
|
from xml.etree import ElementTree
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
import requests
from apps.curia_vista.models import *
class Command(BaseCommand):
help = 'Import departments from parlament.ch'
@transaction.atomic
def handle(self, *args, **options):
source = 'http://ws.parlament.ch/Departments?format=xml&lang=de'
headers = {'User-Agent': 'Mozilla'}
self.stdout.write("Importing: {}".format(source))
try:
response = requests.get(source, headers=headers)
except Exception as e:
raise CommandError("Could not fetch file from {}".format(source))
departments = ElementTree.fromstring(response.content)
if not departments:
raise CommandError("Not a valid XML file: {}".format(source))
for department in departments:
department_id = department.find('id').text
department_updated = department.find('updated').text
department_code = department.find('code').text
department_model, created = Department.objects.update_or_create(id=department_id,
defaults={'updated': department_updated,
'code': department_code})
department_model.full_clean()
department_model.save()
print(department_model)
|
Add import script for department data
|
Add import script for department data
|
Python
|
agpl-3.0
|
rettichschnidi/politkarma,rettichschnidi/politkarma,rettichschnidi/politkarma,rettichschnidi/politkarma
|
Add import script for department data
|
from xml.etree import ElementTree
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
import requests
from apps.curia_vista.models import *
class Command(BaseCommand):
help = 'Import departments from parlament.ch'
@transaction.atomic
def handle(self, *args, **options):
source = 'http://ws.parlament.ch/Departments?format=xml&lang=de'
headers = {'User-Agent': 'Mozilla'}
self.stdout.write("Importing: {}".format(source))
try:
response = requests.get(source, headers=headers)
except Exception as e:
raise CommandError("Could not fetch file from {}".format(source))
departments = ElementTree.fromstring(response.content)
if not departments:
raise CommandError("Not a valid XML file: {}".format(source))
for department in departments:
department_id = department.find('id').text
department_updated = department.find('updated').text
department_code = department.find('code').text
department_model, created = Department.objects.update_or_create(id=department_id,
defaults={'updated': department_updated,
'code': department_code})
department_model.full_clean()
department_model.save()
print(department_model)
|
<commit_before><commit_msg>Add import script for department data<commit_after>
|
from xml.etree import ElementTree
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
import requests
from apps.curia_vista.models import *
class Command(BaseCommand):
help = 'Import departments from parlament.ch'
@transaction.atomic
def handle(self, *args, **options):
source = 'http://ws.parlament.ch/Departments?format=xml&lang=de'
headers = {'User-Agent': 'Mozilla'}
self.stdout.write("Importing: {}".format(source))
try:
response = requests.get(source, headers=headers)
except Exception as e:
raise CommandError("Could not fetch file from {}".format(source))
departments = ElementTree.fromstring(response.content)
if not departments:
raise CommandError("Not a valid XML file: {}".format(source))
for department in departments:
department_id = department.find('id').text
department_updated = department.find('updated').text
department_code = department.find('code').text
department_model, created = Department.objects.update_or_create(id=department_id,
defaults={'updated': department_updated,
'code': department_code})
department_model.full_clean()
department_model.save()
print(department_model)
|
Add import script for department datafrom xml.etree import ElementTree
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
import requests
from apps.curia_vista.models import *
class Command(BaseCommand):
help = 'Import departments from parlament.ch'
@transaction.atomic
def handle(self, *args, **options):
source = 'http://ws.parlament.ch/Departments?format=xml&lang=de'
headers = {'User-Agent': 'Mozilla'}
self.stdout.write("Importing: {}".format(source))
try:
response = requests.get(source, headers=headers)
except Exception as e:
raise CommandError("Could not fetch file from {}".format(source))
departments = ElementTree.fromstring(response.content)
if not departments:
raise CommandError("Not a valid XML file: {}".format(source))
for department in departments:
department_id = department.find('id').text
department_updated = department.find('updated').text
department_code = department.find('code').text
department_model, created = Department.objects.update_or_create(id=department_id,
defaults={'updated': department_updated,
'code': department_code})
department_model.full_clean()
department_model.save()
print(department_model)
|
<commit_before><commit_msg>Add import script for department data<commit_after>from xml.etree import ElementTree
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
import requests
from apps.curia_vista.models import *
class Command(BaseCommand):
help = 'Import departments from parlament.ch'
@transaction.atomic
def handle(self, *args, **options):
source = 'http://ws.parlament.ch/Departments?format=xml&lang=de'
headers = {'User-Agent': 'Mozilla'}
self.stdout.write("Importing: {}".format(source))
try:
response = requests.get(source, headers=headers)
except Exception as e:
raise CommandError("Could not fetch file from {}".format(source))
departments = ElementTree.fromstring(response.content)
if not departments:
raise CommandError("Not a valid XML file: {}".format(source))
for department in departments:
department_id = department.find('id').text
department_updated = department.find('updated').text
department_code = department.find('code').text
department_model, created = Department.objects.update_or_create(id=department_id,
defaults={'updated': department_updated,
'code': department_code})
department_model.full_clean()
department_model.save()
print(department_model)
|
|
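The command name is derived from the module filename, so once installed it can be invoked as follows (a usage sketch; Django settings and database access are assumed to be configured):

from django.core.management import call_command

call_command('update_departments')  # fetches and upserts all departments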
5806153edfde7d6e27b628c38eb0f9333642711d
|
sydent/http/servlets/getvalidated3pidservlet.py
|
sydent/http/servlets/getvalidated3pidservlet.py
|
# -*- coding: utf-8 -*-
# Copyright 2014 matrix.org
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.web.resource import Resource
from sydent.http.servlets import jsonwrap, require_args
from sydent.db.valsession import ThreePidValSessionStore
from sydent.validators import SessionExpiredException, IncorrectClientSecretException, InvalidSessionIdException,\
SessionNotValidatedException
class GetValidated3pidServlet(Resource):
isLeaf = True
def __init__(self, syd):
self.sydent = syd
@jsonwrap
def render_GET(self, request):
err = require_args(request, ('sid', 'clientSecret'))
if err:
return err
sid = request.args['sid'][0]
clientSecret = request.args['clientSecret'][0]
valSessionStore = ThreePidValSessionStore(self.sydent)
noMatchError = {'errcode': 'M_NO_VALID_SESSION',
'error': "No valid session was found matching that sid and client secret"}
try:
s = valSessionStore.getValidatedSession(sid, clientSecret)
except IncorrectClientSecretException:
return noMatchError
except SessionExpiredException:
return {'errcode': 'M_SESSION_EXPIRED',
'error': "This validation session has expired: call requestToken again"}
except InvalidSessionIdException:
return noMatchError
except SessionNotValidatedException:
return {'errcode': 'M_SESSION_NOT_VALIDATED',
'error': "This validation session has not yet been completed"}
return { 'medium': s.medium, 'address': s.address, 'validatedAt': s.mtime }
|
Add the file for the new servlet
|
Add the file for the new servlet
|
Python
|
apache-2.0
|
matrix-org/sydent,matrix-org/sydent,matrix-org/sydent
|
Add the file for the new servlet
|
# -*- coding: utf-8 -*-
# Copyright 2014 matrix.org
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.web.resource import Resource
from sydent.http.servlets import jsonwrap, require_args
from sydent.db.valsession import ThreePidValSessionStore
from sydent.validators import SessionExpiredException, IncorrectClientSecretException, InvalidSessionIdException,\
SessionNotValidatedException
class GetValidated3pidServlet(Resource):
isLeaf = True
def __init__(self, syd):
self.sydent = syd
@jsonwrap
def render_GET(self, request):
err = require_args(request, ('sid', 'clientSecret'))
if err:
return err
sid = request.args['sid'][0]
clientSecret = request.args['clientSecret'][0]
valSessionStore = ThreePidValSessionStore(self.sydent)
noMatchError = {'errcode': 'M_NO_VALID_SESSION',
'error': "No valid session was found matching that sid and client secret"}
try:
s = valSessionStore.getValidatedSession(sid, clientSecret)
except IncorrectClientSecretException:
return noMatchError
except SessionExpiredException:
return {'errcode': 'M_SESSION_EXPIRED',
'error': "This validation session has expired: call requestToken again"}
except InvalidSessionIdException:
return noMatchError
except SessionNotValidatedException:
return {'errcode': 'M_SESSION_NOT_VALIDATED',
'error': "This validation session has not yet been completed"}
return { 'medium': s.medium, 'address': s.address, 'validatedAt': s.mtime }
|
<commit_before><commit_msg>Add the file for the new servlet<commit_after>
|
# -*- coding: utf-8 -*-
# Copyright 2014 matrix.org
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.web.resource import Resource
from sydent.http.servlets import jsonwrap, require_args
from sydent.db.valsession import ThreePidValSessionStore
from sydent.validators import SessionExpiredException, IncorrectClientSecretException, InvalidSessionIdException,\
SessionNotValidatedException
class GetValidated3pidServlet(Resource):
isLeaf = True
def __init__(self, syd):
self.sydent = syd
@jsonwrap
def render_GET(self, request):
err = require_args(request, ('sid', 'clientSecret'))
if err:
return err
sid = request.args['sid'][0]
clientSecret = request.args['clientSecret'][0]
valSessionStore = ThreePidValSessionStore(self.sydent)
noMatchError = {'errcode': 'M_NO_VALID_SESSION',
'error': "No valid session was found matching that sid and client secret"}
try:
s = valSessionStore.getValidatedSession(sid, clientSecret)
except IncorrectClientSecretException:
return noMatchError
except SessionExpiredException:
return {'errcode': 'M_SESSION_EXPIRED',
'error': "This validation session has expired: call requestToken again"}
except InvalidSessionIdException:
return noMatchError
except SessionNotValidatedException:
return {'errcode': 'M_SESSION_NOT_VALIDATED',
'error': "This validation session has not yet been completed"}
return { 'medium': s.medium, 'address': s.address, 'validatedAt': s.mtime }
|
Add the file for the new servlet# -*- coding: utf-8 -*-
# Copyright 2014 matrix.org
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.web.resource import Resource
from sydent.http.servlets import jsonwrap, require_args
from sydent.db.valsession import ThreePidValSessionStore
from sydent.validators import SessionExpiredException, IncorrectClientSecretException, InvalidSessionIdException,\
SessionNotValidatedException
class GetValidated3pidServlet(Resource):
isLeaf = True
def __init__(self, syd):
self.sydent = syd
@jsonwrap
def render_GET(self, request):
err = require_args(request, ('sid', 'clientSecret'))
if err:
return err
sid = request.args['sid'][0]
clientSecret = request.args['clientSecret'][0]
valSessionStore = ThreePidValSessionStore(self.sydent)
noMatchError = {'errcode': 'M_NO_VALID_SESSION',
'error': "No valid session was found matching that sid and client secret"}
try:
s = valSessionStore.getValidatedSession(sid, clientSecret)
except IncorrectClientSecretException:
return noMatchError
except SessionExpiredException:
return {'errcode': 'M_SESSION_EXPIRED',
'error': "This validation session has expired: call requestToken again"}
except InvalidSessionIdException:
return noMatchError
except SessionNotValidatedException:
return {'errcode': 'M_SESSION_NOT_VALIDATED',
'error': "This validation session has not yet been completed"}
return { 'medium': s.medium, 'address': s.address, 'validatedAt': s.mtime }
|
<commit_before><commit_msg>Add the file for the new servlet<commit_after># -*- coding: utf-8 -*-
# Copyright 2014 matrix.org
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.web.resource import Resource
from sydent.http.servlets import jsonwrap, require_args
from sydent.db.valsession import ThreePidValSessionStore
from sydent.validators import SessionExpiredException, IncorrectClientSecretException, InvalidSessionIdException,\
SessionNotValidatedException
class GetValidated3pidServlet(Resource):
isLeaf = True
def __init__(self, syd):
self.sydent = syd
@jsonwrap
def render_GET(self, request):
err = require_args(request, ('sid', 'clientSecret'))
if err:
return err
sid = request.args['sid'][0]
clientSecret = request.args['clientSecret'][0]
valSessionStore = ThreePidValSessionStore(self.sydent)
noMatchError = {'errcode': 'M_NO_VALID_SESSION',
'error': "No valid session was found matching that sid and client secret"}
try:
s = valSessionStore.getValidatedSession(sid, clientSecret)
except IncorrectClientSecretException:
return noMatchError
except SessionExpiredException:
return {'errcode': 'M_SESSION_EXPIRED',
'error': "This validation session has expired: call requestToken again"}
except InvalidSessionIdException:
return noMatchError
except SessionNotValidatedException:
return {'errcode': 'M_SESSION_NOT_VALIDATED',
'error': "This validation session has not yet been completed"}
return { 'medium': s.medium, 'address': s.address, 'validatedAt': s.mtime }
|
|
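A hedged sketch of mounting such a servlet in a twisted.web resource tree; the path segments are illustrative and sydent's actual registration code may differ:

from twisted.web import resource, server

root = resource.Resource()
threepid = resource.Resource()
root.putChild('3pid', threepid)  # bytes keys (b'3pid') on newer Twisted
threepid.putChild('getValidated3pid', GetValidated3pidServlet(sydent))
site = server.Site(root)  # 'sydent' is the application object assumed above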
814f2aafebc04d75d4f3d99fa42b09c6054a2b16
|
domainchecks/tests/test_commands.py
|
domainchecks/tests/test_commands.py
|
from datetime import timedelta
from io import StringIO
from unittest.mock import Mock, patch
from django.core.management import call_command
from django.test import TestCase
from . import factories
class CheckDomainsCommandTestCase(TestCase):
"""Management command for running the domain checks."""
def call_command(self, **kwargs):
"""Helper to call the management command and return stdout/stderr."""
stdout, stderr = StringIO(), StringIO()
kwargs['stdout'], kwargs['stderr'] = stdout, stderr
call_command('checkdomains', **kwargs)
stdout.seek(0)
stderr.seek(0)
return stdout, stderr
def test_no_checks(self):
"""Call command with no checks configured."""
stdout, stderr = self.call_command()
self.assertIn('0 domain statuses updated', stdout.getvalue())
@patch('domainchecks.management.commands.checkdomains.DomainCheck')
def test_check_stale_domains(self, mock_model):
"""Checks should only be run for active and stale domains."""
self.call_command()
cutoff = timedelta(minutes=5)
mock_model.objects.active.assert_called_with()
mock_model.objects.active.return_value.stale.assert_called_with(
cutoff=cutoff)
@patch('domainchecks.management.commands.checkdomains.DomainCheck')
def test_specify_cutoff(self, mock_model):
"""Minutes option changes the stale cutoff."""
self.call_command(minutes=60)
cutoff = timedelta(minutes=60)
mock_model.objects.active.return_value.stale.assert_called_with(
cutoff=cutoff)
@patch('domainchecks.management.commands.checkdomains.DomainCheck')
def test_check_run_check(self, mock_model):
"""Checks should use the run_check model method."""
example = Mock()
mock_model.objects.active.return_value.stale.return_value = [example, ]
self.call_command()
example.run_check.assert_called_with(timeout=10)
@patch('domainchecks.management.commands.checkdomains.DomainCheck')
    def test_specify_timeout(self, mock_model):
"""Timeout option changes the timeout for run_check."""
example = Mock()
mock_model.objects.active.return_value.stale.return_value = [example, ]
self.call_command(timeout=1)
example.run_check.assert_called_with(timeout=1)
def test_functional_defaults(self):
"""Run command defaults with actual domain record."""
stale = factories.create_domain_check()
# Still want to mock the remote call
with patch('domainchecks.models.requests') as mock_requests:
mock_requests.request.return_value = Mock(
status_code=200, text='Ok')
stdout, stderr = self.call_command()
mock_requests.request.assert_called_once_with(
stale.method, stale.url, allow_redirects=False, timeout=10)
self.assertIn('1 domain status updated', stdout.getvalue())
|
Add tests for the checkdomains command.
|
Add tests for the checkdomains command.
|
Python
|
bsd-2-clause
|
mlavin/video-examples,mlavin/video-examples,mlavin/video-examples
|
Add tests for the checkdomains command.
|
from datetime import timedelta
from io import StringIO
from unittest.mock import Mock, patch
from django.core.management import call_command
from django.test import TestCase
from . import factories
class CheckDomainsCommandTestCase(TestCase):
"""Management command for running the domain checks."""
def call_command(self, **kwargs):
"""Helper to call the management command and return stdout/stderr."""
stdout, stderr = StringIO(), StringIO()
kwargs['stdout'], kwargs['stderr'] = stdout, stderr
call_command('checkdomains', **kwargs)
stdout.seek(0)
stderr.seek(0)
return stdout, stderr
def test_no_checks(self):
"""Call command with no checks configured."""
stdout, stderr = self.call_command()
self.assertIn('0 domain statuses updated', stdout.getvalue())
@patch('domainchecks.management.commands.checkdomains.DomainCheck')
def test_check_stale_domains(self, mock_model):
"""Checks should only be run for active and stale domains."""
self.call_command()
cutoff = timedelta(minutes=5)
mock_model.objects.active.assert_called_with()
mock_model.objects.active.return_value.stale.assert_called_with(
cutoff=cutoff)
@patch('domainchecks.management.commands.checkdomains.DomainCheck')
def test_specify_cutoff(self, mock_model):
"""Minutes option changes the stale cutoff."""
self.call_command(minutes=60)
cutoff = timedelta(minutes=60)
mock_model.objects.active.return_value.stale.assert_called_with(
cutoff=cutoff)
@patch('domainchecks.management.commands.checkdomains.DomainCheck')
def test_check_run_check(self, mock_model):
"""Checks should use the run_check model method."""
example = Mock()
mock_model.objects.active.return_value.stale.return_value = [example, ]
self.call_command()
example.run_check.assert_called_with(timeout=10)
@patch('domainchecks.management.commands.checkdomains.DomainCheck')
    def test_specify_timeout(self, mock_model):
"""Timeout option changes the timeout for run_check."""
example = Mock()
mock_model.objects.active.return_value.stale.return_value = [example, ]
self.call_command(timeout=1)
example.run_check.assert_called_with(timeout=1)
def test_functional_defaults(self):
"""Run command defaults with actual domain record."""
stale = factories.create_domain_check()
# Still want to mock the remote call
with patch('domainchecks.models.requests') as mock_requests:
mock_requests.request.return_value = Mock(
status_code=200, text='Ok')
stdout, stderr = self.call_command()
mock_requests.request.assert_called_once_with(
stale.method, stale.url, allow_redirects=False, timeout=10)
self.assertIn('1 domain status updated', stdout.getvalue())
|
<commit_before><commit_msg>Add tests for the checkdomains command.<commit_after>
|
from datetime import timedelta
from io import StringIO
from unittest.mock import Mock, patch
from django.core.management import call_command
from django.test import TestCase
from . import factories
class CheckDomainsCommandTestCase(TestCase):
"""Management command for running the domain checks."""
def call_command(self, **kwargs):
"""Helper to call the management command and return stdout/stderr."""
stdout, stderr = StringIO(), StringIO()
kwargs['stdout'], kwargs['stderr'] = stdout, stderr
call_command('checkdomains', **kwargs)
stdout.seek(0)
stderr.seek(0)
return stdout, stderr
def test_no_checks(self):
"""Call command with no checks configured."""
stdout, stderr = self.call_command()
self.assertIn('0 domain statuses updated', stdout.getvalue())
@patch('domainchecks.management.commands.checkdomains.DomainCheck')
def test_check_stale_domains(self, mock_model):
"""Checks should only be run for active and stale domains."""
self.call_command()
cutoff = timedelta(minutes=5)
mock_model.objects.active.assert_called_with()
mock_model.objects.active.return_value.stale.assert_called_with(
cutoff=cutoff)
@patch('domainchecks.management.commands.checkdomains.DomainCheck')
def test_specify_cutoff(self, mock_model):
"""Minutes option changes the stale cutoff."""
self.call_command(minutes=60)
cutoff = timedelta(minutes=60)
mock_model.objects.active.return_value.stale.assert_called_with(
cutoff=cutoff)
@patch('domainchecks.management.commands.checkdomains.DomainCheck')
def test_check_run_check(self, mock_model):
"""Checks should use the run_check model method."""
example = Mock()
mock_model.objects.active.return_value.stale.return_value = [example, ]
self.call_command()
example.run_check.assert_called_with(timeout=10)
@patch('domainchecks.management.commands.checkdomains.DomainCheck')
    def test_specify_timeout(self, mock_model):
"""Timeout option changes the timeout for run_check."""
example = Mock()
mock_model.objects.active.return_value.stale.return_value = [example, ]
self.call_command(timeout=1)
example.run_check.assert_called_with(timeout=1)
def test_functional_defaults(self):
"""Run command defaults with actual domain record."""
stale = factories.create_domain_check()
# Still want to mock the remote call
with patch('domainchecks.models.requests') as mock_requests:
mock_requests.request.return_value = Mock(
status_code=200, text='Ok')
stdout, stderr = self.call_command()
mock_requests.request.assert_called_once_with(
stale.method, stale.url, allow_redirects=False, timeout=10)
self.assertIn('1 domain status updated', stdout.getvalue())
|
Add tests for the checkdomains command.from datetime import timedelta
from io import StringIO
from unittest.mock import Mock, patch
from django.core.management import call_command
from django.test import TestCase
from . import factories
class CheckDomainsCommandTestCase(TestCase):
"""Management command for running the domain checks."""
def call_command(self, **kwargs):
"""Helper to call the management command and return stdout/stderr."""
stdout, stderr = StringIO(), StringIO()
kwargs['stdout'], kwargs['stderr'] = stdout, stderr
call_command('checkdomains', **kwargs)
stdout.seek(0)
stderr.seek(0)
return stdout, stderr
def test_no_checks(self):
"""Call command with no checks configured."""
stdout, stderr = self.call_command()
self.assertIn('0 domain statuses updated', stdout.getvalue())
@patch('domainchecks.management.commands.checkdomains.DomainCheck')
def test_check_stale_domains(self, mock_model):
"""Checks should only be run for active and stale domains."""
self.call_command()
cutoff = timedelta(minutes=5)
mock_model.objects.active.assert_called_with()
mock_model.objects.active.return_value.stale.assert_called_with(
cutoff=cutoff)
@patch('domainchecks.management.commands.checkdomains.DomainCheck')
def test_specify_cutoff(self, mock_model):
"""Minutes option changes the stale cutoff."""
self.call_command(minutes=60)
cutoff = timedelta(minutes=60)
mock_model.objects.active.return_value.stale.assert_called_with(
cutoff=cutoff)
@patch('domainchecks.management.commands.checkdomains.DomainCheck')
def test_check_run_check(self, mock_model):
"""Checks should use the run_check model method."""
example = Mock()
mock_model.objects.active.return_value.stale.return_value = [example, ]
self.call_command()
example.run_check.assert_called_with(timeout=10)
@patch('domainchecks.management.commands.checkdomains.DomainCheck')
def test_specity_timeout(self, mock_model):
"""Timeout option changes the timeout for run_check."""
example = Mock()
mock_model.objects.active.return_value.stale.return_value = [example, ]
self.call_command(timeout=1)
example.run_check.assert_called_with(timeout=1)
def test_functional_defaults(self):
"""Run command defaults with actual domain record."""
stale = factories.create_domain_check()
# Still want to mock the remote call
with patch('domainchecks.models.requests') as mock_requests:
mock_requests.request.return_value = Mock(
status_code=200, text='Ok')
stdout, stderr = self.call_command()
mock_requests.request.assert_called_once_with(
stale.method, stale.url, allow_redirects=False, timeout=10)
self.assertIn('1 domain status updated', stdout.getvalue())
|
<commit_before><commit_msg>Add tests for the checkdomains command.<commit_after>from datetime import timedelta
from io import StringIO
from unittest.mock import Mock, patch
from django.core.management import call_command
from django.test import TestCase
from . import factories
class CheckDomainsCommandTestCase(TestCase):
"""Management command for running the domain checks."""
def call_command(self, **kwargs):
"""Helper to call the management command and return stdout/stderr."""
stdout, stderr = StringIO(), StringIO()
kwargs['stdout'], kwargs['stderr'] = stdout, stderr
call_command('checkdomains', **kwargs)
stdout.seek(0)
stderr.seek(0)
return stdout, stderr
def test_no_checks(self):
"""Call command with no checks configured."""
stdout, stderr = self.call_command()
self.assertIn('0 domain statuses updated', stdout.getvalue())
@patch('domainchecks.management.commands.checkdomains.DomainCheck')
def test_check_stale_domains(self, mock_model):
"""Checks should only be run for active and stale domains."""
self.call_command()
cutoff = timedelta(minutes=5)
mock_model.objects.active.assert_called_with()
mock_model.objects.active.return_value.stale.assert_called_with(
cutoff=cutoff)
@patch('domainchecks.management.commands.checkdomains.DomainCheck')
def test_specify_cutoff(self, mock_model):
"""Minutes option changes the stale cutoff."""
self.call_command(minutes=60)
cutoff = timedelta(minutes=60)
mock_model.objects.active.return_value.stale.assert_called_with(
cutoff=cutoff)
@patch('domainchecks.management.commands.checkdomains.DomainCheck')
def test_check_run_check(self, mock_model):
"""Checks should use the run_check model method."""
example = Mock()
mock_model.objects.active.return_value.stale.return_value = [example, ]
self.call_command()
example.run_check.assert_called_with(timeout=10)
@patch('domainchecks.management.commands.checkdomains.DomainCheck')
    def test_specify_timeout(self, mock_model):
"""Timeout option changes the timeout for run_check."""
example = Mock()
mock_model.objects.active.return_value.stale.return_value = [example, ]
self.call_command(timeout=1)
example.run_check.assert_called_with(timeout=1)
def test_functional_defaults(self):
"""Run command defaults with actual domain record."""
stale = factories.create_domain_check()
# Still want to mock the remote call
with patch('domainchecks.models.requests') as mock_requests:
mock_requests.request.return_value = Mock(
status_code=200, text='Ok')
stdout, stderr = self.call_command()
mock_requests.request.assert_called_once_with(
stale.method, stale.url, allow_redirects=False, timeout=10)
self.assertIn('1 domain status updated', stdout.getvalue())
|
|
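For reference, a minimal command implementation that satisfies every assertion above; option names, defaults, and message wording are inferred from the tests rather than taken from the real module:

from datetime import timedelta

from django.core.management.base import BaseCommand

from domainchecks.models import DomainCheck

class Command(BaseCommand):
    help = 'Run checks for all active, stale domains.'

    def add_arguments(self, parser):
        parser.add_argument('--minutes', type=int, default=5)
        parser.add_argument('--timeout', type=int, default=10)

    def handle(self, minutes=5, timeout=10, **options):
        cutoff = timedelta(minutes=minutes)
        checks = DomainCheck.objects.active().stale(cutoff=cutoff)
        count = 0
        for check in checks:
            check.run_check(timeout=timeout)
            count += 1
        noun = 'status' if count == 1 else 'statuses'
        self.stdout.write('%d domain %s updated' % (count, noun))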
2dc179a309dd4152193de8b2d34a3c25150b1128
|
deposit/osf/migrations/0001_initial.py
|
deposit/osf/migrations/0001_initial.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-08-28 11:43
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('deposit', '0010_email_on_preferences'),
]
operations = [
migrations.CreateModel(
name='OSFDepositPreferences',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('on_behalf_of', models.CharField(blank=True, help_text='If set, deposits will be associated to this OSF account.', max_length=128, null=True, verbose_name='OSF username')),
('repository', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='deposit.Repository')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
]
|
Add migration for OSF model
|
Add migration for OSF model
|
Python
|
agpl-3.0
|
wetneb/dissemin,wetneb/dissemin,wetneb/dissemin,dissemin/dissemin,dissemin/dissemin,dissemin/dissemin,wetneb/dissemin,dissemin/dissemin,dissemin/dissemin
|
Add migration for OSF model
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-08-28 11:43
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('deposit', '0010_email_on_preferences'),
]
operations = [
migrations.CreateModel(
name='OSFDepositPreferences',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('on_behalf_of', models.CharField(blank=True, help_text='If set, deposits will be associated to this OSF account.', max_length=128, null=True, verbose_name='OSF username')),
('repository', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='deposit.Repository')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
]
|
<commit_before><commit_msg>Add migration for OSF model<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-08-28 11:43
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('deposit', '0010_email_on_preferences'),
]
operations = [
migrations.CreateModel(
name='OSFDepositPreferences',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('on_behalf_of', models.CharField(blank=True, help_text='If set, deposits will be associated to this OSF account.', max_length=128, null=True, verbose_name='OSF username')),
('repository', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='deposit.Repository')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
]
|
Add migration for OSF model# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-08-28 11:43
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('deposit', '0010_email_on_preferences'),
]
operations = [
migrations.CreateModel(
name='OSFDepositPreferences',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('on_behalf_of', models.CharField(blank=True, help_text='If set, deposits will be associated to this OSF account.', max_length=128, null=True, verbose_name='OSF username')),
('repository', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='deposit.Repository')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
]
|
<commit_before><commit_msg>Add migration for OSF model<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-08-28 11:43
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('deposit', '0010_email_on_preferences'),
]
operations = [
migrations.CreateModel(
name='OSFDepositPreferences',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('on_behalf_of', models.CharField(blank=True, help_text='If set, deposits will be associated to this OSF account.', max_length=128, null=True, verbose_name='OSF username')),
('repository', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='deposit.Repository')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
]
|
|
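The model this migration creates can be read back from the field list; roughly as below (the concrete base class and Meta options in dissemin's source are not visible here):

from django.conf import settings
from django.db import models

class OSFDepositPreferences(models.Model):
    repository = models.ForeignKey('deposit.Repository', on_delete=models.CASCADE)
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    on_behalf_of = models.CharField(
        blank=True, null=True, max_length=128,
        verbose_name='OSF username',
        help_text='If set, deposits will be associated to this OSF account.')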
7a17603060c488875109ec1bee42eeafbf11bdcf
|
exercises/test_stack_balanced_parents.py
|
exercises/test_stack_balanced_parents.py
|
#!/usr/local/bin/python3
"""
Unittest for stack_balanced_parens.py
"""
import stack_balanced_parens
import unittest
class TestStackParens(unittest.TestCase):
def test_balanced(self):
expression = "{[()]}"
self.assertTrue(stack_balanced_parens.isBalanced(expression))
def test_unbalanced(self):
expression = "{[()]}("
self.assertFalse(stack_balanced_parens.isBalanced(expression))
def test_numbers_unbalanced(self):
expression = "(4+5)*(2+(4-7)"
self.assertFalse(stack_balanced_parens.isBalanced(expression))
def test_numbers_balanced(self):
expression = "(7+8)*{3-[5+6]*4}"
self.assertTrue(stack_balanced_parens.isBalanced(expression))
if __name__ == "__main__":
unittest.main()
|
Add unit test for stack_balanced_parens.py
|
Add unit test for stack_balanced_parens.py
|
Python
|
mit
|
myleneh/code
|
Add unit test for stack_balanced_parens.py
|
#!/usr/local/bin/python3
"""
Unittest for stack_balanced_parens.py
"""
import stack_balanced_parens
import unittest
class TestStackParens(unittest.TestCase):
def test_balanced(self):
expression = "{[()]}"
self.assertTrue(stack_balanced_parens.isBalanced(expression))
def test_unbalanced(self):
expression = "{[()]}("
self.assertFalse(stack_balanced_parens.isBalanced(expression))
def test_numbers_unbalanced(self):
expression = "(4+5)*(2+(4-7)"
self.assertFalse(stack_balanced_parens.isBalanced(expression))
def test_numbers_balanced(self):
expression = "(7+8)*{3-[5+6]*4}"
self.assertTrue(stack_balanced_parens.isBalanced(expression))
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add unit test for stack_balanced_parens.py<commit_after>
|
#!/usr/local/bin/python3
"""
Unittest for stack_balanced_parens.py
"""
import stack_balanced_parens
import unittest
class TestStackParens(unittest.TestCase):
def test_balanced(self):
expression = "{[()]}"
self.assertTrue(stack_balanced_parens.isBalanced(expression))
def test_unbalanced(self):
expression = "{[()]}("
self.assertFalse(stack_balanced_parens.isBalanced(expression))
def test_numbers_unbalanced(self):
expression = "(4+5)*(2+(4-7)"
self.assertFalse(stack_balanced_parens.isBalanced(expression))
def test_numbers_balanced(self):
expression = "(7+8)*{3-[5+6]*4}"
self.assertTrue(stack_balanced_parens.isBalanced(expression))
if __name__ == "__main__":
unittest.main()
|
Add unit test for stack_balanced_parens.py#!/usr/local/bin/python3
"""
Unittest for stack_balanced_parens.py
"""
import stack_balanced_parens
import unittest
class TestStackParens(unittest.TestCase):
def test_balanced(self):
expression = "{[()]}"
self.assertTrue(stack_balanced_parens.isBalanced(expression))
def test_unbalanced(self):
expression = "{[()]}("
self.assertFalse(stack_balanced_parens.isBalanced(expression))
def test_numbers_unbalanced(self):
expression = "(4+5)*(2+(4-7)"
self.assertFalse(stack_balanced_parens.isBalanced(expression))
def test_numbers_balanced(self):
expression = "(7+8)*{3-[5+6]*4}"
self.assertTrue(stack_balanced_parens.isBalanced(expression))
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add unit test for stack_balanced_parens.py<commit_after>#!/usr/local/bin/python3
"""
Unittest for stack_balanced_parens.py
"""
import stack_balanced_parens
import unittest
class TestStackParens(unittest.TestCase):
def test_balanced(self):
expression = "{[()]}"
self.assertTrue(stack_balanced_parens.isBalanced(expression))
def test_unbalanced(self):
expression = "{[()]}("
self.assertFalse(stack_balanced_parens.isBalanced(expression))
def test_numbers_unbalanced(self):
expression = "(4+5)*(2+(4-7)"
self.assertFalse(stack_balanced_parens.isBalanced(expression))
def test_numbers_balanced(self):
expression = "(7+8)*{3-[5+6]*4}"
self.assertTrue(stack_balanced_parens.isBalanced(expression))
if __name__ == "__main__":
unittest.main()
|
|
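A minimal stack-based implementation consistent with all four test cases above (hypothetical; the real stack_balanced_parens module is not shown):

def isBalanced(expression):
    pairs = {')': '(', ']': '[', '}': '{'}
    stack = []
    for char in expression:
        if char in '([{':
            stack.append(char)
        elif char in pairs:
            # A closer must match the most recent unclosed opener.
            if not stack or stack.pop() != pairs[char]:
                return False
    return not stack  # balanced only if every opener was closed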
385e7194e8cf7db736001e50023cc13fbba27366
|
py/bulb-switcher-ii.py
|
py/bulb-switcher-ii.py
|
from itertools import combinations, cycle, islice
class Solution(object):
def flipLights(self, n, m):
"""
:type n: int
:type m: int
:rtype: int
"""
if m > 4:
return self.flipLights(n, 4 - m % 2)
if n > 6:
return self.flipLights(6, m)
ans = set()
for i in xrange(m % 2, m + 1, 2):
for comb in combinations(xrange(4), i):
orig = [True] * n
for flip in comb:
if flip == 0:
orig = [not x for x in orig]
elif flip == 1:
orig = [not x if i % 2 == 0 else x for i, x in enumerate(orig)]
elif flip == 2:
orig = [not x if i % 2 == 1 else x for i, x in enumerate(orig)]
elif flip == 3:
orig = [not x if i % 3 == 0 else x for i, x in enumerate(orig)]
ans.add(tuple(orig))
return len(ans)
|
Add py solution for 672. Bulb Switcher II
|
Add py solution for 672. Bulb Switcher II
672. Bulb Switcher II: https://leetcode.com/problems/bulb-switcher-ii/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 672. Bulb Switcher II
672. Bulb Switcher II: https://leetcode.com/problems/bulb-switcher-ii/
|
from itertools import combinations, cycle, islice
class Solution(object):
def flipLights(self, n, m):
"""
:type n: int
:type m: int
:rtype: int
"""
if m > 4:
return self.flipLights(n, 4 - m % 2)
if n > 6:
return self.flipLights(6, m)
ans = set()
for i in xrange(m % 2, m + 1, 2):
for comb in combinations(xrange(4), i):
orig = [True] * n
for flip in comb:
if flip == 0:
orig = [not x for x in orig]
elif flip == 1:
orig = [not x if i % 2 == 0 else x for i, x in enumerate(orig)]
elif flip == 2:
orig = [not x if i % 2 == 1 else x for i, x in enumerate(orig)]
elif flip == 3:
orig = [not x if i % 3 == 0 else x for i, x in enumerate(orig)]
ans.add(tuple(orig))
return len(ans)
|
<commit_before><commit_msg>Add py solution for 672. Bulb Switcher II
672. Bulb Switcher II: https://leetcode.com/problems/bulb-switcher-ii/<commit_after>
|
from itertools import combinations, cycle, islice
class Solution(object):
def flipLights(self, n, m):
"""
:type n: int
:type m: int
:rtype: int
"""
if m > 4:
return self.flipLights(n, 4 - m % 2)
if n > 6:
return self.flipLights(6, m)
ans = set()
for i in xrange(m % 2, m + 1, 2):
for comb in combinations(xrange(4), i):
orig = [True] * n
for flip in comb:
if flip == 0:
orig = [not x for x in orig]
elif flip == 1:
orig = [not x if i % 2 == 0 else x for i, x in enumerate(orig)]
elif flip == 2:
orig = [not x if i % 2 == 1 else x for i, x in enumerate(orig)]
elif flip == 3:
orig = [not x if i % 3 == 0 else x for i, x in enumerate(orig)]
ans.add(tuple(orig))
return len(ans)
|
Add py solution for 672. Bulb Switcher II
672. Bulb Switcher II: https://leetcode.com/problems/bulb-switcher-ii/from itertools import combinations, cycle, islice
class Solution(object):
def flipLights(self, n, m):
"""
:type n: int
:type m: int
:rtype: int
"""
if m > 4:
return self.flipLights(n, 4 - m % 2)
if n > 6:
return self.flipLights(6, m)
ans = set()
for i in xrange(m % 2, m + 1, 2):
for comb in combinations(xrange(4), i):
orig = [True] * n
for flip in comb:
if flip == 0:
orig = [not x for x in orig]
elif flip == 1:
orig = [not x if i % 2 == 0 else x for i, x in enumerate(orig)]
elif flip == 2:
orig = [not x if i % 2 == 1 else x for i, x in enumerate(orig)]
elif flip == 3:
orig = [not x if i % 3 == 0 else x for i, x in enumerate(orig)]
ans.add(tuple(orig))
return len(ans)
|
<commit_before><commit_msg>Add py solution for 672. Bulb Switcher II
672. Bulb Switcher II: https://leetcode.com/problems/bulb-switcher-ii/<commit_after>from itertools import combinations, cycle, islice
class Solution(object):
def flipLights(self, n, m):
"""
:type n: int
:type m: int
:rtype: int
"""
if m > 4:
return self.flipLights(n, 4 - m % 2)
if n > 6:
return self.flipLights(6, m)
ans = set()
for i in xrange(m % 2, m + 1, 2):
for comb in combinations(xrange(4), i):
orig = [True] * n
for flip in comb:
if flip == 0:
orig = [not x for x in orig]
elif flip == 1:
orig = [not x if i % 2 == 0 else x for i, x in enumerate(orig)]
elif flip == 2:
orig = [not x if i % 2 == 1 else x for i, x in enumerate(orig)]
elif flip == 3:
orig = [not x if i % 3 == 0 else x for i, x in enumerate(orig)]
ans.add(tuple(orig))
return len(ans)
|
|
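Example usage, with the expected values taken from the problem statement's samples (Python 2, matching the xrange calls above):

sol = Solution()
print sol.flipLights(1, 1)  # 2: the single bulb can end up on or off
print sol.flipLights(2, 1)  # 3: [on on], [off off], [on off]
print sol.flipLights(3, 1)  # 4: [off off off], [on off on], [off on off], [off off on]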
d818cabe31867de6b23035b309d15ba0ba12117b
|
dockci/migrations/0005.py
|
dockci/migrations/0005.py
|
"""
Rename "build" models to "job" models
"""
import py.path
import yaml
from yaml import safe_load as yaml_load
builds_path = py.path.local().join('data', 'builds')
jobs_path = py.path.local().join('data', 'jobs')
for project_path in builds_path.listdir():
build_files = project_path.listdir(
lambda filename: filename.fnmatch('*.yaml')
)
for build_file in build_files:
with build_file.open() as handle:
build_data = yaml_load(handle)
try:
build_data['job_stage_slugs'] = build_data.pop('build_stage_slugs')
except KeyError:
pass
else:
with build_file.open('w') as handle:
yaml.dump(build_data, handle, default_flow_style=False)
builds_path.move(jobs_path)
|
Add build -> job migration
|
Add build -> job migration
|
Python
|
isc
|
RickyCook/DockCI,sprucedev/DockCI,RickyCook/DockCI,sprucedev/DockCI,RickyCook/DockCI,sprucedev/DockCI-Agent,RickyCook/DockCI,sprucedev/DockCI-Agent,sprucedev/DockCI,sprucedev/DockCI
|
Add build -> job migration
|
"""
Rename "build" models to "job" models
"""
import py.path
import yaml
from yaml import safe_load as yaml_load
builds_path = py.path.local().join('data', 'builds')
jobs_path = py.path.local().join('data', 'jobs')
for project_path in builds_path.listdir():
build_files = project_path.listdir(
lambda filename: filename.fnmatch('*.yaml')
)
for build_file in build_files:
with build_file.open() as handle:
build_data = yaml_load(handle)
try:
build_data['job_stage_slugs'] = build_data.pop('build_stage_slugs')
except KeyError:
pass
else:
with build_file.open('w') as handle:
yaml.dump(build_data, handle, default_flow_style=False)
builds_path.move(jobs_path)
|
<commit_before><commit_msg>Add build -> job migration<commit_after>
|
"""
Rename "build" models to "job" models
"""
import py.path
import yaml
from yaml import safe_load as yaml_load
builds_path = py.path.local().join('data', 'builds')
jobs_path = py.path.local().join('data', 'jobs')
for project_path in builds_path.listdir():
build_files = project_path.listdir(
lambda filename: filename.fnmatch('*.yaml')
)
for build_file in build_files:
with build_file.open() as handle:
build_data = yaml_load(handle)
try:
build_data['job_stage_slugs'] = build_data.pop('build_stage_slugs')
except KeyError:
pass
else:
with build_file.open('w') as handle:
yaml.dump(build_data, handle, default_flow_style=False)
builds_path.move(jobs_path)
|
Add build -> job migration"""
Rename "build" models to "job" models
"""
import py.path
import yaml
from yaml import safe_load as yaml_load
builds_path = py.path.local().join('data', 'builds')
jobs_path = py.path.local().join('data', 'jobs')
for project_path in builds_path.listdir():
build_files = project_path.listdir(
lambda filename: filename.fnmatch('*.yaml')
)
for build_file in build_files:
with build_file.open() as handle:
build_data = yaml_load(handle)
try:
build_data['job_stage_slugs'] = build_data.pop('build_stage_slugs')
except KeyError:
pass
else:
with build_file.open('w') as handle:
yaml.dump(build_data, handle, default_flow_style=False)
builds_path.move(jobs_path)
|
<commit_before><commit_msg>Add build -> job migration<commit_after>"""
Rename "build" models to "job" models
"""
import py.path
import yaml
from yaml import safe_load as yaml_load
builds_path = py.path.local().join('data', 'builds')
jobs_path = py.path.local().join('data', 'jobs')
for project_path in builds_path.listdir():
build_files = project_path.listdir(
lambda filename: filename.fnmatch('*.yaml')
)
for build_file in build_files:
with build_file.open() as handle:
build_data = yaml_load(handle)
try:
build_data['job_stage_slugs'] = build_data.pop('build_stage_slugs')
except KeyError:
pass
else:
with build_file.open('w') as handle:
yaml.dump(build_data, handle, default_flow_style=False)
builds_path.move(jobs_path)
|
|
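The per-file transformation amounts to renaming one key while leaving everything else intact; illustratively (slug values invented for the example):

before = {'build_stage_slugs': ['git_prepare', 'docker_build'], 'result': 'success'}
after = {'job_stage_slugs': ['git_prepare', 'docker_build'], 'result': 'success'}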
da590cef665abf8898ab056bb8048c3541df181f
|
src/server/RemoteFunctionCaller.py
|
src/server/RemoteFunctionCaller.py
|
import time
class TimeoutError(Exception):
pass
class RemoteError(Exception):
pass
class RemoteFunction:
def __init__(self, sender, seq, name, timeout=5, timeoutCb = None):
self.sender = sender
self.seq = seq
self.name = name
self.timeout = timeout
self.timeoutCb = timeoutCb
self.result = None
def __call__(self, *args, **kwargs):
obj = {
"seq": self.seq,
"op": self.name,
"args": args,
"kwargs": kwargs
}
self.sender.send(obj)
start = time.time()
while True:
if self.result:
if "error" in self.result:
raise RemoteError(self.result["error"])
else:
return self.result["result"]
if time.time() - start >= self.timeout:
if self.timeoutCb:
self.timeoutCb(self.seq)
raise TimeoutError
time.sleep(.005)
class RemoteFunctionCaller:
def __init__(self, sender, timeout=5):
self.sender = sender
self.sender.listeners.append(self.dataReceived)
self.timeout = timeout
self.seqnum = 0
self.calls = {}
def functionTimeout(self, seq):
if seq in self.calls:
del self.calls[seq]
def __getattr__(self, name):
# Need to lock on seqnum if used by multiple threads
self.seqnum += 1
rf = RemoteFunction(self.sender, self.seqnum, name,
timeout=self.timeout, timeoutCb=self.functionTimeout)
self.calls[self.seqnum] = rf
return rf
def dataReceived(self, data):
print("Got data", data)
if "seq" in data:
if data["seq"] in self.calls:
self.calls[data["seq"]].result = data
                del self.calls[data["seq"]]
def destroy(self):
self.sender.close()
|
Add class for transparently calling functions over a socket
|
Add class for transparently calling functions over a socket
|
Python
|
mit
|
cnlohr/bridgesim,cnlohr/bridgesim,cnlohr/bridgesim,cnlohr/bridgesim
|
Add class for transparently calling functions over a socket
|
import time
class TimeoutError(Exception):
pass
class RemoteError(Exception):
pass
class RemoteFunction:
def __init__(self, sender, seq, name, timeout=5, timeoutCb = None):
self.sender = sender
self.seq = seq
self.name = name
self.timeout = timeout
self.timeoutCb = timeoutCb
self.result = None
def __call__(self, *args, **kwargs):
obj = {
"seq": self.seq,
"op": self.name,
"args": args,
"kwargs": kwargs
}
self.sender.send(obj)
start = time.time()
while True:
if self.result:
if "error" in self.result:
raise RemoteError(self.result["error"])
else:
return self.result["result"]
if time.time() - start >= self.timeout:
if self.timeoutCb:
self.timeoutCb(self.seq)
raise TimeoutError
time.sleep(.005)
class RemoteFunctionCaller:
def __init__(self, sender, timeout=5):
self.sender = sender
self.sender.listeners.append(self.dataReceived)
self.timeout = timeout
self.seqnum = 0
self.calls = {}
def functionTimeout(self, seq):
if seq in self.calls:
del self.calls[seq]
def __getattr__(self, name):
# Need to lock on seqnum if used by multiple threads
self.seqnum += 1
rf = RemoteFunction(self.sender, self.seqnum, name,
timeout=self.timeout, timeoutCb=self.functionTimeout)
self.calls[self.seqnum] = rf
return rf
def dataReceived(self, data):
print("Got data", data)
if "seq" in data:
if data["seq"] in self.calls:
self.calls[data["seq"]].result = data
                del self.calls[data["seq"]]
def destroy(self):
self.sender.close()
|
<commit_before><commit_msg>Add class for transparently calling functions over a socket<commit_after>
|
import time
class TimeoutError(Exception):
pass
class RemoteError(Exception):
pass
class RemoteFunction:
def __init__(self, sender, seq, name, timeout=5, timeoutCb = None):
self.sender = sender
self.seq = seq
self.name = name
self.timeout = timeout
self.timeoutCb = timeoutCb
self.result = None
def __call__(self, *args, **kwargs):
obj = {
"seq": self.seq,
"op": self.name,
"args": args,
"kwargs": kwargs
}
self.sender.send(obj)
start = time.time()
while True:
if self.result:
if "error" in self.result:
raise RemoteError(self.result["error"])
else:
return self.result["result"]
if time.time() - start >= self.timeout:
if self.timeoutCb:
self.timeoutCb(self.seq)
raise TimeoutError
time.sleep(.005)
class RemoteFunctionCaller:
def __init__(self, sender, timeout=5):
self.sender = sender
self.sender.listeners.append(self.dataReceived)
self.timeout = timeout
self.seqnum = 0
self.calls = {}
def functionTimeout(self, seq):
if seq in self.calls:
del self.calls[seq]
def __getattr__(self, name):
# Need to lock on seqnum if used by multiple threads
self.seqnum += 1
rf = RemoteFunction(self.sender, self.seqnum, name,
timeout=self.timeout, timeoutCb=self.functionTimeout)
self.calls[self.seqnum] = rf
return rf
def dataReceived(self, data):
print("Got data", data)
if "seq" in data:
if data["seq"] in self.calls:
self.calls[data["seq"]].result = data
                del self.calls[data["seq"]]
def destroy(self):
self.sender.close()
|
Add class for transparently calling functions over a socketimport time
class TimeoutError(Exception):
pass
class RemoteError(Exception):
pass
class RemoteFunction:
def __init__(self, sender, seq, name, timeout=5, timeoutCb = None):
self.sender = sender
self.seq = seq
self.name = name
self.timeout = timeout
self.timeoutCb = timeoutCb
self.result = None
def __call__(self, *args, **kwargs):
obj = {
"seq": self.seq,
"op": self.name,
"args": args,
"kwargs": kwargs
}
self.sender.send(obj)
start = time.time()
while True:
if self.result:
if "error" in self.result:
raise RemoteError(self.result["error"])
else:
return self.result["result"]
if time.time() - start >= self.timeout:
if self.timeoutCb:
self.timeoutCb(self.seq)
raise TimeoutError
time.sleep(.005)
class RemoteFunctionCaller:
def __init__(self, sender, timeout=5):
self.sender = sender
self.sender.listeners.append(self.dataReceived)
self.timeout = timeout
self.seqnum = 0
self.calls = {}
def functionTimeout(self, seq):
if seq in self.calls:
del self.calls[seq]
def __getattr__(self, name):
# Need to lock on seqnum if used by multiple threads
self.seqnum += 1
rf = RemoteFunction(self.sender, self.seqnum, name,
timeout=self.timeout, timeoutCb=self.functionTimeout)
self.calls[self.seqnum] = rf
return rf
def dataReceived(self, data):
print("Got data", data)
if "seq" in data:
if data["seq"] in self.calls:
self.calls[data["seq"]].result = data
                del self.calls[data["seq"]]
def destroy(self):
self.sender.close()
|
<commit_before><commit_msg>Add class for transparently calling functions over a socket<commit_after>import time
class TimeoutError(Exception):
pass
class RemoteError(Exception):
pass
class RemoteFunction:
def __init__(self, sender, seq, name, timeout=5, timeoutCb = None):
self.sender = sender
self.seq = seq
self.name = name
self.timeout = timeout
self.timeoutCb = timeoutCb
self.result = None
def __call__(self, *args, **kwargs):
obj = {
"seq": self.seq,
"op": self.name,
"args": args,
"kwargs": kwargs
}
self.sender.send(obj)
start = time.time()
while True:
if self.result:
if "error" in self.result:
raise RemoteError(self.result["error"])
else:
return self.result["result"]
if time.time() - start >= self.timeout:
if self.timeoutCb:
self.timeoutCb(self.seq)
raise TimeoutError
time.sleep(.005)
class RemoteFunctionCaller:
def __init__(self, sender, timeout=5):
self.sender = sender
self.sender.listeners.append(self.dataReceived)
self.timeout = timeout
self.seqnum = 0
self.calls = {}
def functionTimeout(self, seq):
if seq in self.calls:
del self.calls[seq]
def __getattr__(self, name):
# Need to lock on seqnum if used by multiple threads
self.seqnum += 1
rf = RemoteFunction(self.sender, self.seqnum, name,
timeout=self.timeout, timeoutCb=self.functionTimeout)
self.calls[self.seqnum] = rf
return rf
def dataReceived(self, data):
print("Got data", data)
if "seq" in data:
if data["seq"] in self.calls:
self.calls[data["seq"]].result = data
                del self.calls[data["seq"]]
def destroy(self):
self.sender.close()
|
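
A minimal usage sketch for the RemoteFunctionCaller defined above. LoopbackSender is a hypothetical stand-in for the socket-backed sender the class expects; any object exposing send(), close() and a listeners list would do.

class LoopbackSender:
    def __init__(self):
        self.listeners = []
    def send(self, obj):
        # echo an immediate fake reply so RemoteFunction.__call__
        # returns without hitting its timeout
        reply = {"seq": obj["seq"], "result": obj["args"]}
        for listener in self.listeners:
            listener(reply)
    def close(self):
        pass

caller = RemoteFunctionCaller(LoopbackSender(), timeout=1)
print(caller.echo(1, 2, 3))   # prints the echoed args
caller.destroy()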
|
2c3c073298d134e13d100f84877c5065c3d60fdd
|
batdisplay.py
|
batdisplay.py
|
#!/usr/bin/python
import os # for instance management
from gi.repository import Gtk, Gdk, GObject
# milliseconds of window getting displayed
DISPLAY_DURATION = 4000
# this will get set in order to keep this object alive while it is displayed
LIVING_ID = False
# path to a temporary file for a pid.
MY_BAT_WARNING_PATH = "/tmp/battery_warning_active.tmp"
WARNING_TEXT = "Idiot, ur battery is dying!"
class WarningDisplay(Gtk.Window):
def __init__(self, xdist, ydist):
Gtk.Window.__init__(self, type=Gtk.WindowType.POPUP)
self.set_border_width(15)
self.set_default_size(300, 70)
self.set_decorated(False)
self.move(xdist, ydist) # distance to top left corner
# little greyish
self.modify_bg(Gtk.StateType.NORMAL, Gdk.color_parse("#222"))
hbox = Gtk.Box(spacing=20)
self.add(hbox)
self.label = Gtk.Label(WARNING_TEXT)
self.label.set_markup(
'<span foreground="#00FFCC" size="medium" face="cursive">' + WARNING_TEXT + '</span>')
hbox.pack_start(self.label, True, True, 0)
def constructWindow():
""" construct a window and place pid file. Note the timeout ID in LIVING_ID global var."""
global LIVING_ID, DISPLAY_DURATION
# add self destruction after DISPLAY_DURATION milliseconds
LIVING_ID = GObject.timeout_add(DISPLAY_DURATION, destroyWindow)
open(MY_BAT_WARNING_PATH, 'w').write(str(os.getpid()))
window = WarningDisplay(50, 125)
window.connect("delete-event", Gtk.main_quit)
window.show_all()
Gtk.main()
def destroyWindow():
""" Rules for destroying the window. Remove pid file and exit gtk main loop. """
os.remove(MY_BAT_WARNING_PATH)
Gtk.main_quit()
if not os.path.exists(MY_BAT_WARNING_PATH):
constructWindow()
else:
pid = int(open(MY_BAT_WARNING_PATH).read())
try:
# check if pid is running:
os.kill(pid, 0)
except OSError:
# is not running, stale temp file found...... however:
constructWindow()
else:
# is running! do nothing
exit()
|
Add gtk window containing some warning text, very similar to former display classes.
|
Add gtk window containing some warning text, very similar to former display classes.
|
Python
|
mit
|
0ortmann/SysDisplays
|
Add gtk window containing some warning text, very similar to former display classes.
|
#!/usr/bin/python
import os # for instance management
from gi.repository import Gtk, Gdk, GObject
# milliseconds of window getting displayed
DISPLAY_DURATION = 4000
# this will get set in order to keep this object alive while it is displayed
LIVING_ID = False
# path to a temporary file for a pid.
MY_BAT_WARNING_PATH = "/tmp/battery_warning_active.tmp"
WARNING_TEXT = "Idiot, ur battery is dying!"
class WarningDisplay(Gtk.Window):
def __init__(self, xdist, ydist):
Gtk.Window.__init__(self, type=Gtk.WindowType.POPUP)
self.set_border_width(15)
self.set_default_size(300, 70)
self.set_decorated(False)
self.move(xdist, ydist) # distance to top left corner
# little greyish
self.modify_bg(Gtk.StateType.NORMAL, Gdk.color_parse("#222"))
hbox = Gtk.Box(spacing=20)
self.add(hbox)
self.label = Gtk.Label(WARNING_TEXT)
self.label.set_markup(
'<span foreground="#00FFCC" size="medium" face="cursive">' + WARNING_TEXT + '</span>')
hbox.pack_start(self.label, True, True, 0)
def constructWindow():
""" construct a window and place pid file. Note the timeout ID in LIVING_ID global var."""
global LIVING_ID, DISPLAY_DURATION
# add self destruction after DISPLAY_DURATION milliseconds
LIVING_ID = GObject.timeout_add(DISPLAY_DURATION, destroyWindow)
open(MY_BAT_WARNING_PATH, 'w').write(str(os.getpid()))
window = WarningDisplay(50, 125)
window.connect("delete-event", Gtk.main_quit)
window.show_all()
Gtk.main()
def destroyWindow():
""" Rules for destroying the window. Remove pid file and exit gtk main loop. """
os.remove(MY_BAT_WARNING_PATH)
Gtk.main_quit()
if not os.path.exists(MY_BAT_WARNING_PATH):
constructWindow()
else:
pid = int(open(MY_BAT_WARNING_PATH).read())
try:
# check if pid is running:
os.kill(pid, 0)
except OSError:
# is not running, stale temp file found...... however:
constructWindow()
else:
# is running! do nothing
exit()
|
<commit_before><commit_msg>Add gtk window containing some warning text, very similar to former display classes.<commit_after>
|
#!/usr/bin/python
import os # for instance management
from gi.repository import Gtk, Gdk, GObject
# milliseconds of window getting displayed
DISPLAY_DURATION = 4000
# this will get set in order to keep this object alive while it is displayed
LIVING_ID = False
# path to a temporary file for a pid.
MY_BAT_WARNING_PATH = "/tmp/battery_warning_active.tmp"
WARNING_TEXT = "Idiot, ur battery is dying!"
class WarningDisplay(Gtk.Window):
def __init__(self, xdist, ydist):
Gtk.Window.__init__(self, type=Gtk.WindowType.POPUP)
self.set_border_width(15)
self.set_default_size(300, 70)
self.set_decorated(False)
self.move(xdist, ydist) # distance to top left corner
# little greyish
self.modify_bg(Gtk.StateType.NORMAL, Gdk.color_parse("#222"))
hbox = Gtk.Box(spacing=20)
self.add(hbox)
self.label = Gtk.Label(WARNING_TEXT)
self.label.set_markup(
'<span foreground="#00FFCC" size="medium" face="cursive">' + WARNING_TEXT + '</span>')
hbox.pack_start(self.label, True, True, 0)
def constructWindow():
""" construct a window and place pid file. Note the timeout ID in LIVING_ID global var."""
global LIVING_ID, DISPLAY_DURATION
# add self destruction after DISPLAY_DURATION milliseconds
LIVING_ID = GObject.timeout_add(DISPLAY_DURATION, destroyWindow)
open(MY_BAT_WARNING_PATH, 'w').write(str(os.getpid()))
window = WarningDisplay(50, 125)
window.connect("delete-event", Gtk.main_quit)
window.show_all()
Gtk.main()
def destroyWindow():
""" Rules for destroying the window. Remove pid file and exit gtk main loop. """
os.remove(MY_BAT_WARNING_PATH)
Gtk.main_quit()
if not os.path.exists(MY_BAT_WARNING_PATH):
constructWindow()
else:
pid = int(open(MY_BAT_WARNING_PATH).read())
try:
# check if pid is running:
os.kill(pid, 0)
except OSError:
# is not running, stale temp file found...... however:
constructWindow()
else:
# is running! do nothing
exit()
|
Add gtk window containing some warning text, very similar to former display classes.#!/usr/bin/python
import os # for instance management
from gi.repository import Gtk, Gdk, GObject
# milliseconds of window getting displayed
DISPLAY_DURATION = 4000
# this will get set in order to keep this object alive while it is displayed
LIVING_ID = False
# path to a temporary file for a pid.
MY_BAT_WARNING_PATH = "/tmp/battery_warning_active.tmp"
WARNING_TEXT = "Idiot, ur battery is dying!"
class WarningDisplay(Gtk.Window):
def __init__(self, xdist, ydist):
Gtk.Window.__init__(self, type=Gtk.WindowType.POPUP)
self.set_border_width(15)
self.set_default_size(300, 70)
self.set_decorated(False)
self.move(xdist, ydist) # distance to top left corner
# little greyish
self.modify_bg(Gtk.StateType.NORMAL, Gdk.color_parse("#222"))
hbox = Gtk.Box(spacing=20)
self.add(hbox)
self.label = Gtk.Label(WARNING_TEXT)
self.label.set_markup(
'<span foreground="#00FFCC" size="medium" face="cursive">' + WARNING_TEXT + '</span>')
hbox.pack_start(self.label, True, True, 0)
def constructWindow():
""" construct a window and place pid file. Note the timeout ID in LIVING_ID global var."""
global LIVING_ID, DISPLAY_DURATION
# add self destruction after DISPLAY_DURATION milliseconds
LIVING_ID = GObject.timeout_add(DISPLAY_DURATION, destroyWindow)
open(MY_BAT_WARNING_PATH, 'w').write(str(os.getpid()))
window = WarningDisplay(50, 125)
window.connect("delete-event", Gtk.main_quit)
window.show_all()
Gtk.main()
def destroyWindow():
""" Rules for destroying the window. Remove pid file and exit gtk main loop. """
os.remove(MY_BAT_WARNING_PATH)
Gtk.main_quit()
if not os.path.exists(MY_BAT_WARNING_PATH):
constructWindow()
else:
pid = int(open(MY_BAT_WARNING_PATH).read())
try:
# check if pid is running:
os.kill(pid, 0)
except OSError:
# is not running, stale temp file found...... however:
constructWindow()
else:
# is running! do nothing
exit()
|
<commit_before><commit_msg>Add gtk window containing some warning text, very similar to former display classes.<commit_after>#!/usr/bin/python
import os # for instance management
from gi.repository import Gtk, Gdk, GObject
# milliseconds of window getting displayed
DISPLAY_DURATION = 4000
# this will get set in order to keep this object alive while it is displayed
LIVING_ID = False
# path to a temporary file for a pid.
MY_BAT_WARNING_PATH = "/tmp/battery_warning_active.tmp"
WARNING_TEXT = "Idiot, ur battery is dying!"
class WarningDisplay(Gtk.Window):
def __init__(self, xdist, ydist):
Gtk.Window.__init__(self, type=Gtk.WindowType.POPUP)
self.set_border_width(15)
self.set_default_size(300, 70)
self.set_decorated(False)
self.move(xdist, ydist) # distance to top left corner
# little greyish
self.modify_bg(Gtk.StateType.NORMAL, Gdk.color_parse("#222"))
hbox = Gtk.Box(spacing=20)
self.add(hbox)
self.label = Gtk.Label(WARNING_TEXT)
self.label.set_markup(
'<span foreground="#00FFCC" size="medium" face="cursive">' + WARNING_TEXT + '</span>')
hbox.pack_start(self.label, True, True, 0)
def constructWindow():
""" construct a window and place pid file. Note the timeout ID in LIVING_ID global var."""
global LIVING_ID, DISPLAY_DURATION
# add self destruction after DISPLAY_DURATION milliseconds
LIVING_ID = GObject.timeout_add(DISPLAY_DURATION, destroyWindow)
open(MY_BAT_WARNING_PATH, 'w').write(str(os.getpid()))
window = WarningDisplay(50, 125)
window.connect("delete-event", Gtk.main_quit)
window.show_all()
Gtk.main()
def destroyWindow():
""" Rules for destroying the window. Remove pid file and exit gtk main loop. """
os.remove(MY_BAT_WARNING_PATH)
Gtk.main_quit()
if not os.path.exists(MY_BAT_WARNING_PATH):
constructWindow()
else:
pid = int(open(MY_BAT_WARNING_PATH).read())
try:
# check if pid is running:
os.kill(pid, 0)
except OSError:
# is not running, stale temp file found...... however:
constructWindow()
else:
# is running! do nothing
exit()
|
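
The single-instance guard in the record above hinges on os.kill(pid, 0), which sends no signal but raises OSError when the pid does not exist. A minimal sketch of that idiom in isolation; the helper name is illustrative only.

import os

def pid_is_running(pid):
    """Probe a pid with signal 0; no signal is actually delivered."""
    try:
        os.kill(pid, 0)
    except OSError:
        # note: an EPERM from another user's live process also lands here;
        # the record above accepts that, since its pid file is same-user
        return False
    return True

print(pid_is_running(os.getpid()))  # True for our own pid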
|
d960d614be9ddbdad997e57458b4634f591d79ad
|
pony_stable/build-testutils.py
|
pony_stable/build-testutils.py
|
import sys
from base_django import DjangoBuild
from pony_build import client as pony
class TestUtilsBuild(DjangoBuild):
def __init__(self):
super(TestUtilsBuild, self).__init__()
self.repo_url = "git://github.com/ericholscher/django-test-utils"
self.name = "django-test-utils"
self.package_name = 'test_app'
self.required = ['django']
self.installed_apps = ['test_utils', 'test_project.polls', 'test_project.test_app']
def define_commands(self):
self.commands = [ pony.GitClone(self.repo_url),
pony.BuildCommand([self.context.python, 'setup.py', 'install'], name='Install'),
pony.BuildCommand([self.context.djangoadmin, 'syncdb', '--noinput', '--settings', 'django_pony_test_settings'], name='Install'),
pony.TestCommand([self.context.djangoadmin, 'test', self.package_name, '--settings', 'django_pony_test_settings'], name='run tests')
]
if __name__ == '__main__':
build = TestUtilsBuild()
sys.exit(build.execute(sys.argv))
|
Add a build for Django Test Utils.
|
Add a build for Django Test Utils.
|
Python
|
mit
|
ericholscher/pony_barn,ericholscher/pony_barn
|
Add a build for Django Test Utils.
|
import sys
from base_django import DjangoBuild
from pony_build import client as pony
class TestUtilsBuild(DjangoBuild):
def __init__(self):
super(TestUtilsBuild, self).__init__()
self.repo_url = "git://github.com/ericholscher/django-test-utils"
self.name = "django-test-utils"
self.package_name = 'test_app'
self.required = ['django']
self.installed_apps = ['test_utils', 'test_project.polls', 'test_project.test_app']
def define_commands(self):
self.commands = [ pony.GitClone(self.repo_url),
pony.BuildCommand([self.context.python, 'setup.py', 'install'], name='Install'),
pony.BuildCommand([self.context.djangoadmin, 'syncdb', '--noinput', '--settings', 'django_pony_test_settings'], name='Install'),
pony.TestCommand([self.context.djangoadmin, 'test', self.package_name, '--settings', 'django_pony_test_settings'], name='run tests')
]
if __name__ == '__main__':
build = TestUtilsBuild()
sys.exit(build.execute(sys.argv))
|
<commit_before><commit_msg>Add a build for Django Test Utils.<commit_after>
|
import sys
from base_django import DjangoBuild
from pony_build import client as pony
class TestUtilsBuild(DjangoBuild):
def __init__(self):
super(TestUtilsBuild, self).__init__()
self.repo_url = "git://github.com/ericholscher/django-test-utils"
self.name = "django-test-utils"
self.package_name = 'test_app'
self.required = ['django']
self.installed_apps = ['test_utils', 'test_project.polls', 'test_project.test_app']
def define_commands(self):
self.commands = [ pony.GitClone(self.repo_url),
pony.BuildCommand([self.context.python, 'setup.py', 'install'], name='Install'),
pony.BuildCommand([self.context.djangoadmin, 'syncdb', '--noinput', '--settings', 'django_pony_test_settings'], name='Install'),
pony.TestCommand([self.context.djangoadmin, 'test', self.package_name, '--settings', 'django_pony_test_settings'], name='run tests')
]
if __name__ == '__main__':
build = TestUtilsBuild()
sys.exit(build.execute(sys.argv))
|
Add a build for Django Test Utils.import sys
from base_django import DjangoBuild
from pony_build import client as pony
class TestUtilsBuild(DjangoBuild):
def __init__(self):
super(TestUtilsBuild, self).__init__()
self.repo_url = "git://github.com/ericholscher/django-test-utils"
self.name = "django-test-utils"
self.package_name = 'test_app'
self.required = ['django']
self.installed_apps = ['test_utils', 'test_project.polls', 'test_project.test_app']
def define_commands(self):
self.commands = [ pony.GitClone(self.repo_url),
pony.BuildCommand([self.context.python, 'setup.py', 'install'], name='Install'),
pony.BuildCommand([self.context.djangoadmin, 'syncdb', '--noinput', '--settings', 'django_pony_test_settings'], name='Install'),
pony.TestCommand([self.context.djangoadmin, 'test', self.package_name, '--settings', 'django_pony_test_settings'], name='run tests')
]
if __name__ == '__main__':
build = TestUtilsBuild()
sys.exit(build.execute(sys.argv))
|
<commit_before><commit_msg>Add a build for Django Test Utils.<commit_after>import sys
from base_django import DjangoBuild
from pony_build import client as pony
class TestUtilsBuild(DjangoBuild):
def __init__(self):
super(TestUtilsBuild, self).__init__()
self.repo_url = "git://github.com/ericholscher/django-test-utils"
self.name = "django-test-utils"
self.package_name = 'test_app'
self.required = ['django']
self.installed_apps = ['test_utils', 'test_project.polls', 'test_project.test_app']
def define_commands(self):
self.commands = [ pony.GitClone(self.repo_url),
pony.BuildCommand([self.context.python, 'setup.py', 'install'], name='Install'),
pony.BuildCommand([self.context.djangoadmin, 'syncdb', '--noinput', '--settings', 'django_pony_test_settings'], name='Install'),
pony.TestCommand([self.context.djangoadmin, 'test', self.package_name, '--settings', 'django_pony_test_settings'], name='run tests')
]
if __name__ == '__main__':
build = TestUtilsBuild()
sys.exit(build.execute(sys.argv))
|
|
59db12020433e4787555cfeccaeae65f872f59b6
|
tests/test_html_formatter.py
|
tests/test_html_formatter.py
|
# -*- coding: utf-8 -*-
"""
Pygments HTML formatter tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: 2006 by Georg Brandl.
:license: BSD, see LICENSE for more details.
"""
import unittest
import StringIO
import random
from pygments import lexers, formatters
from pygments.token import _TokenType
class HtmlFormatterTest(unittest.TestCase):
def test_external_css(self):
# TODO: write this test.
pass
|
Add a reminder to write an HTML formatter test.
|
[svn] Add a reminder to write an HTML formatter test.
|
Python
|
bsd-2-clause
|
tomstuart/pygments,spencerlyon2/pygments,erickt/pygments,tomstuart/pygments,erickt/pygments,tomstuart/pygments,spencerlyon2/pygments,tomstuart/pygments,spencerlyon2/pygments,erickt/pygments,tomstuart/pygments,erickt/pygments,erickt/pygments,erickt/pygments,tomstuart/pygments,erickt/pygments,erickt/pygments,erickt/pygments,spencerlyon2/pygments,tomstuart/pygments,tomstuart/pygments,spencerlyon2/pygments,spencerlyon2/pygments,erickt/pygments,spencerlyon2/pygments,spencerlyon2/pygments,spencerlyon2/pygments,erickt/pygments,tomstuart/pygments,spencerlyon2/pygments,spencerlyon2/pygments,erickt/pygments,tomstuart/pygments,tomstuart/pygments,spencerlyon2/pygments,spencerlyon2/pygments,spencerlyon2/pygments,tomstuart/pygments,tomstuart/pygments,tomstuart/pygments,tomstuart/pygments,spencerlyon2/pygments,spencerlyon2/pygments,spencerlyon2/pygments,spencerlyon2/pygments
|
[svn] Add a reminder to write an HTML formatter test.
|
# -*- coding: utf-8 -*-
"""
Pygments HTML formatter tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: 2006 by Georg Brandl.
:license: BSD, see LICENSE for more details.
"""
import unittest
import StringIO
import random
from pygments import lexers, formatters
from pygments.token import _TokenType
class HtmlFormatterTest(unittest.TestCase):
def test_external_css(self):
# TODO: write this test.
pass
|
<commit_before><commit_msg>[svn] Add a reminder to write an HTML formatter test.<commit_after>
|
# -*- coding: utf-8 -*-
"""
Pygments HTML formatter tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: 2006 by Georg Brandl.
:license: BSD, see LICENSE for more details.
"""
import unittest
import StringIO
import random
from pygments import lexers, formatters
from pygments.token import _TokenType
class HtmlFormatterTest(unittest.TestCase):
def test_external_css(self):
# TODO: write this test.
pass
|
[svn] Add a reminder to write an HTML formatter test.# -*- coding: utf-8 -*-
"""
Pygments HTML formatter tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: 2006 by Georg Brandl.
:license: BSD, see LICENSE for more details.
"""
import unittest
import StringIO
import random
from pygments import lexers, formatters
from pygments.token import _TokenType
class HtmlFormatterTest(unittest.TestCase):
def test_external_css(self):
# TODO: write this test.
pass
|
<commit_before><commit_msg>[svn] Add a reminder to write an HTML formatter test.<commit_after># -*- coding: utf-8 -*-
"""
Pygments HTML formatter tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: 2006 by Georg Brandl.
:license: BSD, see LICENSE for more details.
"""
import unittest
import StringIO
import random
from pygments import lexers, formatters
from pygments.token import _TokenType
class HtmlFormatterTest(unittest.TestCase):
def test_external_css(self):
# TODO: write this test.
pass
|
|
fe4bf332628b104865ca79a218800c0182c9faaa
|
teuthology/test/test_lock.py
|
teuthology/test/test_lock.py
|
from .. import lock
class TestLock(object):
def test_canonicalize_hostname(self):
host_base = 'box1'
result = lock.canonicalize_hostname(host_base)
assert result == 'ubuntu@box1.front.sepia.ceph.com'
def test_decanonicalize_hostname(self):
host = 'ubuntu@box1.front.sepia.ceph.com'
result = lock.decanonicalize_hostname(host)
assert result == 'box1'
|
Add unit tests for (de)canonicalize_hostname()
|
Add unit tests for (de)canonicalize_hostname()
Signed-off-by: Zack Cerza <f801c831581d4150a2793939287636221d62131e@inktank.com>
|
Python
|
mit
|
dmick/teuthology,caibo2014/teuthology,ivotron/teuthology,ceph/teuthology,tchaikov/teuthology,michaelsevilla/teuthology,dreamhost/teuthology,yghannam/teuthology,SUSE/teuthology,ktdreyer/teuthology,t-miyamae/teuthology,t-miyamae/teuthology,SUSE/teuthology,robbat2/teuthology,SUSE/teuthology,robbat2/teuthology,tchaikov/teuthology,ktdreyer/teuthology,caibo2014/teuthology,ivotron/teuthology,michaelsevilla/teuthology,dreamhost/teuthology,ceph/teuthology,zhouyuan/teuthology,dmick/teuthology,yghannam/teuthology,dmick/teuthology,zhouyuan/teuthology
|
Add unit tests for (de)canonicalize_hostname()
Signed-off-by: Zack Cerza <f801c831581d4150a2793939287636221d62131e@inktank.com>
|
from .. import lock
class TestLock(object):
def test_canonicalize_hostname(self):
host_base = 'box1'
result = lock.canonicalize_hostname(host_base)
assert result == 'ubuntu@box1.front.sepia.ceph.com'
def test_decanonicalize_hostname(self):
host = 'ubuntu@box1.front.sepia.ceph.com'
result = lock.decanonicalize_hostname(host)
assert result == 'box1'
|
<commit_before><commit_msg>Add unit tests for (de)canonicalize_hostname()
Signed-off-by: Zack Cerza <f801c831581d4150a2793939287636221d62131e@inktank.com><commit_after>
|
from .. import lock
class TestLock(object):
def test_canonicalize_hostname(self):
host_base = 'box1'
result = lock.canonicalize_hostname(host_base)
assert result == 'ubuntu@box1.front.sepia.ceph.com'
def test_decanonicalize_hostname(self):
host = 'ubuntu@box1.front.sepia.ceph.com'
result = lock.decanonicalize_hostname(host)
assert result == 'box1'
|
Add unit tests for (de)canonicalize_hostname()
Signed-off-by: Zack Cerza <f801c831581d4150a2793939287636221d62131e@inktank.com>from .. import lock
class TestLock(object):
def test_canonicalize_hostname(self):
host_base = 'box1'
result = lock.canonicalize_hostname(host_base)
assert result == 'ubuntu@box1.front.sepia.ceph.com'
def test_decanonicalize_hostname(self):
host = 'ubuntu@box1.front.sepia.ceph.com'
result = lock.decanonicalize_hostname(host)
assert result == 'box1'
|
<commit_before><commit_msg>Add unit tests for (de)canonicalize_hostname()
Signed-off-by: Zack Cerza <f801c831581d4150a2793939287636221d62131e@inktank.com><commit_after>from .. import lock
class TestLock(object):
def test_canonicalize_hostname(self):
host_base = 'box1'
result = lock.canonicalize_hostname(host_base)
assert result == 'ubuntu@box1.front.sepia.ceph.com'
def test_decanonicalize_hostname(self):
host = 'ubuntu@box1.front.sepia.ceph.com'
result = lock.decanonicalize_hostname(host)
assert result == 'box1'
|
|
f782fe44d7a27e01a7529922040768a657432a4b
|
app.py
|
app.py
|
from flask import Flask, jsonify, Response
app = Flask(__name__)
@app.route('/')
def api_root():
message = {
'status': 200,
'message': 'Hello World'
}
resp = jsonify(message)
resp.status_code = 200
return resp
@app.errorhandler(404)
def not_found(error=None):
message = {
'status': 404,
'message': 'Not Found'
}
resp = jsonify(message)
resp.status_code = 404
return resp
if __name__ == '__main__':
app.run(debug=True)
|
Add basic Hello World example
|
Add basic Hello World example
|
Python
|
apache-2.0
|
apache/cloudstack-gcestack
|
Add basic Hello World example
|
from flask import Flask, jsonify, Response
app = Flask(__name__)
@app.route('/')
def api_root():
message = {
'status': 200,
'message': 'Hello World'
}
resp = jsonify(message)
resp.status_code = 200
return resp
@app.errorhandler(404)
def not_found(error=None):
message = {
'status': 404,
'message': 'Not Found'
}
resp = jsonify(message)
resp.status_code = 404
return resp
if __name__ == '__main__':
app.run(debug=True)
|
<commit_before><commit_msg>Add basic Hello World example<commit_after>
|
from flask import Flask, jsonify, Response
app = Flask(__name__)
@app.route('/')
def api_root():
message = {
'status': 200,
'message': 'Hello World'
}
resp = jsonify(message)
resp.status_code = 200
return resp
@app.errorhandler(404)
def not_found(error=None):
message = {
'status': 404,
'message': 'Not Found'
}
resp = jsonify(message)
resp.status_code = 404
return resp
if __name__ == '__main__':
app.run(debug=True)
|
Add basic Hello World examplefrom flask import Flask, jsonify, Response
app = Flask(__name__)
@app.route('/')
def api_root():
message = {
'status': 200,
'message': 'Hello World'
}
resp = jsonify(message)
resp.status_code = 200
return resp
@app.errorhandler(404)
def not_found(error=None):
message = {
'status': 404,
'message': 'Not Found'
}
resp = jsonify(message)
resp.status_code = 404
return resp
if __name__ == '__main__':
app.run(debug=True)
|
<commit_before><commit_msg>Add basic Hello World example<commit_after>from flask import Flask, jsonify, Response
app = Flask(__name__)
@app.route('/')
def api_root():
message = {
'status': 200,
'message': 'Hello World'
}
resp = jsonify(message)
resp.status_code = 200
return resp
@app.errorhandler(404)
def not_found(error=None):
message = {
'status': 404,
'message': 'Not Found'
}
resp = jsonify(message)
resp.status_code = 404
return resp
if __name__ == '__main__':
app.run(debug=True)
|
|
cfb190ea86ea64f23a882a6e8b4212388c351d21
|
scripts/clean_array.py
|
scripts/clean_array.py
|
import glob
import os
import shutil
import sys
ample_root="/home/jmht/ample-dev1"
sys.path.insert(0,os.path.join(ample_root,"python"))
import clusterize
pdb_codes=["1MIX", "1P9G", "1UCS", "1XKR", "2BL2", "2EFR", "2FM9", "2JKU", "2QIH", "2QSK", "2UUI", "2XFD", "2YKT", "3CI9", "3CVF", "3GD8", "3GHF", "3HAP", "3HFE"]
#pdb_codes=["1MIX"]
root="/data2/jmht/testset/thresh"
for pdb in pdb_codes:
mrbd=os.path.join(root,pdb,"AMPLE_0","MRBUMP")
os.chdir(mrbd)
jobsFile = os.path.abspath(os.path.join(mrbd,"array.jobs"))
#arrayScript = os.path.abspath(os.path.join(mrbd,"array.script"))
if os.path.isfile(jobsFile): clusterize.ClusterRun().cleanUpArrayJob(jobsFile)
|
Clean up crashed array jobs
|
Clean up crashed array jobs
|
Python
|
bsd-3-clause
|
rigdenlab/ample,rigdenlab/ample,linucks/ample,linucks/ample
|
Clean up crashed array jobs
|
import glob
import os
import shutil
import sys
ample_root="/home/jmht/ample-dev1"
sys.path.insert(0,os.path.join(ample_root,"python"))
import clusterize
pdb_codes=["1MIX", "1P9G", "1UCS", "1XKR", "2BL2", "2EFR", "2FM9", "2JKU", "2QIH", "2QSK", "2UUI", "2XFD", "2YKT", "3CI9", "3CVF", "3GD8", "3GHF", "3HAP", "3HFE"]
#pdb_codes=["1MIX"]
root="/data2/jmht/testset/thresh"
for pdb in pdb_codes:
mrbd=os.path.join(root,pdb,"AMPLE_0","MRBUMP")
os.chdir(mrbd)
jobsFile = os.path.abspath(os.path.join(mrbd,"array.jobs"))
#arrayScript = os.path.abspath(os.path.join(mrbd,"array.script"))
if os.path.isfile(jobsFile): clusterize.ClusterRun().cleanUpArrayJob(jobsFile)
|
<commit_before><commit_msg>Clean up crashed array jobs<commit_after>
|
import glob
import os
import shutil
import sys
ample_root="/home/jmht/ample-dev1"
sys.path.insert(0,os.path.join(ample_root,"python"))
import clusterize
pdb_codes=["1MIX", "1P9G", "1UCS", "1XKR", "2BL2", "2EFR", "2FM9", "2JKU", "2QIH", "2QSK", "2UUI", "2XFD", "2YKT", "3CI9", "3CVF", "3GD8", "3GHF", "3HAP", "3HFE"]
#pdb_codes=["1MIX"]
root="/data2/jmht/testset/thresh"
for pdb in pdb_codes:
mrbd=os.path.join(root,pdb,"AMPLE_0","MRBUMP")
os.chdir(mrbd)
jobsFile = os.path.abspath(os.path.join(mrbd,"array.jobs"))
#arrayScript = os.path.abspath(os.path.join(mrbd,"array.script"))
if os.path.isfile(jobsFile): clusterize.ClusterRun().cleanUpArrayJob(jobsFile)
|
Clean up crashed array jobsimport glob
import os
import shutil
import sys
ample_root="/home/jmht/ample-dev1"
sys.path.insert(0,os.path.join(ample_root,"python"))
import clusterize
pdb_codes=["1MIX", "1P9G", "1UCS", "1XKR", "2BL2", "2EFR", "2FM9", "2JKU", "2QIH", "2QSK", "2UUI", "2XFD", "2YKT", "3CI9", "3CVF", "3GD8", "3GHF", "3HAP", "3HFE"]
#pdb_codes=["1MIX"]
root="/data2/jmht/testset/thresh"
for pdb in pdb_codes:
mrbd=os.path.join(root,pdb,"AMPLE_0","MRBUMP")
os.chdir(mrbd)
jobsFile = os.path.abspath(os.path.join(mrbd,"array.jobs"))
#arrayScript = os.path.abspath(os.path.join(mrbd,"array.script"))
if os.path.isfile(jobsFile): clusterize.ClusterRun().cleanUpArrayJob(jobsFile)
|
<commit_before><commit_msg>Clean up crashed array jobs<commit_after>import glob
import os
import shutil
import sys
ample_root="/home/jmht/ample-dev1"
sys.path.insert(0,os.path.join(ample_root,"python"))
import clusterize
pdb_codes=["1MIX", "1P9G", "1UCS", "1XKR", "2BL2", "2EFR", "2FM9", "2JKU", "2QIH", "2QSK", "2UUI", "2XFD", "2YKT", "3CI9", "3CVF", "3GD8", "3GHF", "3HAP", "3HFE"]
#pdb_codes=["1MIX"]
root="/data2/jmht/testset/thresh"
for pdb in pdb_codes:
mrbd=os.path.join(root,pdb,"AMPLE_0","MRBUMP")
os.chdir(mrbd)
jobsFile = os.path.abspath(os.path.join(mrbd,"array.jobs"))
#arrayScript = os.path.abspath(os.path.join(mrbd,"array.script"))
if os.path.isfile(jobsFile): clusterize.ClusterRun().cleanUpArrayJob(jobsFile)
|
|
dfd4396d616c07b69be4acb1f974bf5b4c3e4ffb
|
scripts/create-user.py
|
scripts/create-user.py
|
#!/usr/bin/python
# This is a small helper script to create a CATMAID user.
# You may need to install psycopg2, e.g. with:
# sudo apt-get install python-psycopg2
# Requires the file .catmaid-db to be present in your
# home directory, with the following format:
#
# host: localhost
# database: catmaid
# username: catmaid_user
# password: password_of_your_catmaid_user
import sys
import os
from common import db_connection
from subprocess import check_call
import getpass
from psycopg2 import IntegrityError
if len(sys.argv) != 3:
print >> sys.stderr, "Usage: create-project.py <USERNAME> <LONG-NAME>"
sys.exit(1)
username = sys.argv[1]
full_name = sys.argv[2]
# Now get a password from the user:
p1 = getpass.getpass()
p2 = getpass.getpass("Confirm password: ")
if p1 != p2:
print >> sys.stderr, "The passwords didn't match."
sys.exit(2)
c = db_connection.cursor()
try:
c.execute('INSERT INTO "user" (name, pwd, longname) VALUES (%s, md5(%s), %s) RETURNING id',
(username, p1, full_name))
except IntegrityError, e:
print >> sys.stderr, "There is already a user called '%s'" % (username,)
sys.exit(3)
user_id = c.fetchone()[0]
print "Created the user '%s' with ID: %d" % (username, user_id)
db_connection.commit()
c.close()
db_connection.close()
|
Add a script for creating a new user
|
Add a script for creating a new user
|
Python
|
agpl-3.0
|
htem/CATMAID,fzadow/CATMAID,fzadow/CATMAID,fzadow/CATMAID,fzadow/CATMAID,htem/CATMAID,htem/CATMAID,htem/CATMAID
|
Add a script for creating a new user
|
#!/usr/bin/python
# This is a small helper script to create a CATMAID user.
# You may need to install psycopg2, e.g. with:
# sudo apt-get install python-psycopg2
# Requires the file .catmaid-db to be present in your
# home directory, with the following format:
#
# host: localhost
# database: catmaid
# username: catmaid_user
# password: password_of_your_catmaid_user
import sys
import os
from common import db_connection
from subprocess import check_call
import getpass
from psycopg2 import IntegrityError
if len(sys.argv) != 3:
print >> sys.stderr, "Usage: create-project.py <USERNAME> <LONG-NAME>"
sys.exit(1)
username = sys.argv[1]
full_name = sys.argv[2]
# Now get a password from the user:
p1 = getpass.getpass()
p2 = getpass.getpass("Confirm password: ")
if p1 != p2:
print >> sys.stderr, "The passwords didn't match."
sys.exit(2)
c = db_connection.cursor()
try:
c.execute('INSERT INTO "user" (name, pwd, longname) VALUES (%s, md5(%s), %s) RETURNING id',
(username, p1, full_name))
except IntegrityError, e:
print >> sys.stderr, "There is already a user called '%s'" % (username,)
sys.exit(3)
user_id = c.fetchone()[0]
print "Created the user '%s' with ID: %d" % (username, user_id)
db_connection.commit()
c.close()
db_connection.close()
|
<commit_before><commit_msg>Add a script for creating a new user<commit_after>
|
#!/usr/bin/python
# This is a small helper script to create a CATMAID user.
# You may need to install psycopg2, e.g. with:
# sudo apt-get install python-psycopg2
# Requires the file .catmaid-db to be present in your
# home directory, with the following format:
#
# host: localhost
# database: catmaid
# username: catmaid_user
# password: password_of_your_catmaid_user
import sys
import os
from common import db_connection
from subprocess import check_call
import getpass
from psycopg2 import IntegrityError
if len(sys.argv) != 3:
print >> sys.stderr, "Usage: create-project.py <USERNAME> <LONG-NAME>"
sys.exit(1)
username = sys.argv[1]
full_name = sys.argv[2]
# Now get a password from the user:
p1 = getpass.getpass()
p2 = getpass.getpass("Confirm password: ")
if p1 != p2:
print >> sys.stderr, "The passwords didn't match."
sys.exit(2)
c = db_connection.cursor()
try:
c.execute('INSERT INTO "user" (name, pwd, longname) VALUES (%s, md5(%s), %s) RETURNING id',
(username, p1, full_name))
except IntegrityError, e:
print >> sys.stderr, "There is already a user called '%s'" % (username,)
sys.exit(3)
user_id = c.fetchone()[0]
print "Created the user '%s' with ID: %d" % (username, user_id)
db_connection.commit()
c.close()
db_connection.close()
|
Add a script for creating a new user#!/usr/bin/python
# This is a small helper script to create a CATMAID user.
# You may need to install psycopg2, e.g. with:
# sudo apt-get install python-psycopg2
# Requires the file .catmaid-db to be present in your
# home directory, with the following format:
#
# host: localhost
# database: catmaid
# username: catmaid_user
# password: password_of_your_catmaid_user
import sys
import os
from common import db_connection
from subprocess import check_call
import getpass
from psycopg2 import IntegrityError
if len(sys.argv) != 3:
print >> sys.stderr, "Usage: create-project.py <USERNAME> <LONG-NAME>"
sys.exit(1)
username = sys.argv[1]
full_name = sys.argv[2]
# Now get a password from the user:
p1 = getpass.getpass()
p2 = getpass.getpass("Confirm password: ")
if p1 != p2:
print >> sys.stderr, "The passwords didn't match."
sys.exit(2)
c = db_connection.cursor()
try:
c.execute('INSERT INTO "user" (name, pwd, longname) VALUES (%s, md5(%s), %s) RETURNING id',
(username, p1, full_name))
except IntegrityError, e:
print >> sys.stderr, "There is already a user called '%s'" % (username,)
sys.exit(3)
user_id = c.fetchone()[0]
print "Created the user '%s' with ID: %d" % (username, user_id)
db_connection.commit()
c.close()
db_connection.close()
|
<commit_before><commit_msg>Add a script for creating a new user<commit_after>#!/usr/bin/python
# This is a small helper script to create a CATMAID user.
# You may need to install psycopg2, e.g. with:
# sudo apt-get install python-psycopg2
# Requires the file .catmaid-db to be present in your
# home directory, with the following format:
#
# host: localhost
# database: catmaid
# username: catmaid_user
# password: password_of_your_catmaid_user
import sys
import os
from common import db_connection
from subprocess import check_call
import getpass
from psycopg2 import IntegrityError
if len(sys.argv) != 3:
print >> sys.stderr, "Usage: create-project.py <USERNAME> <LONG-NAME>"
sys.exit(1)
username = sys.argv[1]
full_name = sys.argv[2]
# Now get a password from the user:
p1 = getpass.getpass()
p2 = getpass.getpass("Confirm password: ")
if p1 != p2:
print >> sys.stderr, "The passwords didn't match."
sys.exit(2)
c = db_connection.cursor()
try:
c.execute('INSERT INTO "user" (name, pwd, longname) VALUES (%s, md5(%s), %s) RETURNING id',
(username, p1, full_name))
except IntegrityError, e:
print >> sys.stderr, "There is already a user called '%s'" % (username,)
sys.exit(3)
user_id = c.fetchone()[0]
print "Created the user '%s' with ID: %d" % (username, user_id)
db_connection.commit()
c.close()
db_connection.close()
|
|
eb5f4c24965a60c36656118da92aeb0ca9736608
|
altair/examples/airport_connections.py
|
altair/examples/airport_connections.py
|
"""
Connections Among U.S. Airports Interactive
-------------------------------------------
This example shows all the connections between major U.S. airports. Lookup transformations
are used to find the coordinates of each airport and connecting airports. Connections
are displayed on mouseover via a single selection.
"""
# category: case studies
import altair as alt
from vega_datasets import data
# Since these data are each more than 5,000 rows we'll import from the URLs
airports = data.airports.url
flights_airport = data.flights_airport.url
states = alt.topo_feature(data.us_10m.url, feature="states")
# Create mouseover selection
select_city = alt.selection_single(
on="mouseover", nearest=True, fields=["origin"], empty="none"
)
# Define which attributes to lookup from airports.csv
lookup_data = alt.LookupData(
airports, key="iata", fields=["state", "latitude", "longitude"]
)
background = alt.Chart(states).mark_geoshape(
fill="lightgray",
stroke="white"
).properties(
width=750,
height=500
).project("albersUsa")
connections = alt.Chart(flights_airport).mark_rule(opacity=0.35).encode(
latitude="latitude:Q",
longitude="longitude:Q",
latitude2="lat2:Q",
longitude2="lon2:Q"
).transform_lookup(
lookup="origin",
from_=lookup_data
).transform_lookup(
lookup="destination",
from_=lookup_data,
as_=["state", "lat2", "lon2"]
).transform_filter(
select_city
)
points = alt.Chart(flights_airport).mark_circle().encode(
latitude="latitude:Q",
longitude="longitude:Q",
size=alt.Size("routes:Q", scale=alt.Scale(range=[0, 1000]), legend=None),
order=alt.Order("routes:Q", sort="descending")
).transform_aggregate(
routes="count()",
groupby=["origin"]
).transform_lookup(
lookup="origin",
from_=lookup_data
).transform_filter(
(alt.datum.state != "PR") & (alt.datum.state != "VI")
).add_selection(
select_city
)
(background + connections + points).configure_view(stroke=None)
|
Add example for Connections Among U.S. Airports Interactive
|
DOC: Add example for Connections Among U.S. Airports Interactive
|
Python
|
bsd-3-clause
|
jakevdp/altair,altair-viz/altair
|
DOC: Add example for Connections Among U.S. Airports Interactive
|
"""
Connections Among U.S. Airports Interactive
-------------------------------------------
This example shows all the connections between major U.S. airports. Lookup transformations
are used to find the coordinates of each airport and connecting airports. Connections
are displayed on mouseover via a single selection.
"""
# category: case studies
import altair as alt
from vega_datasets import data
# Since these data are each more than 5,000 rows we'll import from the URLs
airports = data.airports.url
flights_airport = data.flights_airport.url
states = alt.topo_feature(data.us_10m.url, feature="states")
# Create mouseover selection
select_city = alt.selection_single(
on="mouseover", nearest=True, fields=["origin"], empty="none"
)
# Define which attributes to lookup from airports.csv
lookup_data = alt.LookupData(
airports, key="iata", fields=["state", "latitude", "longitude"]
)
background = alt.Chart(states).mark_geoshape(
fill="lightgray",
stroke="white"
).properties(
width=750,
height=500
).project("albersUsa")
connections = alt.Chart(flights_airport).mark_rule(opacity=0.35).encode(
latitude="latitude:Q",
longitude="longitude:Q",
latitude2="lat2:Q",
longitude2="lon2:Q"
).transform_lookup(
lookup="origin",
from_=lookup_data
).transform_lookup(
lookup="destination",
from_=lookup_data,
as_=["state", "lat2", "lon2"]
).transform_filter(
select_city
)
points = alt.Chart(flights_airport).mark_circle().encode(
latitude="latitude:Q",
longitude="longitude:Q",
size=alt.Size("routes:Q", scale=alt.Scale(range=[0, 1000]), legend=None),
order=alt.Order("routes:Q", sort="descending")
).transform_aggregate(
routes="count()",
groupby=["origin"]
).transform_lookup(
lookup="origin",
from_=lookup_data
).transform_filter(
(alt.datum.state != "PR") & (alt.datum.state != "VI")
).add_selection(
select_city
)
(background + connections + points).configure_view(stroke=None)
|
<commit_before><commit_msg>DOC: Add example for Connections Among U.S. Airports Interactive<commit_after>
|
"""
Connections Among U.S. Airports Interactive
-------------------------------------------
This example shows all the connections between major U.S. airports. Lookup transformations
are used to find the coordinates of each airport and connecting airports. Connections
are displayed on mouseover via a single selection.
"""
# category: case studies
import altair as alt
from vega_datasets import data
# Since these data are each more than 5,000 rows we'll import from the URLs
airports = data.airports.url
flights_airport = data.flights_airport.url
states = alt.topo_feature(data.us_10m.url, feature="states")
# Create mouseover selection
select_city = alt.selection_single(
on="mouseover", nearest=True, fields=["origin"], empty="none"
)
# Define which attributes to lookup from airports.csv
lookup_data = alt.LookupData(
airports, key="iata", fields=["state", "latitude", "longitude"]
)
background = alt.Chart(states).mark_geoshape(
fill="lightgray",
stroke="white"
).properties(
width=750,
height=500
).project("albersUsa")
connections = alt.Chart(flights_airport).mark_rule(opacity=0.35).encode(
latitude="latitude:Q",
longitude="longitude:Q",
latitude2="lat2:Q",
longitude2="lon2:Q"
).transform_lookup(
lookup="origin",
from_=lookup_data
).transform_lookup(
lookup="destination",
from_=lookup_data,
as_=["state", "lat2", "lon2"]
).transform_filter(
select_city
)
points = alt.Chart(flights_airport).mark_circle().encode(
latitude="latitude:Q",
longitude="longitude:Q",
size=alt.Size("routes:Q", scale=alt.Scale(range=[0, 1000]), legend=None),
order=alt.Order("routes:Q", sort="descending")
).transform_aggregate(
routes="count()",
groupby=["origin"]
).transform_lookup(
lookup="origin",
from_=lookup_data
).transform_filter(
(alt.datum.state != "PR") & (alt.datum.state != "VI")
).add_selection(
select_city
)
(background + connections + points).configure_view(stroke=None)
|
DOC: Add example for Connections Among U.S. Airports Interactive"""
Connections Among U.S. Airports Interactive
-------------------------------------------
This example shows all the connections between major U.S. airports. Lookup transformations
are used to find the coordinates of each airport and connecting airports. Connections
are displayed on mouseover via a single selection.
"""
# category: case studies
import altair as alt
from vega_datasets import data
# Since these data are each more than 5,000 rows we'll import from the URLs
airports = data.airports.url
flights_airport = data.flights_airport.url
states = alt.topo_feature(data.us_10m.url, feature="states")
# Create mouseover selection
select_city = alt.selection_single(
on="mouseover", nearest=True, fields=["origin"], empty="none"
)
# Define which attributes to lookup from airports.csv
lookup_data = alt.LookupData(
airports, key="iata", fields=["state", "latitude", "longitude"]
)
background = alt.Chart(states).mark_geoshape(
fill="lightgray",
stroke="white"
).properties(
width=750,
height=500
).project("albersUsa")
connections = alt.Chart(flights_airport).mark_rule(opacity=0.35).encode(
latitude="latitude:Q",
longitude="longitude:Q",
latitude2="lat2:Q",
longitude2="lon2:Q"
).transform_lookup(
lookup="origin",
from_=lookup_data
).transform_lookup(
lookup="destination",
from_=lookup_data,
as_=["state", "lat2", "lon2"]
).transform_filter(
select_city
)
points = alt.Chart(flights_airport).mark_circle().encode(
latitude="latitude:Q",
longitude="longitude:Q",
size=alt.Size("routes:Q", scale=alt.Scale(range=[0, 1000]), legend=None),
order=alt.Order("routes:Q", sort="descending")
).transform_aggregate(
routes="count()",
groupby=["origin"]
).transform_lookup(
lookup="origin",
from_=lookup_data
).transform_filter(
(alt.datum.state != "PR") & (alt.datum.state != "VI")
).add_selection(
select_city
)
(background + connections + points).configure_view(stroke=None)
|
<commit_before><commit_msg>DOC: Add example for Connections Among U.S. Airports Interactive<commit_after>"""
Connections Among U.S. Airports Interactive
-------------------------------------------
This example shows all the connections between major U.S. airports. Lookup transformations
are used to find the coordinates of each airport and connecting airports. Connections
are displayed on mouseover via a single selection.
"""
# category: case studies
import altair as alt
from vega_datasets import data
# Since these data are each more than 5,000 rows we'll import from the URLs
airports = data.airports.url
flights_airport = data.flights_airport.url
states = alt.topo_feature(data.us_10m.url, feature="states")
# Create mouseover selection
select_city = alt.selection_single(
on="mouseover", nearest=True, fields=["origin"], empty="none"
)
# Define which attributes to lookup from airports.csv
lookup_data = alt.LookupData(
airports, key="iata", fields=["state", "latitude", "longitude"]
)
background = alt.Chart(states).mark_geoshape(
fill="lightgray",
stroke="white"
).properties(
width=750,
height=500
).project("albersUsa")
connections = alt.Chart(flights_airport).mark_rule(opacity=0.35).encode(
latitude="latitude:Q",
longitude="longitude:Q",
latitude2="lat2:Q",
longitude2="lon2:Q"
).transform_lookup(
lookup="origin",
from_=lookup_data
).transform_lookup(
lookup="destination",
from_=lookup_data,
as_=["state", "lat2", "lon2"]
).transform_filter(
select_city
)
points = alt.Chart(flights_airport).mark_circle().encode(
latitude="latitude:Q",
longitude="longitude:Q",
size=alt.Size("routes:Q", scale=alt.Scale(range=[0, 1000]), legend=None),
order=alt.Order("routes:Q", sort="descending")
).transform_aggregate(
routes="count()",
groupby=["origin"]
).transform_lookup(
lookup="origin",
from_=lookup_data
).transform_filter(
(alt.datum.state != "PR") & (alt.datum.state != "VI")
).add_selection(
select_city
)
(background + connections + points).configure_view(stroke=None)
|
|
e956e2258037e698adcf86a57085bdb2aa7576e5
|
competitions/urls.py
|
competitions/urls.py
|
from django.conf.urls import patterns, url
from django.views.generic import TemplateView
from .views import CompetitionRegistrationView, SeasonDetailView
urlpatterns = patterns('',
url(r'^$', CompetitionRegistrationView.as_view(),
name='competitions_competition_register'),
url(r'^registration/successful/', TemplateView.as_view(
template_name='competitions/competition_registration_successful.html'),
name='competitions_competition_register_success'),
url(r'^series/(?P<pk>\d+)$', SeasonDetailView.as_view(),
name='competitions_season_detail'),
)
|
from django.conf.urls import patterns, url
from django.views.generic import TemplateView
from .views import CompetitionRegistrationView, SeasonDetailView
urlpatterns = patterns('',
url(r'^$', CompetitionRegistrationView.as_view(),
name='competitions_competition_register'),
url(r'^registration/successful/', TemplateView.as_view(
template_name='competitions/competition_registration_successful.html'),
name='competitions_competition_register_success'),
url(r'^season/(?P<pk>\d+)$', SeasonDetailView.as_view(),
name='competitions_season_detail'),
)
|
Correct url of the SeasonDetailView
|
competitions: Correct url of the SeasonDetailView
|
Python
|
mit
|
rtrembecky/roots,rtrembecky/roots,tbabej/roots,matus-stehlik/roots,matus-stehlik/roots,matus-stehlik/roots,matus-stehlik/glowing-batman,tbabej/roots,tbabej/roots,matus-stehlik/glowing-batman,rtrembecky/roots
|
from django.conf.urls import patterns, url
from django.views.generic import TemplateView
from .views import CompetitionRegistrationView, SeasonDetailView
urlpatterns = patterns('',
url(r'^$', CompetitionRegistrationView.as_view(),
name='competitions_competition_register'),
url(r'^registration/successful/', TemplateView.as_view(
template_name='competitions/competition_registration_successful.html'),
name='competitions_competition_register_success'),
url(r'^series/(?P<pk>\d+)$', SeasonDetailView.as_view(),
name='competitions_season_detail'),
)
competitions: Correct url of the SeasonDetailView
|
from django.conf.urls import patterns, url
from django.views.generic import TemplateView
from .views import CompetitionRegistrationView, SeasonDetailView
urlpatterns = patterns('',
url(r'^$', CompetitionRegistrationView.as_view(),
name='competitions_competition_register'),
url(r'^registration/successful/', TemplateView.as_view(
template_name='competitions/competition_registration_successful.html'),
name='competitions_competition_register_success'),
url(r'^season/(?P<pk>\d+)$', SeasonDetailView.as_view(),
name='competitions_season_detail'),
)
|
<commit_before>from django.conf.urls import patterns, url
from django.views.generic import TemplateView
from .views import CompetitionRegistrationView, SeasonDetailView
urlpatterns = patterns('',
url(r'^$', CompetitionRegistrationView.as_view(),
name='competitions_competition_register'),
url(r'^registration/successful/', TemplateView.as_view(
template_name='competitions/competition_registration_successful.html'),
name='competitions_competition_register_success'),
url(r'^series/(?P<pk>\d+)$', SeasonDetailView.as_view(),
name='competitions_season_detail'),
)
<commit_msg>competitions: Correct url of the SeasonDetailView<commit_after>
|
from django.conf.urls import patterns, url
from django.views.generic import TemplateView
from .views import CompetitionRegistrationView, SeasonDetailView
urlpatterns = patterns('',
url(r'^$', CompetitionRegistrationView.as_view(),
name='competitions_competition_register'),
url(r'^registration/successful/', TemplateView.as_view(
template_name='competitions/competition_registration_successful.html'),
name='competitions_competition_register_success'),
url(r'^season/(?P<pk>\d+)$', SeasonDetailView.as_view(),
name='competitions_season_detail'),
)
|
from django.conf.urls import patterns, url
from django.views.generic import TemplateView
from .views import CompetitionRegistrationView, SeasonDetailView
urlpatterns = patterns('',
url(r'^$', CompetitionRegistrationView.as_view(),
name='competitions_competition_register'),
url(r'^registration/successful/', TemplateView.as_view(
template_name='competitions/competition_registration_successful.html'),
name='competitions_competition_register_success'),
url(r'^series/(?P<pk>\d+)$', SeasonDetailView.as_view(),
name='competitions_season_detail'),
)
competitions: Correct url of the SeasonDetailViewfrom django.conf.urls import patterns, url
from django.views.generic import TemplateView
from .views import CompetitionRegistrationView, SeasonDetailView
urlpatterns = patterns('',
url(r'^$', CompetitionRegistrationView.as_view(),
name='competitions_competition_register'),
url(r'^registration/successful/', TemplateView.as_view(
template_name='competitions/competition_registration_successful.html'),
name='competitions_competition_register_success'),
url(r'^season/(?P<pk>\d+)$', SeasonDetailView.as_view(),
name='competitions_season_detail'),
)
|
<commit_before>from django.conf.urls import patterns, url
from django.views.generic import TemplateView
from .views import CompetitionRegistrationView, SeasonDetailView
urlpatterns = patterns('',
url(r'^$', CompetitionRegistrationView.as_view(),
name='competitions_competition_register'),
url(r'^registration/successful/', TemplateView.as_view(
template_name='competitions/competition_registration_successful.html'),
name='competitions_competition_register_success'),
url(r'^series/(?P<pk>\d+)$', SeasonDetailView.as_view(),
name='competitions_season_detail'),
)
<commit_msg>competitions: Correct url of the SeasonDetailView<commit_after>from django.conf.urls import patterns, url
from django.views.generic import TemplateView
from .views import CompetitionRegistrationView, SeasonDetailView
urlpatterns = patterns('',
url(r'^$', CompetitionRegistrationView.as_view(),
name='competitions_competition_register'),
url(r'^registration/successful/', TemplateView.as_view(
template_name='competitions/competition_registration_successful.html'),
name='competitions_competition_register_success'),
url(r'^season/(?P<pk>\d+)$', SeasonDetailView.as_view(),
name='competitions_season_detail'),
)
|
e2627f3c98c45791a23fd80fcdc80f5aaf8acf4c
|
terminus/tests/city_visitor_test.py
|
terminus/tests/city_visitor_test.py
|
import unittest
import mock
from generators.city_visitor import CityVisitor
class CityVisitorTest(unittest.TestCase):
def test_city_visitor(self):
city = mock.Mock()
visitor = CityVisitor(city)
visitor.run()
city.accept.assert_called()
|
Add test for city visitor.
|
Add test for city visitor.
|
Python
|
apache-2.0
|
ekumenlabs/terminus,ekumenlabs/terminus
|
Add test for city visitor.
|
import unittest
import mock
from generators.city_visitor import CityVisitor
class CityVisitorTest(unittest.TestCase):
def test_city_visitor(self):
city = mock.Mock()
visitor = CityVisitor(city)
visitor.run()
city.accept.assert_called()
|
<commit_before><commit_msg>Add test for city visitor.<commit_after>
|
import unittest
import mock
from generators.city_visitor import CityVisitor
class CityVisitorTest(unittest.TestCase):
def test_city_visitor(self):
city = mock.Mock()
visitor = CityVisitor(city)
visitor.run()
city.accept.assert_called()
|
Add test for city visitor.import unittest
import mock
from generators.city_visitor import CityVisitor
class CityVisitorTest(unittest.TestCase):
def test_city_visitor(self):
city = mock.Mock()
visitor = CityVisitor(city)
visitor.run()
city.accept.assert_called()
|
<commit_before><commit_msg>Add test for city visitor.<commit_after>import unittest
import mock
from generators.city_visitor import CityVisitor
class CityVisitorTest(unittest.TestCase):
def test_city_visitor(self):
city = mock.Mock()
visitor = CityVisitor(city)
visitor.run()
city.accept.assert_called()
|
|
a3c55d151a1a358f04d431b28e8512b68337825a
|
kremlin/forms.py
|
kremlin/forms.py
|
"""
# # #### ##### # # ##### # # # #
# # # # # ## ## # # # ## # #
### #### #### # # # # # # # # #####
# # # # # # # # ## # # #
# # # ##### # # # # # # # #
Kremlin Magical Everything System
Glasnost Image Board and Boredom Inhibitor
"""
""" Module containing form classes and validation, for use with WTForms """
from flaskext.uploads import UploadSet, IMAGES
from flaskext.wtf import Form, TextField, TextAreaField, FileField, \
file_allowed, file_required, validators
# Get allowed files data from flask-uploads
images = UploadSet("images", IMAGES)
class NewPostForm(Form):
    name = TextField(u'Name', validators=[validators.required()])
upload = FileField("Image File",
validators=[
file_required(),
file_allowed(images, "Not an image :("),
]
)
    note = TextAreaField(u'Note/Comment', validators=[validators.optional()])
tags = TextField(u'Tags (separated by space)',
validators=[validators.optional()]
)
|
Add WTForms version of upload form, WTForms is miles better.
|
Add WTForms version of upload form, WTForms is miles better.
|
Python
|
bsd-2-clause
|
glasnost/kremlin,glasnost/kremlin,glasnost/kremlin
|
Add WTForms version of upload form, WTForms is miles better.
|
"""
# # #### ##### # # ##### # # # #
# # # # # ## ## # # # ## # #
### #### #### # # # # # # # # #####
# # # # # # # # ## # # #
# # # ##### # # # # # # # #
Kremlin Magical Everything System
Glasnost Image Board and Boredom Inhibitor
"""
""" Module containing form classes and validation, for use with WTForms """
from flaskext.uploads import UploadSet, IMAGES
from flaskext.wtf import Form, TextField, TextAreaField, FileField, \
file_allowed, file_required, validators
# Get allowed files data from flask-uploads
images = UploadSet("images", IMAGES)
class NewPostForm(Form):
    name = TextField(u'Name', validators=[validators.required()])
upload = FileField("Image File",
validators=[
file_required(),
file_allowed(images, "Not an image :("),
]
)
    note = TextAreaField(u'Note/Comment', validators=[validators.optional()])
tags = TextField(u'Tags (separated by space)',
validators=[validators.optional()]
)
|
<commit_before><commit_msg>Add WTForms version of upload form, WTForms is miles better.<commit_after>
|
"""
# # #### ##### # # ##### # # # #
# # # # # ## ## # # # ## # #
### #### #### # # # # # # # # #####
# # # # # # # # ## # # #
# # # ##### # # # # # # # #
Kremlin Magical Everything System
Glasnost Image Board and Boredom Inhibitor
"""
""" Module containing form classes and validation, for use with WTForms """
from flaskext.uploads import UploadSet, IMAGES
from flaskext.wtf import Form, TextField, TextAreaField, FileField, \
file_allowed, file_required, validators
# Get allowed files data from flask-uploads
images = UploadSet("images", IMAGES)
class NewPostForm(Form):
    name = TextField(u'Name', validators=[validators.required()])
upload = FileField("Image File",
validators=[
file_required(),
file_allowed(images, "Not an image :("),
]
)
    note = TextAreaField(u'Note/Comment', validators=[validators.optional()])
tags = TextField(u'Tags (separated by space)',
validators=[validators.optional()]
)
|
Add WTForms version of upload form, WTForms is miles better."""
# # #### ##### # # ##### # # # #
# # # # # ## ## # # # ## # #
### #### #### # # # # # # # # #####
# # # # # # # # ## # # #
# # # ##### # # # # # # # #
Kremlin Magical Everything System
Glasnost Image Board and Boredom Inhibitor
"""
""" Module containing form classes and validation, for use with WTForms """
from flaskext.uploads import UploadSet, IMAGES
from flaskext.wtf import Form, TextField, TextAreaField, FileField, \
file_allowed, file_required, validators
# Get allowed files data from flask-uploads
images = UploadSet("images", IMAGES)
class NewPostForm(Form):
    name = TextField(u'Name', validators=[validators.required()])
upload = FileField("Image File",
validators=[
file_required(),
file_allowed(images, "Not an image :("),
]
)
    note = TextAreaField(u'Note/Comment', validators=[validators.optional()])
tags = TextField(u'Tags (separated by space)',
validators=[validators.optional()]
)
|
<commit_before><commit_msg>Add WTForms version of upload form, WTForms is miles better.<commit_after>"""
# # #### ##### # # ##### # # # #
# # # # # ## ## # # # ## # #
### #### #### # # # # # # # # #####
# # # # # # # # ## # # #
# # # ##### # # # # # # # #
Kremlin Magical Everything System
Glasnost Image Board and Boredom Inhibitor
"""
""" Module containing form classes and validation, for use with WTForms """
from flaskext.uploads import UploadSet, IMAGES
from flaskext.wtf import Form, TextField, TextAreaField, FileField, \
file_allowed, file_required, validators
# Get allowed files data from flask-uploads
images = UploadSet("images", IMAGES)
class NewPostForm(Form):
    name = TextField(u'Name', validators=[validators.required()])
upload = FileField("Image File",
validators=[
file_required(),
file_allowed(images, "Not an image :("),
]
)
    note = TextAreaField(u'Note/Comment', validators=[validators.optional()])
tags = TextField(u'Tags (separated by space)',
validators=[validators.optional()]
)
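For context, a hedged usage sketch showing how NewPostForm might be wired into a Flask view; the route, endpoint name, template, and the `app` object are illustrative assumptions and do not appear in the commit itself.

from flask import request, render_template

@app.route("/post/new", methods=["GET", "POST"])  # `app` is assumed to exist
def new_post():
    form = NewPostForm()
    if form.validate_on_submit():
        # flask-uploads stores the file and returns the saved filename
        filename = images.save(request.files["upload"])
        # ... persist form.name.data, form.note.data, form.tags.data ...
    return render_template("new_post.html", form=form)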
|
|
ae46fab579822b0aaffeca271b50b2a2d89e3909
|
tests/app/main/test_application.py
|
tests/app/main/test_application.py
|
import mock
from nose.tools import assert_equal, assert_true
from ..helpers import BaseApplicationTest
class TestApplication(BaseApplicationTest):
def setup(self):
super(TestApplication, self).setup()
def test_analytics_code_should_be_in_javascript(self):
res = self.client.get('/suppliers/static/javascripts/application.js')
assert_equal(200, res.status_code)
assert_true(
'GOVUK.analytics.trackPageview'
in res.get_data(as_text=True))
|
Add test for analytics JavaScript
|
Add test for analytics JavaScript
|
Python
|
mit
|
alphagov/digitalmarketplace-supplier-frontend,alphagov/digitalmarketplace-supplier-frontend,mtekel/digitalmarketplace-supplier-frontend,mtekel/digitalmarketplace-supplier-frontend,mtekel/digitalmarketplace-supplier-frontend,mtekel/digitalmarketplace-supplier-frontend,alphagov/digitalmarketplace-supplier-frontend,alphagov/digitalmarketplace-supplier-frontend
|
Add test for analytics JavaScript
|
import mock
from nose.tools import assert_equal, assert_true
from ..helpers import BaseApplicationTest
class TestApplication(BaseApplicationTest):
def setup(self):
super(TestApplication, self).setup()
def test_analytics_code_should_be_in_javascript(self):
res = self.client.get('/suppliers/static/javascripts/application.js')
assert_equal(200, res.status_code)
assert_true(
'GOVUK.analytics.trackPageview'
in res.get_data(as_text=True))
|
<commit_before><commit_msg>Add test for analytics JavaScript<commit_after>
|
import mock
from nose.tools import assert_equal, assert_true
from ..helpers import BaseApplicationTest
class TestApplication(BaseApplicationTest):
def setup(self):
super(TestApplication, self).setup()
def test_analytics_code_should_be_in_javascript(self):
res = self.client.get('/suppliers/static/javascripts/application.js')
assert_equal(200, res.status_code)
assert_true(
'GOVUK.analytics.trackPageview'
in res.get_data(as_text=True))
|
Add test for analytics JavaScriptimport mock
from nose.tools import assert_equal, assert_true
from ..helpers import BaseApplicationTest
class TestApplication(BaseApplicationTest):
def setup(self):
super(TestApplication, self).setup()
def test_analytics_code_should_be_in_javascript(self):
res = self.client.get('/suppliers/static/javascripts/application.js')
assert_equal(200, res.status_code)
assert_true(
'GOVUK.analytics.trackPageview'
in res.get_data(as_text=True))
|
<commit_before><commit_msg>Add test for analytics JavaScript<commit_after>import mock
from nose.tools import assert_equal, assert_true
from ..helpers import BaseApplicationTest
class TestApplication(BaseApplicationTest):
def setup(self):
super(TestApplication, self).setup()
def test_analytics_code_should_be_in_javascript(self):
res = self.client.get('/suppliers/static/javascripts/application.js')
assert_equal(200, res.status_code)
assert_true(
'GOVUK.analytics.trackPageview'
in res.get_data(as_text=True))
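For context, a hedged sketch of the shared helper this test inherits from; tests/app/helpers.py is not shown in the record, so the factory import and config name below are assumptions inferred from how self.client is used.

from app import create_app  # assumed application factory

class BaseApplicationTest(object):
    def setup(self):
        self.app = create_app('test')          # assumed config name
        self.client = self.app.test_client()   # Flask test client used above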
|
|
b1fc5436ecf69d1fe5852568fb1e8e551c7bd9b3
|
pfile_tools/scripts/dump_pfile_header.py
|
pfile_tools/scripts/dump_pfile_header.py
|
#!/usr/bin/env python
import sys
import optparse
from pfile_tools import headers, struct_utils
def known_revisions():
return sorted(headers.REVISIONS().keys())
def build_option_parser():
revision_opt_strs = ", ".join([str(r) for r in known_revisions()])
p = optparse.OptionParser(
usage="usage: %prog [OPTIONS] pfile",
description="Dumps header information from a GE P-file")
p.add_option(
"-r", "--revision", action="store", choices=known_revisions(),
help="Force a header revision (available: %s)" % revision_opt_strs)
p.add_option(
"--offsets", action="store_true", default=False, dest="offsets",
help="Show offsets to data elements")
p.add_option(
"--sizes", action="store_true", default=False, dest="sizes",
help="Show data element sizes")
p.add_option(
"--show-padding", action="store_true", default=False, dest="padding",
help="Print unknown 'padding' elements")
return p
def main():
parser = build_option_parser()
opts, args = parser.parse_args()
if len(args) < 1:
parser.error("Must specify a p-file.")
rev = opts.revision
if rev is not None:
rev = int(rev)
ph = headers.Pfile.from_file(args[0], force_revision=rev)
dumped = struct_utils.dump_struct(ph.header)
for info in dumped:
if (info.label.find("pad") == 0) and not opts.padding:
continue
s = "%s: %s" % (info.label, info.value)
if opts.offsets:
s += " offset: %#x" % (info.offset)
if opts.sizes:
s += " size: %s" % (info.size)
print(s)
if __name__ == "__main__":
main()
|
Add friendly header dumper program.
|
Add friendly header dumper program.
|
Python
|
bsd-3-clause
|
njvack/pfile-tools
|
Add friendly header dumper program.
|
#!/usr/bin/env python
import sys
import optparse
from pfile_tools import headers, struct_utils
def known_revisions():
return sorted(headers.REVISIONS().keys())
def build_option_parser():
revision_opt_strs = ", ".join([str(r) for r in known_revisions()])
p = optparse.OptionParser(
usage="usage: %prog [OPTIONS] pfile",
description="Dumps header information from a GE P-file")
p.add_option(
"-r", "--revision", action="store", choices=known_revisions(),
help="Force a header revision (available: %s)" % revision_opt_strs)
p.add_option(
"--offsets", action="store_true", default=False, dest="offsets",
help="Show offsets to data elements")
p.add_option(
"--sizes", action="store_true", default=False, dest="sizes",
help="Show data element sizes")
p.add_option(
"--show-padding", action="store_true", default=False, dest="padding",
help="Print unknown 'padding' elements")
return p
def main():
parser = build_option_parser()
opts, args = parser.parse_args()
if len(args) < 1:
parser.error("Must specify a p-file.")
rev = opts.revision
if rev is not None:
rev = int(rev)
ph = headers.Pfile.from_file(args[0], force_revision=rev)
dumped = struct_utils.dump_struct(ph.header)
for info in dumped:
if (info.label.find("pad") == 0) and not opts.padding:
continue
s = "%s: %s" % (info.label, info.value)
if opts.offsets:
s += " offset: %#x" % (info.offset)
if opts.sizes:
s += " size: %s" % (info.size)
print(s)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add friendly header dumper program.<commit_after>
|
#!/usr/bin/env python
import sys
import optparse
from pfile_tools import headers, struct_utils
def known_revisions():
return sorted(headers.REVISIONS().keys())
def build_option_parser():
revision_opt_strs = ", ".join([str(r) for r in known_revisions()])
p = optparse.OptionParser(
usage="usage: %prog [OPTIONS] pfile",
description="Dumps header information from a GE P-file")
p.add_option(
"-r", "--revision", action="store", choices=known_revisions(),
help="Force a header revision (available: %s)" % revision_opt_strs)
p.add_option(
"--offsets", action="store_true", default=False, dest="offsets",
help="Show offsets to data elements")
p.add_option(
"--sizes", action="store_true", default=False, dest="sizes",
help="Show data element sizes")
p.add_option(
"--show-padding", action="store_true", default=False, dest="padding",
help="Print unknown 'padding' elements")
return p
def main():
parser = build_option_parser()
opts, args = parser.parse_args()
if len(args) < 1:
parser.error("Must specify a p-file.")
rev = opts.revision
if rev is not None:
rev = int(rev)
ph = headers.Pfile.from_file(args[0], force_revision=rev)
dumped = struct_utils.dump_struct(ph.header)
for info in dumped:
if (info.label.find("pad") == 0) and not opts.padding:
continue
s = "%s: %s" % (info.label, info.value)
if opts.offsets:
s += " offset: %#x" % (info.offset)
if opts.sizes:
s += " size: %s" % (info.size)
print(s)
if __name__ == "__main__":
main()
|
Add friendly header dumper program.#!/usr/bin/env python
import sys
import optparse
from pfile_tools import headers, struct_utils
def known_revisions():
return sorted(headers.REVISIONS().keys())
def build_option_parser():
revision_opt_strs = ", ".join([str(r) for r in known_revisions()])
p = optparse.OptionParser(
usage="usage: %prog [OPTIONS] pfile",
description="Dumps header information from a GE P-file")
p.add_option(
"-r", "--revision", action="store", choices=known_revisions(),
help="Force a header revision (available: %s)" % revision_opt_strs)
p.add_option(
"--offsets", action="store_true", default=False, dest="offsets",
help="Show offsets to data elements")
p.add_option(
"--sizes", action="store_true", default=False, dest="sizes",
help="Show data element sizes")
p.add_option(
"--show-padding", action="store_true", default=False, dest="padding",
help="Print unknown 'padding' elements")
return p
def main():
parser = build_option_parser()
opts, args = parser.parse_args()
if len(args) < 1:
parser.error("Must specify a p-file.")
rev = opts.revision
if rev is not None:
rev = int(rev)
ph = headers.Pfile.from_file(args[0], force_revision=rev)
dumped = struct_utils.dump_struct(ph.header)
for info in dumped:
if (info.label.find("pad") == 0) and not opts.padding:
continue
s = "%s: %s" % (info.label, info.value)
if opts.offsets:
s += " offset: %#x" % (info.offset)
if opts.sizes:
s += " size: %s" % (info.size)
print(s)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add friendly header dumper program.<commit_after>#!/usr/bin/env python
import sys
import optparse
from pfile_tools import headers, struct_utils
def known_revisions():
return sorted(headers.REVISIONS().keys())
def build_option_parser():
revision_opt_strs = ", ".join([str(r) for r in known_revisions()])
p = optparse.OptionParser(
usage="usage: %prog [OPTIONS] pfile",
description="Dumps header information from a GE P-file")
p.add_option(
"-r", "--revision", action="store", choices=known_revisions(),
help="Force a header revision (available: %s)" % revision_opt_strs)
p.add_option(
"--offsets", action="store_true", default=False, dest="offsets",
help="Show offsets to data elements")
p.add_option(
"--sizes", action="store_true", default=False, dest="sizes",
help="Show data element sizes")
p.add_option(
"--show-padding", action="store_true", default=False, dest="padding",
help="Print unknown 'padding' elements")
return p
def main():
parser = build_option_parser()
opts, args = parser.parse_args()
if len(args) < 1:
parser.error("Must specify a p-file.")
rev = opts.revision
if rev is not None:
rev = int(rev)
ph = headers.Pfile.from_file(args[0], force_revision=rev)
dumped = struct_utils.dump_struct(ph.header)
for info in dumped:
if (info.label.find("pad") == 0) and not opts.padding:
continue
s = "%s: %s" % (info.label, info.value)
if opts.offsets:
s += " offset: %#x" % (info.offset)
if opts.sizes:
s += " size: %s" % (info.size)
print(s)
if __name__ == "__main__":
main()
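For reference, the same header dump can be driven programmatically; this hedged example mirrors the calls made in main() above, with a placeholder file name.

from pfile_tools import headers, struct_utils

ph = headers.Pfile.from_file("P00000.7")  # hypothetical p-file path
for info in struct_utils.dump_struct(ph.header):
    if info.label.find("pad") == 0:
        continue  # skip padding fields, matching the script's default
    print("%s: %s" % (info.label, info.value))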
|
|
352fa259d439a9be5c299470868e2fa82ecd835f
|
client/python/tests/unittest_runner.py
|
client/python/tests/unittest_runner.py
|
#!/usr/bin/env python
import os
import re
import sys
import shutil
import subprocess
__author__ = "Christopher Choi <chutsu@gmail.com>"
# SETTINGS
keep_unittest_logs = False
unittests_bin_dir = "tests"
unittests_log_dir = "unittests_log"
unittests_file_pattern = "^[a-zA-Z0-9_]*_tests.*$"
class TC:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
def print_stdout(unittest_output_fp):
# open unittest stdout log file
unittest_output = open(unittest_output_fp, 'r')
output_content = unittest_output.read()
unittest_output.close()
# print unittest stdout
print("-" * 79)
print(output_content)
print("-" * 79)
def get_files(path, pattern):
file_list = []
for root, directory, files in os.walk(path):
for f in files:
if re.match(pattern, f):
file_list.append(os.path.join(root, f))
return file_list
if __name__ == "__main__":
orig_cwd = os.getcwd()
# make log dir if not already exist
if not os.path.exists(unittests_log_dir):
os.mkdir(unittests_log_dir)
# gather all unittests
file_list = os.listdir(unittests_bin_dir)
unittests = get_files(unittests_bin_dir, unittests_file_pattern)
# execute all unittests
error = False
return_val = 0
for unittest in unittests:
# execute unittest
try:
print "UNITTEST [{0}] {1}Starting{2}".format(unittest, TC.OKBLUE, TC.ENDC)
print("UNITTEST [{0}] ".format(unittest)),
unittest_output_fp = os.path.join(
orig_cwd,
unittests_log_dir,
os.path.basename(unittest) + ".log"
)
unittest_output = open(unittest_output_fp, 'w')
return_val = subprocess.check_call(
["./{0}".format(unittest)],
stdout=unittest_output,
stderr=unittest_output
)
unittest_output.close()
print("{0}PASSED!{1}".format(TC.OKGREEN, TC.ENDC))
except:
unittest_output.close()
print("{0}FAILED!{1}".format(TC.FAIL, TC.ENDC))
print_stdout(unittest_output_fp)
error = True
os.chdir(orig_cwd)
# keep unittest stdout dir?
if keep_unittest_logs is False:
shutil.rmtree(unittests_log_dir)
if error is True:
sys.exit(-1)
else:
sys.exit(0)
|
Add test folder for python
|
Add test folder for python
|
Python
|
apache-2.0
|
wallarelvo/backfire,wallarelvo/backfire
|
Add test folder for python
|
#!/usr/bin/env python
import os
import re
import sys
import shutil
import subprocess
__author__ = "Christopher Choi <chutsu@gmail.com>"
# SETTINGS
keep_unittest_logs = False
unittests_bin_dir = "tests"
unittests_log_dir = "unittests_log"
unittests_file_pattern = "^[a-zA-Z0-9_]*_tests.*$"
class TC:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
def print_stdout(unittest_output_fp):
# open unittest stdout log file
unittest_output = open(unittest_output_fp, 'r')
output_content = unittest_output.read()
unittest_output.close()
# print unittest stdout
print("-" * 79)
print(output_content)
print("-" * 79)
def get_files(path, pattern):
file_list = []
for root, directory, files in os.walk(path):
for f in files:
if re.match(pattern, f):
file_list.append(os.path.join(root, f))
return file_list
if __name__ == "__main__":
orig_cwd = os.getcwd()
# make log dir if not already exist
if not os.path.exists(unittests_log_dir):
os.mkdir(unittests_log_dir)
# gather all unittests
file_list = os.listdir(unittests_bin_dir)
unittests = get_files(unittests_bin_dir, unittests_file_pattern)
# execute all unittests
error = False
return_val = 0
for unittest in unittests:
# execute unittest
try:
print "UNITTEST [{0}] {1}Starting{2}".format(unittest, TC.OKBLUE, TC.ENDC)
print("UNITTEST [{0}] ".format(unittest)),
unittest_output_fp = os.path.join(
orig_cwd,
unittests_log_dir,
os.path.basename(unittest) + ".log"
)
unittest_output = open(unittest_output_fp, 'w')
return_val = subprocess.check_call(
["./{0}".format(unittest)],
stdout=unittest_output,
stderr=unittest_output
)
unittest_output.close()
print("{0}PASSED!{1}".format(TC.OKGREEN, TC.ENDC))
except:
unittest_output.close()
print("{0}FAILED!{1}".format(TC.FAIL, TC.ENDC))
print_stdout(unittest_output_fp)
error = True
os.chdir(orig_cwd)
# keep unittest stdout dir?
if keep_unittest_logs is False:
shutil.rmtree(unittests_log_dir)
if error is True:
sys.exit(-1)
else:
sys.exit(0)
|
<commit_before><commit_msg>Add test folder for python<commit_after>
|
#!/usr/bin/env python
import os
import re
import sys
import shutil
import subprocess
__author__ = "Christopher Choi <chutsu@gmail.com>"
# SETTINGS
keep_unittest_logs = False
unittests_bin_dir = "tests"
unittests_log_dir = "unittests_log"
unittests_file_pattern = "^[a-zA-Z0-9_]*_tests.*$"
class TC:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
def print_stdout(unittest_output_fp):
# open unittest stdout log file
unittest_output = open(unittest_output_fp, 'r')
output_content = unittest_output.read()
unittest_output.close()
# print unittest stdout
print("-" * 79)
print(output_content)
print("-" * 79)
def get_files(path, pattern):
file_list = []
for root, directory, files in os.walk(path):
for f in files:
if re.match(pattern, f):
file_list.append(os.path.join(root, f))
return file_list
if __name__ == "__main__":
orig_cwd = os.getcwd()
# make log dir if not already exist
if not os.path.exists(unittests_log_dir):
os.mkdir(unittests_log_dir)
# gather all unittests
file_list = os.listdir(unittests_bin_dir)
unittests = get_files(unittests_bin_dir, unittests_file_pattern)
# execute all unittests
error = False
return_val = 0
for unittest in unittests:
# execute unittest
try:
print "UNITTEST [{0}] {1}Starting{2}".format(unittest, TC.OKBLUE, TC.ENDC)
print("UNITTEST [{0}] ".format(unittest)),
unittest_output_fp = os.path.join(
orig_cwd,
unittests_log_dir,
os.path.basename(unittest) + ".log"
)
unittest_output = open(unittest_output_fp, 'w')
return_val = subprocess.check_call(
["./{0}".format(unittest)],
stdout=unittest_output,
stderr=unittest_output
)
unittest_output.close()
print("{0}PASSED!{1}".format(TC.OKGREEN, TC.ENDC))
except:
unittest_output.close()
print("{0}FAILED!{1}".format(TC.FAIL, TC.ENDC))
print_stdout(unittest_output_fp)
error = True
os.chdir(orig_cwd)
# keep unittest stdout dir?
if keep_unittest_logs is False:
shutil.rmtree(unittests_log_dir)
if error is True:
sys.exit(-1)
else:
sys.exit(0)
|
Add test folder for python#!/usr/bin/env python
import os
import re
import sys
import shutil
import subprocess
__author__ = "Christopher Choi <chutsu@gmail.com>"
# SETTINGS
keep_unittest_logs = False
unittests_bin_dir = "tests"
unittests_log_dir = "unittests_log"
unittests_file_pattern = "^[a-zA-Z0-9_]*_tests.*$"
class TC:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
def print_stdout(unittest_output_fp):
# open unittest stdout log file
unittest_output = open(unittest_output_fp, 'r')
output_content = unittest_output.read()
unittest_output.close()
# print unittest stdout
print("-" * 79)
print(output_content)
print("-" * 79)
def get_files(path, pattern):
file_list = []
for root, directory, files in os.walk(path):
for f in files:
if re.match(pattern, f):
file_list.append(os.path.join(root, f))
return file_list
if __name__ == "__main__":
orig_cwd = os.getcwd()
# make log dir if not already exist
if not os.path.exists(unittests_log_dir):
os.mkdir(unittests_log_dir)
# gather all unittests
file_list = os.listdir(unittests_bin_dir)
unittests = get_files(unittests_bin_dir, unittests_file_pattern)
# execute all unittests
error = False
return_val = 0
for unittest in unittests:
# execute unittest
try:
print "UNITTEST [{0}] {1}Starting{2}".format(unittest, TC.OKBLUE, TC.ENDC)
print("UNITTEST [{0}] ".format(unittest)),
unittest_output_fp = os.path.join(
orig_cwd,
unittests_log_dir,
os.path.basename(unittest) + ".log"
)
unittest_output = open(unittest_output_fp, 'w')
return_val = subprocess.check_call(
["./{0}".format(unittest)],
stdout=unittest_output,
stderr=unittest_output
)
unittest_output.close()
print("{0}PASSED!{1}".format(TC.OKGREEN, TC.ENDC))
except:
unittest_output.close()
print("{0}FAILED!{1}".format(TC.FAIL, TC.ENDC))
print_stdout(unittest_output_fp)
error = True
os.chdir(orig_cwd)
# keep unittest stdout dir?
if keep_unittest_logs is False:
shutil.rmtree(unittests_log_dir)
if error is True:
sys.exit(-1)
else:
sys.exit(0)
|
<commit_before><commit_msg>Add test folder for python<commit_after>#!/usr/bin/env python
import os
import re
import sys
import shutil
import subprocess
__author__ = "Christopher Choi <chutsu@gmail.com>"
# SETTINGS
keep_unittest_logs = False
unittests_bin_dir = "tests"
unittests_log_dir = "unittests_log"
unittests_file_pattern = "^[a-zA-Z0-9_]*_tests.*$"
class TC:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
def print_stdout(unittest_output_fp):
# open unittest stdout log file
unittest_output = open(unittest_output_fp, 'r')
output_content = unittest_output.read()
unittest_output.close()
# print unittest stdout
print("-" * 79)
print(output_content)
print("-" * 79)
def get_files(path, pattern):
file_list = []
for root, directory, files in os.walk(path):
for f in files:
if re.match(pattern, f):
file_list.append(os.path.join(root, f))
return file_list
if __name__ == "__main__":
orig_cwd = os.getcwd()
# make log dir if not already exist
if not os.path.exists(unittests_log_dir):
os.mkdir(unittests_log_dir)
# gather all unittests
file_list = os.listdir(unittests_bin_dir)
unittests = get_files(unittests_bin_dir, unittests_file_pattern)
# execute all unittests
error = False
return_val = 0
for unittest in unittests:
# execute unittest
try:
print "UNITTEST [{0}] {1}Starting{2}".format(unittest, TC.OKBLUE, TC.ENDC)
print("UNITTEST [{0}] ".format(unittest)),
unittest_output_fp = os.path.join(
orig_cwd,
unittests_log_dir,
os.path.basename(unittest) + ".log"
)
unittest_output = open(unittest_output_fp, 'w')
return_val = subprocess.check_call(
["./{0}".format(unittest)],
stdout=unittest_output,
stderr=unittest_output
)
unittest_output.close()
print("{0}PASSED!{1}".format(TC.OKGREEN, TC.ENDC))
except:
unittest_output.close()
print("{0}FAILED!{1}".format(TC.FAIL, TC.ENDC))
print_stdout(unittest_output_fp)
error = True
os.chdir(orig_cwd)
# keep unittest stdout dir?
if keep_unittest_logs is False:
shutil.rmtree(unittests_log_dir)
if error is True:
sys.exit(-1)
else:
sys.exit(0)
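As an aside, the runner's file-matching behaviour is easy to check in isolation; the regex below is copied verbatim from the settings above, while the sample names are made up.

import re

pattern = "^[a-zA-Z0-9_]*_tests.*$"
for name in ["vector_tests", "util_tests.bin", "test_helpers", "runner"]:
    print("%s -> %s" % (name, bool(re.match(pattern, name))))
# vector_tests -> True, util_tests.bin -> True, the last two -> False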
|
|
d62201e3be36c1dff0276800d2c0f765572a1abb
|
src/WaveBlocksND/Quadrature.py
|
src/WaveBlocksND/Quadrature.py
|
"""The WaveBlocks Project
@author: R. Bourquin
@copyright: Copyright (C) 2013 R. Bourquin
@license: Modified BSD License
"""
__all__ = ["Quadrature"]
class Quadrature(object):
r"""This class is an abstract interface to quadratures in general.
"""
def __init__(self):
r"""General interface for quadratures.
:raise NotImplementedError: Abstract interface.
"""
raise NotImplementedError("'Quadrature' is an abstract interface.")
def __str__(self):
raise NotImplementedError("'Quadrature' is an abstract interface.")
def get_description(self):
r"""Return a description of this quadrature object.
A description is a ``dict`` containing all key-value pairs
necessary to reconstruct the current instance. A description
never contains any data.
"""
raise NotImplementedError("'Quadrature' is an abstract interface.")
def set_qr(self, QR):
r"""Set the :py:class:`QuadratureRule` subclass instance used for quadrature.
:param QR: The new :py:class:`QuadratureRule` instance.
"""
# TODO: Allow a list of QRs, one QR for each component of Psi
self._QR = QR
def get_qr(self):
r"""Return the :py:class:`QuadratureRule` subclass instance used for quadrature.
:return: The current instance of the quadrature rule.
"""
return self._QR
def initialize_packet(self):
raise NotImplementedError("'Quadrature' is an abstract interface.")
def initialize_operator(self):
raise NotImplementedError("'Quadrature' is an abstract interface.")
def prepare(self, rows, cols):
raise NotImplementedError("'Quadrature' is an abstract interface.")
def prepare_for_row(self, row, col):
raise NotImplementedError("'Quadrature' is an abstract interface.")
    def prepare_for_col(self, row, col):
raise NotImplementedError("'Quadrature' is an abstract interface.")
def perform_quadrature(self, row, col):
raise NotImplementedError("'Quadrature' is an abstract interface.")
def perform_build_matrix(self, row, col):
raise NotImplementedError("'Quadrature' is an abstract interface.")
|
Define interface for quadrature evaluator (first try)
|
Define interface for quadrature evaluator (first try)
|
Python
|
bsd-3-clause
|
WaveBlocks/WaveBlocksND,WaveBlocks/WaveBlocksND
|
Define interface for quadrature evaluator (first try)
|
"""The WaveBlocks Project
@author: R. Bourquin
@copyright: Copyright (C) 2013 R. Bourquin
@license: Modified BSD License
"""
__all__ = ["Quadrature"]
class Quadrature(object):
r"""This class is an abstract interface to quadratures in general.
"""
def __init__(self):
r"""General interface for quadratures.
:raise NotImplementedError: Abstract interface.
"""
raise NotImplementedError("'Quadrature' is an abstract interface.")
def __str__(self):
raise NotImplementedError("'Quadrature' is an abstract interface.")
def get_description(self):
r"""Return a description of this quadrature object.
A description is a ``dict`` containing all key-value pairs
necessary to reconstruct the current instance. A description
never contains any data.
"""
raise NotImplementedError("'Quadrature' is an abstract interface.")
def set_qr(self, QR):
r"""Set the :py:class:`QuadratureRule` subclass instance used for quadrature.
:param QR: The new :py:class:`QuadratureRule` instance.
"""
# TODO: Allow a list of QRs, one QR for each component of Psi
self._QR = QR
def get_qr(self):
r"""Return the :py:class:`QuadratureRule` subclass instance used for quadrature.
:return: The current instance of the quadrature rule.
"""
return self._QR
def initialize_packet(self):
raise NotImplementedError("'Quadrature' is an abstract interface.")
def initialize_operator(self):
raise NotImplementedError("'Quadrature' is an abstract interface.")
def prepare(self, rows, cols):
raise NotImplementedError("'Quadrature' is an abstract interface.")
def prepare_for_row(self, row, col):
raise NotImplementedError("'Quadrature' is an abstract interface.")
    def prepare_for_col(self, row, col):
raise NotImplementedError("'Quadrature' is an abstract interface.")
def perform_quadrature(self, row, col):
raise NotImplementedError("'Quadrature' is an abstract interface.")
def perform_build_matrix(self, row, col):
raise NotImplementedError("'Quadrature' is an abstract interface.")
|
<commit_before><commit_msg>Define interface for quadrature evaluator (first try)<commit_after>
|
"""The WaveBlocks Project
@author: R. Bourquin
@copyright: Copyright (C) 2013 R. Bourquin
@license: Modified BSD License
"""
__all__ = ["Quadrature"]
class Quadrature(object):
r"""This class is an abstract interface to quadratures in general.
"""
def __init__(self):
r"""General interface for quadratures.
:raise NotImplementedError: Abstract interface.
"""
raise NotImplementedError("'Quadrature' is an abstract interface.")
def __str__(self):
raise NotImplementedError("'Quadrature' is an abstract interface.")
def get_description(self):
r"""Return a description of this quadrature object.
A description is a ``dict`` containing all key-value pairs
necessary to reconstruct the current instance. A description
never contains any data.
"""
raise NotImplementedError("'Quadrature' is an abstract interface.")
def set_qr(self, QR):
r"""Set the :py:class:`QuadratureRule` subclass instance used for quadrature.
:param QR: The new :py:class:`QuadratureRule` instance.
"""
# TODO: Allow a list of QRs, one QR for each component of Psi
self._QR = QR
def get_qr(self):
r"""Return the :py:class:`QuadratureRule` subclass instance used for quadrature.
:return: The current instance of the quadrature rule.
"""
return self._QR
def initialize_packet(self):
raise NotImplementedError("'Quadrature' is an abstract interface.")
def initialize_operator(self):
raise NotImplementedError("'Quadrature' is an abstract interface.")
def prepare(self, rows, cols):
raise NotImplementedError("'Quadrature' is an abstract interface.")
def prepare_for_row(self, row, col):
raise NotImplementedError("'Quadrature' is an abstract interface.")
    def prepare_for_col(self, row, col):
raise NotImplementedError("'Quadrature' is an abstract interface.")
def perform_quadrature(self, row, col):
raise NotImplementedError("'Quadrature' is an abstract interface.")
def perform_build_matrix(self, row, col):
raise NotImplementedError("'Quadrature' is an abstract interface.")
|
Define interface for quadrature evaluator (first try)"""The WaveBlocks Project
@author: R. Bourquin
@copyright: Copyright (C) 2013 R. Bourquin
@license: Modified BSD License
"""
__all__ = ["Quadrature"]
class Quadrature(object):
r"""This class is an abstract interface to quadratures in general.
"""
def __init__(self):
r"""General interface for quadratures.
:raise NotImplementedError: Abstract interface.
"""
raise NotImplementedError("'Quadrature' is an abstract interface.")
def __str__(self):
raise NotImplementedError("'Quadrature' is an abstract interface.")
def get_description(self):
r"""Return a description of this quadrature object.
A description is a ``dict`` containing all key-value pairs
necessary to reconstruct the current instance. A description
never contains any data.
"""
raise NotImplementedError("'Quadrature' is an abstract interface.")
def set_qr(self, QR):
r"""Set the :py:class:`QuadratureRule` subclass instance used for quadrature.
:param QR: The new :py:class:`QuadratureRule` instance.
"""
# TODO: Allow a list of QRs, one QR for each component of Psi
self._QR = QR
def get_qr(self):
r"""Return the :py:class:`QuadratureRule` subclass instance used for quadrature.
:return: The current instance of the quadrature rule.
"""
return self._QR
def initialize_packet(self):
raise NotImplementedError("'Quadrature' is an abstract interface.")
def initialize_operator(self):
raise NotImplementedError("'Quadrature' is an abstract interface.")
def prepare(self, rows, cols):
raise NotImplementedError("'Quadrature' is an abstract interface.")
def prepare_for_row(self, row, col):
raise NotImplementedError("'Quadrature' is an abstract interface.")
    def prepare_for_col(self, row, col):
raise NotImplementedError("'Quadrature' is an abstract interface.")
def perform_quadrature(self, row, col):
raise NotImplementedError("'Quadrature' is an abstract interface.")
def perform_build_matrix(self, row, col):
raise NotImplementedError("'Quadrature' is an abstract interface.")
|
<commit_before><commit_msg>Define interface for quadrature evaluator (first try)<commit_after>"""The WaveBlocks Project
@author: R. Bourquin
@copyright: Copyright (C) 2013 R. Bourquin
@license: Modified BSD License
"""
__all__ = ["Quadrature"]
class Quadrature(object):
r"""This class is an abstract interface to quadratures in general.
"""
def __init__(self):
r"""General interface for quadratures.
:raise NotImplementedError: Abstract interface.
"""
raise NotImplementedError("'Quadrature' is an abstract interface.")
def __str__(self):
raise NotImplementedError("'Quadrature' is an abstract interface.")
def get_description(self):
r"""Return a description of this quadrature object.
A description is a ``dict`` containing all key-value pairs
necessary to reconstruct the current instance. A description
never contains any data.
"""
raise NotImplementedError("'Quadrature' is an abstract interface.")
def set_qr(self, QR):
r"""Set the :py:class:`QuadratureRule` subclass instance used for quadrature.
:param QR: The new :py:class:`QuadratureRule` instance.
"""
# TODO: Allow a list of QRs, one QR for each component of Psi
self._QR = QR
def get_qr(self):
r"""Return the :py:class:`QuadratureRule` subclass instance used for quadrature.
:return: The current instance of the quadrature rule.
"""
return self._QR
def initialize_packet(self):
raise NotImplementedError("'Quadrature' is an abstract interface.")
def initialize_operator(self):
raise NotImplementedError("'Quadrature' is an abstract interface.")
def prepare(self, rows, cols):
raise NotImplementedError("'Quadrature' is an abstract interface.")
def prepare_for_row(self, row, col):
raise NotImplementedError("'Quadrature' is an abstract interface.")
    def prepare_for_col(self, row, col):
raise NotImplementedError("'Quadrature' is an abstract interface.")
def perform_quadrature(self, row, col):
raise NotImplementedError("'Quadrature' is an abstract interface.")
def perform_build_matrix(self, row, col):
raise NotImplementedError("'Quadrature' is an abstract interface.")
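For context, a hedged sketch of how a concrete subclass would fill in this interface; the class name and description payload are illustrative assumptions, and the parent __init__ is skipped on purpose because it raises by design.

class HomogeneousQuadrature(Quadrature):
    def __init__(self, QR=None):
        # Deliberately do not call Quadrature.__init__ (it raises).
        self._QR = None
        if QR is not None:
            self.set_qr(QR)  # reuse the concrete setter from the base class

    def __str__(self):
        return "Quadrature with rule " + repr(self._QR)

    def get_description(self):
        # Key names are assumptions; a real description would carry enough
        # detail to reconstruct the quadrature rule as well.
        return {"type": "HomogeneousQuadrature"}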
|