| commit (stringlengths 40-40) | old_file (stringlengths 4-118) | new_file (stringlengths 4-118) | old_contents (stringlengths 0-2.94k) | new_contents (stringlengths 1-4.43k) | subject (stringlengths 15-444) | message (stringlengths 16-3.45k) | lang (stringclasses, 1 value) | license (stringclasses, 13 values) | repos (stringlengths 5-43.2k) | prompt (stringlengths 17-4.58k) | response (stringlengths 1-4.43k) | prompt_tagged (stringlengths 58-4.62k) | response_tagged (stringlengths 1-4.43k) | text (stringlengths 132-7.29k) | text_tagged (stringlengths 173-7.33k) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
91e16cd265a04539f6eb8af5179a2b46262cbad1
|
tests/test_registration.py
|
tests/test_registration.py
|
from aiosip import auth
AUTH = {
'auth_with_qop': 'Digest realm="asterisk",'
'nonce="1535646722/5d9e709c8f2ccd74601946bfbd77b032",'
'algorithm=md5,'
'qop="auth",'
'nc="00000001",'
'response="7aafeb20b391dfb0af52c6d39bbef36e",'
'cnonce="0a4f113b"',
'auth_without_qop': 'Digest realm="asterisk",'
'nonce="1535646722/5d9e709c8f2ccd74601946bfbd77b032",'
'algorithm=md5,'
'response="05d233c1f0c0ef3d2fa203512363ce64"',
'method': 'REGISTER',
'uri': 'sip:5000@10.10.26.12',
'username': '5000',
'password': 'sangoma',
'response_with_qop': '7aafeb20b391dfb0af52c6d39bbef36e',
'response_without_qop': '05d233c1f0c0ef3d2fa203512363ce64'
}
def test_with_qop():
authenticate = auth.Auth.from_authenticate_header(
AUTH['auth_with_qop'],
AUTH['method']
)
assert authenticate.validate_authorization(
authenticate,
password=AUTH['password'],
username=AUTH['username'],
uri=AUTH['uri']
)
def test_without_qop():
authenticate = auth.Auth.from_authenticate_header(
AUTH['auth_without_qop'],
AUTH['method']
)
assert authenticate.validate_authorization(
authenticate,
password=AUTH['password'],
username=AUTH['username'],
uri=AUTH['uri']
)
|
Add the registration unit test
|
Add the registration unit test
|
Python
|
apache-2.0
|
Eyepea/aiosip
|
Add the registration unit test
|
from aiosip import auth
AUTH = {
'auth_with_qop': 'Digest realm="asterisk",'
'nonce="1535646722/5d9e709c8f2ccd74601946bfbd77b032",'
'algorithm=md5,'
'qop="auth",'
'nc="00000001",'
'response="7aafeb20b391dfb0af52c6d39bbef36e",'
'cnonce="0a4f113b"',
'auth_without_qop': 'Digest realm="asterisk",'
'nonce="1535646722/5d9e709c8f2ccd74601946bfbd77b032",'
'algorithm=md5,'
'response="05d233c1f0c0ef3d2fa203512363ce64"',
'method': 'REGISTER',
'uri': 'sip:5000@10.10.26.12',
'username': '5000',
'password': 'sangoma',
'response_with_qop': '7aafeb20b391dfb0af52c6d39bbef36e',
'response_without_qop': '05d233c1f0c0ef3d2fa203512363ce64'
}
def test_with_qop():
authenticate = auth.Auth.from_authenticate_header(
AUTH['auth_with_qop'],
AUTH['method']
)
assert authenticate.validate_authorization(
authenticate,
password=AUTH['password'],
username=AUTH['username'],
uri=AUTH['uri']
)
def test_without_qop():
authenticate = auth.Auth.from_authenticate_header(
AUTH['auth_without_qop'],
AUTH['method']
)
assert authenticate.validate_authorization(
authenticate,
password=AUTH['password'],
username=AUTH['username'],
uri=AUTH['uri']
)
|
<commit_before><commit_msg>Add the registration unit test<commit_after>
|
from aiosip import auth
AUTH = {
'auth_with_qop': 'Digest realm="asterisk",'
'nonce="1535646722/5d9e709c8f2ccd74601946bfbd77b032",'
'algorithm=md5,'
'qop="auth",'
'nc="00000001",'
'response="7aafeb20b391dfb0af52c6d39bbef36e",'
'cnonce="0a4f113b"',
'auth_without_qop': 'Digest realm="asterisk",'
'nonce="1535646722/5d9e709c8f2ccd74601946bfbd77b032",'
'algorithm=md5,'
'response="05d233c1f0c0ef3d2fa203512363ce64"',
'method': 'REGISTER',
'uri': 'sip:5000@10.10.26.12',
'username': '5000',
'password': 'sangoma',
'response_with_qop': '7aafeb20b391dfb0af52c6d39bbef36e',
'response_without_qop': '05d233c1f0c0ef3d2fa203512363ce64'
}
def test_with_qop():
authenticate = auth.Auth.from_authenticate_header(
AUTH['auth_with_qop'],
AUTH['method']
)
assert authenticate.validate_authorization(
authenticate,
password=AUTH['password'],
username=AUTH['username'],
uri=AUTH['uri']
)
def test_without_qop():
authenticate = auth.Auth.from_authenticate_header(
AUTH['auth_without_qop'],
AUTH['method']
)
assert authenticate.validate_authorization(
authenticate,
password=AUTH['password'],
username=AUTH['username'],
uri=AUTH['uri']
)
|
Add the registration unit testfrom aiosip import auth
AUTH = {
'auth_with_qop': 'Digest realm="asterisk",'
'nonce="1535646722/5d9e709c8f2ccd74601946bfbd77b032",'
'algorithm=md5,'
'qop="auth",'
'nc="00000001",'
'response="7aafeb20b391dfb0af52c6d39bbef36e",'
'cnonce="0a4f113b"',
'auth_without_qop': 'Digest realm="asterisk",'
'nonce="1535646722/5d9e709c8f2ccd74601946bfbd77b032",'
'algorithm=md5,'
'response="05d233c1f0c0ef3d2fa203512363ce64"',
'method': 'REGISTER',
'uri': 'sip:5000@10.10.26.12',
'username': '5000',
'password': 'sangoma',
'response_with_qop': '7aafeb20b391dfb0af52c6d39bbef36e',
'response_without_qop': '05d233c1f0c0ef3d2fa203512363ce64'
}
def test_with_qop():
authenticate = auth.Auth.from_authenticate_header(
AUTH['auth_with_qop'],
AUTH['method']
)
assert authenticate.validate_authorization(
authenticate,
password=AUTH['password'],
username=AUTH['username'],
uri=AUTH['uri']
)
def test_without_qop():
authenticate = auth.Auth.from_authenticate_header(
AUTH['auth_without_qop'],
AUTH['method']
)
assert authenticate.validate_authorization(
authenticate,
password=AUTH['password'],
username=AUTH['username'],
uri=AUTH['uri']
)
|
<commit_before><commit_msg>Add the registration unit test<commit_after>from aiosip import auth
AUTH = {
'auth_with_qop': 'Digest realm="asterisk",'
'nonce="1535646722/5d9e709c8f2ccd74601946bfbd77b032",'
'algorithm=md5,'
'qop="auth",'
'nc="00000001",'
'response="7aafeb20b391dfb0af52c6d39bbef36e",'
'cnonce="0a4f113b"',
'auth_without_qop': 'Digest realm="asterisk",'
'nonce="1535646722/5d9e709c8f2ccd74601946bfbd77b032",'
'algorithm=md5,'
'response="05d233c1f0c0ef3d2fa203512363ce64"',
'method': 'REGISTER',
'uri': 'sip:5000@10.10.26.12',
'username': '5000',
'password': 'sangoma',
'response_with_qop': '7aafeb20b391dfb0af52c6d39bbef36e',
'response_without_qop': '05d233c1f0c0ef3d2fa203512363ce64'
}
def test_with_qop():
authenticate = auth.Auth.from_authenticate_header(
AUTH['auth_with_qop'],
AUTH['method']
)
assert authenticate.validate_authorization(
authenticate,
password=AUTH['password'],
username=AUTH['username'],
uri=AUTH['uri']
)
def test_without_qop():
authenticate = auth.Auth.from_authenticate_header(
AUTH['auth_without_qop'],
AUTH['method']
)
assert authenticate.validate_authorization(
authenticate,
password=AUTH['password'],
username=AUTH['username'],
uri=AUTH['uri']
)
|
|
ed1fde8b1dde1c7d9b7e7ef4554aaa38a202144c
|
registration/tps_gpu.py
|
registration/tps_gpu.py
|
from __future__ import division
import numpy as np
import pycuda.gpuarray as gpuarray
import pycuda.autoinit
import scikits.cuda.linalg as culinalg
culinalg.init()
def balance_matrix3_gpu(prob_nm, max_iter, row_priors, col_priors, outlierfrac, r_N = None):
n,m = prob_nm.shape
prob_NM = np.empty((n+1, m+1), 'f4')
prob_NM[:n, :m] = prob_nm
prob_NM[:n, m] = row_priors
prob_NM[n, :m] = col_priors
prob_NM[n, m] = np.sqrt(np.sum(row_priors)*np.sum(col_priors)) # this can be weighted; bigger weight = fewer outliers
a_N = np.ones((n+1),'f4')
a_N[n] = m*outlierfrac
b_M = np.ones((m+1),'f4')
b_M[m] = n*outlierfrac
if r_N is None: r_N = np.ones((n+1,1),'f4')
prob_NM_gpu = gpuarray.empty((n+1,m+1), dtype=np.float32)
prob_MN_gpu = gpuarray.empty((m+1,n+1), dtype=np.float32)
r_N_gpu = gpuarray.empty((n+1,1), dtype=np.float32)
c_M_gpu = gpuarray.empty((m+1,1), dtype=np.float32)
prob_NM_gpu.set_async(prob_NM)
prob_MN_gpu.set_async(prob_NM.T.copy())
r_N_gpu.set_async(r_N)
for _ in xrange(max_iter):
culinalg.dot(prob_NM_gpu, r_N_gpu, transa='T', out=c_M_gpu)
c_M_gpu.set_async(b_M[:,None]/c_M_gpu.get())
culinalg.dot(prob_MN_gpu, c_M_gpu, transa='T', out=r_N_gpu)
r_N_gpu.set_async(a_N[:,None]/r_N_gpu.get())
r_N = r_N_gpu.get()
c_M = c_M_gpu.get()
prob_NM *= r_N
prob_NM *= c_M.T
return prob_NM[:n, :m].astype(np.float64), r_N, c_M
|
Add GPU version of balance_matrix3.
|
Add GPU version of balance_matrix3.
|
Python
|
bsd-2-clause
|
wjchen84/lfd,rll/lfd,wjchen84/lfd,rll/lfd,rll/lfd,wjchen84/lfd
|
Add GPU version of balance_matrix3.
|
from __future__ import division
import numpy as np
import pycuda.gpuarray as gpuarray
import pycuda.autoinit
import scikits.cuda.linalg as culinalg
culinalg.init()
def balance_matrix3_gpu(prob_nm, max_iter, row_priors, col_priors, outlierfrac, r_N = None):
n,m = prob_nm.shape
prob_NM = np.empty((n+1, m+1), 'f4')
prob_NM[:n, :m] = prob_nm
prob_NM[:n, m] = row_priors
prob_NM[n, :m] = col_priors
prob_NM[n, m] = np.sqrt(np.sum(row_priors)*np.sum(col_priors)) # this can be weighted; bigger weight = fewer outliers
a_N = np.ones((n+1),'f4')
a_N[n] = m*outlierfrac
b_M = np.ones((m+1),'f4')
b_M[m] = n*outlierfrac
if r_N is None: r_N = np.ones((n+1,1),'f4')
prob_NM_gpu = gpuarray.empty((n+1,m+1), dtype=np.float32)
prob_MN_gpu = gpuarray.empty((m+1,n+1), dtype=np.float32)
r_N_gpu = gpuarray.empty((n+1,1), dtype=np.float32)
c_M_gpu = gpuarray.empty((m+1,1), dtype=np.float32)
prob_NM_gpu.set_async(prob_NM)
prob_MN_gpu.set_async(prob_NM.T.copy())
r_N_gpu.set_async(r_N)
for _ in xrange(max_iter):
culinalg.dot(prob_NM_gpu, r_N_gpu, transa='T', out=c_M_gpu)
c_M_gpu.set_async(b_M[:,None]/c_M_gpu.get())
culinalg.dot(prob_MN_gpu, c_M_gpu, transa='T', out=r_N_gpu)
r_N_gpu.set_async(a_N[:,None]/r_N_gpu.get())
r_N = r_N_gpu.get()
c_M = c_M_gpu.get()
prob_NM *= r_N
prob_NM *= c_M.T
return prob_NM[:n, :m].astype(np.float64), r_N, c_M
|
<commit_before><commit_msg>Add GPU version of balance_matrix3.<commit_after>
|
from __future__ import division
import numpy as np
import pycuda.gpuarray as gpuarray
import pycuda.autoinit
import scikits.cuda.linalg as culinalg
culinalg.init()
def balance_matrix3_gpu(prob_nm, max_iter, row_priors, col_priors, outlierfrac, r_N = None):
n,m = prob_nm.shape
prob_NM = np.empty((n+1, m+1), 'f4')
prob_NM[:n, :m] = prob_nm
prob_NM[:n, m] = row_priors
prob_NM[n, :m] = col_priors
prob_NM[n, m] = np.sqrt(np.sum(row_priors)*np.sum(col_priors)) # this can be weighted; bigger weight = fewer outliers
a_N = np.ones((n+1),'f4')
a_N[n] = m*outlierfrac
b_M = np.ones((m+1),'f4')
b_M[m] = n*outlierfrac
if r_N is None: r_N = np.ones((n+1,1),'f4')
prob_NM_gpu = gpuarray.empty((n+1,m+1), dtype=np.float32)
prob_MN_gpu = gpuarray.empty((m+1,n+1), dtype=np.float32)
r_N_gpu = gpuarray.empty((n+1,1), dtype=np.float32)
c_M_gpu = gpuarray.empty((m+1,1), dtype=np.float32)
prob_NM_gpu.set_async(prob_NM)
prob_MN_gpu.set_async(prob_NM.T.copy())
r_N_gpu.set_async(r_N)
for _ in xrange(max_iter):
culinalg.dot(prob_NM_gpu, r_N_gpu, transa='T', out=c_M_gpu)
c_M_gpu.set_async(b_M[:,None]/c_M_gpu.get())
culinalg.dot(prob_MN_gpu, c_M_gpu, transa='T', out=r_N_gpu)
r_N_gpu.set_async(a_N[:,None]/r_N_gpu.get())
r_N = r_N_gpu.get()
c_M = c_M_gpu.get()
prob_NM *= r_N
prob_NM *= c_M.T
return prob_NM[:n, :m].astype(np.float64), r_N, c_M
|
Add GPU version of balance_matrix3.from __future__ import division
import numpy as np
import pycuda.gpuarray as gpuarray
import pycuda.autoinit
import scikits.cuda.linalg as culinalg
culinalg.init()
def balance_matrix3_gpu(prob_nm, max_iter, row_priors, col_priors, outlierfrac, r_N = None):
n,m = prob_nm.shape
prob_NM = np.empty((n+1, m+1), 'f4')
prob_NM[:n, :m] = prob_nm
prob_NM[:n, m] = row_priors
prob_NM[n, :m] = col_priors
prob_NM[n, m] = np.sqrt(np.sum(row_priors)*np.sum(col_priors)) # this can be weighted; bigger weight = fewer outliers
a_N = np.ones((n+1),'f4')
a_N[n] = m*outlierfrac
b_M = np.ones((m+1),'f4')
b_M[m] = n*outlierfrac
if r_N is None: r_N = np.ones((n+1,1),'f4')
prob_NM_gpu = gpuarray.empty((n+1,m+1), dtype=np.float32)
prob_MN_gpu = gpuarray.empty((m+1,n+1), dtype=np.float32)
r_N_gpu = gpuarray.empty((n+1,1), dtype=np.float32)
c_M_gpu = gpuarray.empty((m+1,1), dtype=np.float32)
prob_NM_gpu.set_async(prob_NM)
prob_MN_gpu.set_async(prob_NM.T.copy())
r_N_gpu.set_async(r_N)
for _ in xrange(max_iter):
culinalg.dot(prob_NM_gpu, r_N_gpu, transa='T', out=c_M_gpu)
c_M_gpu.set_async(b_M[:,None]/c_M_gpu.get())
culinalg.dot(prob_MN_gpu, c_M_gpu, transa='T', out=r_N_gpu)
r_N_gpu.set_async(a_N[:,None]/r_N_gpu.get())
r_N = r_N_gpu.get()
c_M = c_M_gpu.get()
prob_NM *= r_N
prob_NM *= c_M.T
return prob_NM[:n, :m].astype(np.float64), r_N, c_M
|
<commit_before><commit_msg>Add GPU version of balance_matrix3.<commit_after>from __future__ import division
import numpy as np
import pycuda.gpuarray as gpuarray
import pycuda.autoinit
import scikits.cuda.linalg as culinalg
culinalg.init()
def balance_matrix3_gpu(prob_nm, max_iter, row_priors, col_priors, outlierfrac, r_N = None):
n,m = prob_nm.shape
prob_NM = np.empty((n+1, m+1), 'f4')
prob_NM[:n, :m] = prob_nm
prob_NM[:n, m] = row_priors
prob_NM[n, :m] = col_priors
prob_NM[n, m] = np.sqrt(np.sum(row_priors)*np.sum(col_priors)) # this can be weighted; bigger weight = fewer outliers
a_N = np.ones((n+1),'f4')
a_N[n] = m*outlierfrac
b_M = np.ones((m+1),'f4')
b_M[m] = n*outlierfrac
if r_N is None: r_N = np.ones((n+1,1),'f4')
prob_NM_gpu = gpuarray.empty((n+1,m+1), dtype=np.float32)
prob_MN_gpu = gpuarray.empty((m+1,n+1), dtype=np.float32)
r_N_gpu = gpuarray.empty((n+1,1), dtype=np.float32)
c_M_gpu = gpuarray.empty((m+1,1), dtype=np.float32)
prob_NM_gpu.set_async(prob_NM)
prob_MN_gpu.set_async(prob_NM.T.copy())
r_N_gpu.set_async(r_N)
for _ in xrange(max_iter):
culinalg.dot(prob_NM_gpu, r_N_gpu, transa='T', out=c_M_gpu)
c_M_gpu.set_async(b_M[:,None]/c_M_gpu.get())
culinalg.dot(prob_MN_gpu, c_M_gpu, transa='T', out=r_N_gpu)
r_N_gpu.set_async(a_N[:,None]/r_N_gpu.get())
r_N = r_N_gpu.get()
c_M = c_M_gpu.get()
prob_NM *= r_N
prob_NM *= c_M.T
return prob_NM[:n, :m].astype(np.float64), r_N, c_M
|
|
9090c5243814b867257ee843eef6d46c44817eff
|
alexandria/models/record.py
|
alexandria/models/record.py
|
import datetime
from pyramid.compat import (
text_type,
binary_type
)
from .meta import (
Base,
DBSession,
)
from sqlalchemy import (
Column,
DateTime,
Enum,
ForeignKey,
Integer,
String,
Table,
Text,
and_,
text,
)
from sqlalchemy.orm import (
contains_eager,
noload,
relationship,
)
from sqlalchemy.ext.hybrid import (
hybrid_property,
Comparator,
)
from sqlalchemy.dialects.postgresql import UUID
from .idna import IdnaComparator
from .types import value_to_type
class Record(Base):
__table__ = Table('records', Base.metadata,
Column('id', UUID(as_uuid=True), server_default=text('uuid_generate_v4()'), primary_key=True, unique=True),
Column('domain_id', ForeignKey('domains.id', onupdate='CASCADE', ondelete='CASCADE'), nullable=False, index=True),
Column('resource', String(256), index=True, unique=True),
Column('ttl', Integer, server_default=text('3600')),
Column('class', String(10), default=u"IN"),
Column('type', Enum(*[str(x) for x in value_to_type.keys()], name='resource_type')),
Column('record', Text),
Column('priority', Integer, server_default=text('0'), nullable=True),
Column('created', DateTime, server_default=text('current_timestamp')),
Column('updated', DateTime, server_default=text('current_timestamp'), server_onupdate=text('current_timestamp')),
)
_resource = __table__.c.resource
# cls.resource
@hybrid_property
def resource(self):
if isinstance(self, Domain):
return self._resource.encode('ascii').decode('idna')
return self._resource
@resource.setter
def resource(self, value):
if isinstance(value, text_type):
self._resource = value.encode('idna').decode('utf-8').lower()
elif isinstance(value, binary_type):
self._resource = value
else:
raise ValueError("Unable to store value as requested.")
@resource.comparator
def resource(cls):
return IdnaComparator(cls._resource)
|
Add the model for Records
|
Add the model for Records
|
Python
|
isc
|
cdunklau/alexandria,cdunklau/alexandria,cdunklau/alexandria,bertjwregeer/alexandria,bertjwregeer/alexandria
|
Add the model for Records
|
import datetime
from pyramid.compat import (
text_type,
binary_type
)
from .meta import (
Base,
DBSession,
)
from sqlalchemy import (
Column,
DateTime,
Enum,
ForeignKey,
Integer,
String,
Table,
Text,
and_,
text,
)
from sqlalchemy.orm import (
contains_eager,
noload,
relationship,
)
from sqlalchemy.ext.hybrid import (
hybrid_property,
Comparator,
)
from sqlalchemy.dialects.postgresql import UUID
from .idna import IdnaComparator
from .types import value_to_type
class Record(Base):
__table__ = Table('records', Base.metadata,
Column('id', UUID(as_uuid=True), server_default=text('uuid_generate_v4()'), primary_key=True, unique=True),
Column('domain_id', ForeignKey('domains.id', onupdate='CASCADE', ondelete='CASCADE'), nullable=False, index=True),
Column('resource', String(256), index=True, unique=True),
Column('ttl', Integer, server_default=text('3600')),
Column('class', String(10), default=u"IN"),
Column('type', Enum(*[str(x) for x in value_to_type.keys()], name='resource_type')),
Column('record', Text),
Column('priority', Integer, server_default=text('0'), nullable=True),
Column('created', DateTime, server_default=text('current_timestamp')),
Column('updated', DateTime, server_default=text('current_timestamp'), server_onupdate=text('current_timestamp')),
)
_resource = __table__.c.resource
# cls.resource
@hybrid_property
def resource(self):
if isinstance(self, Domain):
return self._resource.encode('ascii').decode('idna')
return self._resource
@resource.setter
def resource(self, value):
if isinstance(value, text_type):
self._resource = value.encode('idna').decode('utf-8').lower()
elif isinstance(value, binary_type):
self._resource = value
else:
raise ValueError("Unable to store value as requested.")
@resource.comparator
def resource(cls):
return IdnaComparator(cls._resource)
|
<commit_before><commit_msg>Add the model for Records<commit_after>
|
import datetime
from pyramid.compat import (
text_type,
binary_type
)
from .meta import (
Base,
DBSession,
)
from sqlalchemy import (
Column,
DateTime,
Enum,
ForeignKey,
Integer,
String,
Table,
Text,
and_,
text,
)
from sqlalchemy.orm import (
contains_eager,
noload,
relationship,
)
from sqlalchemy.ext.hybrid import (
hybrid_property,
Comparator,
)
from sqlalchemy.dialects.postgresql import UUID
from .idna import IdnaComparator
from .types import value_to_type
class Record(Base):
__table__ = Table('records', Base.metadata,
Column('id', UUID(as_uuid=True), server_default=text('uuid_generate_v4()'), primary_key=True, unique=True),
Column('domain_id', ForeignKey('domains.id', onupdate='CASCADE', ondelete='CASCADE'), nullable=False, index=True),
Column('resource', String(256), index=True, unique=True),
Column('ttl', Integer, server_default=text('3600')),
Column('class', String(10), default=u"IN"),
Column('type', Enum(*[str(x) for x in value_to_type.keys()], name='resource_type')),
Column('record', Text),
Column('priority', Integer, server_default=text('0'), nullable=True),
Column('created', DateTime, server_default=text('current_timestamp')),
Column('updated', DateTime, server_default=text('current_timestamp'), server_onupdate=text('current_timestamp')),
)
_resource = __table__.c.resource
# cls.resource
@hybrid_property
def resource(self):
if isinstance(self, Domain):
return self._resource.encode('ascii').decode('idna')
return self._resource
@resource.setter
def resource(self, value):
if isinstance(value, text_type):
self._resource = value.encode('idna').decode('utf-8').lower()
elif isinstance(value, binary_type):
self._resource = value
else:
raise ValueError("Unable to store value as requested.")
@resource.comparator
def resource(cls):
return IdnaComparator(cls._resource)
|
Add the model for Recordsimport datetime
from pyramid.compat import (
text_type,
binary_type
)
from .meta import (
Base,
DBSession,
)
from sqlalchemy import (
Column,
DateTime,
Enum,
ForeignKey,
Integer,
String,
Table,
Text,
and_,
text,
)
from sqlalchemy.orm import (
contains_eager,
noload,
relationship,
)
from sqlalchemy.ext.hybrid import (
hybrid_property,
Comparator,
)
from sqlalchemy.dialects.postgresql import UUID
from .idna import IdnaComparator
from .types import value_to_type
class Record(Base):
__table__ = Table('records', Base.metadata,
Column('id', UUID(as_uuid=True), server_default=text('uuid_generate_v4()'), primary_key=True, unique=True),
Column('domain_id', ForeignKey('domains.id', onupdate='CASCADE', ondelete='CASCADE'), nullable=False, index=True),
Column('resource', String(256), index=True, unique=True),
Column('ttl', Integer, server_default=text('3600')),
Column('class', String(10), default=u"IN"),
Column('type', Enum(*[str(x) for x in value_to_type.keys()], name='resource_type')),
Column('record', Text),
Column('priority', Integer, server_default=text('0'), nullable=True),
Column('created', DateTime, server_default=text('current_timestamp')),
Column('updated', DateTime, server_default=text('current_timestamp'), server_onupdate=text('current_timestamp')),
)
_resource = __table__.c.resource
# cls.resource
@hybrid_property
def resource(self):
if isinstance(self, Domain):
return self._resource.encode('ascii').decode('idna')
return self._resource
@resource.setter
def resource(self, value):
if isinstance(value, text_type):
self._resource = value.encode('idna').decode('utf-8').lower()
elif isinstance(value, binary_type):
self._resource = value
else:
raise ValueError("Unable to store value as requested.")
@resource.comparator
def resource(cls):
return IdnaComparator(cls._resource)
|
<commit_before><commit_msg>Add the model for Records<commit_after>import datetime
from pyramid.compat import (
text_type,
binary_type
)
from .meta import (
Base,
DBSession,
)
from sqlalchemy import (
Column,
DateTime,
Enum,
ForeignKey,
Integer,
String,
Table,
Text,
and_,
text,
)
from sqlalchemy.orm import (
contains_eager,
noload,
relationship,
)
from sqlalchemy.ext.hybrid import (
hybrid_property,
Comparator,
)
from sqlalchemy.dialects.postgresql import UUID
from .idna import IdnaComparator
from .types import value_to_type
class Record(Base):
__table__ = Table('records', Base.metadata,
Column('id', UUID(as_uuid=True), server_default=text('uuid_generate_v4()'), primary_key=True, unique=True),
Column('domain_id', ForeignKey('domains.id', onupdate='CASCADE', ondelete='CASCADE'), nullable=False, index=True),
Column('resource', String(256), index=True, unique=True),
Column('ttl', Integer, server_default=text('3600')),
Column('class', String(10), default=u"IN"),
Column('type', Enum(*[str(x) for x in value_to_type.keys()], name='resource_type')),
Column('record', Text),
Column('priority', Integer, server_default=text('0'), nullable=True),
Column('created', DateTime, server_default=text('current_timestamp')),
Column('updated', DateTime, server_default=text('current_timestamp'), server_onupdate=text('current_timestamp')),
)
_resource = __table__.c.resource
# cls.resource
@hybrid_property
def resource(self):
if isinstance(self, Domain):
return self._resource.encode('ascii').decode('idna')
return self._resource
@resource.setter
def resource(self, value):
if isinstance(value, text_type):
self._resource = value.encode('idna').decode('utf-8').lower()
elif isinstance(value, binary_type):
self._resource = value
else:
raise ValueError("Unable to store value as requested.")
@resource.comparator
def resource(cls):
return IdnaComparator(cls._resource)
|
|
a4ab336cdfd8c65d627d39e90f5bb9a63b5572b6
|
test/hoomd_script/test_option.py
|
test/hoomd_script/test_option.py
|
# -*- coding: iso-8859-1 -*-
# Maintainer: joaander
from hoomd_script import *
import unittest
import os
# unit tests for init.create_random
class init_create_random_tests (unittest.TestCase):
def setUp(self):
print
# tests that mode settings work properly
def test_mode(self):
option.set_mode('gpu');
self.assert_(globals.options.mode == 'gpu');
option.set_mode('cpu');
self.assert_(globals.options.mode == 'cpu');
option.set_mode(None);
self.assert_(globals.options.mode is None);
self.assertRaises(RuntimeError, option.set_mode, 'foo');
def test_gpu(self):
option.set_gpu(1);
self.assert_(globals.options.gpu == 1);
self.assert_(globals.options.mode == 'gpu');
option.set_gpu(None);
self.assert_(globals.options.gpu is None);
self.assertRaises(RuntimeError, option.set_gpu, 'foo');
def test_ncpu(self):
option.set_ncpu(1);
self.assert_(globals.options.ncpu == 1);
self.assert_(globals.options.mode == 'cpu');
option.set_ncpu(None);
self.assert_(globals.options.ncpu is None);
self.assertRaises(RuntimeError, option.set_ncpu, 'foo');
def test_gpu_error_checking(self):
option.set_gpu_error_checking(False);
self.assert_(globals.options.gpu_error_checking == False);
option.set_gpu_error_checking(True);
self.assert_(globals.options.gpu_error_checking == True);
def test_min_cpu(self):
option.set_min_cpu(False);
self.assert_(globals.options.min_cpu == False);
option.set_min_cpu(True);
self.assert_(globals.options.min_cpu == True);
def test_ignore_display(self):
option.set_ignore_display(False);
self.assert_(globals.options.ignore_display == False);
option.set_ignore_display(True);
self.assert_(globals.options.ignore_display == True);
def tearDown(self):
pass;
if __name__ == '__main__':
unittest.main(argv = ['test.py', '-v'])
|
Add script unit test for option
|
Add script unit test for option
resf #192
|
Python
|
bsd-3-clause
|
joaander/hoomd-blue,joaander/hoomd-blue,joaander/hoomd-blue,joaander/hoomd-blue,joaander/hoomd-blue,joaander/hoomd-blue
|
Add script unit test for option
resf #192
|
# -*- coding: iso-8859-1 -*-
# Maintainer: joaander
from hoomd_script import *
import unittest
import os
# unit tests for init.create_random
class init_create_random_tests (unittest.TestCase):
def setUp(self):
print
# tests that mode settings work properly
def test_mode(self):
option.set_mode('gpu');
self.assert_(globals.options.mode == 'gpu');
option.set_mode('cpu');
self.assert_(globals.options.mode == 'cpu');
option.set_mode(None);
self.assert_(globals.options.mode is None);
self.assertRaises(RuntimeError, option.set_mode, 'foo');
def test_gpu(self):
option.set_gpu(1);
self.assert_(globals.options.gpu == 1);
self.assert_(globals.options.mode == 'gpu');
option.set_gpu(None);
self.assert_(globals.options.gpu is None);
self.assertRaises(RuntimeError, option.set_gpu, 'foo');
def test_ncpu(self):
option.set_ncpu(1);
self.assert_(globals.options.ncpu == 1);
self.assert_(globals.options.mode == 'cpu');
option.set_ncpu(None);
self.assert_(globals.options.ncpu is None);
self.assertRaises(RuntimeError, option.set_ncpu, 'foo');
def test_gpu_error_checking(self):
option.set_gpu_error_checking(False);
self.assert_(globals.options.gpu_error_checking == False);
option.set_gpu_error_checking(True);
self.assert_(globals.options.gpu_error_checking == True);
def test_min_cpu(self):
option.set_min_cpu(False);
self.assert_(globals.options.min_cpu == False);
option.set_min_cpu(True);
self.assert_(globals.options.min_cpu == True);
def test_ignore_display(self):
option.set_ignore_display(False);
self.assert_(globals.options.ignore_display == False);
option.set_ignore_display(True);
self.assert_(globals.options.ignore_display == True);
def tearDown(self):
pass;
if __name__ == '__main__':
unittest.main(argv = ['test.py', '-v'])
|
<commit_before><commit_msg>Add script unit test for option
resf #192<commit_after>
|
# -*- coding: iso-8859-1 -*-
# Maintainer: joaander
from hoomd_script import *
import unittest
import os
# unit tests for init.create_random
class init_create_random_tests (unittest.TestCase):
def setUp(self):
print
# tests that mode settings work properly
def test_mode(self):
option.set_mode('gpu');
self.assert_(globals.options.mode == 'gpu');
option.set_mode('cpu');
self.assert_(globals.options.mode == 'cpu');
option.set_mode(None);
self.assert_(globals.options.mode is None);
self.assertRaises(RuntimeError, option.set_mode, 'foo');
def test_gpu(self):
option.set_gpu(1);
self.assert_(globals.options.gpu == 1);
self.assert_(globals.options.mode == 'gpu');
option.set_gpu(None);
self.assert_(globals.options.gpu is None);
self.assertRaises(RuntimeError, option.set_gpu, 'foo');
def test_ncpu(self):
option.set_ncpu(1);
self.assert_(globals.options.ncpu == 1);
self.assert_(globals.options.mode == 'cpu');
option.set_ncpu(None);
self.assert_(globals.options.ncpu is None);
self.assertRaises(RuntimeError, option.set_ncpu, 'foo');
def test_gpu_error_checking(self):
option.set_gpu_error_checking(False);
self.assert_(globals.options.gpu_error_checking == False);
option.set_gpu_error_checking(True);
self.assert_(globals.options.gpu_error_checking == True);
def test_min_cpu(self):
option.set_min_cpu(False);
self.assert_(globals.options.min_cpu == False);
option.set_min_cpu(True);
self.assert_(globals.options.min_cpu == True);
def test_ignore_display(self):
option.set_ignore_display(False);
self.assert_(globals.options.ignore_display == False);
option.set_ignore_display(True);
self.assert_(globals.options.ignore_display == True);
def tearDown(self):
pass;
if __name__ == '__main__':
unittest.main(argv = ['test.py', '-v'])
|
Add script unit test for option
resf #192# -*- coding: iso-8859-1 -*-
# Maintainer: joaander
from hoomd_script import *
import unittest
import os
# unit tests for init.create_random
class init_create_random_tests (unittest.TestCase):
def setUp(self):
print
# tests that mode settings work properly
def test_mode(self):
option.set_mode('gpu');
self.assert_(globals.options.mode == 'gpu');
option.set_mode('cpu');
self.assert_(globals.options.mode == 'cpu');
option.set_mode(None);
self.assert_(globals.options.mode is None);
self.assertRaises(RuntimeError, option.set_mode, 'foo');
def test_gpu(self):
option.set_gpu(1);
self.assert_(globals.options.gpu == 1);
self.assert_(globals.options.mode == 'gpu');
option.set_gpu(None);
self.assert_(globals.options.gpu is None);
self.assertRaises(RuntimeError, option.set_gpu, 'foo');
def test_ncpu(self):
option.set_ncpu(1);
self.assert_(globals.options.ncpu == 1);
self.assert_(globals.options.mode == 'cpu');
option.set_ncpu(None);
self.assert_(globals.options.ncpu is None);
self.assertRaises(RuntimeError, option.set_ncpu, 'foo');
def test_gpu_error_checking(self):
option.set_gpu_error_checking(False);
self.assert_(globals.options.gpu_error_checking == False);
option.set_gpu_error_checking(True);
self.assert_(globals.options.gpu_error_checking == True);
def test_min_cpu(self):
option.set_min_cpu(False);
self.assert_(globals.options.min_cpu == False);
option.set_min_cpu(True);
self.assert_(globals.options.min_cpu == True);
def test_ignore_display(self):
option.set_ignore_display(False);
self.assert_(globals.options.ignore_display == False);
option.set_ignore_display(True);
self.assert_(globals.options.ignore_display == True);
def tearDown(self):
pass;
if __name__ == '__main__':
unittest.main(argv = ['test.py', '-v'])
|
<commit_before><commit_msg>Add script unit test for option
resf #192<commit_after># -*- coding: iso-8859-1 -*-
# Maintainer: joaander
from hoomd_script import *
import unittest
import os
# unit tests for init.create_random
class init_create_random_tests (unittest.TestCase):
def setUp(self):
print
# tests that mode settings work properly
def test_mode(self):
option.set_mode('gpu');
self.assert_(globals.options.mode == 'gpu');
option.set_mode('cpu');
self.assert_(globals.options.mode == 'cpu');
option.set_mode(None);
self.assert_(globals.options.mode is None);
self.assertRaises(RuntimeError, option.set_mode, 'foo');
def test_gpu(self):
option.set_gpu(1);
self.assert_(globals.options.gpu == 1);
self.assert_(globals.options.mode == 'gpu');
option.set_gpu(None);
self.assert_(globals.options.gpu is None);
self.assertRaises(RuntimeError, option.set_gpu, 'foo');
def test_ncpu(self):
option.set_ncpu(1);
self.assert_(globals.options.ncpu == 1);
self.assert_(globals.options.mode == 'cpu');
option.set_ncpu(None);
self.assert_(globals.options.ncpu is None);
self.assertRaises(RuntimeError, option.set_ncpu, 'foo');
def test_gpu_error_checking(self):
option.set_gpu_error_checking(False);
self.assert_(globals.options.gpu_error_checking == False);
option.set_gpu_error_checking(True);
self.assert_(globals.options.gpu_error_checking == True);
def test_min_cpu(self):
option.set_min_cpu(False);
self.assert_(globals.options.min_cpu == False);
option.set_min_cpu(True);
self.assert_(globals.options.min_cpu == True);
def test_ignore_display(self):
option.set_ignore_display(False);
self.assert_(globals.options.ignore_display == False);
option.set_ignore_display(True);
self.assert_(globals.options.ignore_display == True);
def tearDown(self):
pass;
if __name__ == '__main__':
unittest.main(argv = ['test.py', '-v'])
|
|
5cabd000e3a656ff372a4ba8b2865e2564eec903
|
hug/documentation.py
|
hug/documentation.py
|
from collections import OrderedDict
import hug.types
def generate(module, base_url=""):
documentation = OrderedDict()
documentation['overview'] = module.__doc__
for url, method_handler in module.HUG_API_CALLS.items():
url_doc = documentation.setdefault(url, {})
mapping = OrderedDict()
for method, handler in method_handler.items():
mapping.setdefault(handler, []).append(method.split("_")[-1].upper())
for handler, methods in mapping.items():
doc = url_doc.setdefault(",".join(methods), OrderedDict())
doc['usage'] = handler.api_function.__doc__
if handler.example:
doc['example'] = "{0}{1}".format(base_url, url)
if isinstance(handler.example, str):
doc['example'] += "?{0}".format(handler.example)
inputs = doc.setdefault('inputs', OrderedDict())
doc['outputs'] = OrderedDict(format=handler.output_format.__doc__)
api = handler.api_function
types = api.__annotations__
arguments = api.__code__.co_varnames[:api.__code__.co_argcount]
defaults = {}
for index, default in enumerate(api.__defaults__ or ()):
defaults[arguments[-(index + 1)]] = default
for argument in arguments:
if argument in ('request', 'response'):
continue
input_definition = inputs.setdefault(argument, OrderedDict())
input_definition['type'] = types.get(argument, hug.types.text).__doc__
default = defaults.get(argument, None)
if default is not None:
input_definition['default'] = default
|
Create initial api doc generator
|
Create initial api doc generator
|
Python
|
mit
|
alisaifee/hug,alisaifee/hug,giserh/hug,jean/hug,gbn972/hug,MuhammadAlkarouri/hug,STANAPO/hug,gbn972/hug,janusnic/hug,yasoob/hug,MuhammadAlkarouri/hug,janusnic/hug,shaunstanislaus/hug,origingod/hug,MuhammadAlkarouri/hug,timothycrosley/hug,STANAPO/hug,origingod/hug,jean/hug,philiptzou/hug,yasoob/hug,timothycrosley/hug,shaunstanislaus/hug,philiptzou/hug,timothycrosley/hug,giserh/hug
|
Create initial api doc generator
|
from collections import OrderedDict
import hug.types
def generate(module, base_url=""):
documentation = OrderedDict()
documentation['overview'] = module.__doc__
for url, method_handler in module.HUG_API_CALLS.items():
url_doc = documentation.setdefault(url, {})
mapping = OrderedDict()
for method, handler in method_handler.items():
mapping.setdefault(handler, []).append(method.split("_")[-1].upper())
for handler, methods in mapping.items():
doc = url_doc.setdefault(",".join(methods), OrderedDict())
doc['usage'] = handler.api_function.__doc__
if handler.example:
doc['example'] = "{0}{1}".format(base_url, url)
if isinstance(handler.example, str):
doc['example'] += "?{0}".format(handler.example)
inputs = doc.setdefault('inputs', OrderedDict())
doc['outputs'] = OrderedDict(format=handler.output_format.__doc__)
api = handler.api_function
types = api.__annotations__
arguments = api.__code__.co_varnames[:api.__code__.co_argcount]
defaults = {}
for index, default in enumerate(api.__defaults__ or ()):
defaults[arguments[-(index + 1)]] = default
for argument in arguments:
if argument in ('request', 'response'):
continue
input_definition = inputs.setdefault(argument, OrderedDict())
input_definition['type'] = types.get(argument, hug.types.text).__doc__
default = defaults.get(argument, None)
if default is not None:
input_definition['default'] = default
|
<commit_before><commit_msg>Create initial api doc generator<commit_after>
|
from collections import OrderedDict
import hug.types
def generate(module, base_url=""):
documentation = OrderedDict()
documentation['overview'] = module.__doc__
for url, method_handler in module.HUG_API_CALLS.items():
url_doc = documentation.setdefault(url, {})
mapping = OrderedDict()
for method, handler in method_handler.items():
mapping.setdefault(handler, []).append(method.split("_")[-1].upper())
for handler, methods in mapping.items():
doc = url_doc.setdefault(",".join(methods), OrderedDict())
doc['usage'] = handler.api_function.__doc__
if handler.example:
doc['example'] = "{0}{1}".format(base_url, url)
if isinstance(handler.example, str):
doc['example'] += "?{0}".format(handler.example)
inputs = doc.setdefault('inputs', OrderedDict())
doc['outputs'] = OrderedDict(format=handler.output_format.__doc__)
api = handler.api_function
types = api.__annotations__
arguments = api.__code__.co_varnames[:api.__code__.co_argcount]
defaults = {}
for index, default in enumerate(api.__defaults__ or ()):
defaults[arguments[-(index + 1)]] = default
for argument in arguments:
if argument in ('request', 'response'):
continue
input_definition = inputs.setdefault(argument, OrderedDict())
input_definition['type'] = types.get(argument, hug.types.text).__doc__
default = defaults.get(argument, None)
if default is not None:
input_definition['default'] = default
|
Create initial api doc generatorfrom collections import OrderedDict
import hug.types
def generate(module, base_url=""):
documentation = OrderedDict()
documentation['overview'] = module.__doc__
for url, method_handler in module.HUG_API_CALLS.items():
url_doc = documentation.setdefault(url, {})
mapping = OrderedDict()
for method, handler in method_handler.items():
mapping.setdefault(handler, []).append(method.split("_")[-1].upper())
for handler, methods in mapping.items():
doc = url_doc.setdefault(",".join(methods), OrderedDict())
doc['usage'] = handler.api_function.__doc__
if handler.example:
doc['example'] = "{0}{1}".format(base_url, url)
if isinstance(handler.example, str):
doc['example'] += "?{0}".format(handler.example)
inputs = doc.setdefault('inputs', OrderedDict())
doc['outputs'] = OrderedDict(format=handler.output_format.__doc__)
api = handler.api_function
types = api.__annotations__
arguments = api.__code__.co_varnames[:api.__code__.co_argcount]
defaults = {}
for index, default in enumerate(api.__defaults__ or ()):
defaults[arguments[-(index + 1)]] = default
for argument in arguments:
if argument in ('request', 'response'):
continue
input_definition = inputs.setdefault(argument, OrderedDict())
input_definition['type'] = types.get(argument, hug.types.text).__doc__
default = defaults.get(argument, None)
if default is not None:
input_definition['default'] = default
|
<commit_before><commit_msg>Create initial api doc generator<commit_after>from collections import OrderedDict
import hug.types
def generate(module, base_url=""):
documentation = OrderedDict()
documentation['overview'] = module.__doc__
for url, method_handler in module.HUG_API_CALLS.items():
url_doc = documentation.setdefault(url, {})
mapping = OrderedDict()
for method, handler in method_handler.items():
mapping.setdefault(handler, []).append(method.split("_")[-1].upper())
for handler, methods in mapping.items():
doc = url_doc.setdefault(",".join(methods), OrderedDict())
doc['usage'] = handler.api_function.__doc__
if handler.example:
doc['example'] = "{0}{1}".format(base_url, url)
if isinstance(handler.example, str):
doc['example'] += "?{0}".format(handler.example)
inputs = doc.setdefault('inputs', OrderedDict())
doc['outputs'] = OrderedDict(format=handler.output_format.__doc__)
api = handler.api_function
types = api.__annotations__
arguments = api.__code__.co_varnames[:api.__code__.co_argcount]
defaults = {}
for index, default in enumerate(api.__defaults__ or ()):
defaults[arguments[-(index + 1)]] = default
for argument in arguments:
if argument in ('request', 'response'):
continue
input_definition = inputs.setdefault(argument, OrderedDict())
input_definition['type'] = types.get(argument, hug.types.text).__doc__
default = defaults.get(argument, None)
if default is not None:
input_definition['default'] = default
|
|
8a1e4164b1dbcf8b17de638e915ed0fb4302707c
|
greenfan/management/commands/list-nodes.py
|
greenfan/management/commands/list-nodes.py
|
#
# Copyright 2012 Cisco Systems, Inc.
#
# Author: Soren Hansen <sorhanse@cisco.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tempfile
import urlparse
from subprocess import Popen
from time import sleep, time
from django.core.management.base import BaseCommand
from django.template import Context, Template
from fabric.api import env as fabric_env
from fabric.api import run, local, sudo, put
from greenfan import utils
from greenfan.models import Configuration, TestSpecification, Server
def run_cmd(args):
proc = Popen(args)
return proc.communicate()
format_string = '%-35s %-20s %-20s %s'
def header():
print format_string % ('fqdn', 'IP', 'MAC', '')
def describe_node(node, extra_info=''):
print format_string % (node.fqdn(), node.ip, node.mac, extra_info)
class Command(BaseCommand):
def handle(self, job_id, **options):
job = TestSpecification.objects.get(id=job_id)
config = Configuration.get()
print "Participating nodes:"
header()
for node in job.nodes():
extra_info = []
if node == job.build_node():
extra_info += ['build node']
if node == job.control_node():
extra_info += ['controller node']
describe_node(node, ', '.join(extra_info))
|
Add a command to list participating nodes
|
Add a command to list participating nodes
|
Python
|
apache-2.0
|
sorenh/python-django-greenfan,sorenh/python-django-greenfan
|
Add a command to list participating nodes
|
#
# Copyright 2012 Cisco Systems, Inc.
#
# Author: Soren Hansen <sorhanse@cisco.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tempfile
import urlparse
from subprocess import Popen
from time import sleep, time
from django.core.management.base import BaseCommand
from django.template import Context, Template
from fabric.api import env as fabric_env
from fabric.api import run, local, sudo, put
from greenfan import utils
from greenfan.models import Configuration, TestSpecification, Server
def run_cmd(args):
proc = Popen(args)
return proc.communicate()
format_string = '%-35s %-20s %-20s %s'
def header():
print format_string % ('fqdn', 'IP', 'MAC', '')
def describe_node(node, extra_info=''):
print format_string % (node.fqdn(), node.ip, node.mac, extra_info)
class Command(BaseCommand):
def handle(self, job_id, **options):
job = TestSpecification.objects.get(id=job_id)
config = Configuration.get()
print "Participating nodes:"
header()
for node in job.nodes():
extra_info = []
if node == job.build_node():
extra_info += ['build node']
if node == job.control_node():
extra_info += ['controller node']
describe_node(node, ', '.join(extra_info))
|
<commit_before><commit_msg>Add a command to list participating nodes<commit_after>
|
#
# Copyright 2012 Cisco Systems, Inc.
#
# Author: Soren Hansen <sorhanse@cisco.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tempfile
import urlparse
from subprocess import Popen
from time import sleep, time
from django.core.management.base import BaseCommand
from django.template import Context, Template
from fabric.api import env as fabric_env
from fabric.api import run, local, sudo, put
from greenfan import utils
from greenfan.models import Configuration, TestSpecification, Server
def run_cmd(args):
proc = Popen(args)
return proc.communicate()
format_string = '%-35s %-20s %-20s %s'
def header():
print format_string % ('fqdn', 'IP', 'MAC', '')
def describe_node(node, extra_info=''):
print format_string % (node.fqdn(), node.ip, node.mac, extra_info)
class Command(BaseCommand):
def handle(self, job_id, **options):
job = TestSpecification.objects.get(id=job_id)
config = Configuration.get()
print "Participating nodes:"
header()
for node in job.nodes():
extra_info = []
if node == job.build_node():
extra_info += ['build node']
if node == job.control_node():
extra_info += ['controller node']
describe_node(node, ', '.join(extra_info))
|
Add a command to list participating nodes#
# Copyright 2012 Cisco Systems, Inc.
#
# Author: Soren Hansen <sorhanse@cisco.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tempfile
import urlparse
from subprocess import Popen
from time import sleep, time
from django.core.management.base import BaseCommand
from django.template import Context, Template
from fabric.api import env as fabric_env
from fabric.api import run, local, sudo, put
from greenfan import utils
from greenfan.models import Configuration, TestSpecification, Server
def run_cmd(args):
proc = Popen(args)
return proc.communicate()
format_string = '%-35s %-20s %-20s %s'
def header():
print format_string % ('fqdn', 'IP', 'MAC', '')
def describe_node(node, extra_info=''):
print format_string % (node.fqdn(), node.ip, node.mac, extra_info)
class Command(BaseCommand):
def handle(self, job_id, **options):
job = TestSpecification.objects.get(id=job_id)
config = Configuration.get()
print "Participating nodes:"
header()
for node in job.nodes():
extra_info = []
if node == job.build_node():
extra_info += ['build node']
if node == job.control_node():
extra_info += ['controller node']
describe_node(node, ', '.join(extra_info))
|
<commit_before><commit_msg>Add a command to list participating nodes<commit_after>#
# Copyright 2012 Cisco Systems, Inc.
#
# Author: Soren Hansen <sorhanse@cisco.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tempfile
import urlparse
from subprocess import Popen
from time import sleep, time
from django.core.management.base import BaseCommand
from django.template import Context, Template
from fabric.api import env as fabric_env
from fabric.api import run, local, sudo, put
from greenfan import utils
from greenfan.models import Configuration, TestSpecification, Server
def run_cmd(args):
proc = Popen(args)
return proc.communicate()
format_string = '%-35s %-20s %-20s %s'
def header():
print format_string % ('fqdn', 'IP', 'MAC', '')
def describe_node(node, extra_info=''):
print format_string % (node.fqdn(), node.ip, node.mac, extra_info)
class Command(BaseCommand):
def handle(self, job_id, **options):
job = TestSpecification.objects.get(id=job_id)
config = Configuration.get()
print "Participating nodes:"
header()
for node in job.nodes():
extra_info = []
if node == job.build_node():
extra_info += ['build node']
if node == job.control_node():
extra_info += ['controller node']
describe_node(node, ', '.join(extra_info))
|
|
eb8a12371f97600e46614d199d08191ba367b825
|
OpenPNM/Geometry/models/throat_perimeter.py
|
OpenPNM/Geometry/models/throat_perimeter.py
|
r"""
===============================================================================
Submodule -- throat_perimeter
===============================================================================
"""
def voronoi(geometry,
throat_perimeter='throat.perimeter',
**kwargs):
r"""
As the throat perimeter is stored on the network, lookup the indices pertaining to the geom and retrieve
"""
network = geometry._net
tindex = network.throats(geometry.name)
value = network[throat_perimeter][tindex]
return value
|
Create throat perimeter submodule to use with extrude
|
Create throat perimeter submodule to use with extrude
Signed-off-by: Tom Tranter <5f16e196b3c33003e3a85b835da4e658fd57e2ad@leeds.ac.uk>
|
Python
|
mit
|
amdouglas/OpenPNM,amdouglas/OpenPNM,stadelmanma/OpenPNM,PMEAL/OpenPNM,TomTranter/OpenPNM
|
Create throat perimeter submodule to use with extrude
Signed-off-by: Tom Tranter <5f16e196b3c33003e3a85b835da4e658fd57e2ad@leeds.ac.uk>
|
r"""
===============================================================================
Submodule -- throat_perimeter
===============================================================================
"""
def voronoi(geometry,
throat_perimeter='throat.perimeter',
**kwargs):
r"""
As the throat perimeter is stored on the network, lookup the indices pertaining to the geom and retrieve
"""
network = geometry._net
tindex = network.throats(geometry.name)
value = network[throat_perimeter][tindex]
return value
|
<commit_before><commit_msg>Create throat perimeter submodule to use with extrude
Signed-off-by: Tom Tranter <5f16e196b3c33003e3a85b835da4e658fd57e2ad@leeds.ac.uk><commit_after>
|
r"""
===============================================================================
Submodule -- throat_perimeter
===============================================================================
"""
def voronoi(geometry,
throat_perimeter='throat.perimeter',
**kwargs):
r"""
As the throat perimeter is stored on the network, lookup the indices pertaining to the geom and retrieve
"""
network = geometry._net
tindex = network.throats(geometry.name)
value = network[throat_perimeter][tindex]
return value
|
Create throat perimeter submodule to use with extrude
Signed-off-by: Tom Tranter <5f16e196b3c33003e3a85b835da4e658fd57e2ad@leeds.ac.uk>r"""
===============================================================================
Submodule -- throat_perimeter
===============================================================================
"""
def voronoi(geometry,
throat_perimeter='throat.perimeter',
**kwargs):
r"""
As the throat perimeter is stored on the network, lookup the indices pertaining to the geom and retrieve
"""
network = geometry._net
tindex = network.throats(geometry.name)
value = network[throat_perimeter][tindex]
return value
|
<commit_before><commit_msg>Create throat perimeter submodule to use with extrude
Signed-off-by: Tom Tranter <5f16e196b3c33003e3a85b835da4e658fd57e2ad@leeds.ac.uk><commit_after>r"""
===============================================================================
Submodule -- throat_perimeter
===============================================================================
"""
def voronoi(geometry,
throat_perimeter='throat.perimeter',
**kwargs):
r"""
As the throat perimeter is stored on the network, lookup the indices pertaining to the geom and retrieve
"""
network = geometry._net
tindex = network.throats(geometry.name)
value = network[throat_perimeter][tindex]
return value
|
|
24df17e1c1265d81879a8f7e7a494cf4703dae69
|
agent_test.py
|
agent_test.py
|
import time
import msvcrt
import zmq
from agent_pb2 import *
zctx = zmq.Context()
zsck_ctrl = zctx.socket(zmq.PUSH)
zsck_status = zctx.socket(zmq.SUB)
zsck_status.setsockopt(zmq.SUBSCRIBE, '')
zsck_ctrl.connect('tcp://127.0.0.1:17267')
zsck_status.connect('tcp://127.0.0.1:17268')
j = 0
while True:
cmd = msvcrt.getch() if msvcrt.kbhit() else 0
msg = FfmpegControl()
if cmd == 'r':
msg.opcode = FfmpegControl.RECORD
zsck_ctrl.send(msg.SerializeToString())
elif cmd == 't':
msg.opcode = FfmpegControl.IDLE
zsck_ctrl.send(msg.SerializeToString())
elif cmd == 'p':
msg.opcode = FfmpegControl.PAUSE
zsck_ctrl.send(msg.SerializeToString())
elif cmd == 'P':
msg.opcode = FfmpegControl.UNPAUSE
zsck_ctrl.send(msg.SerializeToString())
elif cmd == 'X':
msg.opcode = FfmpegControl.SHUTDOWN
zsck_ctrl.send(msg.SerializeToString())
try:
zmsg = zsck_status.recv(zmq.NOBLOCK)
status = FfmpegStatus()
status.ParseFromString(zmsg)
print j, status
j = j + 1
except zmq.ZMQError as e:
if e.errno != zmq.EAGAIN:
raise
|
Add simple manual test for remote control
|
Add simple manual test for remote control
|
Python
|
mit
|
dkrikun/ffmpeg-rcd
|
Add simple manual test for remote control
|
import time
import msvcrt
import zmq
from agent_pb2 import *
zctx = zmq.Context()
zsck_ctrl = zctx.socket(zmq.PUSH)
zsck_status = zctx.socket(zmq.SUB)
zsck_status.setsockopt(zmq.SUBSCRIBE, '')
zsck_ctrl.connect('tcp://127.0.0.1:17267')
zsck_status.connect('tcp://127.0.0.1:17268')
j = 0
while True:
cmd = msvcrt.getch() if msvcrt.kbhit() else 0
msg = FfmpegControl()
if cmd == 'r':
msg.opcode = FfmpegControl.RECORD
zsck_ctrl.send(msg.SerializeToString())
elif cmd == 't':
msg.opcode = FfmpegControl.IDLE
zsck_ctrl.send(msg.SerializeToString())
elif cmd == 'p':
msg.opcode = FfmpegControl.PAUSE
zsck_ctrl.send(msg.SerializeToString())
elif cmd == 'P':
msg.opcode = FfmpegControl.UNPAUSE
zsck_ctrl.send(msg.SerializeToString())
elif cmd == 'X':
msg.opcode = FfmpegControl.SHUTDOWN
zsck_ctrl.send(msg.SerializeToString())
try:
zmsg = zsck_status.recv(zmq.NOBLOCK)
status = FfmpegStatus()
status.ParseFromString(zmsg)
print j, status
j = j + 1
except zmq.ZMQError as e:
if e.errno != zmq.EAGAIN:
raise
|
<commit_before><commit_msg>Add simple manual test for remote control<commit_after>
|
import time
import msvcrt
import zmq
from agent_pb2 import *
zctx = zmq.Context()
zsck_ctrl = zctx.socket(zmq.PUSH)
zsck_status = zctx.socket(zmq.SUB)
zsck_status.setsockopt(zmq.SUBSCRIBE, '')
zsck_ctrl.connect('tcp://127.0.0.1:17267')
zsck_status.connect('tcp://127.0.0.1:17268')
j = 0
while True:
cmd = msvcrt.getch() if msvcrt.kbhit() else 0
msg = FfmpegControl()
if cmd == 'r':
msg.opcode = FfmpegControl.RECORD
zsck_ctrl.send(msg.SerializeToString())
elif cmd == 't':
msg.opcode = FfmpegControl.IDLE
zsck_ctrl.send(msg.SerializeToString())
elif cmd == 'p':
msg.opcode = FfmpegControl.PAUSE
zsck_ctrl.send(msg.SerializeToString())
elif cmd == 'P':
msg.opcode = FfmpegControl.UNPAUSE
zsck_ctrl.send(msg.SerializeToString())
elif cmd == 'X':
msg.opcode = FfmpegControl.SHUTDOWN
zsck_ctrl.send(msg.SerializeToString())
try:
zmsg = zsck_status.recv(zmq.NOBLOCK)
status = FfmpegStatus()
status.ParseFromString(zmsg)
print j, status
j = j + 1
except zmq.ZMQError as e:
if e.errno != zmq.EAGAIN:
raise
|
Add simple manual test for remote controlimport time
import msvcrt
import zmq
from agent_pb2 import *
zctx = zmq.Context()
zsck_ctrl = zctx.socket(zmq.PUSH)
zsck_status = zctx.socket(zmq.SUB)
zsck_status.setsockopt(zmq.SUBSCRIBE, '')
zsck_ctrl.connect('tcp://127.0.0.1:17267')
zsck_status.connect('tcp://127.0.0.1:17268')
j = 0
while True:
cmd = msvcrt.getch() if msvcrt.kbhit() else 0
msg = FfmpegControl()
if cmd == 'r':
msg.opcode = FfmpegControl.RECORD
zsck_ctrl.send(msg.SerializeToString())
elif cmd == 't':
msg.opcode = FfmpegControl.IDLE
zsck_ctrl.send(msg.SerializeToString())
elif cmd == 'p':
msg.opcode = FfmpegControl.PAUSE
zsck_ctrl.send(msg.SerializeToString())
elif cmd == 'P':
msg.opcode = FfmpegControl.UNPAUSE
zsck_ctrl.send(msg.SerializeToString())
elif cmd == 'X':
msg.opcode = FfmpegControl.SHUTDOWN
zsck_ctrl.send(msg.SerializeToString())
try:
zmsg = zsck_status.recv(zmq.NOBLOCK)
status = FfmpegStatus()
status.ParseFromString(zmsg)
print j, status
j = j + 1
except zmq.ZMQError as e:
if e.errno != zmq.EAGAIN:
raise
|
<commit_before><commit_msg>Add simple manual test for remote control<commit_after>import time
import msvcrt
import zmq
from agent_pb2 import *
zctx = zmq.Context()
zsck_ctrl = zctx.socket(zmq.PUSH)
zsck_status = zctx.socket(zmq.SUB)
zsck_status.setsockopt(zmq.SUBSCRIBE, '')
zsck_ctrl.connect('tcp://127.0.0.1:17267')
zsck_status.connect('tcp://127.0.0.1:17268')
j = 0
while True:
cmd = msvcrt.getch() if msvcrt.kbhit() else 0
msg = FfmpegControl()
if cmd == 'r':
msg.opcode = FfmpegControl.RECORD
zsck_ctrl.send(msg.SerializeToString())
elif cmd == 't':
msg.opcode = FfmpegControl.IDLE
zsck_ctrl.send(msg.SerializeToString())
elif cmd == 'p':
msg.opcode = FfmpegControl.PAUSE
zsck_ctrl.send(msg.SerializeToString())
elif cmd == 'P':
msg.opcode = FfmpegControl.UNPAUSE
zsck_ctrl.send(msg.SerializeToString())
elif cmd == 'X':
msg.opcode = FfmpegControl.SHUTDOWN
zsck_ctrl.send(msg.SerializeToString())
try:
zmsg = zsck_status.recv(zmq.NOBLOCK)
status = FfmpegStatus()
status.ParseFromString(zmsg)
print j, status
j = j + 1
except zmq.ZMQError as e:
if e.errno != zmq.EAGAIN:
raise
|
|
30268d35e12224a8deee33fff78f7beabb4b4b79
|
phi/math/_fit.py
|
phi/math/_fit.py
|
from ._shape import DimFilter, instance, shape
from ._tensors import Tensor
from ._ops import mean
def fit_line_2d(x: Tensor, y: Tensor, point_dim: DimFilter = instance, weights: Tensor = 1.):
"""
Fits a line of the form *slope · x + offset* to pass through the data points defined by their coordinates `x` and `y`.
Args:
x: X coordinate of the points.
y: Y coordinate of the points.
point_dim: Dimension listing the points the line should pass through. This dimension will be reduced in the operation.
By default, all instance dimensions are used.
weights: (Optional) Tensor assigning a weight to each point in `x` and `y` to determine the relative influence of that point in the overall fit.
Returns:
slope: Line slope in units y/x as `Tensor`
offset: Line value for x=0.
"""
assert shape(x).only(point_dim) or shape(y).only(point_dim), f"Either x or y need to have a dimension corresponding to point_dim but got x: {shape(x)}, y: {shape(y)}"
if not shape(weights): # unweighted fit
mean_x = mean(x, point_dim)
x_rel = x - mean_x
var_x = mean(x_rel ** 2, point_dim)
slope = mean(x_rel * y, point_dim) / var_x
offset = mean(y, point_dim) - slope * mean_x
else: # weighted fit
mean_w = mean(weights, point_dim)
mean_x = mean(weights * x, point_dim) / mean_w
x_rel = x - mean_x
var_wx = mean(weights * x_rel ** 2, point_dim)
slope = mean(weights * x_rel * y, point_dim) / var_wx
offset = mean(weights * y, point_dim) / mean_w - slope * mean_x
return slope, offset
|
Add fit_line_2d (private for now)
|
[math] Add fit_line_2d (private for now)
|
Python
|
mit
|
tum-pbs/PhiFlow,tum-pbs/PhiFlow
|
[math] Add fit_line_2d (private for now)
|
from ._shape import DimFilter, instance, shape
from ._tensors import Tensor
from ._ops import mean
def fit_line_2d(x: Tensor, y: Tensor, point_dim: DimFilter = instance, weights: Tensor = 1.):
"""
Fits a line of the form *slope · x + offset* to pass through the data points defined by their coordinates `x` and `y`.
Args:
x: X coordinate of the points.
y: Y coordinate of the points.
point_dim: Dimension listing the points the line should pass through. This dimension will be reduced in the operation.
By default, all instance dimensions are used.
weights: (Optional) Tensor assigning a weight to each point in `x` and `y` to determine the relative influence of that point in the overall fit.
Returns:
slope: Line slope in units y/x as `Tensor`
offset: Line value for x=0.
"""
assert shape(x).only(point_dim) or shape(y).only(point_dim), f"Either x or y need to have a dimension corresponding to point_dim but got x: {shape(x)}, y: {shape(y)}"
if not shape(weights): # unweighted fit
mean_x = mean(x, point_dim)
x_rel = x - mean_x
var_x = mean(x_rel ** 2, point_dim)
slope = mean(x_rel * y, point_dim) / var_x
offset = mean(y, point_dim) - slope * mean_x
else: # weighted fit
mean_w = mean(weights, point_dim)
mean_x = mean(weights * x, point_dim) / mean_w
x_rel = x - mean_x
var_wx = mean(weights * x_rel ** 2, point_dim)
slope = mean(weights * x_rel * y, point_dim) / var_wx
offset = mean(weights * y, point_dim) / mean_w - slope * mean_x
return slope, offset
|
<commit_before><commit_msg>[math] Add fit_line_2d (private for now)<commit_after>
|
from ._shape import DimFilter, instance, shape
from ._tensors import Tensor
from ._ops import mean
def fit_line_2d(x: Tensor, y: Tensor, point_dim: DimFilter = instance, weights: Tensor = 1.):
"""
Fits a line of the form *slope · x + offset* to pass through the data points defined by their coordinates `x` and `y`.
Args:
x: X coordinate of the points.
y: Y coordinate of the points.
point_dim: Dimension listing the points the line should pass through. This dimension will be reduced in the operation.
By default, all instance dimensions are used.
weights: (Optional) Tensor assigning a weight to each point in `x` and `y` to determine the relative influence of that point in the overall fit.
Returns:
slope: Line slope in units y/x as `Tensor`
offset: Line value for x=0.
"""
assert shape(x).only(point_dim) or shape(y).only(point_dim), f"Either x or y need to have a dimension corresponding to point_dim but got x: {shape(x)}, y: {shape(y)}"
if not shape(weights): # unweighted fit
mean_x = mean(x, point_dim)
x_rel = x - mean_x
var_x = mean(x_rel ** 2, point_dim)
slope = mean(x_rel * y, point_dim) / var_x
offset = mean(y, point_dim) - slope * mean_x
else: # weighted fit
mean_w = mean(weights, point_dim)
mean_x = mean(weights * x, point_dim) / mean_w
x_rel = x - mean_x
var_wx = mean(weights * x_rel ** 2, point_dim)
slope = mean(weights * x_rel * y, point_dim) / var_wx
offset = mean(weights * y, point_dim) / mean_w - slope * mean_x
return slope, offset
|
[math] Add fit_line_2d (private for now)from ._shape import DimFilter, instance, shape
from ._tensors import Tensor
from ._ops import mean
def fit_line_2d(x: Tensor, y: Tensor, point_dim: DimFilter = instance, weights: Tensor = 1.):
"""
Fits a line of the form *slope · x + offset* to pass through the data points defined by their coordinates `x` and `y`.
Args:
x: X coordinate of the points.
y: Y coordinate of the points.
point_dim: Dimension listing the points the line should pass through. This dimension will be reduced in the operation.
By default, all instance dimensions are used.
weights: (Optional) Tensor assigning a weight to each point in `x` and `y` to determine the relative influence of that point in the overall fit.
Returns:
slope: Line slope in units y/x as `Tensor`
offset: Line value for x=0.
"""
assert shape(x).only(point_dim) or shape(y).only(point_dim), f"Either x or y need to have a dimension corresponding to point_dim but got x: {shape(x)}, y: {shape(y)}"
if not shape(weights): # unweighted fit
mean_x = mean(x, point_dim)
x_rel = x - mean_x
var_x = mean(x_rel ** 2, point_dim)
slope = mean(x_rel * y, point_dim) / var_x
offset = mean(y, point_dim) - slope * mean_x
else: # weighted fit
mean_w = mean(weights, point_dim)
mean_x = mean(weights * x, point_dim) / mean_w
x_rel = x - mean_x
var_wx = mean(weights * x_rel ** 2, point_dim)
slope = mean(weights * x_rel * y, point_dim) / var_wx
offset = mean(weights * y, point_dim) / mean_w - slope * mean_x
return slope, offset
|
<commit_before><commit_msg>[math] Add fit_line_2d (private for now)<commit_after>from ._shape import DimFilter, instance, shape
from ._tensors import Tensor
from ._ops import mean
def fit_line_2d(x: Tensor, y: Tensor, point_dim: DimFilter = instance, weights: Tensor = 1.):
"""
Fits a line of the form *slope · x + offset* to pass through the data points defined by their coordinates `x` and `y`.
Args:
x: X coordinate of the points.
y: Y coordinate of the points.
point_dim: Dimension listing the points the line should pass through. This dimension will be reduced in the operation.
By default, all instance dimensions are used.
weights: (Optional) Tensor assigning a weight to each point in `x` and `y` to determine the relative influence of that point in the overall fit.
Returns:
slope: Line slope in units y/x as `Tensor`
offset: Line value for x=0.
"""
assert shape(x).only(point_dim) or shape(y).only(point_dim), f"Either x or y need to have a dimension corresponding to point_dim but got x: {shape(x)}, y: {shape(y)}"
if not shape(weights): # unweighted fit
mean_x = mean(x, point_dim)
x_rel = x - mean_x
var_x = mean(x_rel ** 2, point_dim)
slope = mean(x_rel * y, point_dim) / var_x
offset = mean(y, point_dim) - slope * mean_x
else: # weighted fit
mean_w = mean(weights, point_dim)
mean_x = mean(weights * x, point_dim) / mean_w
x_rel = x - mean_x
var_wx = mean(weights * x_rel ** 2, point_dim)
slope = mean(weights * x_rel * y, point_dim) / var_wx
offset = mean(weights * y, point_dim) / mean_w - slope * mean_x
return slope, offset
|
|
1a91b07ed22781f597a73fb6cf5c3391c5b691c1
|
tests/run/pure_parallel.py
|
tests/run/pure_parallel.py
|
# mode: run
# tag: openmp, pure3.6
import cython
from cython.parallel import prange, parallel
def prange_regression(n: cython.int, data: list):
"""
>>> prange_regression(10, list(range(1, 4)))
19
"""
s: cython.int = 0
i: cython.int
d: cython.int[3] = data
for i in prange(n, num_threads=3, nogil=True):
s += d[i % 3]
return s
def prange_with_gil(n: cython.int, x):
"""
>>> sum(3*i for i in range(10))
135
>>> prange_with_gil(10, 3)
135
"""
i: cython.int
s: cython.int = 0
for i in prange(n, num_threads=3, nogil=True):
with cython.gil:
s += x * i
return s
@cython.cfunc
def use_nogil(x, i: cython.int) -> cython.int:
cx: cython.int = x
with cython.nogil:
return cx * i
def prange_with_gil_call_nogil(n: cython.int, x):
"""
>>> sum(3*i for i in range(10))
135
>>> prange_with_gil(10, 3)
135
"""
i: cython.int
s: cython.int = 0
for i in prange(n, num_threads=3, nogil=True):
with cython.gil:
s += use_nogil(x, i)
return s
|
Add a pure Python OpenMP test to make sure everything works from pure Python mode.
|
Add a pure Python OpenMP test to make sure everything works from pure Python mode.
|
Python
|
apache-2.0
|
cython/cython,da-woods/cython,scoder/cython,da-woods/cython,da-woods/cython,cython/cython,scoder/cython,da-woods/cython,cython/cython,cython/cython,scoder/cython,scoder/cython
|
Add a pure Python OpenMP test to make sure everything works from pure Python mode.
|
# mode: run
# tag: openmp, pure3.6
import cython
from cython.parallel import prange, parallel
def prange_regression(n: cython.int, data: list):
"""
>>> prange_regression(10, list(range(1, 4)))
19
"""
s: cython.int = 0
i: cython.int
d: cython.int[3] = data
for i in prange(n, num_threads=3, nogil=True):
s += d[i % 3]
return s
def prange_with_gil(n: cython.int, x):
"""
>>> sum(3*i for i in range(10))
135
>>> prange_with_gil(10, 3)
135
"""
i: cython.int
s: cython.int = 0
for i in prange(n, num_threads=3, nogil=True):
with cython.gil:
s += x * i
return s
@cython.cfunc
def use_nogil(x, i: cython.int) -> cython.int:
cx: cython.int = x
with cython.nogil:
return cx * i
def prange_with_gil_call_nogil(n: cython.int, x):
"""
>>> sum(3*i for i in range(10))
135
>>> prange_with_gil(10, 3)
135
"""
i: cython.int
s: cython.int = 0
for i in prange(n, num_threads=3, nogil=True):
with cython.gil:
s += use_nogil(x, i)
return s
|
<commit_before><commit_msg>Add a pure Python OpenMP test to make sure everything works from pure Python mode.<commit_after>
|
# mode: run
# tag: openmp, pure3.6
import cython
from cython.parallel import prange, parallel
def prange_regression(n: cython.int, data: list):
"""
>>> prange_regression(10, list(range(1, 4)))
19
"""
s: cython.int = 0
i: cython.int
d: cython.int[3] = data
for i in prange(n, num_threads=3, nogil=True):
s += d[i % 3]
return s
def prange_with_gil(n: cython.int, x):
"""
>>> sum(3*i for i in range(10))
135
>>> prange_with_gil(10, 3)
135
"""
i: cython.int
s: cython.int = 0
for i in prange(n, num_threads=3, nogil=True):
with cython.gil:
s += x * i
return s
@cython.cfunc
def use_nogil(x, i: cython.int) -> cython.int:
cx: cython.int = x
with cython.nogil:
return cx * i
def prange_with_gil_call_nogil(n: cython.int, x):
"""
>>> sum(3*i for i in range(10))
135
>>> prange_with_gil(10, 3)
135
"""
i: cython.int
s: cython.int = 0
for i in prange(n, num_threads=3, nogil=True):
with cython.gil:
s += use_nogil(x, i)
return s
|
Add a pure Python OpenMP test to make sure everything works from pure Python mode.# mode: run
# tag: openmp, pure3.6
import cython
from cython.parallel import prange, parallel
def prange_regression(n: cython.int, data: list):
"""
>>> prange_regression(10, list(range(1, 4)))
19
"""
s: cython.int = 0
i: cython.int
d: cython.int[3] = data
for i in prange(n, num_threads=3, nogil=True):
s += d[i % 3]
return s
def prange_with_gil(n: cython.int, x):
"""
>>> sum(3*i for i in range(10))
135
>>> prange_with_gil(10, 3)
135
"""
i: cython.int
s: cython.int = 0
for i in prange(n, num_threads=3, nogil=True):
with cython.gil:
s += x * i
return s
@cython.cfunc
def use_nogil(x, i: cython.int) -> cython.int:
cx: cython.int = x
with cython.nogil:
return cx * i
def prange_with_gil_call_nogil(n: cython.int, x):
"""
>>> sum(3*i for i in range(10))
135
>>> prange_with_gil(10, 3)
135
"""
i: cython.int
s: cython.int = 0
for i in prange(n, num_threads=3, nogil=True):
with cython.gil:
s += use_nogil(x, i)
return s
|
<commit_before><commit_msg>Add a pure Python OpenMP test to make sure everything works from pure Python mode.<commit_after># mode: run
# tag: openmp, pure3.6
import cython
from cython.parallel import prange, parallel
def prange_regression(n: cython.int, data: list):
"""
>>> prange_regression(10, list(range(1, 4)))
19
"""
s: cython.int = 0
i: cython.int
d: cython.int[3] = data
for i in prange(n, num_threads=3, nogil=True):
s += d[i % 3]
return s
def prange_with_gil(n: cython.int, x):
"""
>>> sum(3*i for i in range(10))
135
>>> prange_with_gil(10, 3)
135
"""
i: cython.int
s: cython.int = 0
for i in prange(n, num_threads=3, nogil=True):
with cython.gil:
s += x * i
return s
@cython.cfunc
def use_nogil(x, i: cython.int) -> cython.int:
cx: cython.int = x
with cython.nogil:
return cx * i
def prange_with_gil_call_nogil(n: cython.int, x):
"""
>>> sum(3*i for i in range(10))
135
>>> prange_with_gil(10, 3)
135
"""
i: cython.int
s: cython.int = 0
for i in prange(n, num_threads=3, nogil=True):
with cython.gil:
s += use_nogil(x, i)
return s
|
|
05ce9df1b02b98b163cadc9aed6cde34555d116e
|
compilebot/tests/run_all.py
|
compilebot/tests/run_all.py
|
# This script runs all of the suites from each unit test file.
# Run this file from the parent directory with the following command:
# python -m tests.run_all
from tests import *
import unittest
def main():
test_suites = [
test_reply.test_suite(),
test_compiler.test_suite()
]
all_tests = unittest.TestSuite(test_suites)
unittest.TextTestRunner().run(all_tests)
if __name__ == "__main__":
main()
|
Add script to run all tests
|
Add script to run all tests
|
Python
|
apache-2.0
|
renfredxh/compilebot
|
Add script to run all tests
|
# This script runs all of the suites from each unit test file.
# Run this file from the parent directory with the following command:
# python -m tests.run_all
from tests import *
import unittest
def main():
test_suites = [
test_reply.test_suite(),
test_compiler.test_suite()
]
all_tests = unittest.TestSuite(test_suites)
unittest.TextTestRunner().run(all_tests)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add script to run all tests<commit_after>
|
# This script runs all of the suites from each unit test file.
# Run this file from the parent directory with the following command:
# python -m tests.run_all
from tests import *
import unittest
def main():
test_suites = [
test_reply.test_suite(),
test_compiler.test_suite()
]
all_tests = unittest.TestSuite(test_suites)
unittest.TextTestRunner().run(all_tests)
if __name__ == "__main__":
main()
|
Add script to run all tests# This script runs all of the suites from each unit test file.
# Run this file from the parent directory with the following command:
# python -m tests.run_all
from tests import *
import unittest
def main():
test_suites = [
test_reply.test_suite(),
test_compiler.test_suite()
]
all_tests = unittest.TestSuite(test_suites)
unittest.TextTestRunner().run(all_tests)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add script to run all tests<commit_after># This script runs all of the suites from each unit test file.
# Run this file from the parent directory with the following command:
# python -m tests.run_all
from tests import *
import unittest
def main():
test_suites = [
test_reply.test_suite(),
test_compiler.test_suite()
]
all_tests = unittest.TestSuite(test_suites)
unittest.TextTestRunner().run(all_tests)
if __name__ == "__main__":
main()
|
|
d01799c33d442a8017b466db57afd15a852036a7
|
dmoj/executors/mixins.py
|
dmoj/executors/mixins.py
|
import os
class NullStdoutMixin(object):
def __init__(self, *args, **kwargs):
self._devnull = open(os.devnull, 'w')
super(NullStdoutMixin, self).__init__(*args, **kwargs)
def cleanup(self):
if hasattr(self, '_devnull'):
self._devnull.close()
super(NullStdoutMixin, self).cleanup()
def get_compile_popen_kwargs(self):
result = super(NullStdoutMixin, self).get_compile_popen_kwargs()
result['stdout'] = self._devnull
return result
|
Convert stdout redirection magic to a mixin.
|
Convert stdout redirection magic to a mixin.
|
Python
|
agpl-3.0
|
DMOJ/judge,DMOJ/judge,DMOJ/judge
|
Convert stdout redirection magic to a mixin.
|
import os
class NullStdoutMixin(object):
def __init__(self, *args, **kwargs):
self._devnull = open(os.devnull, 'w')
super(NullStdoutMixin, self).__init__(*args, **kwargs)
def cleanup(self):
if hasattr(self, '_devnull'):
self._devnull.close()
super(NullStdoutMixin, self).cleanup()
def get_compile_popen_kwargs(self):
result = super(NullStdoutMixin, self).get_compile_popen_kwargs()
result['stdout'] = self._devnull
return result
|
<commit_before><commit_msg>Convert stdout redirection magic to a mixin.<commit_after>
|
import os
class NullStdoutMixin(object):
def __init__(self, *args, **kwargs):
self._devnull = open(os.devnull, 'w')
super(NullStdoutMixin, self).__init__(*args, **kwargs)
def cleanup(self):
if hasattr(self, '_devnull'):
self._devnull.close()
super(NullStdoutMixin, self).cleanup()
def get_compile_popen_kwargs(self):
result = super(NullStdoutMixin, self).get_compile_popen_kwargs()
result['stdout'] = self._devnull
return result
|
Convert stdout redirection magic to a mixin.import os
class NullStdoutMixin(object):
def __init__(self, *args, **kwargs):
self._devnull = open(os.devnull, 'w')
super(NullStdoutMixin, self).__init__(*args, **kwargs)
def cleanup(self):
if hasattr(self, '_devnull'):
self._devnull.close()
super(NullStdoutMixin, self).cleanup()
def get_compile_popen_kwargs(self):
result = super(NullStdoutMixin, self).get_compile_popen_kwargs()
result['stdout'] = self._devnull
return result
|
<commit_before><commit_msg>Convert stdout redirection magic to a mixin.<commit_after>import os
class NullStdoutMixin(object):
def __init__(self, *args, **kwargs):
self._devnull = open(os.devnull, 'w')
super(NullStdoutMixin, self).__init__(*args, **kwargs)
def cleanup(self):
if hasattr(self, '_devnull'):
self._devnull.close()
super(NullStdoutMixin, self).cleanup()
def get_compile_popen_kwargs(self):
result = super(NullStdoutMixin, self).get_compile_popen_kwargs()
result['stdout'] = self._devnull
return result
|
|
91dcfdfa7b3bcfdabaf5e23ed8ea2c64a0ae5240
|
tests/filter/test_classbasedview_filterset.py
|
tests/filter/test_classbasedview_filterset.py
|
import pytest
from tests.apps.questions import models as question_models
from adhocracy4.filters.filters import ClassBasedViewFilterSet
from adhocracy4.filters.views import FilteredListView
class ExampleFilterSet(ClassBasedViewFilterSet):
class Meta:
model = question_models.Question
fields = ['text']
@pytest.fixture
def question_list_view():
class DummyView(FilteredListView):
model = question_models.Question
filter_set = ExampleFilterSet
return DummyView
@pytest.mark.django_db
def test_class_based_filterset(rf):
class ViewPlaceHolder:
pass
view = ViewPlaceHolder
request = rf.get('/questions')
filterset = ExampleFilterSet(request.GET, view=view)
assert filterset.view == view
assert filterset.filters['text'].view == view
def test_integration_into_filtered_listview(rf, question_list_view):
request = rf.get('/')
view = question_list_view.as_view()
response = view(request)
view_instance = response.context_data['view']
assert view_instance.filter().view == view_instance
|
Add test for classbased view filters
|
Add test for classbased view filters
|
Python
|
agpl-3.0
|
liqd/adhocracy4,liqd/adhocracy4,liqd/adhocracy4,liqd/adhocracy4
|
Add test for classbased view filters
|
import pytest
from tests.apps.questions import models as question_models
from adhocracy4.filters.filters import ClassBasedViewFilterSet
from adhocracy4.filters.views import FilteredListView
class ExampleFilterSet(ClassBasedViewFilterSet):
class Meta:
model = question_models.Question
fields = ['text']
@pytest.fixture
def question_list_view():
class DummyView(FilteredListView):
model = question_models.Question
filter_set = ExampleFilterSet
return DummyView
@pytest.mark.django_db
def test_class_based_filterset(rf):
class ViewPlaceHolder:
pass
view = ViewPlaceHolder
request = rf.get('/questions')
filterset = ExampleFilterSet(request.GET, view=view)
assert filterset.view == view
assert filterset.filters['text'].view == view
def test_integration_into_filtered_listview(rf, question_list_view):
request = rf.get('/')
view = question_list_view.as_view()
response = view(request)
view_instance = response.context_data['view']
assert view_instance.filter().view == view_instance
|
<commit_before><commit_msg>Add test for classbased view filters<commit_after>
|
import pytest
from tests.apps.questions import models as question_models
from adhocracy4.filters.filters import ClassBasedViewFilterSet
from adhocracy4.filters.views import FilteredListView
class ExampleFilterSet(ClassBasedViewFilterSet):
class Meta:
model = question_models.Question
fields = ['text']
@pytest.fixture
def question_list_view():
class DummyView(FilteredListView):
model = question_models.Question
filter_set = ExampleFilterSet
return DummyView
@pytest.mark.django_db
def test_class_based_filterset(rf):
class ViewPlaceHolder:
pass
view = ViewPlaceHolder
request = rf.get('/questions')
filterset = ExampleFilterSet(request.GET, view=view)
assert filterset.view == view
assert filterset.filters['text'].view == view
def test_integration_into_filtered_listview(rf, question_list_view):
request = rf.get('/')
view = question_list_view.as_view()
response = view(request)
view_instance = response.context_data['view']
assert view_instance.filter().view == view_instance
|
Add test for classbased view filtersimport pytest
from tests.apps.questions import models as question_models
from adhocracy4.filters.filters import ClassBasedViewFilterSet
from adhocracy4.filters.views import FilteredListView
class ExampleFilterSet(ClassBasedViewFilterSet):
class Meta:
model = question_models.Question
fields = ['text']
@pytest.fixture
def question_list_view():
class DummyView(FilteredListView):
model = question_models.Question
filter_set = ExampleFilterSet
return DummyView
@pytest.mark.django_db
def test_class_based_filterset(rf):
class ViewPlaceHolder:
pass
view = ViewPlaceHolder
request = rf.get('/questions')
filterset = ExampleFilterSet(request.GET, view=view)
assert filterset.view == view
assert filterset.filters['text'].view == view
def test_integration_into_filtered_listview(rf, question_list_view):
request = rf.get('/')
view = question_list_view.as_view()
response = view(request)
view_instance = response.context_data['view']
assert view_instance.filter().view == view_instance
|
<commit_before><commit_msg>Add test for classbased view filters<commit_after>import pytest
from tests.apps.questions import models as question_models
from adhocracy4.filters.filters import ClassBasedViewFilterSet
from adhocracy4.filters.views import FilteredListView
class ExampleFilterSet(ClassBasedViewFilterSet):
class Meta:
model = question_models.Question
fields = ['text']
@pytest.fixture
def question_list_view():
class DummyView(FilteredListView):
model = question_models.Question
filter_set = ExampleFilterSet
return DummyView
@pytest.mark.django_db
def test_class_based_filterset(rf):
class ViewPlaceHolder:
pass
view = ViewPlaceHolder
request = rf.get('/questions')
filterset = ExampleFilterSet(request.GET, view=view)
assert filterset.view == view
assert filterset.filters['text'].view == view
def test_integration_into_filtered_listview(rf, question_list_view):
request = rf.get('/')
view = question_list_view.as_view()
response = view(request)
view_instance = response.context_data['view']
assert view_instance.filter().view == view_instance
|
|
f4521d01fcb7a4f9f192ea932d103675e02a5ad9
|
tests/integration/sts/openflow_buffer_test.py
|
tests/integration/sts/openflow_buffer_test.py
|
#!/usr/bin/env python
#
# Copyright 2011-2013 Colin Scott
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import sys
import os
import time
sys.path.append(os.path.dirname(__file__) + "/../../..")
from config.experiment_config_lib import ControllerConfig
from sts.topology import MeshTopology
from sts.simulation_state import SimulationConfig
from sts.control_flow import RecordingSyncCallback
class OpenflowBufferTest(unittest.TestCase):
def basic_test(self):
start_cmd = ('''./pox.py --verbose '''
'''openflow.discovery forwarding.l2_multi '''
'''sts.util.socket_mux.pox_monkeypatcher '''
'''openflow.of_01 --address=__address__ --port=__port__''')
controllers = [ControllerConfig(start_cmd, cwd="pox")]
topology_class = MeshTopology
topology_params = "num_switches=2"
simulation_config = SimulationConfig(controller_configs=controllers,
topology_class=topology_class,
topology_params=topology_params,
multiplex_sockets=True)
simulation = simulation_config.bootstrap(RecordingSyncCallback(None))
simulation.set_record_only()
simulation.connect_to_controllers()
time.sleep(1)
observed_events = simulation.unset_record_only()
print "Observed events: %s" % str(observed_events)
self.assertTrue(observed_events != [])
if __name__ == '__main__':
unittest.main()
|
Add test for record_only mode
|
Add test for record_only mode
|
Python
|
apache-2.0
|
ucb-sts/sts,ucb-sts/sts,jmiserez/sts,jmiserez/sts
|
Add test for record_only mode
|
#!/usr/bin/env python
#
# Copyright 2011-2013 Colin Scott
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import sys
import os
import time
sys.path.append(os.path.dirname(__file__) + "/../../..")
from config.experiment_config_lib import ControllerConfig
from sts.topology import MeshTopology
from sts.simulation_state import SimulationConfig
from sts.control_flow import RecordingSyncCallback
class OpenflowBufferTest(unittest.TestCase):
def basic_test(self):
start_cmd = ('''./pox.py --verbose '''
'''openflow.discovery forwarding.l2_multi '''
'''sts.util.socket_mux.pox_monkeypatcher '''
'''openflow.of_01 --address=__address__ --port=__port__''')
controllers = [ControllerConfig(start_cmd, cwd="pox")]
topology_class = MeshTopology
topology_params = "num_switches=2"
simulation_config = SimulationConfig(controller_configs=controllers,
topology_class=topology_class,
topology_params=topology_params,
multiplex_sockets=True)
simulation = simulation_config.bootstrap(RecordingSyncCallback(None))
simulation.set_record_only()
simulation.connect_to_controllers()
time.sleep(1)
observed_events = simulation.unset_record_only()
print "Observed events: %s" % str(observed_events)
self.assertTrue(observed_events != [])
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test for record_only mode<commit_after>
|
#!/usr/bin/env python
#
# Copyright 2011-2013 Colin Scott
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import sys
import os
import time
sys.path.append(os.path.dirname(__file__) + "/../../..")
from config.experiment_config_lib import ControllerConfig
from sts.topology import MeshTopology
from sts.simulation_state import SimulationConfig
from sts.control_flow import RecordingSyncCallback
class OpenflowBufferTest(unittest.TestCase):
def basic_test(self):
start_cmd = ('''./pox.py --verbose '''
'''openflow.discovery forwarding.l2_multi '''
'''sts.util.socket_mux.pox_monkeypatcher '''
'''openflow.of_01 --address=__address__ --port=__port__''')
controllers = [ControllerConfig(start_cmd, cwd="pox")]
topology_class = MeshTopology
topology_params = "num_switches=2"
simulation_config = SimulationConfig(controller_configs=controllers,
topology_class=topology_class,
topology_params=topology_params,
multiplex_sockets=True)
simulation = simulation_config.bootstrap(RecordingSyncCallback(None))
simulation.set_record_only()
simulation.connect_to_controllers()
time.sleep(1)
observed_events = simulation.unset_record_only()
print "Observed events: %s" % str(observed_events)
self.assertTrue(observed_events != [])
if __name__ == '__main__':
unittest.main()
|
Add test for record_only mode#!/usr/bin/env python
#
# Copyright 2011-2013 Colin Scott
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import sys
import os
import time
sys.path.append(os.path.dirname(__file__) + "/../../..")
from config.experiment_config_lib import ControllerConfig
from sts.topology import MeshTopology
from sts.simulation_state import SimulationConfig
from sts.control_flow import RecordingSyncCallback
class OpenflowBufferTest(unittest.TestCase):
def basic_test(self):
start_cmd = ('''./pox.py --verbose '''
'''openflow.discovery forwarding.l2_multi '''
'''sts.util.socket_mux.pox_monkeypatcher '''
'''openflow.of_01 --address=__address__ --port=__port__''')
controllers = [ControllerConfig(start_cmd, cwd="pox")]
topology_class = MeshTopology
topology_params = "num_switches=2"
simulation_config = SimulationConfig(controller_configs=controllers,
topology_class=topology_class,
topology_params=topology_params,
multiplex_sockets=True)
simulation = simulation_config.bootstrap(RecordingSyncCallback(None))
simulation.set_record_only()
simulation.connect_to_controllers()
time.sleep(1)
observed_events = simulation.unset_record_only()
print "Observed events: %s" % str(observed_events)
self.assertTrue(observed_events != [])
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test for record_only mode<commit_after>#!/usr/bin/env python
#
# Copyright 2011-2013 Colin Scott
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import sys
import os
import time
sys.path.append(os.path.dirname(__file__) + "/../../..")
from config.experiment_config_lib import ControllerConfig
from sts.topology import MeshTopology
from sts.simulation_state import SimulationConfig
from sts.control_flow import RecordingSyncCallback
class OpenflowBufferTest(unittest.TestCase):
def basic_test(self):
start_cmd = ('''./pox.py --verbose '''
'''openflow.discovery forwarding.l2_multi '''
'''sts.util.socket_mux.pox_monkeypatcher '''
'''openflow.of_01 --address=__address__ --port=__port__''')
controllers = [ControllerConfig(start_cmd, cwd="pox")]
topology_class = MeshTopology
topology_params = "num_switches=2"
simulation_config = SimulationConfig(controller_configs=controllers,
topology_class=topology_class,
topology_params=topology_params,
multiplex_sockets=True)
simulation = simulation_config.bootstrap(RecordingSyncCallback(None))
simulation.set_record_only()
simulation.connect_to_controllers()
time.sleep(1)
observed_events = simulation.unset_record_only()
print "Observed events: %s" % str(observed_events)
self.assertTrue(observed_events != [])
if __name__ == '__main__':
unittest.main()
|
|
9899fec1561d30a54279dd753114006f5e0e141c
|
src/functions/exercise4.py
|
src/functions/exercise4.py
|
# Define a function that print a lyrics and a function that repeat first operation 5 times.
def print_lyrics():
print "I'm a lumberjack, and I'm okay."
print "I sleep all night and I work all day."
def repl_lyrics():
for i in range(5):
print_lyrics()
def main():
print_lyrics()
repl_lyrics()
exit(0)
main()
|
Define a function that print a lyrics and a function that repeat first operation 5 times.
|
Define a function that print a lyrics and a function that repeat first operation 5 times.
|
Python
|
mit
|
let42/python-course
|
Define a function that print a lyrics and a function that repeat first operation 5 times.
|
# Define a function that print a lyrics and a function that repeat first operation 5 times.
def print_lyrics():
print "I'm a lumberjack, and I'm okay."
print "I sleep all night and I work all day."
def repl_lyrics():
for i in range(5):
print_lyrics()
def main():
print_lyrics()
repl_lyrics()
exit(0)
main()
|
<commit_before><commit_msg>Define a function that print a lyrics and a function that repeat first operation 5 times.<commit_after>
|
# Define a function that print a lyrics and a function that repeat first operation 5 times.
def print_lyrics():
print "I'm a lumberjack, and I'm okay."
print "I sleep all night and I work all day."
def repl_lyrics():
for i in range(5):
print_lyrics()
def main():
print_lyrics()
repl_lyrics()
exit(0)
main()
|
Define a function that print a lyrics and a function that repeat first operation 5 times.# Define a function that print a lyrics and a function that repeat first operation 5 times.
def print_lyrics():
print "I'm a lumberjack, and I'm okay."
print "I sleep all night and I work all day."
def repl_lyrics():
for i in range(5):
print_lyrics()
def main():
print_lyrics()
repl_lyrics()
exit(0)
main()
|
<commit_before><commit_msg>Define a function that print a lyrics and a function that repeat first operation 5 times.<commit_after># Define a function that print a lyrics and a function that repeat first operation 5 times.
def print_lyrics():
print "I'm a lumberjack, and I'm okay."
print "I sleep all night and I work all day."
def repl_lyrics():
for i in range(5):
print_lyrics()
def main():
print_lyrics()
repl_lyrics()
exit(0)
main()
|
|
017e7cae2aac65e405edf341c00a7052b8b13fa6
|
minimal/ipython_notebook_config.py
|
minimal/ipython_notebook_config.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Configuration file for ipython-notebook.
c = get_config()
c.NotebookApp.ip = '*'
c.NotebookApp.open_browser = False
c.NotebookApp.port = 8888
# Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headers sent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
c.NotebookApp.trust_xheaders = True
# Supply overrides for the tornado.web.Application that the IPython notebook
# uses.
c.NotebookApp.tornado_settings = {
'template_path':['/srv/ga/', '/srv/ipython/IPython/html',
'/srv/ipython/IPython/html/templates']
}
|
Set up an IPython config for the minimal image
|
Set up an IPython config for the minimal image
|
Python
|
bsd-3-clause
|
mjbright/docker-demo-images,danielballan/docker-demo-images,Zsailer/docker-jupyter-teaching,odewahn/docker-demo-images,modulexcite/docker-demo-images,parente/docker-demo-images,CognitiveScale/docker-demo-images,ericdill/docker-demo-images,pelucid/docker-demo-images,willjharmer/docker-demo-images,CognitiveScale/docker-demo-images,odewahn/docker-demo-images,ericdill/docker-demo-images,mjbright/docker-demo-images,iamjakob/docker-demo-images,tanyaschlusser/docker-demo-images,parente/docker-demo-images,vanceb/docker-demo-images,mjbright/docker-demo-images,Zsailer/docker-jupyter-teaching,jupyter/docker-demo-images,CognitiveScale/docker-demo-images,rgbkrk/docker-demo-images,philipz/docker-demo-images,dietmarw/jupyter-docker-images,danielballan/docker-demo-images,vanceb/docker-demo-images,parente/docker-demo-images,CognitiveScale/docker-demo-images,dietmarw/jupyter-docker-images,iamjakob/docker-demo-images,dietmarw/jupyter-docker-images,rgbkrk/docker-demo-images,iamjakob/docker-demo-images,rgbkrk/docker-demo-images,tanyaschlusser/docker-demo-images,willjharmer/docker-demo-images,tanyaschlusser/docker-demo-images,modulexcite/docker-demo-images,Zsailer/docker-demo-images,danielballan/docker-demo-images,philipz/docker-demo-images,Zsailer/docker-demo-images,Zsailer/docker-jupyter-teaching,philipz/docker-demo-images,modulexcite/docker-demo-images,jupyter/docker-demo-images,pelucid/docker-demo-images,vanceb/docker-demo-images,odewahn/docker-demo-images,ericdill/docker-demo-images,jupyter/docker-demo-images,willjharmer/docker-demo-images,pelucid/docker-demo-images,Zsailer/docker-jupyter-teaching,Zsailer/docker-demo-images
|
Set up an IPython config for the minimal image
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Configuration file for ipython-notebook.
c = get_config()
c.NotebookApp.ip = '*'
c.NotebookApp.open_browser = False
c.NotebookApp.port = 8888
# Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headers sent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
c.NotebookApp.trust_xheaders = True
# Supply overrides for the tornado.web.Application that the IPython notebook
# uses.
c.NotebookApp.tornado_settings = {
'template_path':['/srv/ga/', '/srv/ipython/IPython/html',
'/srv/ipython/IPython/html/templates']
}
|
<commit_before><commit_msg>Set up an IPython config for the minimal image<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Configuration file for ipython-notebook.
c = get_config()
c.NotebookApp.ip = '*'
c.NotebookApp.open_browser = False
c.NotebookApp.port = 8888
# Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headers sent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
c.NotebookApp.trust_xheaders = True
# Supply overrides for the tornado.web.Application that the IPython notebook
# uses.
c.NotebookApp.tornado_settings = {
'template_path':['/srv/ga/', '/srv/ipython/IPython/html',
'/srv/ipython/IPython/html/templates']
}
|
Set up an IPython config for the minimal image#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Configuration file for ipython-notebook.
c = get_config()
c.NotebookApp.ip = '*'
c.NotebookApp.open_browser = False
c.NotebookApp.port = 8888
# Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headers sent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
c.NotebookApp.trust_xheaders = True
# Supply overrides for the tornado.web.Application that the IPython notebook
# uses.
c.NotebookApp.tornado_settings = {
'template_path':['/srv/ga/', '/srv/ipython/IPython/html',
'/srv/ipython/IPython/html/templates']
}
|
<commit_before><commit_msg>Set up an IPython config for the minimal image<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Configuration file for ipython-notebook.
c = get_config()
c.NotebookApp.ip = '*'
c.NotebookApp.open_browser = False
c.NotebookApp.port = 8888
# Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headers sent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
c.NotebookApp.trust_xheaders = True
# Supply overrides for the tornado.web.Application that the IPython notebook
# uses.
c.NotebookApp.tornado_settings = {
'template_path':['/srv/ga/', '/srv/ipython/IPython/html',
'/srv/ipython/IPython/html/templates']
}
|
|
d8f1f40964eb4ca01f66a436a3e33bd1ec357fb3
|
csunplugged/resources/views/binary_cards.py
|
csunplugged/resources/views/binary_cards.py
|
"""Module for generating Binary Cards resource."""
from PIL import Image
def resource_image(get_request, resource):
"""Create a image for Binary Cards resource.
Args:
get_request: HTTP request object
resource: Object of resource data.
Returns:
A Pillow image object.
"""
IMAGE_PATHS = [
"static/img/resources/binary-cards/binary-cards-1-dot.png",
"static/img/resources/binary-cards/binary-cards-2-dots.png",
"static/img/resources/binary-cards/binary-cards-4-dots.png",
"static/img/resources/binary-cards/binary-cards-8-dots.png",
"static/img/resources/binary-cards/binary-cards-16-dots.png",
"static/img/resources/binary-cards/binary-cards-32-dots.png",
"static/img/resources/binary-cards/binary-cards-64-dots.png",
"static/img/resources/binary-cards/binary-cards-128-dots.png",
]
images = []
for image_path in IMAGE_PATHS:
image = Image.open(image_path)
images.append(image)
return images
def subtitle(get_request, resource):
"""Return the subtitle string of the resource.
Used after the resource name in the filename, and
also on the resource image.
Args:
get_request: HTTP request object
resource: Object of resource data.
Returns:
text for subtitle (string)
"""
return resource.name
|
Add basic binary card resource
|
Add basic binary card resource
|
Python
|
mit
|
uccser/cs-unplugged,uccser/cs-unplugged,uccser/cs-unplugged,uccser/cs-unplugged
|
Add basic binary card resource
|
"""Module for generating Binary Cards resource."""
from PIL import Image
def resource_image(get_request, resource):
"""Create a image for Binary Cards resource.
Args:
get_request: HTTP request object
resource: Object of resource data.
Returns:
A Pillow image object.
"""
IMAGE_PATHS = [
"static/img/resources/binary-cards/binary-cards-1-dot.png",
"static/img/resources/binary-cards/binary-cards-2-dots.png",
"static/img/resources/binary-cards/binary-cards-4-dots.png",
"static/img/resources/binary-cards/binary-cards-8-dots.png",
"static/img/resources/binary-cards/binary-cards-16-dots.png",
"static/img/resources/binary-cards/binary-cards-32-dots.png",
"static/img/resources/binary-cards/binary-cards-64-dots.png",
"static/img/resources/binary-cards/binary-cards-128-dots.png",
]
images = []
for image_path in IMAGE_PATHS:
image = Image.open(image_path)
images.append(image)
return images
def subtitle(get_request, resource):
"""Return the subtitle string of the resource.
Used after the resource name in the filename, and
also on the resource image.
Args:
get_request: HTTP request object
resource: Object of resource data.
Returns:
text for subtitle (string)
"""
return resource.name
|
<commit_before><commit_msg>Add basic binary card resource<commit_after>
|
"""Module for generating Binary Cards resource."""
from PIL import Image
def resource_image(get_request, resource):
"""Create a image for Binary Cards resource.
Args:
get_request: HTTP request object
resource: Object of resource data.
Returns:
A Pillow image object.
"""
IMAGE_PATHS = [
"static/img/resources/binary-cards/binary-cards-1-dot.png",
"static/img/resources/binary-cards/binary-cards-2-dots.png",
"static/img/resources/binary-cards/binary-cards-4-dots.png",
"static/img/resources/binary-cards/binary-cards-8-dots.png",
"static/img/resources/binary-cards/binary-cards-16-dots.png",
"static/img/resources/binary-cards/binary-cards-32-dots.png",
"static/img/resources/binary-cards/binary-cards-64-dots.png",
"static/img/resources/binary-cards/binary-cards-128-dots.png",
]
images = []
for image_path in IMAGE_PATHS:
image = Image.open(image_path)
images.append(image)
return images
def subtitle(get_request, resource):
"""Return the subtitle string of the resource.
Used after the resource name in the filename, and
also on the resource image.
Args:
get_request: HTTP request object
resource: Object of resource data.
Returns:
text for subtitle (string)
"""
return resource.name
|
Add basic binary card resource"""Module for generating Binary Cards resource."""
from PIL import Image
def resource_image(get_request, resource):
"""Create a image for Binary Cards resource.
Args:
get_request: HTTP request object
resource: Object of resource data.
Returns:
A Pillow image object.
"""
IMAGE_PATHS = [
"static/img/resources/binary-cards/binary-cards-1-dot.png",
"static/img/resources/binary-cards/binary-cards-2-dots.png",
"static/img/resources/binary-cards/binary-cards-4-dots.png",
"static/img/resources/binary-cards/binary-cards-8-dots.png",
"static/img/resources/binary-cards/binary-cards-16-dots.png",
"static/img/resources/binary-cards/binary-cards-32-dots.png",
"static/img/resources/binary-cards/binary-cards-64-dots.png",
"static/img/resources/binary-cards/binary-cards-128-dots.png",
]
images = []
for image_path in IMAGE_PATHS:
image = Image.open(image_path)
images.append(image)
return images
def subtitle(get_request, resource):
"""Return the subtitle string of the resource.
Used after the resource name in the filename, and
also on the resource image.
Args:
get_request: HTTP request object
resource: Object of resource data.
Returns:
text for subtitle (string)
"""
return resource.name
|
<commit_before><commit_msg>Add basic binary card resource<commit_after>"""Module for generating Binary Cards resource."""
from PIL import Image
def resource_image(get_request, resource):
"""Create a image for Binary Cards resource.
Args:
get_request: HTTP request object
resource: Object of resource data.
Returns:
A Pillow image object.
"""
IMAGE_PATHS = [
"static/img/resources/binary-cards/binary-cards-1-dot.png",
"static/img/resources/binary-cards/binary-cards-2-dots.png",
"static/img/resources/binary-cards/binary-cards-4-dots.png",
"static/img/resources/binary-cards/binary-cards-8-dots.png",
"static/img/resources/binary-cards/binary-cards-16-dots.png",
"static/img/resources/binary-cards/binary-cards-32-dots.png",
"static/img/resources/binary-cards/binary-cards-64-dots.png",
"static/img/resources/binary-cards/binary-cards-128-dots.png",
]
images = []
for image_path in IMAGE_PATHS:
image = Image.open(image_path)
images.append(image)
return images
def subtitle(get_request, resource):
"""Return the subtitle string of the resource.
Used after the resource name in the filename, and
also on the resource image.
Args:
get_request: HTTP request object
resource: Object of resource data.
Returns:
text for subtitle (string)
"""
return resource.name
|
|
eb0278b8360ad677aa66a6b1d9fc4fb9bd77f553
|
localore/people/migrations/0004_auto_20160314_2320.py
|
localore/people/migrations/0004_auto_20160314_2320.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('people', '0003_auto_20160310_2235'),
]
operations = [
migrations.AlterModelOptions(
name='person',
options={'verbose_name_plural': 'people', 'verbose_name': 'person', 'ordering': ('-production', 'first_name', 'last_name')},
),
]
|
Add missed db migration for 50b7b53
|
Add missed db migration for 50b7b53
|
Python
|
mpl-2.0
|
ghostwords/localore,ghostwords/localore,ghostwords/localore
|
Add missed db migration for 50b7b53
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('people', '0003_auto_20160310_2235'),
]
operations = [
migrations.AlterModelOptions(
name='person',
options={'verbose_name_plural': 'people', 'verbose_name': 'person', 'ordering': ('-production', 'first_name', 'last_name')},
),
]
|
<commit_before><commit_msg>Add missed db migration for 50b7b53<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('people', '0003_auto_20160310_2235'),
]
operations = [
migrations.AlterModelOptions(
name='person',
options={'verbose_name_plural': 'people', 'verbose_name': 'person', 'ordering': ('-production', 'first_name', 'last_name')},
),
]
|
Add missed db migration for 50b7b53# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('people', '0003_auto_20160310_2235'),
]
operations = [
migrations.AlterModelOptions(
name='person',
options={'verbose_name_plural': 'people', 'verbose_name': 'person', 'ordering': ('-production', 'first_name', 'last_name')},
),
]
|
<commit_before><commit_msg>Add missed db migration for 50b7b53<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('people', '0003_auto_20160310_2235'),
]
operations = [
migrations.AlterModelOptions(
name='person',
options={'verbose_name_plural': 'people', 'verbose_name': 'person', 'ordering': ('-production', 'first_name', 'last_name')},
),
]
|
|
46bc6d5892fd051428ace19fea8c763a1376a2e1
|
cookiecutter/environment.py
|
cookiecutter/environment.py
|
# -*- coding: utf-8 -*-
from jinja2 import Environment, StrictUndefined
class StrictEnvironment(Environment):
"""Jinja2 environment that raises an error when it hits a variable
which is not defined in the context used to render a template.
"""
def __init__(self, **kwargs):
super(StrictEnvironment, self).__init__(
undefined=StrictUndefined,
**kwargs
)
|
Implement a strict subclass of jinja2 Environment
|
Implement a strict subclass of jinja2 Environment
|
Python
|
bsd-3-clause
|
luzfcb/cookiecutter,audreyr/cookiecutter,dajose/cookiecutter,pjbull/cookiecutter,audreyr/cookiecutter,willingc/cookiecutter,hackebrot/cookiecutter,dajose/cookiecutter,Springerle/cookiecutter,hackebrot/cookiecutter,terryjbates/cookiecutter,willingc/cookiecutter,stevepiercy/cookiecutter,pjbull/cookiecutter,Springerle/cookiecutter,luzfcb/cookiecutter,michaeljoseph/cookiecutter,terryjbates/cookiecutter,stevepiercy/cookiecutter,michaeljoseph/cookiecutter
|
Implement a strict subclass of jinja2 Environment
|
# -*- coding: utf-8 -*-
from jinja2 import Environment, StrictUndefined
class StrictEnvironment(Environment):
"""Jinja2 environment that raises an error when it hits a variable
which is not defined in the context used to render a template.
"""
def __init__(self, **kwargs):
super(StrictEnvironment, self).__init__(
undefined=StrictUndefined,
**kwargs
)
|
<commit_before><commit_msg>Implement a strict subclass of jinja2 Environment<commit_after>
|
# -*- coding: utf-8 -*-
from jinja2 import Environment, StrictUndefined
class StrictEnvironment(Environment):
"""Jinja2 environment that raises an error when it hits a variable
which is not defined in the context used to render a template.
"""
def __init__(self, **kwargs):
super(StrictEnvironment, self).__init__(
undefined=StrictUndefined,
**kwargs
)
|
Implement a strict subclass of jinja2 Environment# -*- coding: utf-8 -*-
from jinja2 import Environment, StrictUndefined
class StrictEnvironment(Environment):
"""Jinja2 environment that raises an error when it hits a variable
which is not defined in the context used to render a template.
"""
def __init__(self, **kwargs):
super(StrictEnvironment, self).__init__(
undefined=StrictUndefined,
**kwargs
)
|
<commit_before><commit_msg>Implement a strict subclass of jinja2 Environment<commit_after># -*- coding: utf-8 -*-
from jinja2 import Environment, StrictUndefined
class StrictEnvironment(Environment):
"""Jinja2 environment that raises an error when it hits a variable
which is not defined in the context used to render a template.
"""
def __init__(self, **kwargs):
super(StrictEnvironment, self).__init__(
undefined=StrictUndefined,
**kwargs
)
|
|
f344ba9bbc9d607c10bbed853f041003c5eacfad
|
scripts/ipcthreadtrace.py
|
scripts/ipcthreadtrace.py
|
"""
ipctrace.py
Write a trace of instantaneous IPC values for all cores.
First argument is either a filename, or none to write to standard output.
Second argument is the interval size in nanoseconds (default is 10000)
"""
import sys, os, sim
class IpcTrace:
def setup(self, args):
self.freq = sim.dvfs.get_frequency(0) # This script does not support DVFS
args = dict(enumerate((args or '').split(':')))
filename = args.get(0, None)
interval_ns = long(args.get(1, 10000))
if filename:
self.fd = file(os.path.join(sim.config.output_dir, filename), 'w')
self.isTerminal = False
else:
self.fd = sys.stdout
self.isTerminal = True
self.sd = sim.util.StatsDelta()
self.stats = {
'threadinstrs': [],
'threadtime': [],
}
sim.util.Every(interval_ns * sim.util.Time.NS, self.periodic, statsdelta = self.sd, roi_only = True)
def hook_thread_start(self, threadid, creator):
for thread in range(len(self.stats['threadinstrs']), threadid+1):
self.stats['threadinstrs'].append(self.sd.getter('thread', thread, 'instruction_count'))
self.stats['threadtime'].append(self.sd.getter('thread', thread, 'elapsed_time'))
def periodic(self, time, time_delta):
if self.isTerminal:
self.fd.write('[THREADIPC] ')
self.fd.write('%u' % (time / 1e6)) # Time in ns
for thread in range(sim.thread.get_nthreads()):
# Print per-thread stats
try:
cycles = self.stats['threadtime'][thread].delta * self.freq / 1e9 # convert fs to cycles
instrs = self.stats['threadinstrs'][thread].delta
ipc = instrs / (cycles or 1)
self.fd.write(' %.3f' % ipc)
except TypeError:
pass # Skip newly created threads
self.fd.write('\n')
sim.util.register(IpcTrace())
|
Add a per-thread IPC tracing script. It currently handles scheduling / oversubscription but not DVFS
|
[scripts] Add a per-thread IPC tracing script. It currently handles scheduling / oversubscription but not DVFS
|
Python
|
mit
|
abanaiyan/sniper,abanaiyan/sniper,abanaiyan/sniper,abanaiyan/sniper,abanaiyan/sniper
|
[scripts] Add a per-thread IPC tracing script. It currently handles scheduling / oversubscription but not DVFS
|
"""
ipctrace.py
Write a trace of instantaneous IPC values for all cores.
First argument is either a filename, or none to write to standard output.
Second argument is the interval size in nanoseconds (default is 10000)
"""
import sys, os, sim
class IpcTrace:
def setup(self, args):
self.freq = sim.dvfs.get_frequency(0) # This script does not support DVFS
args = dict(enumerate((args or '').split(':')))
filename = args.get(0, None)
interval_ns = long(args.get(1, 10000))
if filename:
self.fd = file(os.path.join(sim.config.output_dir, filename), 'w')
self.isTerminal = False
else:
self.fd = sys.stdout
self.isTerminal = True
self.sd = sim.util.StatsDelta()
self.stats = {
'threadinstrs': [],
'threadtime': [],
}
sim.util.Every(interval_ns * sim.util.Time.NS, self.periodic, statsdelta = self.sd, roi_only = True)
def hook_thread_start(self, threadid, creator):
for thread in range(len(self.stats['threadinstrs']), threadid+1):
self.stats['threadinstrs'].append(self.sd.getter('thread', thread, 'instruction_count'))
self.stats['threadtime'].append(self.sd.getter('thread', thread, 'elapsed_time'))
def periodic(self, time, time_delta):
if self.isTerminal:
self.fd.write('[THREADIPC] ')
self.fd.write('%u' % (time / 1e6)) # Time in ns
for thread in range(sim.thread.get_nthreads()):
# Print per-thread stats
try:
cycles = self.stats['threadtime'][thread].delta * self.freq / 1e9 # convert fs to cycles
instrs = self.stats['threadinstrs'][thread].delta
ipc = instrs / (cycles or 1)
self.fd.write(' %.3f' % ipc)
except TypeError:
pass # Skip newly created threads
self.fd.write('\n')
sim.util.register(IpcTrace())
|
<commit_before><commit_msg>[scripts] Add a per-thread IPC tracing script. It currently handles scheduling / oversubscription but not DVFS<commit_after>
|
"""
ipctrace.py
Write a trace of instantaneous IPC values for all cores.
First argument is either a filename, or none to write to standard output.
Second argument is the interval size in nanoseconds (default is 10000)
"""
import sys, os, sim
class IpcTrace:
def setup(self, args):
self.freq = sim.dvfs.get_frequency(0) # This script does not support DVFS
args = dict(enumerate((args or '').split(':')))
filename = args.get(0, None)
interval_ns = long(args.get(1, 10000))
if filename:
self.fd = file(os.path.join(sim.config.output_dir, filename), 'w')
self.isTerminal = False
else:
self.fd = sys.stdout
self.isTerminal = True
self.sd = sim.util.StatsDelta()
self.stats = {
'threadinstrs': [],
'threadtime': [],
}
sim.util.Every(interval_ns * sim.util.Time.NS, self.periodic, statsdelta = self.sd, roi_only = True)
def hook_thread_start(self, threadid, creator):
for thread in range(len(self.stats['threadinstrs']), threadid+1):
self.stats['threadinstrs'].append(self.sd.getter('thread', thread, 'instruction_count'))
self.stats['threadtime'].append(self.sd.getter('thread', thread, 'elapsed_time'))
def periodic(self, time, time_delta):
if self.isTerminal:
self.fd.write('[THREADIPC] ')
self.fd.write('%u' % (time / 1e6)) # Time in ns
for thread in range(sim.thread.get_nthreads()):
# Print per-thread stats
try:
cycles = self.stats['threadtime'][thread].delta * self.freq / 1e9 # convert fs to cycles
instrs = self.stats['threadinstrs'][thread].delta
ipc = instrs / (cycles or 1)
self.fd.write(' %.3f' % ipc)
except TypeError:
pass # Skip newly created threads
self.fd.write('\n')
sim.util.register(IpcTrace())
|
[scripts] Add a per-thread IPC tracing script. It currently handles scheduling / oversubscription but not DVFS"""
ipctrace.py
Write a trace of instantaneous IPC values for all cores.
First argument is either a filename, or none to write to standard output.
Second argument is the interval size in nanoseconds (default is 10000)
"""
import sys, os, sim
class IpcTrace:
def setup(self, args):
self.freq = sim.dvfs.get_frequency(0) # This script does not support DVFS
args = dict(enumerate((args or '').split(':')))
filename = args.get(0, None)
interval_ns = long(args.get(1, 10000))
if filename:
self.fd = file(os.path.join(sim.config.output_dir, filename), 'w')
self.isTerminal = False
else:
self.fd = sys.stdout
self.isTerminal = True
self.sd = sim.util.StatsDelta()
self.stats = {
'threadinstrs': [],
'threadtime': [],
}
sim.util.Every(interval_ns * sim.util.Time.NS, self.periodic, statsdelta = self.sd, roi_only = True)
def hook_thread_start(self, threadid, creator):
for thread in range(len(self.stats['threadinstrs']), threadid+1):
self.stats['threadinstrs'].append(self.sd.getter('thread', thread, 'instruction_count'))
self.stats['threadtime'].append(self.sd.getter('thread', thread, 'elapsed_time'))
def periodic(self, time, time_delta):
if self.isTerminal:
self.fd.write('[THREADIPC] ')
self.fd.write('%u' % (time / 1e6)) # Time in ns
for thread in range(sim.thread.get_nthreads()):
# Print per-thread stats
try:
cycles = self.stats['threadtime'][thread].delta * self.freq / 1e9 # convert fs to cycles
instrs = self.stats['threadinstrs'][thread].delta
ipc = instrs / (cycles or 1)
self.fd.write(' %.3f' % ipc)
except TypeError:
pass # Skip newly created threads
self.fd.write('\n')
sim.util.register(IpcTrace())
|
<commit_before><commit_msg>[scripts] Add a per-thread IPC tracing script. It currently handles scheduling / oversubscription but not DVFS<commit_after>"""
ipctrace.py
Write a trace of instantaneous IPC values for all cores.
First argument is either a filename, or none to write to standard output.
Second argument is the interval size in nanoseconds (default is 10000)
"""
import sys, os, sim
class IpcTrace:
def setup(self, args):
self.freq = sim.dvfs.get_frequency(0) # This script does not support DVFS
args = dict(enumerate((args or '').split(':')))
filename = args.get(0, None)
interval_ns = long(args.get(1, 10000))
if filename:
self.fd = file(os.path.join(sim.config.output_dir, filename), 'w')
self.isTerminal = False
else:
self.fd = sys.stdout
self.isTerminal = True
self.sd = sim.util.StatsDelta()
self.stats = {
'threadinstrs': [],
'threadtime': [],
}
sim.util.Every(interval_ns * sim.util.Time.NS, self.periodic, statsdelta = self.sd, roi_only = True)
def hook_thread_start(self, threadid, creator):
for thread in range(len(self.stats['threadinstrs']), threadid+1):
self.stats['threadinstrs'].append(self.sd.getter('thread', thread, 'instruction_count'))
self.stats['threadtime'].append(self.sd.getter('thread', thread, 'elapsed_time'))
def periodic(self, time, time_delta):
if self.isTerminal:
self.fd.write('[THREADIPC] ')
self.fd.write('%u' % (time / 1e6)) # Time in ns
for thread in range(sim.thread.get_nthreads()):
# Print per-thread stats
try:
cycles = self.stats['threadtime'][thread].delta * self.freq / 1e9 # convert fs to cycles
instrs = self.stats['threadinstrs'][thread].delta
ipc = instrs / (cycles or 1)
self.fd.write(' %.3f' % ipc)
except TypeError:
pass # Skip newly created threads
self.fd.write('\n')
sim.util.register(IpcTrace())
|
|
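The periodic() hook in the record above reduces each interval to instructions over cycles, with cycles recovered from a femtosecond time delta via delta * freq / 1e9. A minimal standalone restatement of that arithmetic follows; the MHz unit for the frequency is an inference from that conversion, and the numbers are made up for illustration.

def interval_ipc(instr_delta, time_delta_fs, freq_mhz):
    # Mirrors the script's line: cycles = elapsed_time_fs * frequency / 1e9
    cycles = time_delta_fs * freq_mhz / 1e9
    return instr_delta / (cycles or 1)

# A 10 us interval (10,000 ns) on a 2000 MHz core retiring 15,000 instructions:
print(interval_ipc(15000, 10000 * 1e6, 2000))  # 0.75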
23d2fab545b2b384cb8f4d75932550252336131a
|
possel/irc.py
|
possel/irc.py
|
#!/usr/bin/env python3
# -*- coding: utf8 -*-
from tornado import ioloop, gen, tcpclient
loopinstance = ioloop.IOLoop.instance()
def split_irc_line(s):
"""Breaks a message from an IRC server into its prefix, command, and arguments.
"""
prefix = ''
trailing = []
if not s:
# Raise an exception of some kind
pass
if s[0] == ':':
prefix, s = s[1:].split(' ', 1)
if s.find(' :') != -1:
s, trailing = s.split(' :', 1)
args = s.split()
args.append(trailing)
else:
args = s.split()
command = args.pop(0)
return prefix, command, args
class IRCClient:
def __init__(self):
self.tcp_client_factory = tcpclient.TCPClient()
def start(self):
loopinstance.add_callback(self.connect)
def _write(self, line):
if line[-1] != '\n':
line += '\n'
return self.connection.write(line.encode('utf8'))
def pong(self, value):
self._write('PONG :{}'.format(value))
def handle_line(self, line):
line = str(line, encoding='utf8').strip()
(prefix, command, args) = split_irc_line(line)
print('Prefix: {}\nCommand: {}\nArgs: {}\n\n'.format(prefix, command, args))
if command.lower() == 'ping':
self.pong(args[0])
self._schedule_line()
def _schedule_line(self):
self.connection.read_until(b'\n', self.handle_line)
@gen.coroutine
def connect(self):
print('connecting')
self.connection = yield self.tcp_client_factory.connect('irc.imaginarynet.org.uk', 6667)
print('connected, initialising')
yield self._write('NICK butts')
yield self._write('USER mother 0 * :Your Mother')
print('done that')
self._schedule_line()
def main():
b = IRCClient()
b.start()
loopinstance.start()
if __name__ == '__main__':
import sys
main(*sys.argv[1:])
|
Add rudiments of an IRC library using IOStream
|
Add rudiments of an IRC library using IOStream
|
Python
|
bsd-3-clause
|
possel/possel,possel/possel,possel/possel,possel/possel
|
Add rudiments of an IRC library using IOStream
|
#!/usr/bin/env python3
# -*- coding: utf8 -*-
from tornado import ioloop, gen, tcpclient
loopinstance = ioloop.IOLoop.instance()
def split_irc_line(s):
"""Breaks a message from an IRC server into its prefix, command, and arguments.
"""
prefix = ''
trailing = []
if not s:
# Raise an exception of some kind
pass
if s[0] == ':':
prefix, s = s[1:].split(' ', 1)
if s.find(' :') != -1:
s, trailing = s.split(' :', 1)
args = s.split()
args.append(trailing)
else:
args = s.split()
command = args.pop(0)
return prefix, command, args
class IRCClient:
def __init__(self):
self.tcp_client_factory = tcpclient.TCPClient()
def start(self):
loopinstance.add_callback(self.connect)
def _write(self, line):
if line[-1] != '\n':
line += '\n'
return self.connection.write(line.encode('utf8'))
def pong(self, value):
self._write('PONG :{}'.format(value))
def handle_line(self, line):
line = str(line, encoding='utf8').strip()
(prefix, command, args) = split_irc_line(line)
print('Prefix: {}\nCommand: {}\nArgs: {}\n\n'.format(prefix, command, args))
if command.lower() == 'ping':
self.pong(args[0])
self._schedule_line()
def _schedule_line(self):
self.connection.read_until(b'\n', self.handle_line)
@gen.coroutine
def connect(self):
print('connecting')
self.connection = yield self.tcp_client_factory.connect('irc.imaginarynet.org.uk', 6667)
print('connected, initialising')
yield self._write('NICK butts')
yield self._write('USER mother 0 * :Your Mother')
print('done that')
self._schedule_line()
def main():
b = IRCClient()
b.start()
loopinstance.start()
if __name__ == '__main__':
import sys
main(*sys.argv[1:])
|
<commit_before><commit_msg>Add rudiments of an IRC library using IOStream<commit_after>
|
#!/usr/bin/env python3
# -*- coding: utf8 -*-
from tornado import ioloop, gen, tcpclient
loopinstance = ioloop.IOLoop.instance()
def split_irc_line(s):
"""Breaks a message from an IRC server into its prefix, command, and arguments.
"""
prefix = ''
trailing = []
if not s:
# Raise an exception of some kind
pass
if s[0] == ':':
prefix, s = s[1:].split(' ', 1)
if s.find(' :') != -1:
s, trailing = s.split(' :', 1)
args = s.split()
args.append(trailing)
else:
args = s.split()
command = args.pop(0)
return prefix, command, args
class IRCClient:
def __init__(self):
self.tcp_client_factory = tcpclient.TCPClient()
def start(self):
loopinstance.add_callback(self.connect)
def _write(self, line):
if line[-1] != '\n':
line += '\n'
return self.connection.write(line.encode('utf8'))
def pong(self, value):
self._write('PONG :{}'.format(value))
def handle_line(self, line):
line = str(line, encoding='utf8').strip()
(prefix, command, args) = split_irc_line(line)
print('Prefix: {}\nCommand: {}\nArgs: {}\n\n'.format(prefix, command, args))
if command.lower() == 'ping':
self.pong(args[0])
self._schedule_line()
def _schedule_line(self):
self.connection.read_until(b'\n', self.handle_line)
@gen.coroutine
def connect(self):
print('connecting')
self.connection = yield self.tcp_client_factory.connect('irc.imaginarynet.org.uk', 6667)
print('connected, initialising')
yield self._write('NICK butts')
yield self._write('USER mother 0 * :Your Mother')
print('done that')
self._schedule_line()
def main():
b = IRCClient()
b.start()
loopinstance.start()
if __name__ == '__main__':
import sys
main(*sys.argv[1:])
|
Add rudiments of an IRC library using IOStream#!/usr/bin/env python3
# -*- coding: utf8 -*-
from tornado import ioloop, gen, tcpclient
loopinstance = ioloop.IOLoop.instance()
def split_irc_line(s):
"""Breaks a message from an IRC server into its prefix, command, and arguments.
"""
prefix = ''
trailing = []
if not s:
# Raise an exception of some kind
pass
if s[0] == ':':
prefix, s = s[1:].split(' ', 1)
if s.find(' :') != -1:
s, trailing = s.split(' :', 1)
args = s.split()
args.append(trailing)
else:
args = s.split()
command = args.pop(0)
return prefix, command, args
class IRCClient:
def __init__(self):
self.tcp_client_factory = tcpclient.TCPClient()
def start(self):
loopinstance.add_callback(self.connect)
def _write(self, line):
if line[-1] != '\n':
line += '\n'
return self.connection.write(line.encode('utf8'))
def pong(self, value):
self._write('PONG :{}'.format(value))
def handle_line(self, line):
line = str(line, encoding='utf8').strip()
(prefix, command, args) = split_irc_line(line)
print('Prefix: {}\nCommand: {}\nArgs: {}\n\n'.format(prefix, command, args))
if command.lower() == 'ping':
self.pong(args[0])
self._schedule_line()
def _schedule_line(self):
self.connection.read_until(b'\n', self.handle_line)
@gen.coroutine
def connect(self):
print('connecting')
self.connection = yield self.tcp_client_factory.connect('irc.imaginarynet.org.uk', 6667)
print('connected, initialising')
yield self._write('NICK butts')
yield self._write('USER mother 0 * :Your Mother')
print('done that')
self._schedule_line()
def main():
b = IRCClient()
b.start()
loopinstance.start()
if __name__ == '__main__':
import sys
main(*sys.argv[1:])
|
<commit_before><commit_msg>Add rudiments of an IRC library using IOStream<commit_after>#!/usr/bin/env python3
# -*- coding: utf8 -*-
from tornado import ioloop, gen, tcpclient
loopinstance = ioloop.IOLoop.instance()
def split_irc_line(s):
"""Breaks a message from an IRC server into its prefix, command, and arguments.
"""
prefix = ''
trailing = []
if not s:
# Raise an exception of some kind
pass
if s[0] == ':':
prefix, s = s[1:].split(' ', 1)
if s.find(' :') != -1:
s, trailing = s.split(' :', 1)
args = s.split()
args.append(trailing)
else:
args = s.split()
command = args.pop(0)
return prefix, command, args
class IRCClient:
def __init__(self):
self.tcp_client_factory = tcpclient.TCPClient()
def start(self):
loopinstance.add_callback(self.connect)
def _write(self, line):
if line[-1] != '\n':
line += '\n'
return self.connection.write(line.encode('utf8'))
def pong(self, value):
self._write('PONG :{}'.format(value))
def handle_line(self, line):
line = str(line, encoding='utf8').strip()
(prefix, command, args) = split_irc_line(line)
print('Prefix: {}\nCommand: {}\nArgs: {}\n\n'.format(prefix, command, args))
if command.lower() == 'ping':
self.pong(args[0])
self._schedule_line()
def _schedule_line(self):
self.connection.read_until(b'\n', self.handle_line)
@gen.coroutine
def connect(self):
print('connecting')
self.connection = yield self.tcp_client_factory.connect('irc.imaginarynet.org.uk', 6667)
print('connected, initialising')
yield self._write('NICK butts')
yield self._write('USER mother 0 * :Your Mother')
print('done that')
self._schedule_line()
def main():
b = IRCClient()
b.start()
loopinstance.start()
if __name__ == '__main__':
import sys
main(*sys.argv[1:])
|
|
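The prefix/command/args handling in the record above is easy to sanity-check by feeding split_irc_line a hand-written raw line; the nick, channel and message below are invented for illustration, and the expected values in the comments follow directly from the splitting logic in the commit.

def split_irc_line(s):
    # Same splitting logic as in the commit, trimmed and repeated here so the
    # snippet runs on its own.
    prefix = ''
    trailing = []
    if s[0] == ':':
        prefix, s = s[1:].split(' ', 1)
    if s.find(' :') != -1:
        s, trailing = s.split(' :', 1)
        args = s.split()
        args.append(trailing)
    else:
        args = s.split()
    command = args.pop(0)
    return prefix, command, args

print(split_irc_line(':alice!user@host PRIVMSG #possel :hello there'))
# ('alice!user@host', 'PRIVMSG', ['#possel', 'hello there'])
print(split_irc_line('PING :irc.example.net'))
# ('', 'PING', ['irc.example.net'])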
5e18840ef328c18babe53dc38ee6baa4302a22f0
|
Cura/slice/cura_sf/fabmetheus_utilities/fabmetheus_tools/interpret_plugins/amf.py
|
Cura/slice/cura_sf/fabmetheus_utilities/fabmetheus_tools/interpret_plugins/amf.py
|
from __future__ import absolute_import
import zipfile
try:
from xml.etree import cElementTree as ElementTree
except:
from xml.etree import ElementTree
from fabmetheus_utilities.geometry.geometry_tools import face
from fabmetheus_utilities.geometry.solids import triangle_mesh
from fabmetheus_utilities.vector3 import Vector3
def getCarving(fileName=''):
"Get the triangle mesh for the dae file."
return amfModel().load(fileName)
class amfModel(triangle_mesh.TriangleMesh):
def __init__(self):
super(amfModel, self).__init__()
def load(self, filename):
try:
zfile = zipfile.ZipFile(filename)
xml = zfile.read(zfile.namelist()[0])
zfile.close()
except zipfile.BadZipfile:
f = open(filename, "r")
xml = f.read()
f.close()
amf = ElementTree.fromstring(xml)
if 'unit' in amf.attrib:
unit = amf.attrib['unit'].lower()
else:
unit = 'millimeter'
if unit == 'millimeter':
scale = 1.0
elif unit == 'meter':
scale = 1000.0
elif unit == 'inch':
scale = 25.4
elif unit == 'feet':
scale = 304.8
elif unit == 'micron':
scale = 0.001
else:
print "Unknown unit in amf: %s" % (unit)
scale = 1.0
for obj in amf.iter('object'):
for mesh in obj.iter('mesh'):
startIndex = len(self.vertexes)
for vertices in mesh.iter('vertices'):
for vertex in vertices.iter('vertex'):
for coordinates in vertex.iter('coordinates'):
v = [0.0,0.0,0.0]
for t in coordinates:
if t.tag == 'x':
v[0] = float(t.text)
elif t.tag == 'y':
v[1] = float(t.text)
elif t.tag == 'z':
v[2] = float(t.text)
self.vertexes.append(Vector3(v[0], v[1], v[2]))
for volume in mesh.iter('volume'):
for triangle in volume.iter('triangle'):
f = face.Face()
f.index = len(self.faces)
for t in triangle:
if t.tag == 'v1' or t.tag == 'v2' or t.tag == 'v3':
f.vertexIndexes.append(startIndex + int(t.text))
self.faces.append(f)
return self
|
Add basic AMF support to skeinforge.
|
Add basic AMF support to skeinforge.
|
Python
|
agpl-3.0
|
alephobjects/Cura,alephobjects/Cura,alephobjects/Cura
|
Add basic AMF support to skeinforge.
|
from __future__ import absolute_import
import zipfile
try:
from xml.etree import cElementTree as ElementTree
except:
from xml.etree import ElementTree
from fabmetheus_utilities.geometry.geometry_tools import face
from fabmetheus_utilities.geometry.solids import triangle_mesh
from fabmetheus_utilities.vector3 import Vector3
def getCarving(fileName=''):
"Get the triangle mesh for the dae file."
return amfModel().load(fileName)
class amfModel(triangle_mesh.TriangleMesh):
def __init__(self):
super(amfModel, self).__init__()
def load(self, filename):
try:
zfile = zipfile.ZipFile(filename)
xml = zfile.read(zfile.namelist()[0])
zfile.close()
except zipfile.BadZipfile:
f = open(filename, "r")
xml = f.read()
f.close()
amf = ElementTree.fromstring(xml)
if 'unit' in amf.attrib:
unit = amf.attrib['unit'].lower()
else:
unit = 'millimeter'
if unit == 'millimeter':
scale = 1.0
elif unit == 'meter':
scale = 1000.0
elif unit == 'inch':
scale = 25.4
elif unit == 'feet':
scale = 304.8
elif unit == 'micron':
scale = 0.001
else:
print "Unknown unit in amf: %s" % (unit)
scale = 1.0
for obj in amf.iter('object'):
for mesh in obj.iter('mesh'):
startIndex = len(self.vertexes)
for vertices in mesh.iter('vertices'):
for vertex in vertices.iter('vertex'):
for coordinates in vertex.iter('coordinates'):
v = [0.0,0.0,0.0]
for t in coordinates:
if t.tag == 'x':
v[0] = float(t.text)
elif t.tag == 'y':
v[1] = float(t.text)
elif t.tag == 'z':
v[2] = float(t.text)
self.vertexes.append(Vector3(v[0], v[1], v[2]))
for volume in mesh.iter('volume'):
for triangle in volume.iter('triangle'):
f = face.Face()
f.index = len(self.faces)
for t in triangle:
if t.tag == 'v1' or t.tag == 'v2' or t.tag == 'v3':
f.vertexIndexes.append(startIndex + int(t.text))
self.faces.append(f)
return self
|
<commit_before><commit_msg>Add basic AMF support to skeinforge.<commit_after>
|
from __future__ import absolute_import
import zipfile
try:
from xml.etree import cElementTree as ElementTree
except:
from xml.etree import ElementTree
from fabmetheus_utilities.geometry.geometry_tools import face
from fabmetheus_utilities.geometry.solids import triangle_mesh
from fabmetheus_utilities.vector3 import Vector3
def getCarving(fileName=''):
"Get the triangle mesh for the dae file."
return amfModel().load(fileName)
class amfModel(triangle_mesh.TriangleMesh):
def __init__(self):
super(amfModel, self).__init__()
def load(self, filename):
try:
zfile = zipfile.ZipFile(filename)
xml = zfile.read(zfile.namelist()[0])
zfile.close()
except zipfile.BadZipfile:
f = open(filename, "r")
xml = f.read()
f.close()
amf = ElementTree.fromstring(xml)
if 'unit' in amf.attrib:
unit = amf.attrib['unit'].lower()
else:
unit = 'millimeter'
if unit == 'millimeter':
scale = 1.0
elif unit == 'meter':
scale = 1000.0
elif unit == 'inch':
scale = 25.4
elif unit == 'feet':
scale = 304.8
elif unit == 'micron':
scale = 0.001
else:
print "Unknown unit in amf: %s" % (unit)
scale = 1.0
for obj in amf.iter('object'):
for mesh in obj.iter('mesh'):
startIndex = len(self.vertexes)
for vertices in mesh.iter('vertices'):
for vertex in vertices.iter('vertex'):
for coordinates in vertex.iter('coordinates'):
v = [0.0,0.0,0.0]
for t in coordinates:
if t.tag == 'x':
v[0] = float(t.text)
elif t.tag == 'y':
v[1] = float(t.text)
elif t.tag == 'z':
v[2] = float(t.text)
self.vertexes.append(Vector3(v[0], v[1], v[2]))
for volume in mesh.iter('volume'):
for triangle in volume.iter('triangle'):
f = face.Face()
f.index = len(self.faces)
for t in triangle:
if t.tag == 'v1' or t.tag == 'v2' or t.tag == 'v3':
f.vertexIndexes.append(startIndex + int(t.text))
self.faces.append(f)
return self
|
Add basic AMF support to skeinforge.from __future__ import absolute_import
import zipfile
try:
from xml.etree import cElementTree as ElementTree
except:
from xml.etree import ElementTree
from fabmetheus_utilities.geometry.geometry_tools import face
from fabmetheus_utilities.geometry.solids import triangle_mesh
from fabmetheus_utilities.vector3 import Vector3
def getCarving(fileName=''):
"Get the triangle mesh for the dae file."
return amfModel().load(fileName)
class amfModel(triangle_mesh.TriangleMesh):
def __init__(self):
super(amfModel, self).__init__()
def load(self, filename):
try:
zfile = zipfile.ZipFile(filename)
xml = zfile.read(zfile.namelist()[0])
zfile.close()
except zipfile.BadZipfile:
f = open(filename, "r")
xml = f.read()
f.close()
amf = ElementTree.fromstring(xml)
if 'unit' in amf.attrib:
unit = amf.attrib['unit'].lower()
else:
unit = 'millimeter'
if unit == 'millimeter':
scale = 1.0
elif unit == 'meter':
scale = 1000.0
elif unit == 'inch':
scale = 25.4
elif unit == 'feet':
scale = 304.8
elif unit == 'micron':
scale = 0.001
else:
print "Unknown unit in amf: %s" % (unit)
scale = 1.0
for obj in amf.iter('object'):
for mesh in obj.iter('mesh'):
startIndex = len(self.vertexes)
for vertices in mesh.iter('vertices'):
for vertex in vertices.iter('vertex'):
for coordinates in vertex.iter('coordinates'):
v = [0.0,0.0,0.0]
for t in coordinates:
if t.tag == 'x':
v[0] = float(t.text)
elif t.tag == 'y':
v[1] = float(t.text)
elif t.tag == 'z':
v[2] = float(t.text)
self.vertexes.append(Vector3(v[0], v[1], v[2]))
for volume in mesh.iter('volume'):
for triangle in volume.iter('triangle'):
f = face.Face()
f.index = len(self.faces)
for t in triangle:
if t.tag == 'v1' or t.tag == 'v2' or t.tag == 'v3':
f.vertexIndexes.append(startIndex + int(t.text))
self.faces.append(f)
return self
|
<commit_before><commit_msg>Add basic AMF support to skeinforge.<commit_after>from __future__ import absolute_import
import zipfile
try:
from xml.etree import cElementTree as ElementTree
except:
from xml.etree import ElementTree
from fabmetheus_utilities.geometry.geometry_tools import face
from fabmetheus_utilities.geometry.solids import triangle_mesh
from fabmetheus_utilities.vector3 import Vector3
def getCarving(fileName=''):
"Get the triangle mesh for the dae file."
return amfModel().load(fileName)
class amfModel(triangle_mesh.TriangleMesh):
def __init__(self):
super(amfModel, self).__init__()
def load(self, filename):
try:
zfile = zipfile.ZipFile(filename)
xml = zfile.read(zfile.namelist()[0])
zfile.close()
except zipfile.BadZipfile:
f = open(filename, "r")
xml = f.read()
f.close()
amf = ElementTree.fromstring(xml)
if 'unit' in amf.attrib:
unit = amf.attrib['unit'].lower()
else:
unit = 'millimeter'
if unit == 'millimeter':
scale = 1.0
elif unit == 'meter':
scale = 1000.0
elif unit == 'inch':
scale = 25.4
elif unit == 'feet':
scale = 304.8
elif unit == 'micron':
scale = 0.001
else:
print "Unknown unit in amf: %s" % (unit)
scale = 1.0
for obj in amf.iter('object'):
for mesh in obj.iter('mesh'):
startIndex = len(self.vertexes)
for vertices in mesh.iter('vertices'):
for vertex in vertices.iter('vertex'):
for coordinates in vertex.iter('coordinates'):
v = [0.0,0.0,0.0]
for t in coordinates:
if t.tag == 'x':
v[0] = float(t.text)
elif t.tag == 'y':
v[1] = float(t.text)
elif t.tag == 'z':
v[2] = float(t.text)
self.vertexes.append(Vector3(v[0], v[1], v[2]))
for volume in mesh.iter('volume'):
for triangle in volume.iter('triangle'):
f = face.Face()
f.index = len(self.faces)
for t in triangle:
if t.tag == 'v1' or t.tag == 'v2' or t.tag == 'v3':
f.vertexIndexes.append(startIndex + int(t.text))
self.faces.append(f)
return self
|
|
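A minimal hand-written AMF document containing just the elements the loader above walks (object > mesh > vertices > vertex > coordinates, and volume > triangle) makes the parsing easy to follow. The snippet below is illustrative only: the geometry is a made-up single triangle, and it relies on x/y/z appearing in order instead of checking tag names the way the plugin does.

from xml.etree import ElementTree

AMF_XML = """<amf unit="millimeter">
 <object id="0"><mesh>
  <vertices>
   <vertex><coordinates><x>0</x><y>0</y><z>0</z></coordinates></vertex>
   <vertex><coordinates><x>10</x><y>0</y><z>0</z></coordinates></vertex>
   <vertex><coordinates><x>0</x><y>10</y><z>0</z></coordinates></vertex>
  </vertices>
  <volume><triangle><v1>0</v1><v2>1</v2><v3>2</v3></triangle></volume>
 </mesh></object>
</amf>"""

amf = ElementTree.fromstring(AMF_XML)
vertices = [tuple(float(c.text) for c in coords) for coords in amf.iter('coordinates')]
triangles = [tuple(int(v.text) for v in tri) for tri in amf.iter('triangle')]
print(vertices)   # [(0.0, 0.0, 0.0), (10.0, 0.0, 0.0), (0.0, 10.0, 0.0)]
print(triangles)  # [(0, 1, 2)]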
98c019c2328f4524019bf3a8cfb2e40842e2c7c7
|
examples/example_maxent.py
|
examples/example_maxent.py
|
from __future__ import print_function
import dit
import numpy as np
# The functions will import this for you...just make sure you have it.
import cvxopt
def print_output(d, maxent_dists):
# Calculate the entropy for each.
entropies = np.asarray(map(dit.shannon.entropy, maxent_dists))
print()
print("Entropies:")
print(entropies)
# Differences are what we learn at each step.
netinfo = -1 * np.diff(entropies)
print()
print("Network Informations:")
print(netinfo)
# Total correlation is what is learned at ith from (i-1)th starting at i=2.
total_corr = netinfo[1:].sum()
total_corr_true = dit.multivariate.total_correlation(d)
print()
print("Total correlation: {0} (numerically)\t {1} (true)".format(total_corr, total_corr_true))
print()
def example_A():
"""
Calculate network information using marginal maxentropy.
"""
d = dit.example_dists.Xor()
# Calculate marginal maximum entropy distributions up to order 3.
maxent_dists = dit.algorithms.marginal_maxent_dists(d, 3)
print_output(d, maxent_dists)
return maxent_dists
def example_B():
"""
Calculate network information using moment-based maxentropy.
"""
d = dit.example_dists.Xor()
# Calculate moment maximum entropy distributions up to order 3.
mapping = [-1, 1]
maxent_dists = dit.algorithms.moment_maxent_dists(d, mapping, 3, with_replacement=False)
print_output(d, maxent_dists)
return maxent_dists
def example_C():
# Giant bit, perfect correlation.
# Note, doesn't converge if we do this with n=4. e.g.: '1111', '0000'. Lol!
outcomes = ['111', '000']
d = dit.Distribution(outcomes, [.5, .5])
maxent_dists = dit.algorithms.marginal_maxent_dists(d)
print_output(d, maxent_dists)
if __name__ == '__main__':
example_A()
|
Add an example script for maxent.
|
Add an example script for maxent.
|
Python
|
bsd-3-clause
|
Autoplectic/dit,chebee7i/dit,dit/dit,dit/dit,Autoplectic/dit,chebee7i/dit,chebee7i/dit,Autoplectic/dit,dit/dit,Autoplectic/dit,dit/dit,dit/dit,chebee7i/dit,Autoplectic/dit
|
Add an example script for maxent.
|
from __future__ import print_function
import dit
import numpy as np
# The functions will import this for you...just make sure you have it.
import cvxopt
def print_output(d, maxent_dists):
# Calculate the entropy for each.
entropies = np.asarray(map(dit.shannon.entropy, maxent_dists))
print()
print("Entropies:")
print(entropies)
# Differences are what we learn at each step.
netinfo = -1 * np.diff(entropies)
print()
print("Network Informations:")
print(netinfo)
# Total correlation is what is learned at ith from (i-1)th starting at i=2.
total_corr = netinfo[1:].sum()
total_corr_true = dit.multivariate.total_correlation(d)
print()
print("Total correlation: {0} (numerically)\t {1} (true)".format(total_corr, total_corr_true))
print()
def example_A():
"""
Calculate network information using marginal maxentropy.
"""
d = dit.example_dists.Xor()
# Calculate marginal maximum entropy distributions up to order 3.
maxent_dists = dit.algorithms.marginal_maxent_dists(d, 3)
print_output(d, maxent_dists)
return maxent_dists
def example_B():
"""
Calculate network information using moment-based maxentropy.
"""
d = dit.example_dists.Xor()
# Calculate moment maximum entropy distributions up to order 3.
mapping = [-1, 1]
maxent_dists = dit.algorithms.moment_maxent_dists(d, mapping, 3, with_replacement=False)
print_output(d, maxent_dists)
return maxent_dists
def example_C():
# Giant bit, perfect correlation.
# Note, doesn't converge if we do this with n=4. e.g.: '1111', '0000'. Lol!
outcomes = ['111', '000']
d = dit.Distribution(outcomes, [.5, .5])
maxent_dists = dit.algorithms.marginal_maxent_dists(d)
print_output(d, maxent_dists)
if __name__ == '__main__':
example_A()
|
<commit_before><commit_msg>Add an example script for maxent.<commit_after>
|
from __future__ import print_function
import dit
import numpy as np
# The functions will import this for you...just make sure you have it.
import cvxopt
def print_output(d, maxent_dists):
# Calculate the entropy for each.
entropies = np.asarray(map(dit.shannon.entropy, maxent_dists))
print()
print("Entropies:")
print(entropies)
# Differences are what we learn at each step.
netinfo = -1 * np.diff(entropies)
print()
print("Network Informations:")
print(netinfo)
# Total correlation is what is learned at ith from (i-1)th starting at i=2.
total_corr = netinfo[1:].sum()
total_corr_true = dit.multivariate.total_correlation(d)
print()
print("Total correlation: {0} (numerically)\t {1} (true)".format(total_corr, total_corr_true))
print()
def example_A():
"""
Calculate network information using marginal maxentropy.
"""
d = dit.example_dists.Xor()
# Calculate marginal maximum entropy distributions up to order 3.
maxent_dists = dit.algorithms.marginal_maxent_dists(d, 3)
print_output(d, maxent_dists)
return maxent_dists
def example_B():
"""
Calculate network information using moment-based maxentropy.
"""
d = dit.example_dists.Xor()
# Calculate moment maximum entropy distributions up to order 3.
mapping = [-1, 1]
maxent_dists = dit.algorithms.moment_maxent_dists(d, mapping, 3, with_replacement=False)
print_output(d, maxent_dists)
return maxent_dists
def example_C():
# Giant bit, perfect correlation.
# Note, doesn't converge if we do this with n=4. e.g.: '1111', '0000'. Lol!
outcomes = ['111', '000']
d = dit.Distribution(outcomes, [.5, .5])
maxent_dists = dit.algorithms.marginal_maxent_dists(d)
print_output(d, maxent_dists)
if __name__ == '__main__':
example_A()
|
Add an example script for maxent.from __future__ import print_function
import dit
import numpy as np
# The functions will import this for you...just make sure you have it.
import cvxopt
def print_output(d, maxent_dists):
# Calculate the entropy for each.
entropies = np.asarray(map(dit.shannon.entropy, maxent_dists))
print()
print("Entropies:")
print(entropies)
# Differences are what we learn at each step.
netinfo = -1 * np.diff(entropies)
print()
print("Network Informations:")
print(netinfo)
# Total correlation is what is learned at ith from (i-1)th starting at i=2.
total_corr = netinfo[1:].sum()
total_corr_true = dit.multivariate.total_correlation(d)
print()
print("Total correlation: {0} (numerically)\t {1} (true)".format(total_corr, total_corr_true))
print()
def example_A():
"""
Calculate network information using marginal maxentropy.
"""
d = dit.example_dists.Xor()
# Calculate marginal maximum entropy distributions up to order 3.
maxent_dists = dit.algorithms.marginal_maxent_dists(d, 3)
print_output(d, maxent_dists)
return maxent_dists
def example_B():
"""
Calculate network information using moment-based maxentropy.
"""
d = dit.example_dists.Xor()
# Calculate moment maximum entropy distributions up to order 3.
mapping = [-1, 1]
maxent_dists = dit.algorithms.moment_maxent_dists(d, mapping, 3, with_replacement=False)
print_output(d, maxent_dists)
return maxent_dists
def example_C():
# Giant bit, perfect correlation.
# Note, doesn't converge if we do this with n=4. e.g.: '1111', '0000'. Lol!
outcomes = ['111', '000']
d = dit.Distribution(outcomes, [.5, .5])
maxent_dists = dit.algorithms.marginal_maxent_dists(d)
print_output(d, maxent_dists)
if __name__ == '__main__':
example_A()
|
<commit_before><commit_msg>Add an example script for maxent.<commit_after>from __future__ import print_function
import dit
import numpy as np
# The functions will import this for you...just make sure you have it.
import cvxopt
def print_output(d, maxent_dists):
# Calculate the entropy for each.
entropies = np.asarray(map(dit.shannon.entropy, maxent_dists))
print()
print("Entropies:")
print(entropies)
# Differences are what we learn at each step.
netinfo = -1 * np.diff(entropies)
print()
print("Network Informations:")
print(netinfo)
# Total correlation is what is learned at ith from (i-1)th starting at i=2.
total_corr = netinfo[1:].sum()
total_corr_true = dit.multivariate.total_correlation(d)
print()
print("Total correlation: {0} (numerically)\t {1} (true)".format(total_corr, total_corr_true))
print()
def example_A():
"""
Calculate network information using marginal maxentropy.
"""
d = dit.example_dists.Xor()
# Calculate marginal maximum entropy distributions up to order 3.
maxent_dists = dit.algorithms.marginal_maxent_dists(d, 3)
print_output(d, maxent_dists)
return maxent_dists
def example_B():
"""
Calculate network information using moment-based maxentropy.
"""
d = dit.example_dists.Xor()
# Calculate moment maximum entropy distributions up to order 3.
mapping = [-1, 1]
maxent_dists = dit.algorithms.moment_maxent_dists(d, mapping, 3, with_replacement=False)
print_output(d, maxent_dists)
return maxent_dists
def example_C():
# Giant bit, perfect correlation.
# Note, doesn't converge if we do this with n=4. e.g.: '1111', '0000'. Lol!
outcomes = ['111', '000']
d = dit.Distribution(outcomes, [.5, .5])
maxent_dists = dit.algorithms.marginal_maxent_dists(d)
print_output(d, maxent_dists)
if __name__ == '__main__':
example_A()
|
|
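For the Xor() example used in the record above, the expected numbers can be checked by hand without dit: the three-bit XOR distribution is uniform over four outcomes (2 bits of entropy), while its order-1 and order-2 marginal maximum-entropy approximations are uniform over all eight outcomes (3 bits), so the total correlation recovered from the entropy differences should come out to 1 bit. A small numpy check of that arithmetic, with the two distributions hard-coded rather than computed through dit:

import numpy as np

def entropy_bits(p):
    p = np.asarray(p, dtype=float)
    p = p[p > 0]
    return float(-(p * np.log2(p)).sum())

h_joint = entropy_bits([0.25] * 4)    # XOR support: 000, 011, 101, 110
h_maxent = entropy_bits([0.125] * 8)  # order-1/order-2 maxent approximation
print(h_joint, h_maxent, h_maxent - h_joint)  # 2.0 3.0 1.0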
9ce9152fc6a610505b055d2648ba3f0bf99dd153
|
opencivicdata/elections/migrations/0002_auto_20170731_2047.py
|
opencivicdata/elections/migrations/0002_auto_20170731_2047.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-31 20:47
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('elections', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='ballotmeasurecontest',
options={'ordering': ('election', 'name')},
),
migrations.AlterModelOptions(
name='candidatecontest',
options={'ordering': ('election', 'name')},
),
migrations.AlterModelOptions(
name='retentioncontest',
options={'ordering': ('election', 'name')},
),
]
|
Add missing migration after editing meta options
|
Add missing migration after editing meta options
|
Python
|
bsd-3-clause
|
opencivicdata/python-opencivicdata,opencivicdata/python-opencivicdata,opencivicdata/python-opencivicdata-django,opencivicdata/python-opencivicdata-django,opencivicdata/python-opencivicdata-django
|
Add missing migration after editing meta options
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-31 20:47
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('elections', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='ballotmeasurecontest',
options={'ordering': ('election', 'name')},
),
migrations.AlterModelOptions(
name='candidatecontest',
options={'ordering': ('election', 'name')},
),
migrations.AlterModelOptions(
name='retentioncontest',
options={'ordering': ('election', 'name')},
),
]
|
<commit_before><commit_msg>Add missing migration after editing meta options<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-31 20:47
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('elections', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='ballotmeasurecontest',
options={'ordering': ('election', 'name')},
),
migrations.AlterModelOptions(
name='candidatecontest',
options={'ordering': ('election', 'name')},
),
migrations.AlterModelOptions(
name='retentioncontest',
options={'ordering': ('election', 'name')},
),
]
|
Add missing migration after editing meta options# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-31 20:47
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('elections', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='ballotmeasurecontest',
options={'ordering': ('election', 'name')},
),
migrations.AlterModelOptions(
name='candidatecontest',
options={'ordering': ('election', 'name')},
),
migrations.AlterModelOptions(
name='retentioncontest',
options={'ordering': ('election', 'name')},
),
]
|
<commit_before><commit_msg>Add missing migration after editing meta options<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-31 20:47
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('elections', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='ballotmeasurecontest',
options={'ordering': ('election', 'name')},
),
migrations.AlterModelOptions(
name='candidatecontest',
options={'ordering': ('election', 'name')},
),
migrations.AlterModelOptions(
name='retentioncontest',
options={'ordering': ('election', 'name')},
),
]
|
|
f629b836aad3c06a387416c9a407df722d65d89a
|
py/subarray-product-less-than-k.py
|
py/subarray-product-less-than-k.py
|
class Solution(object):
def numSubarrayProductLessThanK(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
ans = 0
subproduct = 1
if k <= 1:
return 0
start, end = 0, 0
while start < len(nums) or end < len(nums):
if subproduct < k:
ans += end - start
if end < len(nums):
subproduct *= nums[end]
end += 1
else:
break
else:
subproduct /= nums[start]
start += 1
return ans
|
Add py solution for 713. Subarray Product Less Than K
|
Add py solution for 713. Subarray Product Less Than K
713. Subarray Product Less Than K: https://leetcode.com/problems/subarray-product-less-than-k/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 713. Subarray Product Less Than K
713. Subarray Product Less Than K: https://leetcode.com/problems/subarray-product-less-than-k/
|
class Solution(object):
def numSubarrayProductLessThanK(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
ans = 0
subproduct = 1
if k <= 1:
return 0
start, end = 0, 0
while start < len(nums) or end < len(nums):
if subproduct < k:
ans += end - start
if end < len(nums):
subproduct *= nums[end]
end += 1
else:
break
else:
subproduct /= nums[start]
start += 1
return ans
|
<commit_before><commit_msg>Add py solution for 713. Subarray Product Less Than K
713. Subarray Product Less Than K: https://leetcode.com/problems/subarray-product-less-than-k/<commit_after>
|
class Solution(object):
def numSubarrayProductLessThanK(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
ans = 0
subproduct = 1
if k <= 1:
return 0
start, end = 0, 0
while start < len(nums) or end < len(nums):
if subproduct < k:
ans += end - start
if end < len(nums):
subproduct *= nums[end]
end += 1
else:
break
else:
subproduct /= nums[start]
start += 1
return ans
|
Add py solution for 713. Subarray Product Less Than K
713. Subarray Product Less Than K: https://leetcode.com/problems/subarray-product-less-than-k/class Solution(object):
def numSubarrayProductLessThanK(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
ans = 0
subproduct = 1
if k <= 1:
return 0
start, end = 0, 0
while start < len(nums) or end < len(nums):
if subproduct < k:
ans += end - start
if end < len(nums):
subproduct *= nums[end]
end += 1
else:
break
else:
subproduct /= nums[start]
start += 1
return ans
|
<commit_before><commit_msg>Add py solution for 713. Subarray Product Less Than K
713. Subarray Product Less Than K: https://leetcode.com/problems/subarray-product-less-than-k/<commit_after>class Solution(object):
def numSubarrayProductLessThanK(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
ans = 0
subproduct = 1
if k <= 1:
return 0
start, end = 0, 0
while start < len(nums) or end < len(nums):
if subproduct < k:
ans += end - start
if end < len(nums):
subproduct *= nums[end]
end += 1
else:
break
else:
subproduct /= nums[start]
start += 1
return ans
|
|
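The window bookkeeping in the solution above is easiest to check against the example from the problem statement: nums = [10, 5, 2, 6] with k = 100 has exactly 8 subarrays whose product stays below 100. An equivalent reformulation of the same two-pointer idea (a compact restatement, not the submitted code) reproduces that count:

def count_subarrays(nums, k):
    if k <= 1:
        return 0
    ans, product, start = 0, 1, 0
    for end, value in enumerate(nums):
        product *= value
        while product >= k:
            product //= nums[start]  # exact: only factors already multiplied in are removed
            start += 1
        ans += end - start + 1       # number of valid windows ending at `end`
    return ans

print(count_subarrays([10, 5, 2, 6], 100))  # 8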
613318672bf05b76feedc26d7ed06a773cc9faf7
|
card_match_v3.py
|
card_match_v3.py
|
import pyglet
def draw_card():
pyglet.graphics.draw(4,
pyglet.gl.GL_QUADS,
('v2i', (get_card_vertices()))
)
def get_card_width():
return 300
def get_card_height():
return 200
def get_card_vertices():
card_width = get_card_width()
card_height = get_card_height()
card_vertices = [
0, 0,
0, card_height,
card_width, card_height,
card_width, 0
]
return card_vertices
def on_mouse_press(x, y, button, modifiers):
print("Mouse Pressed")
if x < get_card_width() and y < get_card_height():
print("Mouse inside card")
else:
print("Mouse outside card")
# Create a Pyglet Window
window = pyglet.window.Window()
window.push_handlers(on_mouse_press)
# Set up our window event handlers, we need to do this before we start our app running
@window.event
def on_draw():
window.clear()
draw_card()
# Start the app running!
pyglet.app.run()
|
Add card match v3 python file
|
3: Add card match v3 python file
|
Python
|
mit
|
SingingTree/CardMatchPyglet
|
3: Add card match v3 python file
|
import pyglet
def draw_card():
pyglet.graphics.draw(4,
pyglet.gl.GL_QUADS,
('v2i', (get_card_vertices()))
)
def get_card_width():
return 300
def get_card_height():
return 200
def get_card_vertices():
card_width = get_card_width()
card_height = get_card_height()
card_vertices = [
0, 0,
0, card_height,
card_width, card_height,
card_width, 0
]
return card_vertices
def on_mouse_press(x, y, button, modifiers):
print("Mouse Pressed")
if x < get_card_width() and y < get_card_height():
print("Mouse inside card")
else:
print("Mouse outside card")
# Create a Pyglet Window
window = pyglet.window.Window()
window.push_handlers(on_mouse_press)
# Set up our window event handlers, we need to do this before we start our app running
@window.event
def on_draw():
window.clear()
draw_card()
# Start the app running!
pyglet.app.run()
|
<commit_before><commit_msg>3: Add card match v3 python file<commit_after>
|
import pyglet
def draw_card():
pyglet.graphics.draw(4,
pyglet.gl.GL_QUADS,
('v2i', (get_card_vertices()))
)
def get_card_width():
return 300
def get_card_height():
return 200
def get_card_vertices():
card_width = get_card_width()
card_height = get_card_height()
card_vertices = [
0, 0,
0, card_height,
card_width, card_height,
card_width, 0
]
return card_vertices
def on_mouse_press(x, y, button, modifiers):
print("Mouse Pressed")
if x < get_card_width() and y < get_card_height():
print("Mouse inside card")
else:
print("Mouse outside card")
# Create a Pyglet Window
window = pyglet.window.Window()
window.push_handlers(on_mouse_press)
# Set up our window event handlers, we need to do this before we start our app running
@window.event
def on_draw():
window.clear()
draw_card()
# Start the app running!
pyglet.app.run()
|
3: Add card match v3 python fileimport pyglet
def draw_card():
pyglet.graphics.draw(4,
pyglet.gl.GL_QUADS,
('v2i', (get_card_vertices()))
)
def get_card_width():
return 300
def get_card_height():
return 200
def get_card_vertices():
card_width = get_card_width()
card_height = get_card_height()
card_vertices = [
0, 0,
0, card_height,
card_width, card_height,
card_width, 0
]
return card_vertices
def on_mouse_press(x, y, button, modifiers):
print("Mouse Pressed")
if x < get_card_width() and y < get_card_height():
print("Mouse inside card")
else:
print("Mouse outside card")
# Create a Pyglet Window
window = pyglet.window.Window()
window.push_handlers(on_mouse_press)
# Set up our window event handlers, we need to do this before we start our app running
@window.event
def on_draw():
window.clear()
draw_card()
# Start the app running!
pyglet.app.run()
|
<commit_before><commit_msg>3: Add card match v3 python file<commit_after>import pyglet
def draw_card():
pyglet.graphics.draw(4,
pyglet.gl.GL_QUADS,
('v2i', (get_card_vertices()))
)
def get_card_width():
return 300
def get_card_height():
return 200
def get_card_vertices():
card_width = get_card_width()
card_height = get_card_height()
card_vertices = [
0, 0,
0, card_height,
card_width, card_height,
card_width, 0
]
return card_vertices
def on_mouse_press(x, y, button, modifiers):
print("Mouse Pressed")
if x < get_card_width() and y < get_card_height():
print("Mouse inside card")
else:
print("Mouse outside card")
# Create a Pyglet Window
window = pyglet.window.Window()
window.push_handlers(on_mouse_press)
# Set up our window event handlers, we need to do this before we start our app running
@window.event
def on_draw():
window.clear()
draw_card()
# Start the app running!
pyglet.app.run()
|
|
ff1b1f8c61ea14c598443b54024357dc05d4dda2
|
shapes.py
|
shapes.py
|
#Create a function to get a mesh file 'f'
def getMesh(fname):
#Open the file
f = open(fname, "r")
#Store the lines as a list
f = list(f)
#Strip newlines from the list
for l in range(len(f)):
f[l] = f[l].replace("\n","")
#Store the number of vertices, edges and sides
v = int(f[0])
e = int(f[1])
s = int(f[2])
#Create empty lists to hold the data
vertices = []
edges = []
sides = []
#Loop over all of the vertices and add them to the list
for i in range(3, v + 3):
#Split the vertex into a list of coordinates
vertex = f[i].split(",")
#Turn the coordinates into integers and append to the vertex
vertices.append((int(vertex[0]), int(vertex[1]), int(vertex[2])))
#Loop over all of the edges and add them to the list
for i in range(v + 3, e + v + 3):
#Split the edge into a list of vertices
edge = f[i].split(",")
#Turn the vertices indexes into integers and add to the side
edges.append((int(edge[0]), int(edge[1])))
#Loop over all of the sides and add them to the list
for i in range(e + v + 3, s + e + v + 3):
#Split the side into a list of vertices
side = f[i].split(",")
#Create a new side list to hold the data
newside = []
#For each vertex index in the side, add it to the new side variable
for p in side:
newside.append(int(p))
#Add the side to the list of sides
sides.append(tuple(newside))
#Return the data
return (vertices, edges, sides)
|
Write function to read in vertices, edges and sides from file.
|
Write function to read in vertices, edges and sides from file.
|
Python
|
mit
|
thebillington/pyPhys3D
|
Write function to read in vertices, edges and sides from file.
|
#Create a function to get a mesh file 'f'
def getMesh(fname):
#Open the file
f = open(fname, "r")
#Store the lines as a list
f = list(f)
#Strip newlines from the list
for l in range(len(f)):
f[l] = f[l].replace("\n","")
#Store the number of vertices, edges and sides
v = int(f[0])
e = int(f[1])
s = int(f[2])
#Create empty lists to hold the data
vertices = []
edges = []
sides = []
#Loop over all of the vertices and add them to the list
for i in range(3, v + 3):
#Split the vertex into a list of coordinates
vertex = f[i].split(",")
#Turn the coordinates into integers and append to the vertex
vertices.append((int(vertex[0]), int(vertex[1]), int(vertex[2])))
#Loop over all of the edges and add them to the list
for i in range(v + 3, e + v + 3):
#Split the edge into a list of vertices
edge = f[i].split(",")
#Turn the vertices indexes into integers and add to the side
edges.append((int(edge[0]), int(edge[1])))
#Loop over all of the sides and add them to the list
for i in range(e + v + 3, s + e + v + 3):
#Split the side into a list of vertices
side = f[i].split(",")
#Create a new side list to hold the data
newside = []
#For each vertex index in the side, add it to the new side variable
for p in side:
newside.append(int(p))
#Add the side to the list of sides
sides.append(tuple(newside))
#Return the data
return (vertices, edges, sides)
|
<commit_before><commit_msg>Write function to read in vertices, edges and sides from file.<commit_after>
|
#Create a function to get a mesh file 'f'
def getMesh(fname):
#Open the file
f = open(fname, "r")
#Store the lines as a list
f = list(f)
#Strip newlines from the list
for l in range(len(f)):
f[l] = f[l].replace("\n","")
#Store the number of vertices, edges and sides
v = int(f[0])
e = int(f[1])
s = int(f[2])
#Create empty lists to hold the data
vertices = []
edges = []
sides = []
#Loop over all of the vertices and add them to the list
for i in range(3, v + 3):
#Split the vertex into a list of coordinates
vertex = f[i].split(",")
#Turn the coordinates into integers and append to the vertex
vertices.append((int(vertex[0]), int(vertex[1]), int(vertex[2])))
#Loop over all of the edges and add them to the list
for i in range(v + 3, e + v + 3):
#Split the edge into a list of vertices
edge = f[i].split(",")
#Turn the vertices indexes into integers and add to the side
edges.append((int(edge[0]), int(edge[1])))
#Loop over all of the sides and add them to the list
for i in range(e + v + 3, s + e + v + 3):
#Split the side into a list of vertices
side = f[i].split(",")
#Create a new side list to hold the data
newside = []
#For each vertex index in the side, add it to the new side variable
for p in side:
newside.append(int(p))
#Add the side to the list of sides
sides.append(tuple(newside))
#Return the data
return (vertices, edges, sides)
|
Write function to read in vertices, edges and sides from file.#Create a function to get a mesh file 'f'
def getMesh(fname):
#Open the file
f = open(fname, "r")
#Store the lines as a list
f = list(f)
#Strip newlines from the list
for l in range(len(f)):
f[l] = f[l].replace("\n","")
#Store the number of vertices, edges and sides
v = int(f[0])
e = int(f[1])
s = int(f[2])
#Create empty lists to hold the data
vertices = []
edges = []
sides = []
#Loop over all of the vertices and add them to the list
for i in range(3, v + 3):
#Split the vertex into a list of coordinates
vertex = f[i].split(",")
#Turn the coordinates into integers and append to the vertex
vertices.append((int(vertex[0]), int(vertex[1]), int(vertex[2])))
#Loop over all of the edges and add them to the list
for i in range(v + 3, e + v + 3):
#Split the edge into a list of vertices
edge = f[i].split(",")
#Turn the vertices indexes into integers and add to the side
edges.append((int(edge[0]), int(edge[1])))
#Loop over all of the sides and add them to the list
for i in range(e + v + 3, s + e + v + 3):
#Split the side into a list of vertices
side = f[i].split(",")
#Create a new side list to hold the data
newside = []
#For each vertex index in the side, add it to the new side variable
for p in side:
newside.append(int(p))
#Add the side to the list of sides
sides.append(tuple(newside))
#Return the data
return (vertices, edges, sides)
|
<commit_before><commit_msg>Write function to read in vertices, edges and sides from file.<commit_after>#Create a function to get a mesh file 'f'
def getMesh(fname):
#Open the file
f = open(fname, "r")
#Store the lines as a list
f = list(f)
#Strip newlines from the list
for l in range(len(f)):
f[l] = f[l].replace("\n","")
#Store the number of vertices, edges and sides
v = int(f[0])
e = int(f[1])
s = int(f[2])
#Create empty lists to hold the data
vertices = []
edges = []
sides = []
#Loop over all of the vertices and add them to the list
for i in range(3, v + 3):
#Split the vertex into a list of coordinates
vertex = f[i].split(",")
#Turn the coordinates into integers and append to the vertex
vertices.append((int(vertex[0]), int(vertex[1]), int(vertex[2])))
#Loop over all of the edges and add them to the list
for i in range(v + 3, e + v + 3):
#Split the edge into a list of vertices
edge = f[i].split(",")
#Turn the vertices indexes into integers and add to the side
edges.append((int(edge[0]), int(edge[1])))
#Loop over all of the sides and add them to the list
for i in range(e + v + 3, s + e + v + 3):
#Split the side into a list of vertices
side = f[i].split(",")
#Create a new side list to hold the data
newside = []
#For each vertex index in the side, add it to the new side variable
for p in side:
newside.append(int(p))
#Add the side to the list of sides
sides.append(tuple(newside))
#Return the data
return (vertices, edges, sides)
|
|
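The file layout getMesh() expects follows directly from the loops above: three count lines (vertices, edges, sides), then that many "x,y,z" vertex lines, "a,b" edge lines and comma-separated side lines. A tiny made-up square mesh written to a temporary file exercises the parser end to end; the sketch assumes getMesh from the commit is already defined or importable.

import tempfile

mesh_text = """4
4
1
0,0,0
10,0,0
10,10,0
0,10,0
0,1
1,2
2,3
3,0
0,1,2,3
"""

with tempfile.NamedTemporaryFile('w', suffix='.mesh', delete=False) as f:
    f.write(mesh_text)
    path = f.name

vertices, edges, sides = getMesh(path)
print(vertices)  # [(0, 0, 0), (10, 0, 0), (10, 10, 0), (0, 10, 0)]
print(edges)     # [(0, 1), (1, 2), (2, 3), (3, 0)]
print(sides)     # [(0, 1, 2, 3)]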
7a53db57a8394891e11d6bbeb051964960662dfb
|
semabot_tools.py
|
semabot_tools.py
|
from flow import Flow
from config import BOTNAME, BOTPW, ORG_ID
try:
flow = Flow(BOTNAME)
except flow.FlowError as e:
flow = Flow()
flow.create_device(BOTNAME, BOTPW)
print('Device for bot {} created'.format(BOTNAME))
def print_channels():
print('\033[1mYour bot "{}" has access to these channels:\033[0m\n'.format(BOTNAME))
for channel in flow.enumerate_channels(ORG_ID):
print('\033[91m\033[1m"{name}":\033[0m \033[94m{id}\033[0m'.format(**channel))
if __name__ == "__main__":
print_channels()
|
Add tool for printing channel IDs
|
Add tool for printing channel IDs
|
Python
|
mit
|
datamade/semabot,datamade/semabot
|
Add tool for printing channel IDs
|
from flow import Flow
from config import BOTNAME, BOTPW, ORG_ID
try:
flow = Flow(BOTNAME)
except flow.FlowError as e:
flow = Flow()
flow.create_device(BOTNAME, BOTPW)
print('Device for bot {} created'.format(BOTNAME))
def print_channels():
print('\033[1mYour bot "{}" has access to these channels:\033[0m\n'.format(BOTNAME))
for channel in flow.enumerate_channels(ORG_ID):
print('\033[91m\033[1m"{name}":\033[0m \033[94m{id}\033[0m'.format(**channel))
if __name__ == "__main__":
print_channels()
|
<commit_before><commit_msg>Add tool for printing channel IDs<commit_after>
|
from flow import Flow
from config import BOTNAME, BOTPW, ORG_ID
try:
flow = Flow(BOTNAME)
except flow.FlowError as e:
flow = Flow()
flow.create_device(BOTNAME, BOTPW)
print('Device for bot {} created'.format(BOTNAME))
def print_channels():
print('\033[1mYour bot "{}" has access to these channels:\033[0m\n'.format(BOTNAME))
for channel in flow.enumerate_channels(ORG_ID):
print('\033[91m\033[1m"{name}":\033[0m \033[94m{id}\033[0m'.format(**channel))
if __name__ == "__main__":
print_channels()
|
Add tool for printing channel IDsfrom flow import Flow
from config import BOTNAME, BOTPW, ORG_ID
try:
flow = Flow(BOTNAME)
except flow.FlowError as e:
flow = Flow()
flow.create_device(BOTNAME, BOTPW)
print('Device for bot {} created'.format(BOTNAME))
def print_channels():
print('\033[1mYour bot "{}" has access to these channels:\033[0m\n'.format(BOTNAME))
for channel in flow.enumerate_channels(ORG_ID):
print('\033[91m\033[1m"{name}":\033[0m \033[94m{id}\033[0m'.format(**channel))
if __name__ == "__main__":
print_channels()
|
<commit_before><commit_msg>Add tool for printing channel IDs<commit_after>from flow import Flow
from config import BOTNAME, BOTPW, ORG_ID
try:
flow = Flow(BOTNAME)
except flow.FlowError as e:
flow = Flow()
flow.create_device(BOTNAME, BOTPW)
print('Device for bot {} created'.format(BOTNAME))
def print_channels():
print('\033[1mYour bot "{}" has access to these channels:\033[0m\n'.format(BOTNAME))
for channel in flow.enumerate_channels(ORG_ID):
print('\033[91m\033[1m"{name}":\033[0m \033[94m{id}\033[0m'.format(**channel))
if __name__ == "__main__":
print_channels()
|
|
411fa5e1a0af79bcd442544571fbea7ba24b8266
|
spec/Report_S09_spec.py
|
spec/Report_S09_spec.py
|
from expects import expect, equal
from primestg.report import Report
from ast import literal_eval
with description('Report S09 example'):
with before.all:
self.data_filename_no_event_data = \
'spec/data/ZIV0000034180_0_S09_0_20161216104003'
# self.data_filename_with_event_data_D1 = \
# 'spec/data/ZIV0000034180_0_S09_0_20161216090401'
# self.data_filename_with_event_data_D1_and_D2 = \
# 'spec/data/ZIV0000034180_0_S09_0_20161216080308'
# self.data_filename_empty = \
# 'spec/data/ZIV0000034180_0_S09_0_20161216100401'
with open(self.data_filename_no_event_data) as data_file:
self.report = Report(data_file)
with it('generates expected results for a value of the first meter of '
'first concentrator'):
expected_first_value_first_meter = [
{
'name': 'ZIV0034631235',
'timestamp': '2016-12-15 05:25:06',
'cnc_name': 'ZIV0000034180',
'season': 'W',
'event_code': 1,
'event_group': 6,
}
]
concentrator = self.report.concentrators[0]
meter = concentrator.meters[0]
values = meter.values
first_value_first_meter = []
for x in values:
if x['name'] == 'ZIV0034631235' and x['timestamp'] == \
'2016-12-15 05:25:06':
first_value_first_meter.append(x)
expect(first_value_first_meter)\
.to(equal(expected_first_value_first_meter))
with it('generates the expected results for the whole report'):
result_filename = '{}_result.txt'.format(self.
data_filename_no_event_data)
with open(result_filename) as result_file:
result_string = result_file.read()
self.expected_result = literal_eval(result_string)
result = self.report.values
expect(result).to(equal(self.expected_result))
|
Add first tests for S09 report reading
|
Add first tests for S09 report reading
|
Python
|
agpl-3.0
|
gisce/primestg
|
Add first tests for S09 report reading
|
from expects import expect, equal
from primestg.report import Report
from ast import literal_eval
with description('Report S09 example'):
with before.all:
self.data_filename_no_event_data = \
'spec/data/ZIV0000034180_0_S09_0_20161216104003'
# self.data_filename_with_event_data_D1 = \
# 'spec/data/ZIV0000034180_0_S09_0_20161216090401'
# self.data_filename_with_event_data_D1_and_D2 = \
# 'spec/data/ZIV0000034180_0_S09_0_20161216080308'
# self.data_filename_empty = \
# 'spec/data/ZIV0000034180_0_S09_0_20161216100401'
with open(self.data_filename_no_event_data) as data_file:
self.report = Report(data_file)
with it('generates expected results for a value of the first meter of '
'first concentrator'):
expected_first_value_first_meter = [
{
'name': 'ZIV0034631235',
'timestamp': '2016-12-15 05:25:06',
'cnc_name': 'ZIV0000034180',
'season': 'W',
'event_code': 1,
'event_group': 6,
}
]
concentrator = self.report.concentrators[0]
meter = concentrator.meters[0]
values = meter.values
first_value_first_meter = []
for x in values:
if x['name'] == 'ZIV0034631235' and x['timestamp'] == \
'2016-12-15 05:25:06':
first_value_first_meter.append(x)
expect(first_value_first_meter)\
.to(equal(expected_first_value_first_meter))
with it('generates the expected results for the whole report'):
result_filename = '{}_result.txt'.format(self.
data_filename_no_event_data)
with open(result_filename) as result_file:
result_string = result_file.read()
self.expected_result = literal_eval(result_string)
result = self.report.values
expect(result).to(equal(self.expected_result))
|
<commit_before><commit_msg>Add first tests for S09 report reading<commit_after>
|
from expects import expect, equal
from primestg.report import Report
from ast import literal_eval
with description('Report S09 example'):
with before.all:
self.data_filename_no_event_data = \
'spec/data/ZIV0000034180_0_S09_0_20161216104003'
# self.data_filename_with_event_data_D1 = \
# 'spec/data/ZIV0000034180_0_S09_0_20161216090401'
# self.data_filename_with_event_data_D1_and_D2 = \
# 'spec/data/ZIV0000034180_0_S09_0_20161216080308'
# self.data_filename_empty = \
# 'spec/data/ZIV0000034180_0_S09_0_20161216100401'
with open(self.data_filename_no_event_data) as data_file:
self.report = Report(data_file)
with it('generates expected results for a value of the first meter of '
'first concentrator'):
expected_first_value_first_meter = [
{
'name': 'ZIV0034631235',
'timestamp': '2016-12-15 05:25:06',
'cnc_name': 'ZIV0000034180',
'season': 'W',
'event_code': 1,
'event_group': 6,
}
]
concentrator = self.report.concentrators[0]
meter = concentrator.meters[0]
values = meter.values
first_value_first_meter = []
for x in values:
if x['name'] == 'ZIV0034631235' and x['timestamp'] == \
'2016-12-15 05:25:06':
first_value_first_meter.append(x)
expect(first_value_first_meter)\
.to(equal(expected_first_value_first_meter))
with it('generates the expected results for the whole report'):
result_filename = '{}_result.txt'.format(self.
data_filename_no_event_data)
with open(result_filename) as result_file:
result_string = result_file.read()
self.expected_result = literal_eval(result_string)
result = self.report.values
expect(result).to(equal(self.expected_result))
|
Add first tests for S09 report readingfrom expects import expect, equal
from primestg.report import Report
from ast import literal_eval
with description('Report S09 example'):
with before.all:
self.data_filename_no_event_data = \
'spec/data/ZIV0000034180_0_S09_0_20161216104003'
# self.data_filename_with_event_data_D1 = \
# 'spec/data/ZIV0000034180_0_S09_0_20161216090401'
# self.data_filename_with_event_data_D1_and_D2 = \
# 'spec/data/ZIV0000034180_0_S09_0_20161216080308'
# self.data_filename_empty = \
# 'spec/data/ZIV0000034180_0_S09_0_20161216100401'
with open(self.data_filename_no_event_data) as data_file:
self.report = Report(data_file)
with it('generates expected results for a value of the first meter of '
'first concentrator'):
expected_first_value_first_meter = [
{
'name': 'ZIV0034631235',
'timestamp': '2016-12-15 05:25:06',
'cnc_name': 'ZIV0000034180',
'season': 'W',
'event_code': 1,
'event_group': 6,
}
]
concentrator = self.report.concentrators[0]
meter = concentrator.meters[0]
values = meter.values
first_value_first_meter = []
for x in values:
if x['name'] == 'ZIV0034631235' and x['timestamp'] == \
'2016-12-15 05:25:06':
first_value_first_meter.append(x)
expect(first_value_first_meter)\
.to(equal(expected_first_value_first_meter))
with it('generates the expected results for the whole report'):
result_filename = '{}_result.txt'.format(self.
data_filename_no_event_data)
with open(result_filename) as result_file:
result_string = result_file.read()
self.expected_result = literal_eval(result_string)
result = self.report.values
expect(result).to(equal(self.expected_result))
|
<commit_before><commit_msg>Add first tests for S09 report reading<commit_after>from expects import expect, equal
from primestg.report import Report
from ast import literal_eval
with description('Report S09 example'):
with before.all:
self.data_filename_no_event_data = \
'spec/data/ZIV0000034180_0_S09_0_20161216104003'
# self.data_filename_with_event_data_D1 = \
# 'spec/data/ZIV0000034180_0_S09_0_20161216090401'
# self.data_filename_with_event_data_D1_and_D2 = \
# 'spec/data/ZIV0000034180_0_S09_0_20161216080308'
# self.data_filename_empty = \
# 'spec/data/ZIV0000034180_0_S09_0_20161216100401'
with open(self.data_filename_no_event_data) as data_file:
self.report = Report(data_file)
with it('generates expected results for a value of the first meter of '
'first concentrator'):
expected_first_value_first_meter = [
{
'name': 'ZIV0034631235',
'timestamp': '2016-12-15 05:25:06',
'cnc_name': 'ZIV0000034180',
'season': 'W',
'event_code': 1,
'event_group': 6,
}
]
concentrator = self.report.concentrators[0]
meter = concentrator.meters[0]
values = meter.values
first_value_first_meter = []
for x in values:
if x['name'] == 'ZIV0034631235' and x['timestamp'] == \
'2016-12-15 05:25:06':
first_value_first_meter.append(x)
expect(first_value_first_meter)\
.to(equal(expected_first_value_first_meter))
with it('generates the expected results for the whole report'):
result_filename = '{}_result.txt'.format(self.
data_filename_no_event_data)
with open(result_filename) as result_file:
result_string = result_file.read()
self.expected_result = literal_eval(result_string)
result = self.report.values
expect(result).to(equal(self.expected_result))
|
|
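A hedged sketch of how the `*_result.txt` fixture consumed by `literal_eval` in the spec above could be produced; the helper name and the use of `repr` are assumptions for illustration, assuming primestg is installed and the sample S09 file exists locally.
# generate_s09_fixture.py -- hypothetical helper: dumps Report.values as a
# Python literal so the spec can read it back with ast.literal_eval.
from primestg.report import Report
data_filename = 'spec/data/ZIV0000034180_0_S09_0_20161216104003'
with open(data_filename) as data_file:
    report = Report(data_file)
with open('{}_result.txt'.format(data_filename), 'w') as result_file:
    result_file.write(repr(report.values))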
f27a0ef659058789a75671582248c1fb12c30b6c
|
src/mmw/apps/bigcz/clients/usgswqp/models.py
|
src/mmw/apps/bigcz/clients/usgswqp/models.py
|
# -*- coding: utf-8 -*-
from __future__ import (absolute_import,
division,
print_function,
unicode_literals)
from apps.bigcz.models import Resource
class USGSResource(Resource):
def __init__(self, id, description, author, links, title,
created_at, updated_at, geom, details_url, sample_mediums,
variables, service_org, service_code, service_url,
service_title, service_citation,
begin_date, end_date):
super(USGSResource, self).__init__(id, description, author, links,
title, created_at, updated_at,
geom)
self.details_url = details_url
self.sample_mediums = sample_mediums
self.variables = variables
self.service_org = service_org
self.service_code = service_code
self.service_url = service_url
self.service_title = service_title
self.service_citation = service_citation
self.begin_date = begin_date
self.end_date = end_date
|
Initialize USGSResource object model to be similar to cuahsi
|
Initialize USGSResource object model to be similar to cuahsi
|
Python
|
apache-2.0
|
WikiWatershed/model-my-watershed,WikiWatershed/model-my-watershed,WikiWatershed/model-my-watershed,WikiWatershed/model-my-watershed,WikiWatershed/model-my-watershed
|
Initialize USGSResource object model to be similar to cuahsi
|
# -*- coding: utf-8 -*-
from __future__ import (absolute_import,
division,
print_function,
unicode_literals)
from apps.bigcz.models import Resource
class USGSResource(Resource):
def __init__(self, id, description, author, links, title,
created_at, updated_at, geom, details_url, sample_mediums,
variables, service_org, service_code, service_url,
service_title, service_citation,
begin_date, end_date):
super(USGSResource, self).__init__(id, description, author, links,
title, created_at, updated_at,
geom)
self.details_url = details_url
self.sample_mediums = sample_mediums
self.variables = variables
self.service_org = service_org
self.service_code = service_code
self.service_url = service_url
self.service_title = service_title
self.service_citation = service_citation
self.begin_date = begin_date
self.end_date = end_date
|
<commit_before><commit_msg>Initialize USGSResource object model to be similar to cuahsi<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import (absolute_import,
division,
print_function,
unicode_literals)
from apps.bigcz.models import Resource
class USGSResource(Resource):
def __init__(self, id, description, author, links, title,
created_at, updated_at, geom, details_url, sample_mediums,
variables, service_org, service_code, service_url,
service_title, service_citation,
begin_date, end_date):
super(USGSResource, self).__init__(id, description, author, links,
title, created_at, updated_at,
geom)
self.details_url = details_url
self.sample_mediums = sample_mediums
self.variables = variables
self.service_org = service_org
self.service_code = service_code
self.service_url = service_url
self.service_title = service_title
self.service_citation = service_citation
self.begin_date = begin_date
self.end_date = end_date
|
Initialize USGSResource object model to be similar to cuahsi# -*- coding: utf-8 -*-
from __future__ import (absolute_import,
division,
print_function,
unicode_literals)
from apps.bigcz.models import Resource
class USGSResource(Resource):
def __init__(self, id, description, author, links, title,
created_at, updated_at, geom, details_url, sample_mediums,
variables, service_org, service_code, service_url,
service_title, service_citation,
begin_date, end_date):
super(USGSResource, self).__init__(id, description, author, links,
title, created_at, updated_at,
geom)
self.details_url = details_url
self.sample_mediums = sample_mediums
self.variables = variables
self.service_org = service_org
self.service_code = service_code
self.service_url = service_url
self.service_title = service_title
self.service_citation = service_citation
self.begin_date = begin_date
self.end_date = end_date
|
<commit_before><commit_msg>Initialize USGSResource object model to be similar to cuahsi<commit_after># -*- coding: utf-8 -*-
from __future__ import (absolute_import,
division,
print_function,
unicode_literals)
from apps.bigcz.models import Resource
class USGSResource(Resource):
def __init__(self, id, description, author, links, title,
created_at, updated_at, geom, details_url, sample_mediums,
variables, service_org, service_code, service_url,
service_title, service_citation,
begin_date, end_date):
super(USGSResource, self).__init__(id, description, author, links,
title, created_at, updated_at,
geom)
self.details_url = details_url
self.sample_mediums = sample_mediums
self.variables = variables
self.service_org = service_org
self.service_code = service_code
self.service_url = service_url
self.service_title = service_title
self.service_citation = service_citation
self.begin_date = begin_date
self.end_date = end_date
|
|
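A hypothetical instantiation of the USGSResource model above, showing the constructor's argument order; every value is made up for illustration and the import path is assumed from the file location, not confirmed by the record.
# Example only: constructing a USGSResource by hand. In the app the values
# would come from a Water Quality Portal search response.
from apps.bigcz.clients.usgswqp.models import USGSResource  # path is an assumption
resource = USGSResource(
    id='usgswqp__example-site',
    description='Example monitoring site',
    author=None,
    links=[],
    title='Example Site',
    created_at=None,
    updated_at=None,
    geom=None,
    details_url='https://www.waterqualitydata.us/#example',
    sample_mediums=['Water'],
    variables=['Nitrate'],
    service_org='USGS',
    service_code='EXAMPLE-0001',
    service_url='https://www.waterqualitydata.us/data/',
    service_title='Water Quality Portal',
    service_citation='Example citation',
    begin_date='2016-01-01',
    end_date='2016-12-31',
)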
36950cb7192f81c27d7a78376274f0183e8ec999
|
spec/openpassword/fudge_wrapper.py
|
spec/openpassword/fudge_wrapper.py
|
import fudge
import inspect
class MethodNotAvailableInMockedObjectException(Exception):
pass
def getMock(class_to_mock):
return FudgeWrapper(class_to_mock)
class FudgeWrapper(fudge.Fake):
def __init__(self, class_to_mock):
self._class_to_mock = class_to_mock
self._declared_calls = {}
self._attributes = {}
super(FudgeWrapper, self).__init__(self._class_to_mock.__name__)
def provides(self, call_name):
self._check_method_availability_on_mocked_object(call_name)
super(FudgeWrapper, self).provides(call_name)
def _check_method_availability_on_mocked_object(self, call_name):
if call_name not in dir(self._class_to_mock):
raise MethodNotAvailableInMockedObjectException
|
Add wrapper for fudge to allow mock checks
|
Add wrapper for fudge to allow mock checks
Add wrapper to add mock checks on fudge, to prevent
the mock from getting out of sync with the real object.
|
Python
|
mit
|
openpassword/blimey,openpassword/blimey
|
Add wrapper for fudge to allow mock checks
Add wrapper to add mock checks on fudge, to prevent
the mock from getting out of sync with the real object.
|
import fudge
import inspect
class MethodNotAvailableInMockedObjectException(Exception):
pass
def getMock(class_to_mock):
return FudgeWrapper(class_to_mock)
class FudgeWrapper(fudge.Fake):
def __init__(self, class_to_mock):
self._class_to_mock = class_to_mock
self._declared_calls = {}
self._attributes = {}
super(FudgeWrapper, self).__init__(self._class_to_mock.__name__)
def provides(self, call_name):
self._check_method_availability_on_mocked_object(call_name)
super(FudgeWrapper, self).provides(call_name)
def _check_method_availability_on_mocked_object(self, call_name):
if call_name not in dir(self._class_to_mock):
raise MethodNotAvailableInMockedObjectException
|
<commit_before><commit_msg>Add wrapper for fudge to allow mock checks
Add wrapper to add mock checks on fudge, to prevent
the mock from getting out of sync with the real object.<commit_after>
|
import fudge
import inspect
class MethodNotAvailableInMockedObjectException(Exception):
pass
def getMock(class_to_mock):
return FudgeWrapper(class_to_mock)
class FudgeWrapper(fudge.Fake):
def __init__(self, class_to_mock):
self._class_to_mock = class_to_mock
self._declared_calls = {}
self._attributes = {}
super(FudgeWrapper, self).__init__(self._class_to_mock.__name__)
def provides(self, call_name):
self._check_method_availability_on_mocked_object(call_name)
super(FudgeWrapper, self).provides(call_name)
def _check_method_availability_on_mocked_object(self, call_name):
if call_name not in dir(self._class_to_mock):
raise MethodNotAvailableInMockedObjectException
|
Add wrapper for fudge to allow mock checks
Add wrapper to add mock checks on fudge, to prevent
the mock from getting out of sync with the real object.import fudge
import inspect
class MethodNotAvailableInMockedObjectException(Exception):
pass
def getMock(class_to_mock):
return FudgeWrapper(class_to_mock)
class FudgeWrapper(fudge.Fake):
def __init__(self, class_to_mock):
self._class_to_mock = class_to_mock
self._declared_calls = {}
self._attributes = {}
super(FudgeWrapper, self).__init__(self._class_to_mock.__name__)
def provides(self, call_name):
self._check_method_availability_on_mocked_object(call_name)
super(FudgeWrapper, self).provides(call_name)
def _check_method_availability_on_mocked_object(self, call_name):
if call_name not in dir(self._class_to_mock):
raise MethodNotAvailableInMockedObjectException
|
<commit_before><commit_msg>Add wrapper for fudge to allow mock checks
Add wrapper to add mock checks on fudge, to prevent
the mock from getting out of sync with the real object.<commit_after>import fudge
import inspect
class MethodNotAvailableInMockedObjectException(Exception):
pass
def getMock(class_to_mock):
return FudgeWrapper(class_to_mock)
class FudgeWrapper(fudge.Fake):
def __init__(self, class_to_mock):
self._class_to_mock = class_to_mock
self._declared_calls = {}
self._attributes = {}
super(FudgeWrapper, self).__init__(self._class_to_mock.__name__)
def provides(self, call_name):
self._check_method_availability_on_mocked_object(call_name)
super(FudgeWrapper, self).provides(call_name)
def _check_method_availability_on_mocked_object(self, call_name):
if call_name not in dir(self._class_to_mock):
raise MethodNotAvailableInMockedObjectException
|
|
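A short usage sketch for the fudge wrapper above, showing how it keeps mocks in sync with the real class; Calculator is an invented stand-in and the import path is an assumption.
# Example only: the wrapper refuses to fake methods the real class lacks.
from fudge_wrapper import getMock, MethodNotAvailableInMockedObjectException  # import path assumed
class Calculator(object):
    def add(self, a, b):
        return a + b
mock = getMock(Calculator)
mock.provides('add')            # fine: Calculator really defines 'add'
try:
    mock.provides('multiply')   # not on Calculator, so the wrapper rejects it
except MethodNotAvailableInMockedObjectException:
    print('mock rejected a method the real class does not define')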
890f906ec023e25b85990bcfefca4353d06916ce
|
src/get_botid.py
|
src/get_botid.py
|
import os
from slackclient import SlackClient
"""
Returns the id of the movie slackbot
"""
SLACK_BOT_TOKEN = os.environ.get('SLACK_BOT_TOKEN')
BOT_NAME = 'moebot'
bot_found = False
try:
slack_client = SlackClient(SLACK_BOT_TOKEN)
except:
print "Connection error"
#calling the api to get a list of all users in your Slack team
api_call = slack_client.api_call("users.list")
if api_call.get('ok'):
users = api_call.get('members')
for user in users:
if 'name' in user and user.get('name') == BOT_NAME:
bot_found = True
break
else:
continue
if bot_found:
print "Bot ID for '" + user['name'] + "' is " + user.get('id')
#return str(user.get('id'))
else:
print "could not find bot user with the name " + BOT_NAME
#return ""
|
Add code for fetching bot id
|
Add code for fetching bot id
Python code for getting the id of your Slack bot
|
Python
|
mit
|
SaishRedkar/FilmyBot
|
Add code for fetching bot id
Python code for getting the id of your Slack bot
|
import os
from slackclient import SlackClient
"""
Returns the id of the movie slackbot
"""
SLACK_BOT_TOKEN = os.environ.get('SLACK_BOT_TOKEN')
BOT_NAME = 'moebot'
bot_found = False
try:
slack_client = SlackClient(SLACK_BOT_TOKEN)
except:
print "Connection error"
#calling the api to get a list of all users in your Slack team
api_call = slack_client.api_call("users.list")
if api_call.get('ok'):
users = api_call.get('members')
for user in users:
if 'name' in user and user.get('name') == BOT_NAME:
bot_found = True
break
else:
continue
if bot_found:
print "Bot ID for '" + user['name'] + "' is " + user.get('id')
#return str(user.get('id'))
else:
print "could not find bot user with the name " + BOT_NAME
#return ""
|
<commit_before><commit_msg>Add code for fetching bot id
Python code for getting the id of your Slack bot<commit_after>
|
import os
from slackclient import SlackClient
"""
Returns the id of the movie slackbot
"""
SLACK_BOT_TOKEN = os.environ.get('SLACK_BOT_TOKEN')
BOT_NAME = 'moebot'
bot_found = False
try:
slack_client = SlackClient(SLACK_BOT_TOKEN)
except:
print "Connection error"
#calling the api to get a list of all users in your Slack team
api_call = slack_client.api_call("users.list")
if api_call.get('ok'):
users = api_call.get('members')
for user in users:
if 'name' in user and user.get('name') == BOT_NAME:
bot_found = True
break
else:
continue
if bot_found:
print "Bot ID for '" + user['name'] + "' is " + user.get('id')
#return str(user.get('id'))
else:
print "could not find bot user with the name " + BOT_NAME
#return ""
|
Add code for fetching bot id
Python code for getting the id of your Slack botimport os
from slackclient import SlackClient
"""
Returns the id of the movie slackbot
"""
SLACK_BOT_TOKEN = os.environ.get('SLACK_BOT_TOKEN')
BOT_NAME = 'moebot'
bot_found = False
try:
slack_client = SlackClient(SLACK_BOT_TOKEN)
except:
print "Connection error"
#calling the api to get a list of all users in your Slack team
api_call = slack_client.api_call("users.list")
if api_call.get('ok'):
users = api_call.get('members')
for user in users:
if 'name' in user and user.get('name') == BOT_NAME:
bot_found = True
break
else:
continue
if bot_found:
print "Bot ID for '" + user['name'] + "' is " + user.get('id')
#return str(user.get('id'))
else:
print "could not find bot user with the name " + BOT_NAME
#return ""
|
<commit_before><commit_msg>Add code for fetching bot id
Python code for getting the id of your Slack bot<commit_after>import os
from slackclient import SlackClient
"""
Returns the id of the movie slackbot
"""
SLACK_BOT_TOKEN = os.environ.get('SLACK_BOT_TOKEN')
BOT_NAME = 'moebot'
bot_found = False
try:
slack_client = SlackClient(SLACK_BOT_TOKEN)
except:
print "Connection error"
#calling the api to get a list of all users in your Slack team
api_call = slack_client.api_call("users.list")
if api_call.get('ok'):
users = api_call.get('members')
for user in users:
if 'name' in user and user.get('name') == BOT_NAME:
bot_found = True
break
else:
continue
if bot_found:
print "Bot ID for '" + user['name'] + "' is " + user.get('id')
#return str(user.get('id'))
else:
print "could not find bot user with the name " + BOT_NAME
#return ""
|
|
db7a52cb9392f3cb9dc82323ff60cf9871dafbda
|
rename_files.py
|
rename_files.py
|
import os
import string
def list_files(path):
# returns a list of names (with extension, without full path) of all files
# in folder path
files = []
for name in os.listdir(path):
if os.path.isfile(os.path.join(path, name)):
files.append(name)
return files
files = list_files("./")
for file in files:
if "#" in file:
file = string.replace(file, "#", "\\#")
new_file = string.replace(file, "\\#", "_")
os.system("mv " + file + " " + new_file)
print new_file
|
Add script to rename files in a directory.
|
Add script to rename files in a directory.
|
Python
|
isc
|
wablair/misc_scripts,wablair/misc_scripts,wablair/misc_scripts,wablair/misc_scripts
|
Add script to rename files in a directory.
|
import os
import string
def list_files(path):
# returns a list of names (with extension, without full path) of all files
# in folder path
files = []
for name in os.listdir(path):
if os.path.isfile(os.path.join(path, name)):
files.append(name)
return files
files = list_files("./")
for file in files:
if "#" in file:
file = string.replace(file, "#", "\\#")
new_file = string.replace(file, "\\#", "_")
os.system("mv " + file + " " + new_file)
print new_file
|
<commit_before><commit_msg>Add script to rename files in a directory.<commit_after>
|
import os
import string
def list_files(path):
# returns a list of names (with extension, without full path) of all files
# in folder path
files = []
for name in os.listdir(path):
if os.path.isfile(os.path.join(path, name)):
files.append(name)
return files
files = list_files("./")
for file in files:
if "#" in file:
file = string.replace(file, "#", "\\#")
new_file = string.replace(file, "\\#", "_")
os.system("mv " + file + " " + new_file)
print new_file
|
Add script to rename files in a directory.import os
import string
def list_files(path):
# returns a list of names (with extension, without full path) of all files
# in folder path
files = []
for name in os.listdir(path):
if os.path.isfile(os.path.join(path, name)):
files.append(name)
return files
files = list_files("./")
for file in files:
if "#" in file:
file = string.replace(file, "#", "\\#")
new_file = string.replace(file, "\\#", "_")
os.system("mv " + file + " " + new_file)
print new_file
|
<commit_before><commit_msg>Add script to rename files in a directory.<commit_after>import os
import string
def list_files(path):
# returns a list of names (with extension, without full path) of all files
# in folder path
files = []
for name in os.listdir(path):
if os.path.isfile(os.path.join(path, name)):
files.append(name)
return files
files = list_files("./")
for file in files:
if "#" in file:
file = string.replace(file, "#", "\\#")
new_file = string.replace(file, "\\#", "_")
os.system("mv " + file + " " + new_file)
print new_file
|
|
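The script above shells out with os.system, which is fragile for names containing spaces or other shell metacharacters; a hedged alternative sketch using os.rename directly (same behaviour, no shell) might look like this. The function name is invented for illustration.
# Example only: rename files containing '#' without invoking a shell.
import os
def sanitize_names(path="./"):
    for name in os.listdir(path):
        full = os.path.join(path, name)
        if os.path.isfile(full) and "#" in name:
            new_name = name.replace("#", "_")
            os.rename(full, os.path.join(path, new_name))
            print(new_name)
if __name__ == "__main__":
    sanitize_names()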
8e8352d53e9c446d908e90424bc2fb0bf7d7f587
|
app_builder/app_builder_image/concat_roles.py
|
app_builder/app_builder_image/concat_roles.py
|
import glob
import os
import shutil
import subprocess
import yaml
def create_role(role):
ret = subprocess.check_output(
'ansible-galaxy init {}'.format(role).split())
if not ret.strip().endswith('created successfully'):
raise Exception('could not create role "{}"'.format(role))
def get_metadata(role):
main = open(os.path.join(role, 'meta/main.yml'))
return yaml.load(main)
def set_metadata(role, metadata):
new_main = os.path.join(role, 'meta/main.yml.new')
orig_main = os.path.join(role, 'meta/main.yml')
with open(new_main, 'w') as out:
yaml.dump(metadata, out, default_flow_style=False, explicit_start=True)
os.rename(new_main, orig_main)
def add_dependency(src_role, target_role):
"""Add metadata saying that 'target_role' depends on 'src_role'"""
md = get_metadata(target_role)
deps = md.setdefault('dependencies', [])
deps.append(os.path.join(target_role, 'roles', src_role))
set_metadata(target_role, md)
def sub_roles(role):
try:
return glob.glob(os.path.join(role, 'roles/*'))
except OSError:
return []
def fix_dependency(role, for_destination):
metadata = get_metadata(role)
deps = metadata.setdefault('dependencies', [])
def it():
for dep in deps:
print('dep: {}'.format(dep))
print('role: {}'.format(role))
print(' dep.endswith(role)?: {}'.format(dep.endswith(role)))
yield os.path.join(for_destination, 'roles', dep)
metadata['dependencies'] = list(it())
set_metadata(role, metadata)
def fix_dependencies(src_role, for_destination):
for role in sub_roles(src_role):
fix_dependencies(role, for_destination)
fix_dependency(src_role, for_destination)
def move(src_role, target_role, copy=False):
op = shutil.copytree if copy else shutil.move
try:
os.makedirs(os.path.join(target_role, 'roles'))
except OSError:
pass
fix_dependencies(src_role, for_destination=target_role)
op(src_role, os.path.join(target_role, 'roles', src_role))
add_dependency(src_role, target_role)
def concat(role1, role2, into, copy=False):
create_role(into)
move(role1, target_role=into, copy=copy)
move(role2, target_role=into, copy=copy)
|
Add script to compose roles
|
Add script to compose roles
|
Python
|
mit
|
waltermoreira/dockeransible,waltermoreira/dockeransible
|
Add script to compose roles
|
import glob
import os
import shutil
import subprocess
import yaml
def create_role(role):
ret = subprocess.check_output(
'ansible-galaxy init {}'.format(role).split())
if not ret.strip().endswith('created successfully'):
raise Exception('could not create role "{}"'.format(role))
def get_metadata(role):
main = open(os.path.join(role, 'meta/main.yml'))
return yaml.load(main)
def set_metadata(role, metadata):
new_main = os.path.join(role, 'meta/main.yml.new')
orig_main = os.path.join(role, 'meta/main.yml')
with open(new_main, 'w') as out:
yaml.dump(metadata, out, default_flow_style=False, explicit_start=True)
os.rename(new_main, orig_main)
def add_dependency(src_role, target_role):
"""Add metadata saying that 'target_role' depends on 'src_role'"""
md = get_metadata(target_role)
deps = md.setdefault('dependencies', [])
deps.append(os.path.join(target_role, 'roles', src_role))
set_metadata(target_role, md)
def sub_roles(role):
try:
return glob.glob(os.path.join(role, 'roles/*'))
except OSError:
return []
def fix_dependency(role, for_destination):
metadata = get_metadata(role)
deps = metadata.setdefault('dependencies', [])
def it():
for dep in deps:
print('dep: {}'.format(dep))
print('role: {}'.format(role))
print(' dep.endswith(role)?: {}'.format(dep.endswith(role)))
yield os.path.join(for_destination, 'roles', dep)
metadata['dependencies'] = list(it())
set_metadata(role, metadata)
def fix_dependencies(src_role, for_destination):
for role in sub_roles(src_role):
fix_dependencies(role, for_destination)
fix_dependency(src_role, for_destination)
def move(src_role, target_role, copy=False):
op = shutil.copytree if copy else shutil.move
try:
os.makedirs(os.path.join(target_role, 'roles'))
except OSError:
pass
fix_dependencies(src_role, for_destination=target_role)
op(src_role, os.path.join(target_role, 'roles', src_role))
add_dependency(src_role, target_role)
def concat(role1, role2, into, copy=False):
create_role(into)
move(role1, target_role=into, copy=copy)
move(role2, target_role=into, copy=copy)
|
<commit_before><commit_msg>Add script to compose roles<commit_after>
|
import glob
import os
import shutil
import subprocess
import yaml
def create_role(role):
ret = subprocess.check_output(
'ansible-galaxy init {}'.format(role).split())
if not ret.strip().endswith('created successfully'):
raise Exception('could not create role "{}"'.format(role))
def get_metadata(role):
main = open(os.path.join(role, 'meta/main.yml'))
return yaml.load(main)
def set_metadata(role, metadata):
new_main = os.path.join(role, 'meta/main.yml.new')
orig_main = os.path.join(role, 'meta/main.yml')
with open(new_main, 'w') as out:
yaml.dump(metadata, out, default_flow_style=False, explicit_start=True)
os.rename(new_main, orig_main)
def add_dependency(src_role, target_role):
"""Add metadata saying that 'target_role' depends on 'src_role'"""
md = get_metadata(target_role)
deps = md.setdefault('dependencies', [])
deps.append(os.path.join(target_role, 'roles', src_role))
set_metadata(target_role, md)
def sub_roles(role):
try:
return glob.glob(os.path.join(role, 'roles/*'))
except OSError:
return []
def fix_dependency(role, for_destination):
metadata = get_metadata(role)
deps = metadata.setdefault('dependencies', [])
def it():
for dep in deps:
print('dep: {}'.format(dep))
print('role: {}'.format(role))
print(' dep.endswith(role)?: {}'.format(dep.endswith(role)))
yield os.path.join(for_destination, 'roles', dep)
metadata['dependencies'] = list(it())
set_metadata(role, metadata)
def fix_dependencies(src_role, for_destination):
for role in sub_roles(src_role):
fix_dependencies(role, for_destination)
fix_dependency(src_role, for_destination)
def move(src_role, target_role, copy=False):
op = shutil.copytree if copy else shutil.move
try:
os.makedirs(os.path.join(target_role, 'roles'))
except OSError:
pass
fix_dependencies(src_role, for_destination=target_role)
op(src_role, os.path.join(target_role, 'roles', src_role))
add_dependency(src_role, target_role)
def concat(role1, role2, into, copy=False):
create_role(into)
move(role1, target_role=into, copy=copy)
move(role2, target_role=into, copy=copy)
|
Add script to compose rolesimport glob
import os
import shutil
import subprocess
import yaml
def create_role(role):
ret = subprocess.check_output(
'ansible-galaxy init {}'.format(role).split())
if not ret.strip().endswith('created successfully'):
raise Exception('could not create role "{}"'.format(role))
def get_metadata(role):
main = open(os.path.join(role, 'meta/main.yml'))
return yaml.load(main)
def set_metadata(role, metadata):
new_main = os.path.join(role, 'meta/main.yml.new')
orig_main = os.path.join(role, 'meta/main.yml')
with open(new_main, 'w') as out:
yaml.dump(metadata, out, default_flow_style=False, explicit_start=True)
os.rename(new_main, orig_main)
def add_dependency(src_role, target_role):
"""Add metadata saying that 'target_role' depends on 'src_role'"""
md = get_metadata(target_role)
deps = md.setdefault('dependencies', [])
deps.append(os.path.join(target_role, 'roles', src_role))
set_metadata(target_role, md)
def sub_roles(role):
try:
return glob.glob(os.path.join(role, 'roles/*'))
except OSError:
return []
def fix_dependency(role, for_destination):
metadata = get_metadata(role)
deps = metadata.setdefault('dependencies', [])
def it():
for dep in deps:
print('dep: {}'.format(dep))
print('role: {}'.format(role))
print(' dep.endswith(role)?: {}'.format(dep.endswith(role)))
yield os.path.join(for_destination, 'roles', dep)
metadata['dependencies'] = list(it())
set_metadata(role, metadata)
def fix_dependencies(src_role, for_destination):
for role in sub_roles(src_role):
fix_dependencies(role, for_destination)
fix_dependency(src_role, for_destination)
def move(src_role, target_role, copy=False):
op = shutil.copytree if copy else shutil.move
try:
os.makedirs(os.path.join(target_role, 'roles'))
except OSError:
pass
fix_dependencies(src_role, for_destination=target_role)
op(src_role, os.path.join(target_role, 'roles', src_role))
add_dependency(src_role, target_role)
def concat(role1, role2, into, copy=False):
create_role(into)
move(role1, target_role=into, copy=copy)
move(role2, target_role=into, copy=copy)
|
<commit_before><commit_msg>Add script to compose roles<commit_after>import glob
import os
import shutil
import subprocess
import yaml
def create_role(role):
ret = subprocess.check_output(
'ansible-galaxy init {}'.format(role).split())
if not ret.strip().endswith('created successfully'):
raise Exception('could not create role "{}"'.format(role))
def get_metadata(role):
main = open(os.path.join(role, 'meta/main.yml'))
return yaml.load(main)
def set_metadata(role, metadata):
new_main = os.path.join(role, 'meta/main.yml.new')
orig_main = os.path.join(role, 'meta/main.yml')
with open(new_main, 'w') as out:
yaml.dump(metadata, out, default_flow_style=False, explicit_start=True)
os.rename(new_main, orig_main)
def add_dependency(src_role, target_role):
"""Add metadata saying that 'target_role' depends on 'src_role'"""
md = get_metadata(target_role)
deps = md.setdefault('dependencies', [])
deps.append(os.path.join(target_role, 'roles', src_role))
set_metadata(target_role, md)
def sub_roles(role):
try:
return glob.glob(os.path.join(role, 'roles/*'))
except OSError:
return []
def fix_dependency(role, for_destination):
metadata = get_metadata(role)
deps = metadata.setdefault('dependencies', [])
def it():
for dep in deps:
print('dep: {}'.format(dep))
print('role: {}'.format(role))
print(' dep.endswith(role)?: {}'.format(dep.endswith(role)))
yield os.path.join(for_destination, 'roles', dep)
metadata['dependencies'] = list(it())
set_metadata(role, metadata)
def fix_dependencies(src_role, for_destination):
for role in sub_roles(src_role):
fix_dependencies(role, for_destination)
fix_dependency(src_role, for_destination)
def move(src_role, target_role, copy=False):
op = shutil.copytree if copy else shutil.move
try:
os.makedirs(os.path.join(target_role, 'roles'))
except OSError:
pass
fix_dependencies(src_role, for_destination=target_role)
op(src_role, os.path.join(target_role, 'roles', src_role))
add_dependency(src_role, target_role)
def concat(role1, role2, into, copy=False):
create_role(into)
move(role1, target_role=into, copy=copy)
move(role2, target_role=into, copy=copy)
|
|
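A usage sketch for the role-composition script above; the role names are placeholders, the import path is assumed from the file location, and ansible-galaxy must be on PATH for create_role to succeed.
# Example only: merge two existing roles into a new wrapper role.
# This produces web/roles/nginx and web/roles/certbot and lists both as
# dependencies in web/meta/main.yml; copy=True keeps the source roles intact.
from concat_roles import concat  # import path is an assumption
concat('nginx', 'certbot', into='web', copy=True)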
5e270a34e2c7787459f307e957161aadd8d24476
|
run_checks.py
|
run_checks.py
|
import os
from pre_push import run_checks
filepath = os.path.dirname(os.path.abspath(__file__))
project_root = os.path.join(filepath, "..")
if __name__ == "__main__":
run_checks(project_root)
|
Add separate script to run checks from jenkins
|
Add separate script to run checks from jenkins
|
Python
|
mit
|
kriskavalieri/nodejs-docker-boilerplate,kriskavalieri/nodejs-docker-boilerplate
|
Add separate script to run checks from jenkins
|
import os
from pre_push import run_checks
filepath = os.path.dirname(os.path.abspath(__file__))
project_root = os.path.join(filepath, "..")
if __name__ == "__main__":
run_checks(project_root)
|
<commit_before><commit_msg>Add separate script to run checks from jenkins<commit_after>
|
import os
from pre_push import run_checks
filepath = os.path.dirname(os.path.abspath(__file__))
project_root = os.path.join(filepath, "..")
if __name__ == "__main__":
run_checks(project_root)
|
Add separate script to run checks from jenkinsimport os
from pre_push import run_checks
filepath = os.path.dirname(os.path.abspath(__file__))
project_root = os.path.join(filepath, "..")
if __name__ == "__main__":
run_checks(project_root)
|
<commit_before><commit_msg>Add separate script to run checks from jenkins<commit_after>import os
from pre_push import run_checks
filepath = os.path.dirname(os.path.abspath(__file__))
project_root = os.path.join(filepath, "..")
if __name__ == "__main__":
run_checks(project_root)
|
|
779d8478fd05fe50ea7dccd55e98ae82ac609d32
|
s3Uploader.py
|
s3Uploader.py
|
# -*- coding: utf-8 -*-
import botocore
import boto3
from datetime import datetime
class s3Uploader():
def __init__(self, bucketName, objectName, filePath):
self.__bucketName = bucketName
self.__objectName = objectName
self.__filePath = filePath
self.__s3 = boto3.client('s3')
def upload(self):
if self.isExistObjectFor():
print("{name} already exist.".format(name=self.__objectName))
# Need refactoring...
print("Delete {objectName}.".format(objectName=self.__objectName))
self.__s3.delete_object(Bucket=self.__bucketName, Key=self.__objectName)
print("Re-Upload {objectName} to {bucketName}.".format(bucketName=self.__bucketName, objectName=self.__objectName))
self.uploadObject(self.__filePath)
else:
print("Upload {objectName} to {bucketName}.".format(bucketName=self.__bucketName, objectName=self.__objectName))
self.uploadObject(self.__filePath)
def uploadObject(self, path):
with open(path, 'rb') as fh:
self.__s3.put_object(Body=fh, Bucket=self.__bucketName, Key=self.__objectName)
def isExistObjectFor(self):
try:
self.__s3.head_object(Bucket=self.__bucketName, Key=self.__objectName)
return True
except botocore.exceptions.ClientError as e:
print(e)
return False
def isExistBucketFor(self):
try:
response = self.__s3.head_bucket(Bucket=self.__bucketName)
# response = s3.head_bucket(Bucket='test-lambda-on-java')
print(response)
return True
except botocore.exceptions.ClientError as e:
print("The {bucketName} does not found".format(bucketName=self.__bucketName))
print(e)
return False
|
Add s3 uploader class from awsSample repository.
|
Add s3 uploader class from awsSample repository.
|
Python
|
mit
|
hondasports/Bun-chan-Bot,hondasports/Bun-chan-Bot
|
Add s3 uploader class from awsSample repository.
|
# -*- coding: utf-8 -*-
import botocore
import boto3
from datetime import datetime
class s3Uploader():
def __init__(self, bucketName, objectName, filePath):
self.__bucketName = bucketName
self.__objectName = objectName
self.__filePath = filePath
self.__s3 = boto3.client('s3')
def upload(self):
if self.isExistObjectFor():
print("{name} already exist.".format(name=self.__objectName))
# Need refactoring...
print("Delete {objectName}.".format(objectName=self.__objectName))
self.__s3.delete_object(Bucket=self.__bucketName, Key=self.__objectName)
print("Re-Upload {objectName} to {bucketName}.".format(bucketName=self.__bucketName, objectName=self.__objectName))
self.uploadObject(self.__filePath)
else:
print("Upload {objectName} to {bucketName}.".format(bucketName=self.__bucketName, objectName=self.__objectName))
self.uploadObject(self.__filePath)
def uploadObject(self, path):
with open(path, 'rb') as fh:
self.__s3.put_object(Body=fh, Bucket=self.__bucketName, Key=self.__objectName)
def isExistObjectFor(self):
try:
self.__s3.head_object(Bucket=self.__bucketName, Key=self.__objectName)
return True
except botocore.exceptions.ClientError as e:
print(e)
return False
def isExistBucketFor(self):
try:
response = self.__s3.head_bucket(Bucket=self.__bucketName)
# response = s3.head_bucket(Bucket='test-lambda-on-java')
print(response)
return True
except botocore.exceptions.ClientError as e:
print("The {bucketName} does not found".format(bucketName=self.__bucketName))
print(e)
return False
|
<commit_before><commit_msg>Add s3 uploader class from awsSample repository.<commit_after>
|
# -*- coding: utf-8 -*-
import botocore
import boto3
from datetime import datetime
class s3Uploader():
def __init__(self, bucketName, objectName, filePath):
self.__bucketName = bucketName
self.__objectName = objectName
self.__filePath = filePath
self.__s3 = boto3.client('s3')
def upload(self):
if self.isExistObjectFor():
print("{name} already exist.".format(name=self.__objectName))
# Need refactoring...
print("Delete {objectName}.".format(objectName=self.__objectName))
self.__s3.delete_object(Bucket=self.__bucketName, Key=self.__objectName)
print("Re-Upload {objectName} to {bucketName}.".format(bucketName=self.__bucketName, objectName=self.__objectName))
self.uploadObject(self.__filePath)
else:
print("Upload {objectName} to {bucketName}.".format(bucketName=self.__bucketName, objectName=self.__objectName))
self.uploadObject(self.__filePath)
def uploadObject(self, path):
with open(path, 'rb') as fh:
self.__s3.put_object(Body=fh, Bucket=self.__bucketName, Key=self.__objectName)
def isExistObjectFor(self):
try:
self.__s3.head_object(Bucket=self.__bucketName, Key=self.__objectName)
return True
except botocore.exceptions.ClientError as e:
print(e)
return False
def isExistBucketFor(self):
try:
response = self.__s3.head_bucket(Bucket=self.__bucketName)
# response = s3.head_bucket(Bucket='test-lambda-on-java')
print(response)
return True
except botocore.exceptions.ClientError as e:
print("The {bucketName} does not found".format(bucketName=self.__bucketName))
print(e)
return False
|
Add s3 uploader class from awsSample repository.# -*- coding: utf-8 -*-
import botocore
import boto3
from datetime import datetime
class s3Uploader():
def __init__(self, bucketName, objectName, filePath):
self.__bucketName = bucketName
self.__objectName = objectName
self.__filePath = filePath
self.__s3 = boto3.client('s3')
def upload(self):
if self.isExistObjectFor():
print("{name} already exist.".format(name=self.__objectName))
# Need refactoring...
print("Delete {objectName}.".format(objectName=self.__objectName))
self.__s3.delete_object(Bucket=self.__bucketName, Key=self.__objectName)
print("Re-Upload {objectName} to {bucketName}.".format(bucketName=self.__bucketName, objectName=self.__objectName))
self.uploadObject(self.__filePath)
else:
print("Upload {objectName} to {bucketName}.".format(bucketName=self.__bucketName, objectName=self.__objectName))
self.uploadObject(self.__filePath)
def uploadObject(self, path):
with open(path, 'rb') as fh:
self.__s3.put_object(Body=fh, Bucket=self.__bucketName, Key=self.__objectName)
def isExistObjectFor(self):
try:
self.__s3.head_object(Bucket=self.__bucketName, Key=self.__objectName)
return True
except botocore.exceptions.ClientError as e:
print(e)
return False
def isExistBucketFor(self):
try:
response = self.__s3.head_bucket(Bucket=self.__bucketName)
# response = s3.head_bucket(Bucket='test-lambda-on-java')
print(response)
return True
except botocore.exceptions.ClientError as e:
print("The {bucketName} does not found".format(bucketName=self.__bucketName))
print(e)
return False
|
<commit_before><commit_msg>Add s3 uploader class from awsSample repository.<commit_after># -*- coding: utf-8 -*-
import botocore
import boto3
from datetime import datetime
class s3Uploader():
def __init__(self, bucketName, objectName, filePath):
self.__bucketName = bucketName
self.__objectName = objectName
self.__filePath = filePath
self.__s3 = boto3.client('s3')
def upload(self):
if self.isExistObjectFor():
print("{name} already exist.".format(name=self.__objectName))
# Need refactoring...
print("Delete {objectName}.".format(objectName=self.__objectName))
self.__s3.delete_object(Bucket=self.__bucketName, Key=self.__objectName)
print("Re-Upload {objectName} to {bucketName}.".format(bucketName=self.__bucketName, objectName=self.__objectName))
self.uploadObject(self.__filePath)
else:
print("Upload {objectName} to {bucketName}.".format(bucketName=self.__bucketName, objectName=self.__objectName))
self.uploadObject(self.__filePath)
def uploadObject(self, path):
with open(path, 'rb') as fh:
self.__s3.put_object(Body=fh, Bucket=self.__bucketName, Key=self.__objectName)
def isExistObjectFor(self):
try:
self.__s3.head_object(Bucket=self.__bucketName, Key=self.__objectName)
return True
except botocore.exceptions.ClientError as e:
print(e)
return False
def isExistBucketFor(self):
try:
response = self.__s3.head_bucket(Bucket=self.__bucketName)
# response = s3.head_bucket(Bucket='test-lambda-on-java')
print(response)
return True
except botocore.exceptions.ClientError as e:
print("The {bucketName} does not found".format(bucketName=self.__bucketName))
print(e)
return False
|
|
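A brief usage sketch for the uploader class above; the bucket, key and path are placeholders, the import path is assumed from the file name, and AWS credentials are assumed to be configured for boto3 (e.g. via environment variables).
# Example only: upload (or replace) one object in S3.
from s3Uploader import s3Uploader  # import path is an assumption
uploader = s3Uploader(bucketName='example-bucket',
                      objectName='reports/latest.zip',
                      filePath='/tmp/latest.zip')
uploader.upload()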
da3abf4adc5ac7095e47d07dc1c448c6e7fc20a1
|
tests/test_publish2cloud.py
|
tests/test_publish2cloud.py
|
import time
from mock import mock_open, patch
from publish2cloud import chunk_metadata
def test_chunk_metadata():
"""Test getting metadata from the chunk header of a list file."""
chunknum = int(time.time())
# Hash of test-track-digest256.dummytracker.org/
domain_hash = (b"q\xd8Q\xbe\x8b#\xad\xd9\xde\xdf\xa7B\x12\xf0D\xa2"
"\xf2\x1d\xcfx\xeaHi\x7f8%\xb5\x99\x83\xc1\x111")
data = b"a:%d:32:32\n" % chunknum + domain_hash
with patch("test_publish2cloud.open", mock_open(read_data=data)):
with open("base-fingerprinting-track-digest256", "rb") as fp:
metadata = chunk_metadata(fp)
assert metadata["type"] == "a"
assert metadata["num"] == str(chunknum)
assert metadata["hash_size"] == "32"
assert metadata["len"] == "32"
assert metadata["checksum"] == ("043493ecb63c5f143a372a5118d04a44df"
"188f238d2b18e6cd848ae413a01090")
|
Add unit test for chunk_metadata
|
Add unit test for chunk_metadata
|
Python
|
mpl-2.0
|
mozilla-services/shavar-list-creation
|
Add unit test for chunk_metadata
|
import time
from mock import mock_open, patch
from publish2cloud import chunk_metadata
def test_chunk_metadata():
"""Test getting metadata from the chunk header of a list file."""
chunknum = int(time.time())
# Hash of test-track-digest256.dummytracker.org/
domain_hash = (b"q\xd8Q\xbe\x8b#\xad\xd9\xde\xdf\xa7B\x12\xf0D\xa2"
"\xf2\x1d\xcfx\xeaHi\x7f8%\xb5\x99\x83\xc1\x111")
data = b"a:%d:32:32\n" % chunknum + domain_hash
with patch("test_publish2cloud.open", mock_open(read_data=data)):
with open("base-fingerprinting-track-digest256", "rb") as fp:
metadata = chunk_metadata(fp)
assert metadata["type"] == "a"
assert metadata["num"] == str(chunknum)
assert metadata["hash_size"] == "32"
assert metadata["len"] == "32"
assert metadata["checksum"] == ("043493ecb63c5f143a372a5118d04a44df"
"188f238d2b18e6cd848ae413a01090")
|
<commit_before><commit_msg>Add unit test for chunk_metadata<commit_after>
|
import time
from mock import mock_open, patch
from publish2cloud import chunk_metadata
def test_chunk_metadata():
"""Test getting metadata from the chunk header of a list file."""
chunknum = int(time.time())
# Hash of test-track-digest256.dummytracker.org/
domain_hash = (b"q\xd8Q\xbe\x8b#\xad\xd9\xde\xdf\xa7B\x12\xf0D\xa2"
"\xf2\x1d\xcfx\xeaHi\x7f8%\xb5\x99\x83\xc1\x111")
data = b"a:%d:32:32\n" % chunknum + domain_hash
with patch("test_publish2cloud.open", mock_open(read_data=data)):
with open("base-fingerprinting-track-digest256", "rb") as fp:
metadata = chunk_metadata(fp)
assert metadata["type"] == "a"
assert metadata["num"] == str(chunknum)
assert metadata["hash_size"] == "32"
assert metadata["len"] == "32"
assert metadata["checksum"] == ("043493ecb63c5f143a372a5118d04a44df"
"188f238d2b18e6cd848ae413a01090")
|
Add unit test for chunk_metadataimport time
from mock import mock_open, patch
from publish2cloud import chunk_metadata
def test_chunk_metadata():
"""Test getting metadata from the chunk header of a list file."""
chunknum = int(time.time())
# Hash of test-track-digest256.dummytracker.org/
domain_hash = (b"q\xd8Q\xbe\x8b#\xad\xd9\xde\xdf\xa7B\x12\xf0D\xa2"
"\xf2\x1d\xcfx\xeaHi\x7f8%\xb5\x99\x83\xc1\x111")
data = b"a:%d:32:32\n" % chunknum + domain_hash
with patch("test_publish2cloud.open", mock_open(read_data=data)):
with open("base-fingerprinting-track-digest256", "rb") as fp:
metadata = chunk_metadata(fp)
assert metadata["type"] == "a"
assert metadata["num"] == str(chunknum)
assert metadata["hash_size"] == "32"
assert metadata["len"] == "32"
assert metadata["checksum"] == ("043493ecb63c5f143a372a5118d04a44df"
"188f238d2b18e6cd848ae413a01090")
|
<commit_before><commit_msg>Add unit test for chunk_metadata<commit_after>import time
from mock import mock_open, patch
from publish2cloud import chunk_metadata
def test_chunk_metadata():
"""Test getting metadata from the chunk header of a list file."""
chunknum = int(time.time())
# Hash of test-track-digest256.dummytracker.org/
domain_hash = (b"q\xd8Q\xbe\x8b#\xad\xd9\xde\xdf\xa7B\x12\xf0D\xa2"
"\xf2\x1d\xcfx\xeaHi\x7f8%\xb5\x99\x83\xc1\x111")
data = b"a:%d:32:32\n" % chunknum + domain_hash
with patch("test_publish2cloud.open", mock_open(read_data=data)):
with open("base-fingerprinting-track-digest256", "rb") as fp:
metadata = chunk_metadata(fp)
assert metadata["type"] == "a"
assert metadata["num"] == str(chunknum)
assert metadata["hash_size"] == "32"
assert metadata["len"] == "32"
assert metadata["checksum"] == ("043493ecb63c5f143a372a5118d04a44df"
"188f238d2b18e6cd848ae413a01090")
|
|
c97d3657fe3d3091458bbffcc070970cf949a459
|
tests/test_routes_static.py
|
tests/test_routes_static.py
|
from .conftest import load
class TestStatic:
def test_get_index(self, client):
response = client.get("/")
assert response.status_code == 200
assert 'href="/api"' in load(response, as_json=False)
|
Add a test for getting the homepage
|
Add a test for getting the homepage
|
Python
|
mit
|
joshfriend/memegen,DanLindeman/memegen,joshfriend/memegen,DanLindeman/memegen,joshfriend/memegen,joshfriend/memegen,DanLindeman/memegen,DanLindeman/memegen
|
Add a test for getting the homepage
|
from .conftest import load
class TestStatic:
def test_get_index(self, client):
response = client.get("/")
assert response.status_code == 200
assert 'href="/api"' in load(response, as_json=False)
|
<commit_before><commit_msg>Add a test for getting the homepage<commit_after>
|
from .conftest import load
class TestStatic:
def test_get_index(self, client):
response = client.get("/")
assert response.status_code == 200
assert 'href="/api"' in load(response, as_json=False)
|
Add a test for getting the homepagefrom .conftest import load
class TestStatic:
def test_get_index(self, client):
response = client.get("/")
assert response.status_code == 200
assert 'href="/api"' in load(response, as_json=False)
|
<commit_before><commit_msg>Add a test for getting the homepage<commit_after>from .conftest import load
class TestStatic:
def test_get_index(self, client):
response = client.get("/")
assert response.status_code == 200
assert 'href="/api"' in load(response, as_json=False)
|
|
e97d8977b2c55c35030c3653dbddc7b4830b7f3f
|
tests/test_integration.py
|
tests/test_integration.py
|
import os
import tempfile
import vimrunner
from tasklib.task import TaskWarrior, Task
server = vimrunner.Server()
class TestIntegration(object):
def add_plugin(self, name):
plugin_base = os.path.expanduser('~/.vim/bundle/')
plugin_path = os.path.join(plugin_base, name)
self.client.add_plugin(plugin_path)
def write_buffer(self, lines, position=0):
result = self.client.write_buffer(position + 1, lines)
assert result == u"0"
def read_buffer(self, start=0, end=1000):
return self.client.read_buffer(
unicode(start+1),
unicode(end+1)
).splitlines()
def generate_data(self):
self.dir = tempfile.mkdtemp(dir='/tmp/')
tw = TaskWarrior(data_location=self.dir)
self.tasks = [
Task(tw, description="project random task 1", project="Random"),
Task(tw, description="project random task 2", project="Random"),
Task(tw, description="tag home task 1", tags=["home"]),
Task(tw, description="tag work task 1", tags=["work"]),
Task(tw, description="today task 1", due="now"),
]
for task in self.tasks:
task.save()
def setup(self):
self.generate_data()
self.client = server.start_gvim()
self.add_plugin('taskwiki')
self.add_plugin('vimwiki')
self.command('let g:taskwiki_data_location="{0}"'.format(self.dir))
self.client.edit(os.path.join(self.dir, 'testwiki.txt'))
self.command('set filetype=vimwiki')
def teardown(self):
self.client.quit()
def command(self, command):
return self.client.command(command)
class TestBurndown(TestIntegration):
def test_focus_burndown_daily(self):
self.command("TaskWikiBurndownDaily")
assert self.command(":py print vim.current.buffer").startswith("<buffer burndown.daily")
assert "Daily Burndown" in self.read_buffer()[0]
class TestViewports(TestIntegration):
def test_viewport_filling(self):
lines = ["=== Work tasks | +work ==="]
self.write_buffer(lines)
self.command("w")
assert self.read_buffer() == [
"=== Work tasks | +work ===",
"* [ ] tag work task 1 #{0}".format(self.tasks[3]['uuid'])
]
|
Add basic integration tests using vimrunner
|
test: Add basic integration tests using vimrunner
|
Python
|
mit
|
phha/taskwiki,Spirotot/taskwiki
|
test: Add basic integration tests using vimrunner
|
import os
import tempfile
import vimrunner
from tasklib.task import TaskWarrior, Task
server = vimrunner.Server()
class TestIntegration(object):
def add_plugin(self, name):
plugin_base = os.path.expanduser('~/.vim/bundle/')
plugin_path = os.path.join(plugin_base, name)
self.client.add_plugin(plugin_path)
def write_buffer(self, lines, position=0):
result = self.client.write_buffer(position + 1, lines)
assert result == u"0"
def read_buffer(self, start=0, end=1000):
return self.client.read_buffer(
unicode(start+1),
unicode(end+1)
).splitlines()
def generate_data(self):
self.dir = tempfile.mkdtemp(dir='/tmp/')
tw = TaskWarrior(data_location=self.dir)
self.tasks = [
Task(tw, description="project random task 1", project="Random"),
Task(tw, description="project random task 2", project="Random"),
Task(tw, description="tag home task 1", tags=["home"]),
Task(tw, description="tag work task 1", tags=["work"]),
Task(tw, description="today task 1", due="now"),
]
for task in self.tasks:
task.save()
def setup(self):
self.generate_data()
self.client = server.start_gvim()
self.add_plugin('taskwiki')
self.add_plugin('vimwiki')
self.command('let g:taskwiki_data_location="{0}"'.format(self.dir))
self.client.edit(os.path.join(self.dir, 'testwiki.txt'))
self.command('set filetype=vimwiki')
def teardown(self):
self.client.quit()
def command(self, command):
return self.client.command(command)
class TestBurndown(TestIntegration):
def test_focus_burndown_daily(self):
self.command("TaskWikiBurndownDaily")
assert self.command(":py print vim.current.buffer").startswith("<buffer burndown.daily")
assert "Daily Burndown" in self.read_buffer()[0]
class TestViewports(TestIntegration):
def test_viewport_filling(self):
lines = ["=== Work tasks | +work ==="]
self.write_buffer(lines)
self.command("w")
assert self.read_buffer() == [
"=== Work tasks | +work ===",
"* [ ] tag work task 1 #{0}".format(self.tasks[3]['uuid'])
]
|
<commit_before><commit_msg>test: Add basic integration tests using vimrunner<commit_after>
|
import os
import tempfile
import vimrunner
from tasklib.task import TaskWarrior, Task
server = vimrunner.Server()
class TestIntegration(object):
def add_plugin(self, name):
plugin_base = os.path.expanduser('~/.vim/bundle/')
plugin_path = os.path.join(plugin_base, name)
self.client.add_plugin(plugin_path)
def write_buffer(self, lines, position=0):
result = self.client.write_buffer(position + 1, lines)
assert result == u"0"
def read_buffer(self, start=0, end=1000):
return self.client.read_buffer(
unicode(start+1),
unicode(end+1)
).splitlines()
def generate_data(self):
self.dir = tempfile.mkdtemp(dir='/tmp/')
tw = TaskWarrior(data_location=self.dir)
self.tasks = [
Task(tw, description="project random task 1", project="Random"),
Task(tw, description="project random task 2", project="Random"),
Task(tw, description="tag home task 1", tags=["home"]),
Task(tw, description="tag work task 1", tags=["work"]),
Task(tw, description="today task 1", due="now"),
]
for task in self.tasks:
task.save()
def setup(self):
self.generate_data()
self.client = server.start_gvim()
self.add_plugin('taskwiki')
self.add_plugin('vimwiki')
self.command('let g:taskwiki_data_location="{0}"'.format(self.dir))
self.client.edit(os.path.join(self.dir, 'testwiki.txt'))
self.command('set filetype=vimwiki')
def teardown(self):
self.client.quit()
def command(self, command):
return self.client.command(command)
class TestBurndown(TestIntegration):
def test_focus_burndown_daily(self):
self.command("TaskWikiBurndownDaily")
assert self.command(":py print vim.current.buffer").startswith("<buffer burndown.daily")
assert "Daily Burndown" in self.read_buffer()[0]
class TestViewports(TestIntegration):
def test_viewport_filling(self):
lines = ["=== Work tasks | +work ==="]
self.write_buffer(lines)
self.command("w")
assert self.read_buffer() == [
"=== Work tasks | +work ===",
"* [ ] tag work task 1 #{0}".format(self.tasks[3]['uuid'])
]
|
test: Add basic integration tests using vimrunnerimport os
import tempfile
import vimrunner
from tasklib.task import TaskWarrior, Task
server = vimrunner.Server()
class TestIntegration(object):
def add_plugin(self, name):
plugin_base = os.path.expanduser('~/.vim/bundle/')
plugin_path = os.path.join(plugin_base, name)
self.client.add_plugin(plugin_path)
def write_buffer(self, lines, position=0):
result = self.client.write_buffer(position + 1, lines)
assert result == u"0"
def read_buffer(self, start=0, end=1000):
return self.client.read_buffer(
unicode(start+1),
unicode(end+1)
).splitlines()
def generate_data(self):
self.dir = tempfile.mkdtemp(dir='/tmp/')
tw = TaskWarrior(data_location=self.dir)
self.tasks = [
Task(tw, description="project random task 1", project="Random"),
Task(tw, description="project random task 2", project="Random"),
Task(tw, description="tag home task 1", tags=["home"]),
Task(tw, description="tag work task 1", tags=["work"]),
Task(tw, description="today task 1", due="now"),
]
for task in self.tasks:
task.save()
def setup(self):
self.generate_data()
self.client = server.start_gvim()
self.add_plugin('taskwiki')
self.add_plugin('vimwiki')
self.command('let g:taskwiki_data_location="{0}"'.format(self.dir))
self.client.edit(os.path.join(self.dir, 'testwiki.txt'))
self.command('set filetype=vimwiki')
def teardown(self):
self.client.quit()
def command(self, command):
return self.client.command(command)
class TestBurndown(TestIntegration):
def test_focus_burndown_daily(self):
self.command("TaskWikiBurndownDaily")
assert self.command(":py print vim.current.buffer").startswith("<buffer burndown.daily")
assert "Daily Burndown" in self.read_buffer()[0]
class TestViewports(TestIntegration):
def test_viewport_filling(self):
lines = ["=== Work tasks | +work ==="]
self.write_buffer(lines)
self.command("w")
assert self.read_buffer() == [
"=== Work tasks | +work ===",
"* [ ] tag work task 1 #{0}".format(self.tasks[3]['uuid'])
]
|
<commit_before><commit_msg>test: Add basic integration tests using vimrunner<commit_after>import os
import tempfile
import vimrunner
from tasklib.task import TaskWarrior, Task
server = vimrunner.Server()
class TestIntegration(object):
def add_plugin(self, name):
plugin_base = os.path.expanduser('~/.vim/bundle/')
plugin_path = os.path.join(plugin_base, name)
self.client.add_plugin(plugin_path)
def write_buffer(self, lines, position=0):
result = self.client.write_buffer(position + 1, lines)
assert result == u"0"
def read_buffer(self, start=0, end=1000):
return self.client.read_buffer(
unicode(start+1),
unicode(end+1)
).splitlines()
def generate_data(self):
self.dir = tempfile.mkdtemp(dir='/tmp/')
tw = TaskWarrior(data_location=self.dir)
self.tasks = [
Task(tw, description="project random task 1", project="Random"),
Task(tw, description="project random task 2", project="Random"),
Task(tw, description="tag home task 1", tags=["home"]),
Task(tw, description="tag work task 1", tags=["work"]),
Task(tw, description="today task 1", due="now"),
]
for task in self.tasks:
task.save()
def setup(self):
self.generate_data()
self.client = server.start_gvim()
self.add_plugin('taskwiki')
self.add_plugin('vimwiki')
self.command('let g:taskwiki_data_location="{0}"'.format(self.dir))
self.client.edit(os.path.join(self.dir, 'testwiki.txt'))
self.command('set filetype=vimwiki')
def teardown(self):
self.client.quit()
def command(self, command):
return self.client.command(command)
class TestBurndown(TestIntegration):
def test_focus_burndown_daily(self):
self.command("TaskWikiBurndownDaily")
assert self.command(":py print vim.current.buffer").startswith("<buffer burndown.daily")
assert "Daily Burndown" in self.read_buffer()[0]
class TestViewports(TestIntegration):
def test_viewport_filling(self):
lines = ["=== Work tasks | +work ==="]
self.write_buffer(lines)
self.command("w")
assert self.read_buffer() == [
"=== Work tasks | +work ===",
"* [ ] tag work task 1 #{0}".format(self.tasks[3]['uuid'])
]
|
|
4e161fd1ef64b9682d2d73ce413fdb68fbff9f85
|
tests/test_javascript_solutions.py
|
tests/test_javascript_solutions.py
|
import glob
import json
import os
import pytest
from helpers import solutions_dir
# NOTE: If we make solution_files a fixture instead of a normal attr/function,
# then we can't use it in pytest's parametrize
solution_files = glob.glob(os.path.join(solutions_dir("javascript"), "*.js"))
@pytest.mark.javascript
def test_javascript_solutions_exist():
assert solution_files
def id_func(param):
problem_name, ext = os.path.splitext(os.path.basename(param))
return problem_name
@pytest.mark.javascript
@pytest.mark.parametrize("solution_file", solution_files, ids=id_func)
def test_submit_file(solution_file, submit_solution):
result = submit_solution(solution_file)
assert result.get("success") is True, "Failed. Engine output:\n{:}".format(
json.dumps(result, indent=4)
)
|
Add separate test file for javascript
|
Add separate test file for javascript
|
Python
|
mit
|
project-lovelace/lovelace-engine,project-lovelace/lovelace-engine,project-lovelace/lovelace-engine
|
Add separate test file for javascript
|
import glob
import json
import os
import pytest
from helpers import solutions_dir
# NOTE: If we make solution_files a fixture instead of a normal attr/function,
# then we can't use it in pytest's parametrize
solution_files = glob.glob(os.path.join(solutions_dir("javascript"), "*.js"))
@pytest.mark.javascript
def test_javascript_solutions_exist():
assert solution_files
def id_func(param):
problem_name, ext = os.path.splitext(os.path.basename(param))
return problem_name
@pytest.mark.javascript
@pytest.mark.parametrize("solution_file", solution_files, ids=id_func)
def test_submit_file(solution_file, submit_solution):
result = submit_solution(solution_file)
assert result.get("success") is True, "Failed. Engine output:\n{:}".format(
json.dumps(result, indent=4)
)
|
<commit_before><commit_msg>Add separate test file for javascript<commit_after>
|
import glob
import json
import os
import pytest
from helpers import solutions_dir
# NOTE: If we make solution_files a fixture instead of a normal attr/function,
# then we can't use it in pytest's parametrize
solution_files = glob.glob(os.path.join(solutions_dir("javascript"), "*.js"))
@pytest.mark.javascript
def test_javascript_solutions_exist():
assert solution_files
def id_func(param):
problem_name, ext = os.path.splitext(os.path.basename(param))
return problem_name
@pytest.mark.javascript
@pytest.mark.parametrize("solution_file", solution_files, ids=id_func)
def test_submit_file(solution_file, submit_solution):
result = submit_solution(solution_file)
assert result.get("success") is True, "Failed. Engine output:\n{:}".format(
json.dumps(result, indent=4)
)
|
Add separate test file for javascriptimport glob
import json
import os
import pytest
from helpers import solutions_dir
# NOTE: If we make solution_files a fixture instead of a normal attr/function,
# then we can't use it in pytest's parametrize
solution_files = glob.glob(os.path.join(solutions_dir("javascript"), "*.js"))
@pytest.mark.javascript
def test_javascript_solutions_exist():
assert solution_files
def id_func(param):
problem_name, ext = os.path.splitext(os.path.basename(param))
return problem_name
@pytest.mark.javascript
@pytest.mark.parametrize("solution_file", solution_files, ids=id_func)
def test_submit_file(solution_file, submit_solution):
result = submit_solution(solution_file)
assert result.get("success") is True, "Failed. Engine output:\n{:}".format(
json.dumps(result, indent=4)
)
|
<commit_before><commit_msg>Add separate test file for javascript<commit_after>import glob
import json
import os
import pytest
from helpers import solutions_dir
# NOTE: If we make solution_files a fixture instead of a normal attr/function,
# then we can't use it in pytest's parametrize
solution_files = glob.glob(os.path.join(solutions_dir("javascript"), "*.js"))
@pytest.mark.javascript
def test_javascript_solutions_exist():
assert solution_files
def id_func(param):
problem_name, ext = os.path.splitext(os.path.basename(param))
return problem_name
@pytest.mark.javascript
@pytest.mark.parametrize("solution_file", solution_files, ids=id_func)
def test_submit_file(solution_file, submit_solution):
result = submit_solution(solution_file)
assert result.get("success") is True, "Failed. Engine output:\n{:}".format(
json.dumps(result, indent=4)
)
|
|
01625ec37806aa5fb9cdcf5e1ac92abf54aff4c9
|
plugins/CuraEngineBackend/ProcessGCodeJob.py
|
plugins/CuraEngineBackend/ProcessGCodeJob.py
|
from UM.Job import Job
from UM.Application import Application
import os
class ProcessGCodeJob(Job):
def __init__(self, message):
super().__init__()
self._message = message
def run(self):
with open(self._message.filename) as f:
data = f.read(None)
Application.getInstance().getController().getScene().gcode = data
os.remove(self._message.filename)
|
Add a job to handle processing of GCode from the backend
|
Add a job to handle processing of GCode from the backend
|
Python
|
agpl-3.0
|
onitake/Uranium,onitake/Uranium
|
Add a job to handle processing of GCode from the backend
|
from UM.Job import Job
from UM.Application import Application
import os
class ProcessGCodeJob(Job):
def __init__(self, message):
super().__init__()
self._message = message
def run(self):
with open(self._message.filename) as f:
data = f.read(None)
Application.getInstance().getController().getScene().gcode = data
os.remove(self._message.filename)
|
<commit_before><commit_msg>Add a job to handle processing of GCode from the backend<commit_after>
|
from UM.Job import Job
from UM.Application import Application
import os
class ProcessGCodeJob(Job):
def __init__(self, message):
super().__init__()
self._message = message
def run(self):
with open(self._message.filename) as f:
data = f.read(None)
Application.getInstance().getController().getScene().gcode = data
os.remove(self._message.filename)
|
Add a job to handle processing of GCode from the backendfrom UM.Job import Job
from UM.Application import Application
import os
class ProcessGCodeJob(Job):
def __init__(self, message):
super().__init__()
self._message = message
def run(self):
with open(self._message.filename) as f:
data = f.read(None)
Application.getInstance().getController().getScene().gcode = data
os.remove(self._message.filename)
|
<commit_before><commit_msg>Add a job to handle processing of GCode from the backend<commit_after>from UM.Job import Job
from UM.Application import Application
import os
class ProcessGCodeJob(Job):
def __init__(self, message):
super().__init__()
self._message = message
def run(self):
with open(self._message.filename) as f:
data = f.read(None)
Application.getInstance().getController().getScene().gcode = data
os.remove(self._message.filename)
|
|
bd315146db332bc92cf7d613ff8b8923a60d320f
|
src/manga2.py
|
src/manga2.py
|
import argparse
import itertools
from redux.helper.util import Util
from redux.site.mangafox import MangaFox
from redux.site.mangahere import MangaHere
from redux.site.mangapanda import MangaPanda
from redux.site.mangareader import MangaReader
def main():
parser = argparse.ArgumentParser(description='Download manga.')
subparsers = parser.add_subparsers()
add_list_subparser(subparsers)
add_download_subparser(subparsers)
args = parser.parse_args()
args.func(args)
def add_list_subparser(subparsers):
parser = subparsers.add_parser('list', help='list all chapters')
parser.add_argument('series', help='series name')
parser.set_defaults(func=itemize)
def add_download_subparser(subparsers):
parser = subparsers.add_parser('download', help='download some chapters')
parser.add_argument('series', help='series name')
parser.add_argument('chapters', help='a quoted string of comma delimited numbers or ranges')
parser.set_defaults(func=download)
def chapters(series):
chapters = Util.flatten([
site.series(series).chapters for site in [MangaFox, MangaHere, MangaPanda, MangaReader]
])
return itertools.groupby(
Util.natural_sort(chapters, key=lambda chapter: chapter.chapter),
lambda chapter: chapter.chapter
)
def itemize(args):
for chapter_number, chapter_objects in chapters(args.series):
titles = [chapter.title for chapter in chapter_objects if chapter.title.strip() != '']
print "(%s) %s" % (chapter_number, titles[0] if len(titles) > 0 else '')
def download(args):
return
if __name__ == '__main__':
main()
|
Add script using new library that can list chapters and titles of a series.
|
Add script using new library that can list chapters and titles of a series.
|
Python
|
mit
|
jiaweihli/manga_downloader,joaquinpf/manga_downloader,CharlieCorner/manga_downloader,alexforsale/manga_downloader
|
Add script using new library that can list chapters and titles of a series.
|
import argparse
import itertools
from redux.helper.util import Util
from redux.site.mangafox import MangaFox
from redux.site.mangahere import MangaHere
from redux.site.mangapanda import MangaPanda
from redux.site.mangareader import MangaReader
def main():
parser = argparse.ArgumentParser(description='Download manga.')
subparsers = parser.add_subparsers()
add_list_subparser(subparsers)
add_download_subparser(subparsers)
args = parser.parse_args()
args.func(args)
def add_list_subparser(subparsers):
parser = subparsers.add_parser('list', help='list all chapters')
parser.add_argument('series', help='series name')
parser.set_defaults(func=itemize)
def add_download_subparser(subparsers):
parser = subparsers.add_parser('download', help='download some chapters')
parser.add_argument('series', help='series name')
parser.add_argument('chapters', help='a quoted string of comma delimited numbers or ranges')
parser.set_defaults(func=download)
def chapters(series):
chapters = Util.flatten([
site.series(series).chapters for site in [MangaFox, MangaHere, MangaPanda, MangaReader]
])
return itertools.groupby(
Util.natural_sort(chapters, key=lambda chapter: chapter.chapter),
lambda chapter: chapter.chapter
)
def itemize(args):
for chapter_number, chapter_objects in chapters(args.series):
titles = [chapter.title for chapter in chapter_objects if chapter.title.strip() != '']
print "(%s) %s" % (chapter_number, titles[0] if len(titles) > 0 else '')
def download(args):
return
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script using new library that can list chapters and titles of a series.<commit_after>
|
import argparse
import itertools
from redux.helper.util import Util
from redux.site.mangafox import MangaFox
from redux.site.mangahere import MangaHere
from redux.site.mangapanda import MangaPanda
from redux.site.mangareader import MangaReader
def main():
parser = argparse.ArgumentParser(description='Download manga.')
subparsers = parser.add_subparsers()
add_list_subparser(subparsers)
add_download_subparser(subparsers)
args = parser.parse_args()
args.func(args)
def add_list_subparser(subparsers):
parser = subparsers.add_parser('list', help='list all chapters')
parser.add_argument('series', help='series name')
parser.set_defaults(func=itemize)
def add_download_subparser(subparsers):
parser = subparsers.add_parser('download', help='download some chapters')
parser.add_argument('series', help='series name')
parser.add_argument('chapters', help='a quoted string of comma delimited numbers or ranges')
parser.set_defaults(func=download)
def chapters(series):
chapters = Util.flatten([
site.series(series).chapters for site in [MangaFox, MangaHere, MangaPanda, MangaReader]
])
return itertools.groupby(
Util.natural_sort(chapters, key=lambda chapter: chapter.chapter),
lambda chapter: chapter.chapter
)
def itemize(args):
for chapter_number, chapter_objects in chapters(args.series):
titles = [chapter.title for chapter in chapter_objects if chapter.title.strip() != '']
print "(%s) %s" % (chapter_number, titles[0] if len(titles) > 0 else '')
def download(args):
return
if __name__ == '__main__':
main()
|
Add script using new library that can list chapters and titles of a series.import argparse
import itertools
from redux.helper.util import Util
from redux.site.mangafox import MangaFox
from redux.site.mangahere import MangaHere
from redux.site.mangapanda import MangaPanda
from redux.site.mangareader import MangaReader
def main():
parser = argparse.ArgumentParser(description='Download manga.')
subparsers = parser.add_subparsers()
add_list_subparser(subparsers)
add_download_subparser(subparsers)
args = parser.parse_args()
args.func(args)
def add_list_subparser(subparsers):
parser = subparsers.add_parser('list', help='list all chapters')
parser.add_argument('series', help='series name')
parser.set_defaults(func=itemize)
def add_download_subparser(subparsers):
parser = subparsers.add_parser('download', help='download some chapters')
parser.add_argument('series', help='series name')
parser.add_argument('chapters', help='a quoted string of comma delimited numbers or ranges')
parser.set_defaults(func=download)
def chapters(series):
chapters = Util.flatten([
site.series(series).chapters for site in [MangaFox, MangaHere, MangaPanda, MangaReader]
])
return itertools.groupby(
Util.natural_sort(chapters, key=lambda chapter: chapter.chapter),
lambda chapter: chapter.chapter
)
def itemize(args):
for chapter_number, chapter_objects in chapters(args.series):
titles = [chapter.title for chapter in chapter_objects if chapter.title.strip() != '']
print "(%s) %s" % (chapter_number, titles[0] if len(titles) > 0 else '')
def download(args):
return
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script using new library that can list chapters and titles of a series.<commit_after>import argparse
import itertools
from redux.helper.util import Util
from redux.site.mangafox import MangaFox
from redux.site.mangahere import MangaHere
from redux.site.mangapanda import MangaPanda
from redux.site.mangareader import MangaReader
def main():
parser = argparse.ArgumentParser(description='Download manga.')
subparsers = parser.add_subparsers()
add_list_subparser(subparsers)
add_download_subparser(subparsers)
args = parser.parse_args()
args.func(args)
def add_list_subparser(subparsers):
parser = subparsers.add_parser('list', help='list all chapters')
parser.add_argument('series', help='series name')
parser.set_defaults(func=itemize)
def add_download_subparser(subparsers):
parser = subparsers.add_parser('download', help='download some chapters')
parser.add_argument('series', help='series name')
parser.add_argument('chapters', help='a quoted string of comma delimited numbers or ranges')
parser.set_defaults(func=download)
def chapters(series):
chapters = Util.flatten([
site.series(series).chapters for site in [MangaFox, MangaHere, MangaPanda, MangaReader]
])
return itertools.groupby(
Util.natural_sort(chapters, key=lambda chapter: chapter.chapter),
lambda chapter: chapter.chapter
)
def itemize(args):
for chapter_number, chapter_objects in chapters(args.series):
titles = [chapter.title for chapter in chapter_objects if chapter.title.strip() != '']
print "(%s) %s" % (chapter_number, titles[0] if len(titles) > 0 else '')
def download(args):
return
if __name__ == '__main__':
main()
|
|
4e054b93bda5f7615c07142823fa111908ba157e
|
driver_station/src/ui/widgets/camera_widget.py
|
driver_station/src/ui/widgets/camera_widget.py
|
import cv_widget
class CameraWidget(cv_widget.CvWidget):
def __init__(self, fixed_size):
cv_widget.CvWidget.__init__(self, fixed_size)
def set_error(self):
pass
def set_target_data(self, data):
img, data = data
self.set_from_np(img)
|
Add in camera widget, not complete yet
|
Add in camera widget, not complete yet
|
Python
|
bsd-3-clause
|
frc1418/2014
|
Add in camera widget, not complete yet
|
import cv_widget
class CameraWidget(cv_widget.CvWidget):
def __init__(self, fixed_size):
cv_widget.CvWidget.__init__(self, fixed_size)
def set_error(self):
pass
def set_target_data(self, data):
img, data = data
self.set_from_np(img)
|
<commit_before><commit_msg>Add in camera widget, not complete yet<commit_after>
|
import cv_widget
class CameraWidget(cv_widget.CvWidget):
def __init__(self, fixed_size):
cv_widget.CvWidget.__init__(self, fixed_size)
def set_error(self):
pass
def set_target_data(self, data):
img, data = data
self.set_from_np(img)
|
Add in camera widget, not complete yet
import cv_widget
class CameraWidget(cv_widget.CvWidget):
def __init__(self, fixed_size):
cv_widget.CvWidget.__init__(self, fixed_size)
def set_error(self):
pass
def set_target_data(self, data):
img, data = data
self.set_from_np(img)
|
<commit_before><commit_msg>Add in camera widget, not complete yet<commit_after>
import cv_widget
class CameraWidget(cv_widget.CvWidget):
def __init__(self, fixed_size):
cv_widget.CvWidget.__init__(self, fixed_size)
def set_error(self):
pass
def set_target_data(self, data):
img, data = data
self.set_from_np(img)
|
|
3ff8b3de2b9f0d1de1414bad12d06d931e7919cd
|
utils/create_tree_from_metadata.py
|
utils/create_tree_from_metadata.py
|
#!/usr/bin/env python
"""
A utility to create a Girder hierarchy from metadata.json.
Useful in testing the monkeybrains plugin to set up a hierarchy,
then set_metadata.py can be run to add the metadata.
"""
import argparse
import json
import requests.packages.urllib3
from girder_client import GirderClient
requests.packages.urllib3.disable_warnings()
parser = argparse.ArgumentParser(
prog='create_tree_from_metadata',
description='Create a hierarchy in girder from the metadata.json.')
parser.add_argument('--username', required=False, default=None)
parser.add_argument('--password', required=False, default=None)
parser.add_argument('--parent-folder-id', required=True, default=None)
parser.add_argument('--scheme', required=False, default=None)
parser.add_argument('--host', required=False, default=None)
parser.add_argument('--port', required=False, default=None)
def main():
"""Create the folder hierarchy with metadata in a Girder instance."""
args = parser.parse_args()
g = GirderClient(host=args.host, port=args.port, scheme=args.scheme)
g.authenticate(args.username, args.password)
def create_folder_on_demand(parent_folder_id, folder_name):
existing_folders = list(
g.listFolder(parent_folder_id, name=folder_name))
if not len(existing_folders):
sought_folder = g.createFolder(parent_folder_id, name=folder_name)
else:
sought_folder = existing_folders[0]
return sought_folder
metadata_file = 'metadata.json'
with open(metadata_file) as json_file:
metadata = json.load(json_file)
parent_folder_id = args.parent_folder_id
for subject_id, subject_metadata in metadata.items():
subject_folder = create_folder_on_demand(parent_folder_id, subject_id)
for (scan_time, scan_date, scan_weight) in subject_metadata['scans']:
create_folder_on_demand(subject_folder['_id'], scan_time)
if __name__ == '__main__':
main()
|
Add utility to create test tree
|
Add utility to create test tree
|
Python
|
apache-2.0
|
girder/monkeybrains,girder/monkeybrains,girder/monkeybrains
|
Add utility to create test tree
|
#!/usr/bin/env python
"""
A utility to create a Girder hierarchy from metadata.json.
Useful in testing the monkeybrains plugin to set up a hierarchy,
then set_metadata.py can be run to add the metadata.
"""
import argparse
import json
import requests.packages.urllib3
from girder_client import GirderClient
requests.packages.urllib3.disable_warnings()
parser = argparse.ArgumentParser(
prog='create_tree_from_metadata',
description='Create a hierarchy in girder from the metadata.json.')
parser.add_argument('--username', required=False, default=None)
parser.add_argument('--password', required=False, default=None)
parser.add_argument('--parent-folder-id', required=True, default=None)
parser.add_argument('--scheme', required=False, default=None)
parser.add_argument('--host', required=False, default=None)
parser.add_argument('--port', required=False, default=None)
def main():
"""Create the folder hierarchy with metadata in a Girder instance."""
args = parser.parse_args()
g = GirderClient(host=args.host, port=args.port, scheme=args.scheme)
g.authenticate(args.username, args.password)
def create_folder_on_demand(parent_folder_id, folder_name):
existing_folders = list(
g.listFolder(parent_folder_id, name=folder_name))
if not len(existing_folders):
sought_folder = g.createFolder(parent_folder_id, name=folder_name)
else:
sought_folder = existing_folders[0]
return sought_folder
metadata_file = 'metadata.json'
with open(metadata_file) as json_file:
metadata = json.load(json_file)
parent_folder_id = args.parent_folder_id
for subject_id, subject_metadata in metadata.items():
subject_folder = create_folder_on_demand(parent_folder_id, subject_id)
for (scan_time, scan_date, scan_weight) in subject_metadata['scans']:
create_folder_on_demand(subject_folder['_id'], scan_time)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add utility to create test tree<commit_after>
|
#!/usr/bin/env python
"""
A utility to create a Girder hierarchy from metadata.json.
Useful in testing the monkeybrains plugin to set up a hierarchy,
then set_metadata.py can be run to add the metadata.
"""
import argparse
import json
import requests.packages.urllib3
from girder_client import GirderClient
requests.packages.urllib3.disable_warnings()
parser = argparse.ArgumentParser(
prog='create_tree_from_metadata',
description='Create a hierarchy in girder from the metadata.json.')
parser.add_argument('--username', required=False, default=None)
parser.add_argument('--password', required=False, default=None)
parser.add_argument('--parent-folder-id', required=True, default=None)
parser.add_argument('--scheme', required=False, default=None)
parser.add_argument('--host', required=False, default=None)
parser.add_argument('--port', required=False, default=None)
def main():
"""Create the folder hierarchy with metadata in a Girder instance."""
args = parser.parse_args()
g = GirderClient(host=args.host, port=args.port, scheme=args.scheme)
g.authenticate(args.username, args.password)
def create_folder_on_demand(parent_folder_id, folder_name):
existing_folders = list(
g.listFolder(parent_folder_id, name=folder_name))
if not len(existing_folders):
sought_folder = g.createFolder(parent_folder_id, name=folder_name)
else:
sought_folder = existing_folders[0]
return sought_folder
metadata_file = 'metadata.json'
with open(metadata_file) as json_file:
metadata = json.load(json_file)
parent_folder_id = args.parent_folder_id
for subject_id, subject_metadata in metadata.items():
subject_folder = create_folder_on_demand(parent_folder_id, subject_id)
for (scan_time, scan_date, scan_weight) in subject_metadata['scans']:
create_folder_on_demand(subject_folder['_id'], scan_time)
if __name__ == '__main__':
main()
|
Add utility to create test tree#!/usr/bin/env python
"""
A utility to create a Girder hierarchy from metadata.json.
Useful in testing the monkeybrains plugin to set up a hierarchy,
then set_metadata.py can be run to add the metadata.
"""
import argparse
import json
import requests.packages.urllib3
from girder_client import GirderClient
requests.packages.urllib3.disable_warnings()
parser = argparse.ArgumentParser(
prog='create_tree_from_metadata',
description='Create a hierarchy in girder from the metadata.json.')
parser.add_argument('--username', required=False, default=None)
parser.add_argument('--password', required=False, default=None)
parser.add_argument('--parent-folder-id', required=True, default=None)
parser.add_argument('--scheme', required=False, default=None)
parser.add_argument('--host', required=False, default=None)
parser.add_argument('--port', required=False, default=None)
def main():
"""Create the folder hierarchy with metadata in a Girder instance."""
args = parser.parse_args()
g = GirderClient(host=args.host, port=args.port, scheme=args.scheme)
g.authenticate(args.username, args.password)
def create_folder_on_demand(parent_folder_id, folder_name):
existing_folders = list(
g.listFolder(parent_folder_id, name=folder_name))
if not len(existing_folders):
sought_folder = g.createFolder(parent_folder_id, name=folder_name)
else:
sought_folder = existing_folders[0]
return sought_folder
metadata_file = 'metadata.json'
with open(metadata_file) as json_file:
metadata = json.load(json_file)
parent_folder_id = args.parent_folder_id
for subject_id, subject_metadata in metadata.items():
subject_folder = create_folder_on_demand(parent_folder_id, subject_id)
for (scan_time, scan_date, scan_weight) in subject_metadata['scans']:
create_folder_on_demand(subject_folder['_id'], scan_time)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add utility to create test tree<commit_after>#!/usr/bin/env python
"""
A utility to create a Girder hierarchy from metadata.json.
Useful in testing the monkeybrains plugin to set up a hierarchy,
then set_metadata.py can be run to add the metadata.
"""
import argparse
import json
import requests.packages.urllib3
from girder_client import GirderClient
requests.packages.urllib3.disable_warnings()
parser = argparse.ArgumentParser(
prog='create_tree_from_metadata',
description='Create a hierarchy in girder from the metadata.json.')
parser.add_argument('--username', required=False, default=None)
parser.add_argument('--password', required=False, default=None)
parser.add_argument('--parent-folder-id', required=True, default=None)
parser.add_argument('--scheme', required=False, default=None)
parser.add_argument('--host', required=False, default=None)
parser.add_argument('--port', required=False, default=None)
def main():
"""Create the folder hierarchy with metadata in a Girder instance."""
args = parser.parse_args()
g = GirderClient(host=args.host, port=args.port, scheme=args.scheme)
g.authenticate(args.username, args.password)
def create_folder_on_demand(parent_folder_id, folder_name):
existing_folders = list(
g.listFolder(parent_folder_id, name=folder_name))
if not len(existing_folders):
sought_folder = g.createFolder(parent_folder_id, name=folder_name)
else:
sought_folder = existing_folders[0]
return sought_folder
metadata_file = 'metadata.json'
with open(metadata_file) as json_file:
metadata = json.load(json_file)
parent_folder_id = args.parent_folder_id
for subject_id, subject_metadata in metadata.items():
subject_folder = create_folder_on_demand(parent_folder_id, subject_id)
for (scan_time, scan_date, scan_weight) in subject_metadata['scans']:
create_folder_on_demand(subject_folder['_id'], scan_time)
if __name__ == '__main__':
main()
|
|
97df2e612f58bf7e0badb6da8bd564a98ac74355
|
tools/gen_rtorrent_doc.py
|
tools/gen_rtorrent_doc.py
|
import sys
sys.path.insert(0, '..')
import model.rtorrent as rtorrent
import model.torrent as torrent
import model.peer as peer
for d in (rtorrent, torrent, peer):
for x, y in d._rpc_methods.iteritems():
print y[0], y[1]
#print y[0], '\n', y[1]
|
Add rtorrent plaintext doc generator.
|
Tools: Add rtorrent plaintext doc generator.
|
Python
|
agpl-3.0
|
MerlijnWajer/pyroTorrent,MerlijnWajer/pyroTorrent
|
Tools: Add rtorrent plaintext doc generator.
|
import sys
sys.path.insert(0, '..')
import model.rtorrent as rtorrent
import model.torrent as torrent
import model.peer as peer
for d in (rtorrent, torrent, peer):
for x, y in d._rpc_methods.iteritems():
print y[0], y[1]
#print y[0], '\n', y[1]
|
<commit_before><commit_msg>Tools: Add rtorrent plaintext doc generator.<commit_after>
|
import sys
sys.path.insert(0, '..')
import model.rtorrent as rtorrent
import model.torrent as torrent
import model.peer as peer
for d in (rtorrent, torrent, peer):
for x, y in d._rpc_methods.iteritems():
print y[0], y[1]
#print y[0], '\n', y[1]
|
Tools: Add rtorrent plaintext doc generator.import sys
sys.path.insert(0, '..')
import model.rtorrent as rtorrent
import model.torrent as torrent
import model.peer as peer
for d in (rtorrent, torrent, peer):
for x, y in d._rpc_methods.iteritems():
print y[0], y[1]
#print y[0], '\n', y[1]
|
<commit_before><commit_msg>Tools: Add rtorrent plaintext doc generator.<commit_after>import sys
sys.path.insert(0, '..')
import model.rtorrent as rtorrent
import model.torrent as torrent
import model.peer as peer
for d in (rtorrent, torrent, peer):
for x, y in d._rpc_methods.iteritems():
print y[0], y[1]
#print y[0], '\n', y[1]
|
|
a784fd2f12328c9a200c48e72d68ca1ff3709316
|
tests/IsGrammarGeneratingTest.py
|
tests/IsGrammarGeneratingTest.py
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy-transforms
"""
from unittest import TestCase, main
from grammpy import *
from grammpy_transforms import *
class IsGrammarGeneratingTest(TestCase):
pass
if __name__ == '__main__':
main()
|
Add file for test if is grammar generating
|
Add file for test if is grammar generating
|
Python
|
mit
|
PatrikValkovic/grammpy
|
Add file for test if is grammar generating
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy-transforms
"""
from unittest import TestCase, main
from grammpy import *
from grammpy_transforms import *
class IsGrammarGeneratingTest(TestCase):
pass
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add file for test if is grammar generating<commit_after>
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy-transforms
"""
from unittest import TestCase, main
from grammpy import *
from grammpy_transforms import *
class IsGrammarGeneratingTest(TestCase):
pass
if __name__ == '__main__':
main()
|
Add file for test if is grammar generating#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy-transforms
"""
from unittest import TestCase, main
from grammpy import *
from grammpy_transforms import *
class IsGrammarGeneratingTest(TestCase):
pass
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add file for test if is grammar generating<commit_after>#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy-transforms
"""
from unittest import TestCase, main
from grammpy import *
from grammpy_transforms import *
class IsGrammarGeneratingTest(TestCase):
pass
if __name__ == '__main__':
main()
|
|
2341e59f7e93865ebe6816db1c6c79c84a3d09cc
|
setup.py
|
setup.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from distutils.core import setup
setup(
name='regulations-parser',
url='https://github.com/cfpb/regulations-parser',
author='CFPB',
author_email='tech@cfpb.gov',
license='CC0',
version='0.1.0',
description='eCFR Parser for eRegulations',
long_description=open('README.md').read()
if os.path.exists('README.md') else '',
packages=['regparser', ],
include_package_data=True,
install_requires=[
'lxml',
'pyparsing',
'inflection',
'requests',
'GitPython',
'python-constraint',
],
setup_requires=[
'nose>=1.0'
],
test_suite='xtdiff.tests',
)
|
Make the parser an installable Python package
|
Make the parser an installable Python package
|
Python
|
cc0-1.0
|
grapesmoker/regulations-parser
|
Make the parser an installable Python package
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from distutils.core import setup
setup(
name='regulations-parser',
url='https://github.com/cfpb/regulations-parser',
author='CFPB',
author_email='tech@cfpb.gov',
license='CC0',
version='0.1.0',
description='eCFR Parser for eRegulations',
long_description=open('README.md').read()
if os.path.exists('README.md') else '',
packages=['regparser', ],
include_package_data=True,
install_requires=[
'lxml',
'pyparsing',
'inflection',
'requests',
'GitPython',
'python-constraint',
],
setup_requires=[
'nose>=1.0'
],
test_suite='xtdiff.tests',
)
|
<commit_before><commit_msg>Make the parser an installable Python package<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from distutils.core import setup
setup(
name='regulations-parser',
url='https://github.com/cfpb/regulations-parser',
author='CFPB',
author_email='tech@cfpb.gov',
license='CC0',
version='0.1.0',
description='eCFR Parser for eRegulations',
long_description=open('README.md').read()
if os.path.exists('README.md') else '',
packages=['regparser', ],
include_package_data=True,
install_requires=[
'lxml',
'pyparsing',
'inflection',
'requests',
'GitPython',
'python-constraint',
],
setup_requires=[
'nose>=1.0'
],
test_suite='xtdiff.tests',
)
|
Make the parser an installable Python package#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from distutils.core import setup
setup(
name='regulations-parser',
url='https://github.com/cfpb/regulations-parser',
author='CFPB',
author_email='tech@cfpb.gov',
license='CC0',
version='0.1.0',
description='eCFR Parser for eRegulations',
long_description=open('README.md').read()
if os.path.exists('README.md') else '',
packages=['regparser', ],
include_package_data=True,
install_requires=[
'lxml',
'pyparsing',
'inflection',
'requests',
'GitPython',
'python-constraint',
],
setup_requires=[
'nose>=1.0'
],
test_suite='xtdiff.tests',
)
|
<commit_before><commit_msg>Make the parser an installable Python package<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from distutils.core import setup
setup(
name='regulations-parser',
url='https://github.com/cfpb/regulations-parser',
author='CFPB',
author_email='tech@cfpb.gov',
license='CC0',
version='0.1.0',
description='eCFR Parser for eRegulations',
long_description=open('README.md').read()
if os.path.exists('README.md') else '',
packages=['regparser', ],
include_package_data=True,
install_requires=[
'lxml',
'pyparsing',
'inflection',
'requests',
'GitPython',
'python-constraint',
],
setup_requires=[
'nose>=1.0'
],
test_suite='xtdiff.tests',
)
|
|
fd423fa583c251c2c097bf6905b6803945698db6
|
resource/maya/userSetup.py
|
resource/maya/userSetup.py
|
# :coding: utf-8
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
# :license: See LICENSE.txt.
import maya.utils as utils
import pymel.core
def setup():
'''Configure Segue and add to a default menu.'''
pymel.core.menu(
label='Segue',
tearOff=True,
parent='MayaWindow'
)
pymel.core.menuItem(
label='Geometry Exporter',
command='''
import segue.frontend.exporter
import segue.backend.host.maya
import segue
processors = segue.discover_processors()
host = segue.backend.host.maya.MayaHost()
exporter = segue.frontend.exporter.ExporterWidget(
host=host, processors=processors
)
exporter.show()
'''.replace(' ', '')
)
# Run setup
utils.executeDeferred(setup)
|
Add default menu that can be enabled in Maya to launch exporter.
|
Add default menu that can be enabled in Maya to launch exporter.
|
Python
|
apache-2.0
|
4degrees/segue
|
Add default menu that can be enabled in Maya to launch exporter.
|
# :coding: utf-8
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
# :license: See LICENSE.txt.
import maya.utils as utils
import pymel.core
def setup():
'''Configure Segue and add to a default menu.'''
pymel.core.menu(
label='Segue',
tearOff=True,
parent='MayaWindow'
)
pymel.core.menuItem(
label='Geometry Exporter',
command='''
import segue.frontend.exporter
import segue.backend.host.maya
import segue
processors = segue.discover_processors()
host = segue.backend.host.maya.MayaHost()
exporter = segue.frontend.exporter.ExporterWidget(
host=host, processors=processors
)
exporter.show()
'''.replace(' ', '')
)
# Run setup
utils.executeDeferred(setup)
|
<commit_before><commit_msg>Add default menu that can be enabled in Maya to launch exporter.<commit_after>
|
# :coding: utf-8
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
# :license: See LICENSE.txt.
import maya.utils as utils
import pymel.core
def setup():
'''Configure Segue and add to a default menu.'''
pymel.core.menu(
label='Segue',
tearOff=True,
parent='MayaWindow'
)
pymel.core.menuItem(
label='Geometry Exporter',
command='''
import segue.frontend.exporter
import segue.backend.host.maya
import segue
processors = segue.discover_processors()
host = segue.backend.host.maya.MayaHost()
exporter = segue.frontend.exporter.ExporterWidget(
host=host, processors=processors
)
exporter.show()
'''.replace(' ', '')
)
# Run setup
utils.executeDeferred(setup)
|
Add default menu that can be enabled in Maya to launch exporter.# :coding: utf-8
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
# :license: See LICENSE.txt.
import maya.utils as utils
import pymel.core
def setup():
'''Configure Segue and add to a default menu.'''
pymel.core.menu(
label='Segue',
tearOff=True,
parent='MayaWindow'
)
pymel.core.menuItem(
label='Geometry Exporter',
command='''
import segue.frontend.exporter
import segue.backend.host.maya
import segue
processors = segue.discover_processors()
host = segue.backend.host.maya.MayaHost()
exporter = segue.frontend.exporter.ExporterWidget(
host=host, processors=processors
)
exporter.show()
'''.replace(' ', '')
)
# Run setup
utils.executeDeferred(setup)
|
<commit_before><commit_msg>Add default menu that can be enabled in Maya to launch exporter.<commit_after># :coding: utf-8
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
# :license: See LICENSE.txt.
import maya.utils as utils
import pymel.core
def setup():
'''Configure Segue and add to a default menu.'''
pymel.core.menu(
label='Segue',
tearOff=True,
parent='MayaWindow'
)
pymel.core.menuItem(
label='Geometry Exporter',
command='''
import segue.frontend.exporter
import segue.backend.host.maya
import segue
processors = segue.discover_processors()
host = segue.backend.host.maya.MayaHost()
exporter = segue.frontend.exporter.ExporterWidget(
host=host, processors=processors
)
exporter.show()
'''.replace(' ', '')
)
# Run setup
utils.executeDeferred(setup)
|
|
2af82e36c9a60613b91ca88b9768eabd4378af11
|
tensorflow-samples/conv2d-numpy.py
|
tensorflow-samples/conv2d-numpy.py
|
from __future__ import print_function
import numpy as np
# Tensorflow is used to verify the results of numpy computations - you can
# remove its usage if you don't need the testing.
import tensorflow as tf
def conv2d_single_channel(input, w):
boundary_width = w.shape[0] // 2
padded_input = np.pad(input,
pad_width=boundary_width,
mode='constant',
constant_values=0)
output = np.zeros_like(input)
for i in range(output.shape[0]):
for j in range(output.shape[1]):
for fi in range(w.shape[0]):
for fj in range(w.shape[1]):
output[i, j] += padded_input[i + fi, j + fj] * w[fi, fj]
return output
def tf_conv2d_single_channel(input, w):
# We only have one item in our "batch", one input channel and one output
# channel; prepare the shapes TF expects for the conv2d op.
input_4d = tf.reshape(tf.constant(input, dtype=tf.float32),
[1, input.shape[0], input.shape[1], 1])
kernel_4d = tf.reshape(tf.constant(w, dtype=tf.float32),
[w.shape[0], w.shape[1], 1, 1])
output = tf.nn.conv2d(input_4d, kernel_4d,
strides=[1, 1, 1, 1], padding='SAME')
with tf.Session() as sess:
return sess.run(output)
if __name__ == '__main__':
inp = np.ones((6, 6))
w = np.zeros((3, 3))
w[0, 0] = 1
w[1, 1] = 1
w[2, 2] = 1
out = conv2d_single_channel(inp, w)
print(out)
#outtf = tf_conf2d_single_channel(inp, w)
#print(outtf[0, :, :, 0])
|
Add numpy implementation of simple 2d conv, with TF verification
|
Add numpy implementation of simple 2d conv, with TF verification
|
Python
|
unlicense
|
eliben/deep-learning-samples,eliben/deep-learning-samples
|
Add numpy implementation of simple 2d conv, with TF verification
|
from __future__ import print_function
import numpy as np
# Tensorflow is used to verify the results of numpy computations - you can
# remove its usage if you don't need the testing.
import tensorflow as tf
def conv2d_single_channel(input, w):
boundary_width = w.shape[0] // 2
padded_input = np.pad(input,
pad_width=boundary_width,
mode='constant',
constant_values=0)
output = np.zeros_like(input)
for i in range(output.shape[0]):
for j in range(output.shape[1]):
for fi in range(w.shape[0]):
for fj in range(w.shape[1]):
output[i, j] += padded_input[i + fi, j + fj] * w[fi, fj]
return output
def tf_conv2d_single_channel(input, w):
# We only have one item in our "batch", one input channel and one output
# channel; prepare the shapes TF expects for the conv2d op.
input_4d = tf.reshape(tf.constant(input, dtype=tf.float32),
[1, input.shape[0], input.shape[1], 1])
kernel_4d = tf.reshape(tf.constant(w, dtype=tf.float32),
[w.shape[0], w.shape[1], 1, 1])
output = tf.nn.conv2d(input_4d, kernel_4d,
strides=[1, 1, 1, 1], padding='SAME')
with tf.Session() as sess:
return sess.run(output)
if __name__ == '__main__':
inp = np.ones((6, 6))
w = np.zeros((3, 3))
w[0, 0] = 1
w[1, 1] = 1
w[2, 2] = 1
out = conv2d_single_channel(inp, w)
print(out)
#outtf = tf_conf2d_single_channel(inp, w)
#print(outtf[0, :, :, 0])
|
<commit_before><commit_msg>Add numpy implementation of simple 2d conv, with TF verification<commit_after>
|
from __future__ import print_function
import numpy as np
# Tensorflow is used to verify the results of numpy computations - you can
# remove its usage if you don't need the testing.
import tensorflow as tf
def conv2d_single_channel(input, w):
boundary_width = w.shape[0] // 2
padded_input = np.pad(input,
pad_width=boundary_width,
mode='constant',
constant_values=0)
output = np.zeros_like(input)
for i in range(output.shape[0]):
for j in range(output.shape[1]):
for fi in range(w.shape[0]):
for fj in range(w.shape[1]):
output[i, j] += padded_input[i + fi, j + fj] * w[fi, fj]
return output
def tf_conv2d_single_channel(input, w):
# We only have one item in our "batch", one input channel and one output
# channel; prepare the shapes TF expects for the conv2d op.
input_4d = tf.reshape(tf.constant(input, dtype=tf.float32),
[1, input.shape[0], input.shape[1], 1])
kernel_4d = tf.reshape(tf.constant(w, dtype=tf.float32),
[w.shape[0], w.shape[1], 1, 1])
output = tf.nn.conv2d(input_4d, kernel_4d,
strides=[1, 1, 1, 1], padding='SAME')
with tf.Session() as sess:
return sess.run(output)
if __name__ == '__main__':
inp = np.ones((6, 6))
w = np.zeros((3, 3))
w[0, 0] = 1
w[1, 1] = 1
w[2, 2] = 1
out = conv2d_single_channel(inp, w)
print(out)
#outtf = tf_conf2d_single_channel(inp, w)
#print(outtf[0, :, :, 0])
|
Add numpy implementation of simple 2d conv, with TF verificationfrom __future__ import print_function
import numpy as np
# Tensorflow is used to verify the results of numpy computations - you can
# remove its usage if you don't need the testing.
import tensorflow as tf
def conv2d_single_channel(input, w):
boundary_width = w.shape[0] // 2
padded_input = np.pad(input,
pad_width=boundary_width,
mode='constant',
constant_values=0)
output = np.zeros_like(input)
for i in range(output.shape[0]):
for j in range(output.shape[1]):
for fi in range(w.shape[0]):
for fj in range(w.shape[1]):
output[i, j] += padded_input[i + fi, j + fj] * w[fi, fj]
return output
def tf_conv2d_single_channel(input, w):
# We only have one item in our "batch", one input channel and one output
# channel; prepare the shapes TF expects for the conv2d op.
input_4d = tf.reshape(tf.constant(input, dtype=tf.float32),
[1, input.shape[0], input.shape[1], 1])
kernel_4d = tf.reshape(tf.constant(w, dtype=tf.float32),
[w.shape[0], w.shape[1], 1, 1])
output = tf.nn.conv2d(input_4d, kernel_4d,
strides=[1, 1, 1, 1], padding='SAME')
with tf.Session() as sess:
return sess.run(output)
if __name__ == '__main__':
inp = np.ones((6, 6))
w = np.zeros((3, 3))
w[0, 0] = 1
w[1, 1] = 1
w[2, 2] = 1
out = conv2d_single_channel(inp, w)
print(out)
#outtf = tf_conf2d_single_channel(inp, w)
#print(outtf[0, :, :, 0])
|
<commit_before><commit_msg>Add numpy implementation of simple 2d conv, with TF verification<commit_after>from __future__ import print_function
import numpy as np
# Tensorflow is used to verify the results of numpy computations - you can
# remove its usage if you don't need the testing.
import tensorflow as tf
def conv2d_single_channel(input, w):
boundary_width = w.shape[0] // 2
padded_input = np.pad(input,
pad_width=boundary_width,
mode='constant',
constant_values=0)
output = np.zeros_like(input)
for i in range(output.shape[0]):
for j in range(output.shape[1]):
for fi in range(w.shape[0]):
for fj in range(w.shape[1]):
output[i, j] += padded_input[i + fi, j + fj] * w[fi, fj]
return output
def tf_conv2d_single_channel(input, w):
# We only have one item in our "batch", one input channel and one output
# channel; prepare the shapes TF expects for the conv2d op.
input_4d = tf.reshape(tf.constant(input, dtype=tf.float32),
[1, input.shape[0], input.shape[1], 1])
kernel_4d = tf.reshape(tf.constant(w, dtype=tf.float32),
[w.shape[0], w.shape[1], 1, 1])
output = tf.nn.conv2d(input_4d, kernel_4d,
strides=[1, 1, 1, 1], padding='SAME')
with tf.Session() as sess:
return sess.run(output)
if __name__ == '__main__':
inp = np.ones((6, 6))
w = np.zeros((3, 3))
w[0, 0] = 1
w[1, 1] = 1
w[2, 2] = 1
out = conv2d_single_channel(inp, w)
print(out)
#outtf = tf_conf2d_single_channel(inp, w)
#print(outtf[0, :, :, 0])
|
|
7d08d7cc2a34e5e9a330c574a645cf94ab3d184b
|
python/read_yaml_safely.py
|
python/read_yaml_safely.py
|
"""
Read a YAML file or stream safely.
"""
from pathlib import Path
def read_yaml_safely(filelike):
""" Read a YAML file or stream with no RCE backdoors open (using the ‘safe’ loader).
``filelike`` can be a path or an open handle (supporting ``read()``).
Supports a fallback chain of parsers as follows:
PyYAML (C), PyYAML (Python), poyo, strictyaml.
"""
# https://github.com/anthonywritescode/wat-02-pyyaml/blob/master/slides.md
try:
from yaml.cyaml import CSafeLoader as SafeYamlLoader
except ImportError:
try:
from yaml import SafeLoader as SafeYamlLoader
except ImportError:
SafeYamlLoader = None
if SafeYamlLoader:
from yaml import load as yaml_load
yaml_parser = lambda stream: yaml_load(stream, Loader=SafeYamlLoader)
else:
try:
import poyo # poyo is optional! # pylint: disable=import-error
except ImportError:
try:
import strictyaml # strictyaml is optional! # pylint: disable=import-error
except ImportError:
raise RuntimeError("Please 'pip install' one of PyYAML, poyo, or strictyaml.")
else:
yaml_parser = strictyaml.load
else:
yaml_parser = poyo.parse_string
try:
yaml_text = filelike.read()
except AttributeError:
with Path(filelike).open('rb') as handle:
yaml_text = handle.read()
if not isinstance(yaml_text, str):
try:
yaml_text = yaml_text.decode('utf-8')
except UnicodeError:
yaml_text = yaml_text.decode('iso-8859-1')
try:
return yaml_parser(yaml_text)
except Exception as cause:
raise RuntimeError(str(cause)) from cause
if __name__ == '__main__':
from io import StringIO
from pprint import pprint
pprint(read_yaml_safely(StringIO('list: [1,2,3]')))
|
Read a YAML file or stream safely
|
Python3: Read a YAML file or stream safely
|
Python
|
unlicense
|
jhermann/waif,jhermann/waif
|
Python3: Read a YAML file or stream safely
|
"""
Read a YAML file or stream safely.
"""
from pathlib import Path
def read_yaml_safely(filelike):
""" Read a YAML file or stream with no RCE backdoors open (using the ‘safe’ loader).
``filelike`` can be a path or an open handle (supporting ``read()``).
Supports a fallback chain of parsers as follows:
PyYAML (C), PyYAML (Python), poyo, strictyaml.
"""
# https://github.com/anthonywritescode/wat-02-pyyaml/blob/master/slides.md
try:
from yaml.cyaml import CSafeLoader as SafeYamlLoader
except ImportError:
try:
from yaml import SafeLoader as SafeYamlLoader
except ImportError:
SafeYamlLoader = None
if SafeYamlLoader:
from yaml import load as yaml_load
yaml_parser = lambda stream: yaml_load(stream, Loader=SafeYamlLoader)
else:
try:
import poyo # poyo is optional! # pylint: disable=import-error
except ImportError:
try:
import strictyaml # strictyaml is optional! # pylint: disable=import-error
except ImportError:
raise RuntimeError("Please 'pip install' one of PyYAML, poyo, or strictyaml.")
else:
yaml_parser = strictyaml.load
else:
yaml_parser = poyo.parse_string
try:
yaml_text = filelike.read()
except AttributeError:
with Path(filelike).open('rb') as handle:
yaml_text = handle.read()
if not isinstance(yaml_text, str):
try:
yaml_text = yaml_text.decode('utf-8')
except UnicodeError:
yaml_text = yaml_text.decode('iso-8859-1')
try:
return yaml_parser(yaml_text)
except Exception as cause:
raise RuntimeError(str(cause)) from cause
if __name__ == '__main__':
from io import StringIO
from pprint import pprint
pprint(read_yaml_safely(StringIO('list: [1,2,3]')))
|
<commit_before><commit_msg>Python3: Read a YAML file or stream safely<commit_after>
|
"""
Read a YAML file or stream safely.
"""
from pathlib import Path
def read_yaml_safely(filelike):
""" Read a YAML file or stream with no RCE backdoors open (using the ‘safe’ loader).
``filelike`` can be a path or an open handle (supporting ``read()``).
Supports a fallback chain of parsers as follows:
PyYAML (C), PyYAML (Python), poyo, strictyaml.
"""
# https://github.com/anthonywritescode/wat-02-pyyaml/blob/master/slides.md
try:
from yaml.cyaml import CSafeLoader as SafeYamlLoader
except ImportError:
try:
from yaml import SafeLoader as SafeYamlLoader
except ImportError:
SafeYamlLoader = None
if SafeYamlLoader:
from yaml import load as yaml_load
yaml_parser = lambda stream: yaml_load(stream, Loader=SafeYamlLoader)
else:
try:
import poyo # poyo is optional! # pylint: disable=import-error
except ImportError:
try:
import strictyaml # strictyaml is optional! # pylint: disable=import-error
except ImportError:
raise RuntimeError("Please 'pip install' one of PyYAML, poyo, or strictyaml.")
else:
yaml_parser = strictyaml.load
else:
yaml_parser = poyo.parse_string
try:
yaml_text = filelike.read()
except AttributeError:
with Path(filelike).open('rb') as handle:
yaml_text = handle.read()
if not isinstance(yaml_text, str):
try:
yaml_text = yaml_text.decode('utf-8')
except UnicodeError:
yaml_text = yaml_text.decode('iso-8859-1')
try:
return yaml_parser(yaml_text)
except Exception as cause:
raise RuntimeError(str(cause)) from cause
if __name__ == '__main__':
from io import StringIO
from pprint import pprint
pprint(read_yaml_safely(StringIO('list: [1,2,3]')))
|
Python3: Read a YAML file or stream safely"""
Read a YAML file or stream safely.
"""
from pathlib import Path
def read_yaml_safely(filelike):
""" Read a YAML file or stream with no RCE backdoors open (using the ‘safe’ loader).
``filelike`` can be a path or an open handle (supporting ``read()``).
Supports a fallback chain of parsers as follows:
PyYAML (C), PyYAML (Python), poyo, strictyaml.
"""
# https://github.com/anthonywritescode/wat-02-pyyaml/blob/master/slides.md
try:
from yaml.cyaml import CSafeLoader as SafeYamlLoader
except ImportError:
try:
from yaml import SafeLoader as SafeYamlLoader
except ImportError:
SafeYamlLoader = None
if SafeYamlLoader:
from yaml import load as yaml_load
yaml_parser = lambda stream: yaml_load(stream, Loader=SafeYamlLoader)
else:
try:
import poyo # poyo is optional! # pylint: disable=import-error
except ImportError:
try:
import strictyaml # strictyaml is optional! # pylint: disable=import-error
except ImportError:
raise RuntimeError("Please 'pip install' one of PyYAML, poyo, or strictyaml.")
else:
yaml_parser = strictyaml.load
else:
yaml_parser = poyo.parse_string
try:
yaml_text = filelike.read()
except AttributeError:
with Path(filelike).open('rb') as handle:
yaml_text = handle.read()
if not isinstance(yaml_text, str):
try:
yaml_text = yaml_text.decode('utf-8')
except UnicodeError:
yaml_text = yaml_text.decode('iso-8859-1')
try:
return yaml_parser(yaml_text)
except Exception as cause:
raise RuntimeError(str(cause)) from cause
if __name__ == '__main__':
from io import StringIO
from pprint import pprint
pprint(read_yaml_safely(StringIO('list: [1,2,3]')))
|
<commit_before><commit_msg>Python3: Read a YAML file or stream safely<commit_after>"""
Read a YAML file or stream safely.
"""
from pathlib import Path
def read_yaml_safely(filelike):
""" Read a YAML file or stream with no RCE backdoors open (using the ‘safe’ loader).
``filelike`` can be a path or an open handle (supporting ``read()``).
Supports a fallback chain of parsers as follows:
PyYAML (C), PyYAML (Python), poyo, strictyaml.
"""
# https://github.com/anthonywritescode/wat-02-pyyaml/blob/master/slides.md
try:
from yaml.cyaml import CSafeLoader as SafeYamlLoader
except ImportError:
try:
from yaml import SafeLoader as SafeYamlLoader
except ImportError:
SafeYamlLoader = None
if SafeYamlLoader:
from yaml import load as yaml_load
yaml_parser = lambda stream: yaml_load(stream, Loader=SafeYamlLoader)
else:
try:
import poyo # poyo is optional! # pylint: disable=import-error
except ImportError:
try:
import strictyaml # strictyaml is optional! # pylint: disable=import-error
except ImportError:
raise RuntimeError("Please 'pip install' one of PyYAML, poyo, or strictyaml.")
else:
yaml_parser = strictyaml.load
else:
yaml_parser = poyo.parse_string
try:
yaml_text = filelike.read()
except AttributeError:
with Path(filelike).open('rb') as handle:
yaml_text = handle.read()
if not isinstance(yaml_text, str):
try:
yaml_text = yaml_text.decode('utf-8')
except UnicodeError:
yaml_text = yaml_text.decode('iso-8859-1')
try:
return yaml_parser(yaml_text)
except Exception as cause:
raise RuntimeError(str(cause)) from cause
if __name__ == '__main__':
from io import StringIO
from pprint import pprint
pprint(read_yaml_safely(StringIO('list: [1,2,3]')))
|
|
81adf67267e77202bc5890840631181ffb52fd3d
|
tests/font/BULLET.py
|
tests/font/BULLET.py
|
#!/usr/bin/env python
'''Test that font.Text horizontal alignment works.
Three labels will be rendered aligned left, center and right.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import unittest
from pyglet import font
import base_text
class TEST_HALIGN(base_text.TextTestBase):
font_name = ''
font_size = 60
text = u'\u2022'*5
if __name__ == '__main__':
unittest.main()
|
Test case for incorrect rendering of bullet noticed in a wydget test case.
|
Test case for incorrect rendering of bullet noticed in a wydget test case.
|
Python
|
bsd-3-clause
|
niklaskorz/pyglet,seeminglee/pyglet64,niklaskorz/pyglet,seeminglee/pyglet64,adamlwgriffiths/Pyglet,seeminglee/pyglet64,niklaskorz/pyglet,adamlwgriffiths/Pyglet,adamlwgriffiths/Pyglet,adamlwgriffiths/Pyglet,niklaskorz/pyglet
|
Test case for incorrect rendering of bullet noticed in a wydget test case.
|
#!/usr/bin/env python
'''Test that font.Text horizontal alignment works.
Three labels will be rendered aligned left, center and right.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import unittest
from pyglet import font
import base_text
class TEST_HALIGN(base_text.TextTestBase):
font_name = ''
font_size = 60
text = u'\u2022'*5
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Test case for incorrect rendering of bullet noticed in a wydget test case.<commit_after>
|
#!/usr/bin/env python
'''Test that font.Text horizontal alignment works.
Three labels will be rendered aligned left, center and right.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import unittest
from pyglet import font
import base_text
class TEST_HALIGN(base_text.TextTestBase):
font_name = ''
font_size = 60
text = u'\u2022'*5
if __name__ == '__main__':
unittest.main()
|
Test case for incorrect rendering of bullet noticed in a wydget test case.#!/usr/bin/env python
'''Test that font.Text horizontal alignment works.
Three labels will be rendered aligned left, center and right.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import unittest
from pyglet import font
import base_text
class TEST_HALIGN(base_text.TextTestBase):
font_name = ''
font_size = 60
text = u'\u2022'*5
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Test case for incorrect rendering of bullet noticed in a wydget test case.<commit_after>#!/usr/bin/env python
'''Test that font.Text horizontal alignment works.
Three labels will be rendered aligned left, center and right.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import unittest
from pyglet import font
import base_text
class TEST_HALIGN(base_text.TextTestBase):
font_name = ''
font_size = 60
text = u'\u2022'*5
if __name__ == '__main__':
unittest.main()
|
|
b7ede20d4e82b5aba701dd02c49ca8a5fe00e0ed
|
dimagi/utils/prime_views.py
|
dimagi/utils/prime_views.py
|
def prime_views(pool_size):
"""
Prime the views so that a very large import doesn't cause the index
to get too far behind
"""
# These have to be included here or ./manage.py runserver explodes on
# all pages of the app with single thread related errors
from gevent.pool import Pool
from dimagi.utils.management.commands import prime_views
prime_pool = Pool(pool_size)
prime_all = prime_views.Command()
prime_all.prime_everything(prime_pool, verbose=True)
prime_pool.join()
|
Move prime views method in
|
Move prime views method in
|
Python
|
bsd-3-clause
|
qedsoftware/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq
|
Move prime views method in
|
def prime_views(pool_size):
"""
Prime the views so that a very large import doesn't cause the index
to get too far behind
"""
# These have to be included here or ./manage.py runserver explodes on
# all pages of the app with single thread related errors
from gevent.pool import Pool
from dimagi.utils.management.commands import prime_views
prime_pool = Pool(pool_size)
prime_all = prime_views.Command()
prime_all.prime_everything(prime_pool, verbose=True)
prime_pool.join()
|
<commit_before><commit_msg>Move prime views method in<commit_after>
|
def prime_views(pool_size):
"""
Prime the views so that a very large import doesn't cause the index
to get too far behind
"""
# These have to be included here or ./manage.py runserver explodes on
# all pages of the app with single thread related errors
from gevent.pool import Pool
from dimagi.utils.management.commands import prime_views
prime_pool = Pool(pool_size)
prime_all = prime_views.Command()
prime_all.prime_everything(prime_pool, verbose=True)
prime_pool.join()
|
Move prime views method indef prime_views(pool_size):
"""
Prime the views so that a very large import doesn't cause the index
to get too far behind
"""
# These have to be included here or ./manage.py runserver explodes on
# all pages of the app with single thread related errors
from gevent.pool import Pool
from dimagi.utils.management.commands import prime_views
prime_pool = Pool(pool_size)
prime_all = prime_views.Command()
prime_all.prime_everything(prime_pool, verbose=True)
prime_pool.join()
|
<commit_before><commit_msg>Move prime views method in<commit_after>def prime_views(pool_size):
"""
Prime the views so that a very large import doesn't cause the index
to get too far behind
"""
# These have to be included here or ./manage.py runserver explodes on
# all pages of the app with single thread related errors
from gevent.pool import Pool
from dimagi.utils.management.commands import prime_views
prime_pool = Pool(pool_size)
prime_all = prime_views.Command()
prime_all.prime_everything(prime_pool, verbose=True)
prime_pool.join()
|
|
ae189397e3ecbdaeec80d8f44b47e7a749f9d2ac
|
sft/agent/ah/RunningAvg.py
|
sft/agent/ah/RunningAvg.py
|
import theano
from sft import Size
from sft.Actions import Actions
from sft.agent.ah.ActionHistory import ActionHistory
import numpy as np
class RunningAvg(ActionHistory):
def __init__(self, logger, n, factor):
self.logger = logger
self.n = n
self.factor = factor
def get_size(self):
return Size(self.n, self.ACTION_WIDTH)
def get_history(self, all_actions):
actions = np.zeros([self.n, self.ACTION_WIDTH], dtype=theano.config.floatX)
# take last n actions, this will be smaller or empty if there are not enough actions
for a in all_actions:
actions[0, :] = Actions.get_one_hot(a)
for i in range(1, self.n):
actions[i] = self.normalize(actions[i] + self.factor * actions[i-1])
self.logger.log_parameter("actions", str(actions))
return actions
def normalize(self, vec):
return vec / float(np.max(vec))
|
Implement running average action history
|
Implement running average action history
|
Python
|
mit
|
kevinkepp/search-for-this
|
Implement running average action history
|
import theano
from sft import Size
from sft.Actions import Actions
from sft.agent.ah.ActionHistory import ActionHistory
import numpy as np
class RunningAvg(ActionHistory):
def __init__(self, logger, n, factor):
self.logger = logger
self.n = n
self.factor = factor
def get_size(self):
return Size(self.n, self.ACTION_WIDTH)
def get_history(self, all_actions):
actions = np.zeros([self.n, self.ACTION_WIDTH], dtype=theano.config.floatX)
# take last n actions, this will be smaller or empty if there are not enough actions
for a in all_actions:
actions[0, :] = Actions.get_one_hot(a)
for i in range(1, self.n):
actions[i] = self.normalize(actions[i] + self.factor * actions[i-1])
self.logger.log_parameter("actions", str(actions))
return actions
def normalize(self, vec):
return vec / float(np.max(vec))
|
<commit_before><commit_msg>Implement running average action history<commit_after>
|
import theano
from sft import Size
from sft.Actions import Actions
from sft.agent.ah.ActionHistory import ActionHistory
import numpy as np
class RunningAvg(ActionHistory):
def __init__(self, logger, n, factor):
self.logger = logger
self.n = n
self.factor = factor
def get_size(self):
return Size(self.n, self.ACTION_WIDTH)
def get_history(self, all_actions):
actions = np.zeros([self.n, self.ACTION_WIDTH], dtype=theano.config.floatX)
# take last n actions, this will be smaller or empty if there are not enough actions
for a in all_actions:
actions[0, :] = Actions.get_one_hot(a)
for i in range(1, self.n):
actions[i] = self.normalize(actions[i] + self.factor * actions[i-1])
self.logger.log_parameter("actions", str(actions))
return actions
def normalize(self, vec):
return vec / float(np.max(vec))
|
Implement running average action historyimport theano
from sft import Size
from sft.Actions import Actions
from sft.agent.ah.ActionHistory import ActionHistory
import numpy as np
class RunningAvg(ActionHistory):
def __init__(self, logger, n, factor):
self.logger = logger
self.n = n
self.factor = factor
def get_size(self):
return Size(self.n, self.ACTION_WIDTH)
def get_history(self, all_actions):
actions = np.zeros([self.n, self.ACTION_WIDTH], dtype=theano.config.floatX)
# take last n actions, this will be smaller or empty if there are not enough actions
for a in all_actions:
actions[0, :] = Actions.get_one_hot(a)
for i in range(1, self.n):
actions[i] = self.normalize(actions[i] + self.factor * actions[i-1])
self.logger.log_parameter("actions", str(actions))
return actions
def normalize(self, vec):
return vec / float(np.max(vec))
|
<commit_before><commit_msg>Implement running average action history<commit_after>import theano
from sft import Size
from sft.Actions import Actions
from sft.agent.ah.ActionHistory import ActionHistory
import numpy as np
class RunningAvg(ActionHistory):
def __init__(self, logger, n, factor):
self.logger = logger
self.n = n
self.factor = factor
def get_size(self):
return Size(self.n, self.ACTION_WIDTH)
def get_history(self, all_actions):
actions = np.zeros([self.n, self.ACTION_WIDTH], dtype=theano.config.floatX)
# take last n actions, this will be smaller or empty if there are not enough actions
for a in all_actions:
actions[0, :] = Actions.get_one_hot(a)
for i in range(1, self.n):
actions[i] = self.normalize(actions[i] + self.factor * actions[i-1])
self.logger.log_parameter("actions", str(actions))
return actions
def normalize(self, vec):
return vec / float(np.max(vec))
|
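Illustration (not part of the recorded commit): the RunningAvg history above keeps the newest action in row 0 and folds older actions into the later rows via normalize(actions[i] + factor * actions[i-1]). A minimal, self-contained sketch of that update rule, assuming a 4-action one-hot encoding with n=3 and factor=0.5, is:

import numpy as np

def normalize(vec):
    # same rule as RunningAvg.normalize: scale so the largest entry becomes 1
    return vec / float(np.max(vec))

n, width, factor = 3, 4, 0.5  # assumed history length, action count and decay factor
actions = np.zeros((n, width))
for one_hot in ([1, 0, 0, 0], [0, 1, 0, 0], [0, 1, 0, 0]):
    actions[0, :] = one_hot  # newest action always occupies row 0
    for i in range(1, n):
        actions[i] = normalize(actions[i] + factor * actions[i - 1])
print(actions)  # older rows hold a decayed, renormalized blend of earlier actions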
|
29b1c862f603f3399ebe1e3acb7e14e776e2102f
|
performance_testing/result.py
|
performance_testing/result.py
|
import os
from datetime import datetime
class Result:
def __init__(self, directory):
        date = datetime.now()
        name = '%d-%d-%d_%d-%d-%d' % (date.year,
                                      date.month,
                                      date.day,
                                      date.hour,
                                      date.minute,
                                      date.second)
self.file = File(directory, name)
class File:
def __init__(self, directory, name):
if not os.path.exists(directory):
os.makedirs(directory)
self.path = os.path.join(directory, name)
if not os.path.exists(self.path):
open(self.path, 'a').close()
def write_line(self, text):
stream = open(self.path, 'w')
stream.write('%s\n' % text)
stream.close()
|
Create Result class with file
|
Create Result class with file
|
Python
|
mit
|
BakeCode/performance-testing,BakeCode/performance-testing
|
Create Result class with file
|
import os
from datetime import datetime
class Result:
def __init__(self, directory):
        date = datetime.now()
        name = '%d-%d-%d_%d-%d-%d' % (date.year,
                                      date.month,
                                      date.day,
                                      date.hour,
                                      date.minute,
                                      date.second)
self.file = File(directory, name)
class File:
def __init__(self, directory, name):
if not os.path.exists(directory):
os.makedirs(directory)
self.path = os.path.join(directory, name)
if not os.path.exists(self.path):
open(self.path, 'a').close()
def write_line(self, text):
stream = open(self.path, 'w')
stream.write('%s\n' % text)
stream.close()
|
<commit_before><commit_msg>Create Result class with file<commit_after>
|
import os
from datetime import datetime
class Result:
def __init__(self, directory):
        date = datetime.now()
        name = '%d-%d-%d_%d-%d-%d' % (date.year,
                                      date.month,
                                      date.day,
                                      date.hour,
                                      date.minute,
                                      date.second)
self.file = File(directory, name)
class File:
def __init__(self, directory, name):
if not os.path.exists(directory):
os.makedirs(directory)
self.path = os.path.join(directory, name)
if not os.path.exists(self.path):
open(self.path, 'a').close()
def write_line(self, text):
stream = open(self.path, 'w')
stream.write('%s\n' % text)
stream.close()
|
Create Result class with fileimport os
from datetime import datetime
class Result:
def __init__(self, directory):
        date = datetime.now()
        name = '%d-%d-%d_%d-%d-%d' % (date.year,
                                      date.month,
                                      date.day,
                                      date.hour,
                                      date.minute,
                                      date.second)
self.file = File(directory, name)
class File:
def __init__(self, directory, name):
if not os.path.exists(directory):
os.makedirs(directory)
self.path = os.path.join(directory, name)
if not os.path.exists(self.path):
open(self.path, 'a').close()
def write_line(self, text):
stream = open(self.path, 'w')
stream.write('%s\n' % text)
stream.close()
|
<commit_before><commit_msg>Create Result class with file<commit_after>import os
from datetime import datetime
class Result:
def __init__(self, directory):
        date = datetime.now()
        name = '%d-%d-%d_%d-%d-%d' % (date.year,
                                      date.month,
                                      date.day,
                                      date.hour,
                                      date.minute,
                                      date.second)
self.file = File(directory, name)
class File:
def __init__(self, directory, name):
if not os.path.exists(directory):
os.makedirs(directory)
self.path = os.path.join(directory, name)
if not os.path.exists(self.path):
open(self.path, 'a').close()
def write_line(self, text):
stream = open(self.path, 'w')
stream.write('%s\n' % text)
stream.close()
|
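A side note on the name-building code above: the same timestamp-based file name can be produced more compactly with datetime.strftime. A small illustrative sketch (strftime zero-pads the fields, unlike the bare '%d' formatting used in the commit) is:

import os
from datetime import datetime

def timestamped_path(directory):
    # e.g. 'results/2017-01-01_12-00-00'
    name = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    return os.path.join(directory, name)

print(timestamped_path('results'))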
|
5aa3d52b6c60f668f4d96031778b3dd2662435cf
|
AndroidGatewayPlugin/Testdriver/ManyConnectors.py
|
AndroidGatewayPlugin/Testdriver/ManyConnectors.py
|
from ammo import AndroidConnector
from ammo import AmmoMessages_pb2
import uuid
import sys
import time
import datetime
import math
import optparse
from twisted.internet import reactor
latencies = []
def onDataReceived(connector, msg):
receivedTime = time.time()
if msg.type == AmmoMessages_pb2.MessageWrapper.DATA_MESSAGE:
splitMessage = msg.data_message.data.split("/")
sequenceNumber = int(splitMessage[0])
sentTime = float(splitMessage[1])
timeDifference = receivedTime - sentTime
print "{0},{1:.9f}".format(sequenceNumber, timeDifference)
latencies.append(timeDifference)
if __name__ == "__main__":
print "Android Gateway Tester"
parser = optparse.OptionParser()
parser.add_option("-g", "--gateway", dest="gateway",
help="Gateway to connect to (default %default)",
default="127.0.0.1")
parser.add_option("-p", "--port", dest="port", type="int",
help="Gateway port to connect to (default %default)",
default=33289)
parser.add_option("-s", "--scope", dest="scope",
help="Subscription scope (either local or global; default %default)",
default="global")
(options, args) = parser.parse_args()
scope = AndroidConnector.MessageScope.GLOBAL
if options.scope == "local":
scope = AndroidConnector.MessageScope.LOCAL
elif options.scope == "global":
scope = AndroidConnector.MessageScope.GLOBAL
else:
print "scope must be one of: local global"
exit(-1)
connectors = []
while True:
print "Count:", len(connectors) + 1
deviceName = "device:test/" + uuid.uuid1().hex
userName = "user:test/" + uuid.uuid1().hex
connector = AndroidConnector.AndroidConnector(options.gateway, options.port, deviceName, userName, "")
connector.setMessageQueueEnabled(False)
connector.registerMessageCallback(onDataReceived)
try:
connector.start()
connector.waitForAuthentication()
time.sleep(0.05)
connectors.append(connector)
except KeyboardInterrupt:
print "Got ^C... Closing"
reactor.callFromThread(reactor.stop)
except:
print "Unexpected error... dying."
reactor.callFromThread(reactor.stop)
raise
|
Add testdriver to simulate large numbers of simultaneous connections
|
Add testdriver to simulate large numbers of simultaneous connections
|
Python
|
mit
|
isis-ammo/ammo-gateway,isis-ammo/ammo-gateway,isis-ammo/ammo-gateway,isis-ammo/ammo-gateway,isis-ammo/ammo-gateway,isis-ammo/ammo-gateway
|
Add testdriver to simulate large numbers of simultaneous connections
|
from ammo import AndroidConnector
from ammo import AmmoMessages_pb2
import uuid
import sys
import time
import datetime
import math
import optparse
from twisted.internet import reactor
latencies = []
def onDataReceived(connector, msg):
receivedTime = time.time()
if msg.type == AmmoMessages_pb2.MessageWrapper.DATA_MESSAGE:
splitMessage = msg.data_message.data.split("/")
sequenceNumber = int(splitMessage[0])
sentTime = float(splitMessage[1])
timeDifference = receivedTime - sentTime
print "{0},{1:.9f}".format(sequenceNumber, timeDifference)
latencies.append(timeDifference)
if __name__ == "__main__":
print "Android Gateway Tester"
parser = optparse.OptionParser()
parser.add_option("-g", "--gateway", dest="gateway",
help="Gateway to connect to (default %default)",
default="127.0.0.1")
parser.add_option("-p", "--port", dest="port", type="int",
help="Gateway port to connect to (default %default)",
default=33289)
parser.add_option("-s", "--scope", dest="scope",
help="Subscription scope (either local or global; default %default)",
default="global")
(options, args) = parser.parse_args()
scope = AndroidConnector.MessageScope.GLOBAL
if options.scope == "local":
scope = AndroidConnector.MessageScope.LOCAL
elif options.scope == "global":
scope = AndroidConnector.MessageScope.GLOBAL
else:
print "scope must be one of: local global"
exit(-1)
connectors = []
while True:
print "Count:", len(connectors) + 1
deviceName = "device:test/" + uuid.uuid1().hex
userName = "user:test/" + uuid.uuid1().hex
connector = AndroidConnector.AndroidConnector(options.gateway, options.port, deviceName, userName, "")
connector.setMessageQueueEnabled(False)
connector.registerMessageCallback(onDataReceived)
try:
connector.start()
connector.waitForAuthentication()
time.sleep(0.05)
connectors.append(connector)
except KeyboardInterrupt:
print "Got ^C... Closing"
reactor.callFromThread(reactor.stop)
except:
print "Unexpected error... dying."
reactor.callFromThread(reactor.stop)
raise
|
<commit_before><commit_msg>Add testdriver to simulate large numbers of simultaneous connections<commit_after>
|
from ammo import AndroidConnector
from ammo import AmmoMessages_pb2
import uuid
import sys
import time
import datetime
import math
import optparse
from twisted.internet import reactor
latencies = []
def onDataReceived(connector, msg):
receivedTime = time.time()
if msg.type == AmmoMessages_pb2.MessageWrapper.DATA_MESSAGE:
splitMessage = msg.data_message.data.split("/")
sequenceNumber = int(splitMessage[0])
sentTime = float(splitMessage[1])
timeDifference = receivedTime - sentTime
print "{0},{1:.9f}".format(sequenceNumber, timeDifference)
latencies.append(timeDifference)
if __name__ == "__main__":
print "Android Gateway Tester"
parser = optparse.OptionParser()
parser.add_option("-g", "--gateway", dest="gateway",
help="Gateway to connect to (default %default)",
default="127.0.0.1")
parser.add_option("-p", "--port", dest="port", type="int",
help="Gateway port to connect to (default %default)",
default=33289)
parser.add_option("-s", "--scope", dest="scope",
help="Subscription scope (either local or global; default %default)",
default="global")
(options, args) = parser.parse_args()
scope = AndroidConnector.MessageScope.GLOBAL
if options.scope == "local":
scope = AndroidConnector.MessageScope.LOCAL
elif options.scope == "global":
scope = AndroidConnector.MessageScope.GLOBAL
else:
print "scope must be one of: local global"
exit(-1)
connectors = []
while True:
print "Count:", len(connectors) + 1
deviceName = "device:test/" + uuid.uuid1().hex
userName = "user:test/" + uuid.uuid1().hex
connector = AndroidConnector.AndroidConnector(options.gateway, options.port, deviceName, userName, "")
connector.setMessageQueueEnabled(False)
connector.registerMessageCallback(onDataReceived)
try:
connector.start()
connector.waitForAuthentication()
time.sleep(0.05)
connectors.append(connector)
except KeyboardInterrupt:
print "Got ^C... Closing"
reactor.callFromThread(reactor.stop)
except:
print "Unexpected error... dying."
reactor.callFromThread(reactor.stop)
raise
|
Add testdriver to simulate large numbers of simultaneous connectionsfrom ammo import AndroidConnector
from ammo import AmmoMessages_pb2
import uuid
import sys
import time
import datetime
import math
import optparse
from twisted.internet import reactor
latencies = []
def onDataReceived(connector, msg):
receivedTime = time.time()
if msg.type == AmmoMessages_pb2.MessageWrapper.DATA_MESSAGE:
splitMessage = msg.data_message.data.split("/")
sequenceNumber = int(splitMessage[0])
sentTime = float(splitMessage[1])
timeDifference = receivedTime - sentTime
print "{0},{1:.9f}".format(sequenceNumber, timeDifference)
latencies.append(timeDifference)
if __name__ == "__main__":
print "Android Gateway Tester"
parser = optparse.OptionParser()
parser.add_option("-g", "--gateway", dest="gateway",
help="Gateway to connect to (default %default)",
default="127.0.0.1")
parser.add_option("-p", "--port", dest="port", type="int",
help="Gateway port to connect to (default %default)",
default=33289)
parser.add_option("-s", "--scope", dest="scope",
help="Subscription scope (either local or global; default %default)",
default="global")
(options, args) = parser.parse_args()
scope = AndroidConnector.MessageScope.GLOBAL
if options.scope == "local":
scope = AndroidConnector.MessageScope.LOCAL
elif options.scope == "global":
scope = AndroidConnector.MessageScope.GLOBAL
else:
print "scope must be one of: local global"
exit(-1)
connectors = []
while True:
print "Count:", len(connectors) + 1
deviceName = "device:test/" + uuid.uuid1().hex
userName = "user:test/" + uuid.uuid1().hex
connector = AndroidConnector.AndroidConnector(options.gateway, options.port, deviceName, userName, "")
connector.setMessageQueueEnabled(False)
connector.registerMessageCallback(onDataReceived)
try:
connector.start()
connector.waitForAuthentication()
time.sleep(0.05)
connectors.append(connector)
except KeyboardInterrupt:
print "Got ^C... Closing"
reactor.callFromThread(reactor.stop)
except:
print "Unexpected error... dying."
reactor.callFromThread(reactor.stop)
raise
|
<commit_before><commit_msg>Add testdriver to simulate large numbers of simultaneous connections<commit_after>from ammo import AndroidConnector
from ammo import AmmoMessages_pb2
import uuid
import sys
import time
import datetime
import math
import optparse
from twisted.internet import reactor
latencies = []
def onDataReceived(connector, msg):
receivedTime = time.time()
if msg.type == AmmoMessages_pb2.MessageWrapper.DATA_MESSAGE:
splitMessage = msg.data_message.data.split("/")
sequenceNumber = int(splitMessage[0])
sentTime = float(splitMessage[1])
timeDifference = receivedTime - sentTime
print "{0},{1:.9f}".format(sequenceNumber, timeDifference)
latencies.append(timeDifference)
if __name__ == "__main__":
print "Android Gateway Tester"
parser = optparse.OptionParser()
parser.add_option("-g", "--gateway", dest="gateway",
help="Gateway to connect to (default %default)",
default="127.0.0.1")
parser.add_option("-p", "--port", dest="port", type="int",
help="Gateway port to connect to (default %default)",
default=33289)
parser.add_option("-s", "--scope", dest="scope",
help="Subscription scope (either local or global; default %default)",
default="global")
(options, args) = parser.parse_args()
scope = AndroidConnector.MessageScope.GLOBAL
if options.scope == "local":
scope = AndroidConnector.MessageScope.LOCAL
elif options.scope == "global":
scope = AndroidConnector.MessageScope.GLOBAL
else:
print "scope must be one of: local global"
exit(-1)
connectors = []
while True:
print "Count:", len(connectors) + 1
deviceName = "device:test/" + uuid.uuid1().hex
userName = "user:test/" + uuid.uuid1().hex
connector = AndroidConnector.AndroidConnector(options.gateway, options.port, deviceName, userName, "")
connector.setMessageQueueEnabled(False)
connector.registerMessageCallback(onDataReceived)
try:
connector.start()
connector.waitForAuthentication()
time.sleep(0.05)
connectors.append(connector)
except KeyboardInterrupt:
print "Got ^C... Closing"
reactor.callFromThread(reactor.stop)
except:
print "Unexpected error... dying."
reactor.callFromThread(reactor.stop)
raise
|
|
a3fda3f65f810e18c099050aac65e6f277d5e41e
|
osf/migrations/0074_parse_citation_styles.py
|
osf/migrations/0074_parse_citation_styles.py
|
# This migration ports `scripts/parse_citation_styles` to automatically parse citation styles.
# Additionally, this sets the corresponding `has_bibliography` field to `False` for all citation formats whose CSL files do not
# include a bibliography section. As a result, all such citation formats would not show up in OSF
# citation widgets for users to choose.
#
# NOTE:
# As of December 6th, 2017, there are however THREE EXCEPTIONS:
# "Bluebook Law Review", "Bluebook Law Review(2)" and "Bluebook Inline" shares a
# special CSL file ('website/static/bluebook.cls'), in which a bibliography section is defined,
# in order to render bibliographies even though their official CSL files (located in CenterForOpenScience/styles repo)
# do not contain a bibliography section. Therefore, this migration also automatically sets `has_bibliography` to `True` for all styles whose titles contain "Bluebook"
import logging
import os
from django.db import migrations
from lxml import etree
from osf.models.citation import CitationStyle
from website import settings
logger = logging.getLogger(__file__)
def get_style_files(path):
files = (os.path.join(path, x) for x in os.listdir(path))
return (f for f in files if os.path.isfile(f))
def parse_citation_styles(*args):
# drop all styles
CitationStyle.remove()
for style_file in get_style_files(settings.CITATION_STYLES_PATH):
with open(style_file, 'r') as f:
try:
root = etree.parse(f).getroot()
except etree.XMLSyntaxError:
continue
namespace = root.nsmap.get(None)
selector = '{{{ns}}}info/{{{ns}}}'.format(ns=namespace)
title = root.find(selector + 'title').text
# `has_bibliography` is set to `True` for Bluebook citation formats due to the special way we handle them.
has_bibliography = root.find('{{{ns}}}{tag}'.format(ns=namespace, tag='bibliography')) is not None or 'Bluebook' in title
# Required
fields = {
'_id': os.path.splitext(os.path.basename(style_file))[0],
'title': title,
'has_bibliography': has_bibliography,
}
# Optional
try:
fields['short_title'] = root.find(selector + "title-short").text
except AttributeError:
pass
try:
fields['summary'] = root.find(selector + 'summary').text
except AttributeError:
pass
style = CitationStyle(**fields)
style.save()
def revert(*args):
# The revert of this migration simply removes all CitationStyle instances.
CitationStyle.remove()
class Migration(migrations.Migration):
dependencies = [
('osf', '0073_citationstyle_has_bibliography'),
]
operations = [
migrations.RunPython(parse_citation_styles, revert),
]
|
Revert "[REVERT][OSF-3622] Add migration file to automatically parse citations""
|
Revert "[REVERT][OSF-3622] Add migration file to automatically parse citations""
|
Python
|
apache-2.0
|
caseyrollins/osf.io,icereval/osf.io,Johnetordoff/osf.io,sloria/osf.io,brianjgeiger/osf.io,HalcyonChimera/osf.io,TomBaxter/osf.io,cslzchen/osf.io,binoculars/osf.io,saradbowman/osf.io,felliott/osf.io,brianjgeiger/osf.io,icereval/osf.io,cslzchen/osf.io,Johnetordoff/osf.io,CenterForOpenScience/osf.io,leb2dg/osf.io,crcresearch/osf.io,chennan47/osf.io,adlius/osf.io,leb2dg/osf.io,caseyrollins/osf.io,adlius/osf.io,mfraezz/osf.io,mfraezz/osf.io,crcresearch/osf.io,cslzchen/osf.io,pattisdr/osf.io,aaxelb/osf.io,aaxelb/osf.io,leb2dg/osf.io,mfraezz/osf.io,icereval/osf.io,adlius/osf.io,Johnetordoff/osf.io,cslzchen/osf.io,chennan47/osf.io,laurenrevere/osf.io,laurenrevere/osf.io,aaxelb/osf.io,baylee-d/osf.io,leb2dg/osf.io,pattisdr/osf.io,erinspace/osf.io,erinspace/osf.io,baylee-d/osf.io,laurenrevere/osf.io,chennan47/osf.io,HalcyonChimera/osf.io,pattisdr/osf.io,mfraezz/osf.io,CenterForOpenScience/osf.io,felliott/osf.io,felliott/osf.io,binoculars/osf.io,CenterForOpenScience/osf.io,brianjgeiger/osf.io,adlius/osf.io,Johnetordoff/osf.io,HalcyonChimera/osf.io,crcresearch/osf.io,brianjgeiger/osf.io,sloria/osf.io,aaxelb/osf.io,CenterForOpenScience/osf.io,caseyrollins/osf.io,sloria/osf.io,mattclark/osf.io,TomBaxter/osf.io,TomBaxter/osf.io,HalcyonChimera/osf.io,felliott/osf.io,baylee-d/osf.io,saradbowman/osf.io,erinspace/osf.io,mattclark/osf.io,binoculars/osf.io,mattclark/osf.io
|
Revert "[REVERT][OSF-3622] Add migration file to automatically parse citations""
|
# This migration ports `scripts/parse_citation_styles` to automatically parse citation styles.
# Additionally, this sets the corresponding `has_bibliography` field to `False` for all citation formats whose CSL files do not
# include a bibliography section. As a result, all such citation formats would not show up in OSF
# citation widgets for users to choose.
#
# NOTE:
# As of December 6th, 2017, there are however THREE EXCEPTIONS:
# "Bluebook Law Review", "Bluebook Law Review(2)" and "Bluebook Inline" shares a
# special CSL file ('website/static/bluebook.cls'), in which a bibliography section is defined,
# in order to render bibliographies even though their official CSL files (located in CenterForOpenScience/styles repo)
# do not contain a bibliography section. Therefore, this migration also automatically sets `has_bibliography` to `True` for all styles whose titles contain "Bluebook"
import logging
import os
from django.db import migrations
from lxml import etree
from osf.models.citation import CitationStyle
from website import settings
logger = logging.getLogger(__file__)
def get_style_files(path):
files = (os.path.join(path, x) for x in os.listdir(path))
return (f for f in files if os.path.isfile(f))
def parse_citation_styles(*args):
# drop all styles
CitationStyle.remove()
for style_file in get_style_files(settings.CITATION_STYLES_PATH):
with open(style_file, 'r') as f:
try:
root = etree.parse(f).getroot()
except etree.XMLSyntaxError:
continue
namespace = root.nsmap.get(None)
selector = '{{{ns}}}info/{{{ns}}}'.format(ns=namespace)
title = root.find(selector + 'title').text
# `has_bibliography` is set to `True` for Bluebook citation formats due to the special way we handle them.
has_bibliography = root.find('{{{ns}}}{tag}'.format(ns=namespace, tag='bibliography')) is not None or 'Bluebook' in title
# Required
fields = {
'_id': os.path.splitext(os.path.basename(style_file))[0],
'title': title,
'has_bibliography': has_bibliography,
}
# Optional
try:
fields['short_title'] = root.find(selector + "title-short").text
except AttributeError:
pass
try:
fields['summary'] = root.find(selector + 'summary').text
except AttributeError:
pass
style = CitationStyle(**fields)
style.save()
def revert(*args):
# The revert of this migration simply removes all CitationStyle instances.
CitationStyle.remove()
class Migration(migrations.Migration):
dependencies = [
('osf', '0073_citationstyle_has_bibliography'),
]
operations = [
migrations.RunPython(parse_citation_styles, revert),
]
|
<commit_before><commit_msg>Revert "[REVERT][OSF-3622] Add migration file to automatically parse citations""<commit_after>
|
# This migration ports `scripts/parse_citation_styles` to automatically parse citation styles.
# Additionally, this sets the corresponding `has_bibliography` field to `False` for all citation formats whose CSL files do not
# include a bibliography section. As a result, all such citation formats would not show up in OSF
# citation widgets for users to choose.
#
# NOTE:
# As of December 6th, 2017, there are however THREE EXCEPTIONS:
# "Bluebook Law Review", "Bluebook Law Review(2)" and "Bluebook Inline" shares a
# special CSL file ('website/static/bluebook.cls'), in which a bibliography section is defined,
# in order to render bibliographies even though their official CSL files (located in CenterForOpenScience/styles repo)
# do not contain a bibliography section. Therefore, this migration also automatically sets `has_bibliography` to `True` for all styles whose titles contain "Bluebook"
import logging
import os
from django.db import migrations
from lxml import etree
from osf.models.citation import CitationStyle
from website import settings
logger = logging.getLogger(__file__)
def get_style_files(path):
files = (os.path.join(path, x) for x in os.listdir(path))
return (f for f in files if os.path.isfile(f))
def parse_citation_styles(*args):
# drop all styles
CitationStyle.remove()
for style_file in get_style_files(settings.CITATION_STYLES_PATH):
with open(style_file, 'r') as f:
try:
root = etree.parse(f).getroot()
except etree.XMLSyntaxError:
continue
namespace = root.nsmap.get(None)
selector = '{{{ns}}}info/{{{ns}}}'.format(ns=namespace)
title = root.find(selector + 'title').text
# `has_bibliography` is set to `True` for Bluebook citation formats due to the special way we handle them.
has_bibliography = root.find('{{{ns}}}{tag}'.format(ns=namespace, tag='bibliography')) is not None or 'Bluebook' in title
# Required
fields = {
'_id': os.path.splitext(os.path.basename(style_file))[0],
'title': title,
'has_bibliography': has_bibliography,
}
# Optional
try:
fields['short_title'] = root.find(selector + "title-short").text
except AttributeError:
pass
try:
fields['summary'] = root.find(selector + 'summary').text
except AttributeError:
pass
style = CitationStyle(**fields)
style.save()
def revert(*args):
# The revert of this migration simply removes all CitationStyle instances.
CitationStyle.remove()
class Migration(migrations.Migration):
dependencies = [
('osf', '0073_citationstyle_has_bibliography'),
]
operations = [
migrations.RunPython(parse_citation_styles, revert),
]
|
Revert "[REVERT][OSF-3622] Add migration file to automatically parse citations""# This migration port `scripts/parse_citation_styles` to automatically parse citation styles.
# Additionally, this set the corresponding `has_bibliography` field to `False` for all citation formats whose CSL files do not
# include a bibliography section. As a result, all such citation formats would not show up in OSF
# citation widgets for users to choose.
#
# NOTE:
# As of December 6th, 2017, there are however THREE EXCEPTIONS:
# "Bluebook Law Review", "Bluebook Law Review(2)" and "Bluebook Inline" shares a
# special CSL file ('website/static/bluebook.cls'), in which a bibliography section is defined,
# in order to render bibliographies even though their official CSL files (located in CenterForOpenScience/styles repo)
# do not contain a bibliography section. Therefore, this migration also automatically sets `has_bibliography` to `True` for all styles whose titles contain "Bluebook"
import logging
import os
from django.db import migrations
from lxml import etree
from osf.models.citation import CitationStyle
from website import settings
logger = logging.getLogger(__file__)
def get_style_files(path):
files = (os.path.join(path, x) for x in os.listdir(path))
return (f for f in files if os.path.isfile(f))
def parse_citation_styles(*args):
# drop all styles
CitationStyle.remove()
for style_file in get_style_files(settings.CITATION_STYLES_PATH):
with open(style_file, 'r') as f:
try:
root = etree.parse(f).getroot()
except etree.XMLSyntaxError:
continue
namespace = root.nsmap.get(None)
selector = '{{{ns}}}info/{{{ns}}}'.format(ns=namespace)
title = root.find(selector + 'title').text
# `has_bibliography` is set to `True` for Bluebook citation formats due to the special way we handle them.
has_bibliography = root.find('{{{ns}}}{tag}'.format(ns=namespace, tag='bibliography')) is not None or 'Bluebook' in title
# Required
fields = {
'_id': os.path.splitext(os.path.basename(style_file))[0],
'title': title,
'has_bibliography': has_bibliography,
}
# Optional
try:
fields['short_title'] = root.find(selector + "title-short").text
except AttributeError:
pass
try:
fields['summary'] = root.find(selector + 'summary').text
except AttributeError:
pass
style = CitationStyle(**fields)
style.save()
def revert(*args):
# The revert of this migration simply removes all CitationStyle instances.
CitationStyle.remove()
class Migration(migrations.Migration):
dependencies = [
('osf', '0073_citationstyle_has_bibliography'),
]
operations = [
migrations.RunPython(parse_citation_styles, revert),
]
|
<commit_before><commit_msg>Revert "[REVERT][OSF-3622] Add migration file to automatically parse citations""<commit_after># This migration ports `scripts/parse_citation_styles` to automatically parse citation styles.
# Additionally, this sets the corresponding `has_bibliography` field to `False` for all citation formats whose CSL files do not
# include a bibliography section. As a result, all such citation formats would not show up in OSF
# citation widgets for users to choose.
#
# NOTE:
# As of December 6th, 2017, there are however THREE EXCEPTIONS:
# "Bluebook Law Review", "Bluebook Law Review(2)" and "Bluebook Inline" shares a
# special CSL file ('website/static/bluebook.cls'), in which a bibliography section is defined,
# in order to render bibliographies even though their official CSL files (located in CenterForOpenScience/styles repo)
# do not contain a bibliography section. Therefore, this migration also automatically sets `has_bibliography` to `True` for all styles whose titles contain "Bluebook"
import logging
import os
from django.db import migrations
from lxml import etree
from osf.models.citation import CitationStyle
from website import settings
logger = logging.getLogger(__file__)
def get_style_files(path):
files = (os.path.join(path, x) for x in os.listdir(path))
return (f for f in files if os.path.isfile(f))
def parse_citation_styles(*args):
# drop all styles
CitationStyle.remove()
for style_file in get_style_files(settings.CITATION_STYLES_PATH):
with open(style_file, 'r') as f:
try:
root = etree.parse(f).getroot()
except etree.XMLSyntaxError:
continue
namespace = root.nsmap.get(None)
selector = '{{{ns}}}info/{{{ns}}}'.format(ns=namespace)
title = root.find(selector + 'title').text
# `has_bibliography` is set to `True` for Bluebook citation formats due to the special way we handle them.
has_bibliography = root.find('{{{ns}}}{tag}'.format(ns=namespace, tag='bibliography')) is not None or 'Bluebook' in title
# Required
fields = {
'_id': os.path.splitext(os.path.basename(style_file))[0],
'title': title,
'has_bibliography': has_bibliography,
}
# Optional
try:
fields['short_title'] = root.find(selector + "title-short").text
except AttributeError:
pass
try:
fields['summary'] = root.find(selector + 'summary').text
except AttributeError:
pass
style = CitationStyle(**fields)
style.save()
def revert(*args):
# The revert of this migration simply removes all CitationStyle instances.
CitationStyle.remove()
class Migration(migrations.Migration):
dependencies = [
('osf', '0073_citationstyle_has_bibliography'),
]
operations = [
migrations.RunPython(parse_citation_styles, revert),
]
|
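The namespace-qualified lookups above are the easiest part of this migration to get wrong; a self-contained sketch of the same selector logic run against a tiny, made-up CSL-like document (the XML below is illustrative only, not a real style file) is:

from lxml import etree

csl = b"""<style xmlns="http://purl.org/net/xbiblio/csl">
  <info>
    <title>Example Style</title>
    <title-short>Example</title-short>
  </info>
  <bibliography/>
</style>"""

root = etree.fromstring(csl)
namespace = root.nsmap.get(None)
selector = '{{{ns}}}info/{{{ns}}}'.format(ns=namespace)
print(root.find(selector + 'title').text)        # Example Style
print(root.find(selector + 'title-short').text)  # Example
has_bibliography = root.find('{{{ns}}}bibliography'.format(ns=namespace)) is not None
print(has_bibliography)                           # True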
|
23e75c71fd819f4d5aef4eff59b3c9fcf65b6339
|
CodeFights/arrayReplace.py
|
CodeFights/arrayReplace.py
|
#!/usr/local/bin/python
# Code Fights Array Replace Problem
def arrayReplace(inputArray, elemToReplace, substitutionElem):
pass
def main():
pass
if __name__ == '__main__':
main()
|
Set up Code Fights array replace problem
|
Set up Code Fights array replace problem
|
Python
|
mit
|
HKuz/Test_Code
|
Set up Code Fights array replace problem
|
#!/usr/local/bin/python
# Code Fights Array Replace Problem
def arrayReplace(inputArray, elemToReplace, substitutionElem):
pass
def main():
pass
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Set up Code Fights array replace problem<commit_after>
|
#!/usr/local/bin/python
# Code Fights Array Replace Problem
def arrayReplace(inputArray, elemToReplace, substitutionElem):
pass
def main():
pass
if __name__ == '__main__':
main()
|
Set up Code Fights array replace problem#!/usr/local/bin/python
# Code Fights Array Replace Problem
def arrayReplace(inputArray, elemToReplace, substitutionElem):
pass
def main():
pass
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Set up Code Fights array replace problem<commit_after>#!/usr/local/bin/python
# Code Fights Array Replace Problem
def arrayReplace(inputArray, elemToReplace, substitutionElem):
pass
def main():
pass
if __name__ == '__main__':
main()
|
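The commit above only sets up the stub; a minimal sketch of what the eventual solution could look like, assuming the standard task of substituting every occurrence of one value in a list, is:

def arrayReplace(inputArray, elemToReplace, substitutionElem):
    # assumed spec: return a copy with every elemToReplace swapped for substitutionElem
    return [substitutionElem if x == elemToReplace else x for x in inputArray]

print(arrayReplace([1, 2, 1], 1, 3))  # [3, 2, 3]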
|
0a0096823f391e232f22ce80ace1e2ae751cbc4e
|
tests/services/ticketing/test_revocation.py
|
tests/services/ticketing/test_revocation.py
|
"""
:Copyright: 2006-2017 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from byceps.services.seating import category_service
from byceps.services.ticketing import ticket_service
from tests.base import AbstractAppTestCase
class TicketRevocationTestCase(AbstractAppTestCase):
def setUp(self):
super().setUp()
self.create_brand_and_party()
self.category_id = self.create_category('Premium').id
self.owner_id = self.create_user('Ticket_Owner').id
def test_revoke_ticket(self):
ticket_before = ticket_service.create_ticket(self.category_id, self.owner_id)
self.assertNotRevoked(ticket_before)
ticket_id = ticket_before.id
ticket_service.revoke_ticket(ticket_id)
ticket_after = ticket_service.find_ticket(ticket_id)
self.assertRevoked(ticket_after)
def test_revoke_tickets(self):
tickets_before = ticket_service.create_tickets(self.category_id, self.owner_id, 3)
for ticket in tickets_before:
self.assertNotRevoked(ticket)
ticket_ids = {ticket.id for ticket in tickets_before}
ticket_service.revoke_tickets(ticket_ids)
tickets_after = ticket_service.find_tickets(ticket_ids)
for ticket in tickets_after:
self.assertRevoked(ticket)
# -------------------------------------------------------------------- #
# helpers
def create_category(self, title):
return category_service.create_category(self.party.id, title)
def assertNotRevoked(self, ticket):
self.assertFalse(ticket.revoked)
def assertRevoked(self, ticket):
self.assertTrue(ticket.revoked)
|
Add ticket revocation service tests
|
Add ticket revocation service tests
Forgot to actually add them earlier.
|
Python
|
bsd-3-clause
|
m-ober/byceps,m-ober/byceps,homeworkprod/byceps,m-ober/byceps,homeworkprod/byceps,homeworkprod/byceps
|
Add ticket revocation service tests
Forgot to actually add them earlier.
|
"""
:Copyright: 2006-2017 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from byceps.services.seating import category_service
from byceps.services.ticketing import ticket_service
from tests.base import AbstractAppTestCase
class TicketRevocationTestCase(AbstractAppTestCase):
def setUp(self):
super().setUp()
self.create_brand_and_party()
self.category_id = self.create_category('Premium').id
self.owner_id = self.create_user('Ticket_Owner').id
def test_revoke_ticket(self):
ticket_before = ticket_service.create_ticket(self.category_id, self.owner_id)
self.assertNotRevoked(ticket_before)
ticket_id = ticket_before.id
ticket_service.revoke_ticket(ticket_id)
ticket_after = ticket_service.find_ticket(ticket_id)
self.assertRevoked(ticket_after)
def test_revoke_tickets(self):
tickets_before = ticket_service.create_tickets(self.category_id, self.owner_id, 3)
for ticket in tickets_before:
self.assertNotRevoked(ticket)
ticket_ids = {ticket.id for ticket in tickets_before}
ticket_service.revoke_tickets(ticket_ids)
tickets_after = ticket_service.find_tickets(ticket_ids)
for ticket in tickets_after:
self.assertRevoked(ticket)
# -------------------------------------------------------------------- #
# helpers
def create_category(self, title):
return category_service.create_category(self.party.id, title)
def assertNotRevoked(self, ticket):
self.assertFalse(ticket.revoked)
def assertRevoked(self, ticket):
self.assertTrue(ticket.revoked)
|
<commit_before><commit_msg>Add ticket revocation service tests
Forgot to actually add them earlier.<commit_after>
|
"""
:Copyright: 2006-2017 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from byceps.services.seating import category_service
from byceps.services.ticketing import ticket_service
from tests.base import AbstractAppTestCase
class TicketRevocationTestCase(AbstractAppTestCase):
def setUp(self):
super().setUp()
self.create_brand_and_party()
self.category_id = self.create_category('Premium').id
self.owner_id = self.create_user('Ticket_Owner').id
def test_revoke_ticket(self):
ticket_before = ticket_service.create_ticket(self.category_id, self.owner_id)
self.assertNotRevoked(ticket_before)
ticket_id = ticket_before.id
ticket_service.revoke_ticket(ticket_id)
ticket_after = ticket_service.find_ticket(ticket_id)
self.assertRevoked(ticket_after)
def test_revoke_tickets(self):
tickets_before = ticket_service.create_tickets(self.category_id, self.owner_id, 3)
for ticket in tickets_before:
self.assertNotRevoked(ticket)
ticket_ids = {ticket.id for ticket in tickets_before}
ticket_service.revoke_tickets(ticket_ids)
tickets_after = ticket_service.find_tickets(ticket_ids)
for ticket in tickets_after:
self.assertRevoked(ticket)
# -------------------------------------------------------------------- #
# helpers
def create_category(self, title):
return category_service.create_category(self.party.id, title)
def assertNotRevoked(self, ticket):
self.assertFalse(ticket.revoked)
def assertRevoked(self, ticket):
self.assertTrue(ticket.revoked)
|
Add ticket revocation service tests
Forgot to actually add them earlier."""
:Copyright: 2006-2017 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from byceps.services.seating import category_service
from byceps.services.ticketing import ticket_service
from tests.base import AbstractAppTestCase
class TicketRevocationTestCase(AbstractAppTestCase):
def setUp(self):
super().setUp()
self.create_brand_and_party()
self.category_id = self.create_category('Premium').id
self.owner_id = self.create_user('Ticket_Owner').id
def test_revoke_ticket(self):
ticket_before = ticket_service.create_ticket(self.category_id, self.owner_id)
self.assertNotRevoked(ticket_before)
ticket_id = ticket_before.id
ticket_service.revoke_ticket(ticket_id)
ticket_after = ticket_service.find_ticket(ticket_id)
self.assertRevoked(ticket_after)
def test_revoke_tickets(self):
tickets_before = ticket_service.create_tickets(self.category_id, self.owner_id, 3)
for ticket in tickets_before:
self.assertNotRevoked(ticket)
ticket_ids = {ticket.id for ticket in tickets_before}
ticket_service.revoke_tickets(ticket_ids)
tickets_after = ticket_service.find_tickets(ticket_ids)
for ticket in tickets_after:
self.assertRevoked(ticket)
# -------------------------------------------------------------------- #
# helpers
def create_category(self, title):
return category_service.create_category(self.party.id, title)
def assertNotRevoked(self, ticket):
self.assertFalse(ticket.revoked)
def assertRevoked(self, ticket):
self.assertTrue(ticket.revoked)
|
<commit_before><commit_msg>Add ticket revocation service tests
Forgot to actually add them earlier.<commit_after>"""
:Copyright: 2006-2017 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from byceps.services.seating import category_service
from byceps.services.ticketing import ticket_service
from tests.base import AbstractAppTestCase
class TicketRevocationTestCase(AbstractAppTestCase):
def setUp(self):
super().setUp()
self.create_brand_and_party()
self.category_id = self.create_category('Premium').id
self.owner_id = self.create_user('Ticket_Owner').id
def test_revoke_ticket(self):
ticket_before = ticket_service.create_ticket(self.category_id, self.owner_id)
self.assertNotRevoked(ticket_before)
ticket_id = ticket_before.id
ticket_service.revoke_ticket(ticket_id)
ticket_after = ticket_service.find_ticket(ticket_id)
self.assertRevoked(ticket_after)
def test_revoke_tickets(self):
tickets_before = ticket_service.create_tickets(self.category_id, self.owner_id, 3)
for ticket in tickets_before:
self.assertNotRevoked(ticket)
ticket_ids = {ticket.id for ticket in tickets_before}
ticket_service.revoke_tickets(ticket_ids)
tickets_after = ticket_service.find_tickets(ticket_ids)
for ticket in tickets_after:
self.assertRevoked(ticket)
# -------------------------------------------------------------------- #
# helpers
def create_category(self, title):
return category_service.create_category(self.party.id, title)
def assertNotRevoked(self, ticket):
self.assertFalse(ticket.revoked)
def assertRevoked(self, ticket):
self.assertTrue(ticket.revoked)
|
|
579718f1546df1e539cd2f7fbaf1617f06412eca
|
tools/find_deprecated_escaped_characters.py
|
tools/find_deprecated_escaped_characters.py
|
#! /usr/bin/env python
"""
Look for escape sequences deprecated in Python 3.6.
Python 3.6 deprecates a number of non-escape sequences starting with `\` that
were accepted before. For instance, '\(' was previously accepted but must now
be written as '\\(' or r'\('.
"""
from __future__ import division, absolute_import, print_function
import sys
def main(root):
"""Find deprecated escape sequences.
Checks for deprecated escape sequences in ``*.py files``. If `root` is a
file, that file is checked, if `root` is a directory all ``*.py`` files
found in a recursive descent are checked.
If a deprecated escape sequence is found, the file and line where found is
printed. Note that for multiline strings the line where the string ends is
printed and the error(s) are somewhere in the body of the string.
Parameters
----------
root : str
File or directory to check.
Returns
-------
None
"""
count = 0
if sys.version_info[:2] >= (3, 6):
import ast
import tokenize
import warnings
from pathlib import Path
base = Path(root)
paths = base.rglob("*.py") if base.is_dir() else [base]
for path in paths:
# use tokenize to auto-detect encoding on systems where no
# default encoding is defined (e.g. LANG='C')
with tokenize.open(str(path)) as f:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
tree = ast.parse(f.read())
if w:
print("file: ", str(path))
for e in w:
print('line: ', e.lineno, ': ', e.message)
print()
count += len(w)
else:
raise RuntimeError("Python version must be >= 3.6")
print("Errors Found", count)
if __name__ == "__main__":
from argparse import ArgumentParser
parser = ArgumentParser(description="Find deprecated escaped characters")
parser.add_argument('root', help='directory or file to be checked')
args = parser.parse_args()
main(args.root)
|
Add tool to check for deprecated escaped characters.
|
ENH: Add tool to check for deprecated escaped characters.
Python 3.6 deprecates a number of escaped characters that were accepted
before. For instance, '\(' was previously accepted but must now be
written as '\\(' or r'\('.
[ci skip]
|
Python
|
bsd-3-clause
|
madphysicist/numpy,simongibbons/numpy,Eric89GXL/numpy,pizzathief/numpy,MSeifert04/numpy,seberg/numpy,numpy/numpy,jakirkham/numpy,pdebuyl/numpy,seberg/numpy,jakirkham/numpy,mhvk/numpy,ssanderson/numpy,madphysicist/numpy,grlee77/numpy,ssanderson/numpy,WarrenWeckesser/numpy,pbrod/numpy,WarrenWeckesser/numpy,MSeifert04/numpy,shoyer/numpy,tynn/numpy,Eric89GXL/numpy,ahaldane/numpy,numpy/numpy,rgommers/numpy,mhvk/numpy,MSeifert04/numpy,bringingheavendown/numpy,mattip/numpy,charris/numpy,WarrenWeckesser/numpy,WarrenWeckesser/numpy,ahaldane/numpy,MSeifert04/numpy,rgommers/numpy,chatcannon/numpy,pdebuyl/numpy,abalkin/numpy,endolith/numpy,bertrand-l/numpy,mhvk/numpy,charris/numpy,ahaldane/numpy,solarjoe/numpy,bringingheavendown/numpy,grlee77/numpy,Eric89GXL/numpy,behzadnouri/numpy,b-carter/numpy,pbrod/numpy,mhvk/numpy,ahaldane/numpy,mattip/numpy,gfyoung/numpy,jakirkham/numpy,bertrand-l/numpy,pbrod/numpy,anntzer/numpy,pbrod/numpy,jakirkham/numpy,Eric89GXL/numpy,gfyoung/numpy,simongibbons/numpy,pizzathief/numpy,shoyer/numpy,gfyoung/numpy,WarrenWeckesser/numpy,numpy/numpy,seberg/numpy,tynn/numpy,endolith/numpy,jorisvandenbossche/numpy,jorisvandenbossche/numpy,jorisvandenbossche/numpy,simongibbons/numpy,pizzathief/numpy,solarjoe/numpy,ssanderson/numpy,madphysicist/numpy,bringingheavendown/numpy,anntzer/numpy,pdebuyl/numpy,chatcannon/numpy,charris/numpy,numpy/numpy,mattip/numpy,mattip/numpy,ahaldane/numpy,MSeifert04/numpy,pbrod/numpy,rgommers/numpy,madphysicist/numpy,tynn/numpy,chatcannon/numpy,b-carter/numpy,rgommers/numpy,grlee77/numpy,jakirkham/numpy,anntzer/numpy,pizzathief/numpy,seberg/numpy,b-carter/numpy,shoyer/numpy,behzadnouri/numpy,shoyer/numpy,madphysicist/numpy,abalkin/numpy,shoyer/numpy,charris/numpy,grlee77/numpy,endolith/numpy,anntzer/numpy,mhvk/numpy,solarjoe/numpy,abalkin/numpy,pdebuyl/numpy,endolith/numpy,jorisvandenbossche/numpy,grlee77/numpy,pizzathief/numpy,simongibbons/numpy,simongibbons/numpy,bertrand-l/numpy,behzadnouri/numpy,jorisvandenbossche/numpy
|
ENH: Add tool to check for deprecated escaped characters.
Python 3.6 deprecates a number of escaped characters that were accepted
before. For instance, '\(' was previously accepted but must now be
written as '\\(' or r'\('.
[ci skip]
|
#! /usr/bin/env python
"""
Look for escape sequences deprecated in Python 3.6.
Python 3.6 deprecates a number of non-escape sequences starting with `\` that
were accepted before. For instance, '\(' was previously accepted but must now
be written as '\\(' or r'\('.
"""
from __future__ import division, absolute_import, print_function
import sys
def main(root):
"""Find deprecated escape sequences.
Checks for deprecated escape sequences in ``*.py files``. If `root` is a
file, that file is checked, if `root` is a directory all ``*.py`` files
found in a recursive descent are checked.
If a deprecated escape sequence is found, the file and line where found is
printed. Note that for multiline strings the line where the string ends is
printed and the error(s) are somewhere in the body of the string.
Parameters
----------
root : str
File or directory to check.
Returns
-------
None
"""
count = 0
if sys.version_info[:2] >= (3, 6):
import ast
import tokenize
import warnings
from pathlib import Path
base = Path(root)
paths = base.rglob("*.py") if base.is_dir() else [base]
for path in paths:
# use tokenize to auto-detect encoding on systems where no
# default encoding is defined (e.g. LANG='C')
with tokenize.open(str(path)) as f:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
tree = ast.parse(f.read())
if w:
print("file: ", str(path))
for e in w:
print('line: ', e.lineno, ': ', e.message)
print()
count += len(w)
else:
raise RuntimeError("Python version must be >= 3.6")
print("Errors Found", count)
if __name__ == "__main__":
from argparse import ArgumentParser
parser = ArgumentParser(description="Find deprecated escaped characters")
parser.add_argument('root', help='directory or file to be checked')
args = parser.parse_args()
main(args.root)
|
<commit_before><commit_msg>ENH: Add tool to check for deprecated escaped characters.
Python 3.6 deprecates a number of escaped characters that were accepted
before. For instance, '\(' was previously accepted but must now be
written as '\\(' or r'\('.
[ci skip]<commit_after>
|
#! /usr/bin/env python
"""
Look for escape sequences deprecated in Python 3.6.
Python 3.6 deprecates a number of non-escape sequences starting with `\` that
were accepted before. For instance, '\(' was previously accepted but must now
be written as '\\(' or r'\('.
"""
from __future__ import division, absolute_import, print_function
import sys
def main(root):
"""Find deprecated escape sequences.
Checks for deprecated escape sequences in ``*.py files``. If `root` is a
file, that file is checked, if `root` is a directory all ``*.py`` files
found in a recursive descent are checked.
If a deprecated escape sequence is found, the file and line where found is
printed. Note that for multiline strings the line where the string ends is
printed and the error(s) are somewhere in the body of the string.
Parameters
----------
root : str
File or directory to check.
Returns
-------
None
"""
count = 0
if sys.version_info[:2] >= (3, 6):
import ast
import tokenize
import warnings
from pathlib import Path
base = Path(root)
paths = base.rglob("*.py") if base.is_dir() else [base]
for path in paths:
# use tokenize to auto-detect encoding on systems where no
# default encoding is defined (e.g. LANG='C')
with tokenize.open(str(path)) as f:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
tree = ast.parse(f.read())
if w:
print("file: ", str(path))
for e in w:
print('line: ', e.lineno, ': ', e.message)
print()
count += len(w)
else:
raise RuntimeError("Python version must be >= 3.6")
print("Errors Found", count)
if __name__ == "__main__":
from argparse import ArgumentParser
parser = ArgumentParser(description="Find deprecated escaped characters")
parser.add_argument('root', help='directory or file to be checked')
args = parser.parse_args()
main(args.root)
|
ENH: Add tool to check for deprecated escaped characters.
Python 3.6 deprecates a number of escaped characters that were accepted
before. For instance, '\(' was previously accepted but must now be
written as '\\(' or r'\('.
[ci skip]#! /usr/bin/env python
"""
Look for escape sequences deprecated in Python 3.6.
Python 3.6 deprecates a number of non-escape sequences starting with `\` that
were accepted before. For instance, '\(' was previously accepted but must now
be written as '\\(' or r'\('.
"""
from __future__ import division, absolute_import, print_function
import sys
def main(root):
"""Find deprecated escape sequences.
Checks for deprecated escape sequences in ``*.py files``. If `root` is a
file, that file is checked, if `root` is a directory all ``*.py`` files
found in a recursive descent are checked.
If a deprecated escape sequence is found, the file and line where found is
printed. Note that for multiline strings the line where the string ends is
printed and the error(s) are somewhere in the body of the string.
Parameters
----------
root : str
File or directory to check.
Returns
-------
None
"""
count = 0
if sys.version_info[:2] >= (3, 6):
import ast
import tokenize
import warnings
from pathlib import Path
base = Path(root)
paths = base.rglob("*.py") if base.is_dir() else [base]
for path in paths:
# use tokenize to auto-detect encoding on systems where no
# default encoding is defined (e.g. LANG='C')
with tokenize.open(str(path)) as f:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
tree = ast.parse(f.read())
if w:
print("file: ", str(path))
for e in w:
print('line: ', e.lineno, ': ', e.message)
print()
count += len(w)
else:
raise RuntimeError("Python version must be >= 3.6")
print("Errors Found", count)
if __name__ == "__main__":
from argparse import ArgumentParser
parser = ArgumentParser(description="Find deprecated escaped characters")
parser.add_argument('root', help='directory or file to be checked')
args = parser.parse_args()
main(args.root)
|
<commit_before><commit_msg>ENH: Add tool to check for deprecated escaped characters.
Python 3.6 deprecates a number of escaped characters that were accepted
before. For instance, '\(' was previously accepted but must now be
written as '\\(' or r'\('.
[ci skip]<commit_after>#! /usr/bin/env python
"""
Look for escape sequences deprecated in Python 3.6.
Python 3.6 deprecates a number of non-escape sequences starting with `\` that
were accepted before. For instance, '\(' was previously accepted but must now
be written as '\\(' or r'\('.
"""
from __future__ import division, absolute_import, print_function
import sys
def main(root):
"""Find deprecated escape sequences.
Checks for deprecated escape sequences in ``*.py files``. If `root` is a
file, that file is checked, if `root` is a directory all ``*.py`` files
found in a recursive descent are checked.
If a deprecated escape sequence is found, the file and line where found is
printed. Note that for multiline strings the line where the string ends is
printed and the error(s) are somewhere in the body of the string.
Parameters
----------
root : str
File or directory to check.
Returns
-------
None
"""
count = 0
if sys.version_info[:2] >= (3, 6):
import ast
import tokenize
import warnings
from pathlib import Path
base = Path(root)
paths = base.rglob("*.py") if base.is_dir() else [base]
for path in paths:
# use tokenize to auto-detect encoding on systems where no
# default encoding is defined (e.g. LANG='C')
with tokenize.open(str(path)) as f:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
tree = ast.parse(f.read())
if w:
print("file: ", str(path))
for e in w:
print('line: ', e.lineno, ': ', e.message)
print()
count += len(w)
else:
raise RuntimeError("Python version must be >= 3.6")
print("Errors Found", count)
if __name__ == "__main__":
from argparse import ArgumentParser
parser = ArgumentParser(description="Find deprecated escaped characters")
parser.add_argument('root', help='directory or file to be checked')
args = parser.parse_args()
main(args.root)
|
|
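The escape-sequence checker in the record above hinges on one interpreter behaviour: from Python 3.6 on, compiling source that contains an unrecognized string escape such as '\(' emits a warning, which `warnings.catch_warnings(record=True)` can collect. A minimal standalone sketch of that behaviour follows; the sample source string is illustrative only, and newer interpreters (3.12+) report a SyntaxWarning rather than a DeprecationWarning.
import ast
import warnings

# Source containing an invalid escape; written as '\\(' here so this snippet
# itself does not trigger the warning it demonstrates.
sample = "pattern = '\\('"

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    ast.parse(sample)

for w in caught:
    # e.g. "DeprecationWarning invalid escape sequence \(" (SyntaxWarning on 3.12+)
    print(w.category.__name__, w.message)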
f5e8a90a2e50816e18580fec3aba32bd17ebf2e0
|
classes.py
|
classes.py
|
class BucketList():
def __init__(self, name, description):
self.name = name
self.description = description
self.activities = []
self.no_of_activities = len(self.activities)
def add_activity(self, activity):
self.activities.append(activity)
return True
def delete_activity(self, activity):
pass
class Activity():
pass
class User():
def __init__(self, username, email, password):
self.username = username
self.email = email
self.password = password
|
Define the User class constructor
|
Define the User class constructor
|
Python
|
mit
|
mkiterian/bucket-list-app,mkiterian/bucket-list-app,mkiterian/bucket-list-app
|
Define the User class constructor
|
class BucketList():
def __init__(self, name, description):
self.name = name
self.description = description
self.activities = []
self.no_of_activities = len(self.activities)
def add_activity(self, activity):
self.activities.append(activity)
return True
def delete_activity(self, activity):
pass
class Activity():
pass
class User():
def __init__(self, username, email, password):
self.username = username
self.email = email
self.password = password
|
<commit_before><commit_msg>Define the User class constructor<commit_after>
|
class BucketList():
def __init__(self, name, description):
self.name = name
self.description = description
self.activities = []
self.no_of_activities = len(self.activities)
def add_activity(self, activity):
self.activities.append(activity)
return True
def delete_activity(self, activity):
pass
class Activity():
pass
class User():
def __init__(self, username, email, password):
self.username = username
self.email = email
self.password = password
|
Define the User class constructor
class BucketList():
def __init__(self, name, description):
self.name = name
self.description = description
self.activities = []
self.no_of_activities = len(self.activities)
def add_activity(self, activity):
self.activities.append(activity)
return True
def delete_activity(self, activity):
pass
class Activity():
pass
class User():
def __init__(self, username, email, password):
self.username = username
self.email = email
self.password = password
|
<commit_before><commit_msg>Define the User class constructor<commit_after>class BucketList():
def __init__(self, name, description):
self.name = name
self.description = description
self.activities = []
self.no_of_activities = len(self.activities)
def add_activity(self, activity):
self.activities.append(activity)
return True
def delete_activity(self, activity):
pass
class Activity():
pass
class User():
def __init__(self, username, email, password):
self.username = username
self.email = email
self.password = password
|
|
8e01a902baa1be4574be2edef03ad72bdcdd8439
|
py/sum-of-left-leaves.py
|
py/sum-of-left-leaves.py
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def sumOfLeftLeaves(self, root):
"""
:type root: TreeNode
:rtype: int
"""
ans = 0
if root:
if root.left:
if root.left.left is None and root.left.right is None:
ans += root.left.val
else:
ans += self.sumOfLeftLeaves(root.left)
ans += self.sumOfLeftLeaves(root.right)
return ans
|
Add py solution for 404. Sum of Left Leaves
|
Add py solution for 404. Sum of Left Leaves
404. Sum of Left Leaves: https://leetcode.com/problems/sum-of-left-leaves/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 404. Sum of Left Leaves
404. Sum of Left Leaves: https://leetcode.com/problems/sum-of-left-leaves/
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def sumOfLeftLeaves(self, root):
"""
:type root: TreeNode
:rtype: int
"""
ans = 0
if root:
if root.left:
if root.left.left is None and root.left.right is None:
ans += root.left.val
else:
ans += self.sumOfLeftLeaves(root.left)
ans += self.sumOfLeftLeaves(root.right)
return ans
|
<commit_before><commit_msg>Add py solution for 404. Sum of Left Leaves
404. Sum of Left Leaves: https://leetcode.com/problems/sum-of-left-leaves/<commit_after>
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def sumOfLeftLeaves(self, root):
"""
:type root: TreeNode
:rtype: int
"""
ans = 0
if root:
if root.left:
if root.left.left is None and root.left.right is None:
ans += root.left.val
else:
ans += self.sumOfLeftLeaves(root.left)
ans += self.sumOfLeftLeaves(root.right)
return ans
|
Add py solution for 404. Sum of Left Leaves
404. Sum of Left Leaves: https://leetcode.com/problems/sum-of-left-leaves/
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def sumOfLeftLeaves(self, root):
"""
:type root: TreeNode
:rtype: int
"""
ans = 0
if root:
if root.left:
if root.left.left is None and root.left.right is None:
ans += root.left.val
else:
ans += self.sumOfLeftLeaves(root.left)
ans += self.sumOfLeftLeaves(root.right)
return ans
|
<commit_before><commit_msg>Add py solution for 404. Sum of Left Leaves
404. Sum of Left Leaves: https://leetcode.com/problems/sum-of-left-leaves/<commit_after># Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def sumOfLeftLeaves(self, root):
"""
:type root: TreeNode
:rtype: int
"""
ans = 0
if root:
if root.left:
if root.left.left is None and root.left.right is None:
ans += root.left.val
else:
ans += self.sumOfLeftLeaves(root.left)
ans += self.sumOfLeftLeaves(root.right)
return ans
|
|
ad187547de905f8f5baba51f3a431c2cfb4a2704
|
tests/test_01_read_write.py
|
tests/test_01_read_write.py
|
"""Read an image index from the index file and write it back.
"""
import os.path
import shutil
import filecmp
import pytest
import photo.index
from conftest import tmpdir, gettestdata
testimgs = [
"dsc_4623.jpg", "dsc_4664.jpg", "dsc_4831.jpg",
"dsc_5126.jpg", "dsc_5167.jpg"
]
testimgfiles = [ gettestdata(i) for i in testimgs ]
refindex = gettestdata("index-tagged.yaml")
@pytest.fixture(scope="module")
def imgdir(tmpdir):
for fname in testimgfiles:
shutil.copy(fname, tmpdir)
shutil.copy(refindex, os.path.join(tmpdir, ".index.yaml"))
return tmpdir
def test_read_write(imgdir):
"""Read the index file and write it out again.
"""
idx = photo.index.Index(idxfile=imgdir)
idx.write()
idxfile = os.path.join(imgdir, ".index.yaml")
assert filecmp.cmp(refindex, idxfile), "index file differs from reference"
|
Add a test to read an image index and write it back.
|
Add a test to read an image index and write it back.
|
Python
|
apache-2.0
|
RKrahl/photo-tools
|
Add a test to read an image index and write it back.
|
"""Read an image index from the index file and write it back.
"""
import os.path
import shutil
import filecmp
import pytest
import photo.index
from conftest import tmpdir, gettestdata
testimgs = [
"dsc_4623.jpg", "dsc_4664.jpg", "dsc_4831.jpg",
"dsc_5126.jpg", "dsc_5167.jpg"
]
testimgfiles = [ gettestdata(i) for i in testimgs ]
refindex = gettestdata("index-tagged.yaml")
@pytest.fixture(scope="module")
def imgdir(tmpdir):
for fname in testimgfiles:
shutil.copy(fname, tmpdir)
shutil.copy(refindex, os.path.join(tmpdir, ".index.yaml"))
return tmpdir
def test_read_write(imgdir):
"""Read the index file and write it out again.
"""
idx = photo.index.Index(idxfile=imgdir)
idx.write()
idxfile = os.path.join(imgdir, ".index.yaml")
assert filecmp.cmp(refindex, idxfile), "index file differs from reference"
|
<commit_before><commit_msg>Add a test to read an image index and write it back.<commit_after>
|
"""Read an image index from the index file and write it back.
"""
import os.path
import shutil
import filecmp
import pytest
import photo.index
from conftest import tmpdir, gettestdata
testimgs = [
"dsc_4623.jpg", "dsc_4664.jpg", "dsc_4831.jpg",
"dsc_5126.jpg", "dsc_5167.jpg"
]
testimgfiles = [ gettestdata(i) for i in testimgs ]
refindex = gettestdata("index-tagged.yaml")
@pytest.fixture(scope="module")
def imgdir(tmpdir):
for fname in testimgfiles:
shutil.copy(fname, tmpdir)
shutil.copy(refindex, os.path.join(tmpdir, ".index.yaml"))
return tmpdir
def test_read_write(imgdir):
"""Read the index file and write it out again.
"""
idx = photo.index.Index(idxfile=imgdir)
idx.write()
idxfile = os.path.join(imgdir, ".index.yaml")
assert filecmp.cmp(refindex, idxfile), "index file differs from reference"
|
Add a test to read an image index and write it back.
"""Read an image index from the index file and write it back.
"""
import os.path
import shutil
import filecmp
import pytest
import photo.index
from conftest import tmpdir, gettestdata
testimgs = [
"dsc_4623.jpg", "dsc_4664.jpg", "dsc_4831.jpg",
"dsc_5126.jpg", "dsc_5167.jpg"
]
testimgfiles = [ gettestdata(i) for i in testimgs ]
refindex = gettestdata("index-tagged.yaml")
@pytest.fixture(scope="module")
def imgdir(tmpdir):
for fname in testimgfiles:
shutil.copy(fname, tmpdir)
shutil.copy(refindex, os.path.join(tmpdir, ".index.yaml"))
return tmpdir
def test_read_write(imgdir):
"""Read the index file and write it out again.
"""
idx = photo.index.Index(idxfile=imgdir)
idx.write()
idxfile = os.path.join(imgdir, ".index.yaml")
assert filecmp.cmp(refindex, idxfile), "index file differs from reference"
|
<commit_before><commit_msg>Add a test to read an image index and write it back.<commit_after>"""Read an image index from the index file and write it back.
"""
import os.path
import shutil
import filecmp
import pytest
import photo.index
from conftest import tmpdir, gettestdata
testimgs = [
"dsc_4623.jpg", "dsc_4664.jpg", "dsc_4831.jpg",
"dsc_5126.jpg", "dsc_5167.jpg"
]
testimgfiles = [ gettestdata(i) for i in testimgs ]
refindex = gettestdata("index-tagged.yaml")
@pytest.fixture(scope="module")
def imgdir(tmpdir):
for fname in testimgfiles:
shutil.copy(fname, tmpdir)
shutil.copy(refindex, os.path.join(tmpdir, ".index.yaml"))
return tmpdir
def test_read_write(imgdir):
"""Read the index file and write it out again.
"""
idx = photo.index.Index(idxfile=imgdir)
idx.write()
idxfile = os.path.join(imgdir, ".index.yaml")
assert filecmp.cmp(refindex, idxfile), "index file differs from reference"
|
|
39e097c4edbc0c3eb3b8d0269d85666272531091
|
ops/cookiejar-empty-hack.py
|
ops/cookiejar-empty-hack.py
|
############################################################
# USE WITH CAUTION!
# May result in undesired behaviour, crashes, and wormholes.
############################################################
# Requirements:
# - Netcat with Unix socket support (e.g. the netcat-openbsd package on Debian/Ubuntu; not netcat-traditional; you can install both in parallel)
# - objgraph Python package. `pip3 install --user objgraph` (if you use venvs: in the venv that the AB process uses!)
# Usage:
# 1. "Pause" the affected job by increasing the delay to a large value (e.g. 3 minutes).
# 2. Wait until it's idle, i.e. not retrieving data anymore. (That's a bit tricky to verify. You could check the end of the log file or the open ArchiveBot/pipelines/tmp-* files of that process.)
# 3. Figure out the PID of the process: `pgrep -f $jobid`
# 4. Run the script: `nc -U /tmp/manhole-$pid <cookiejar-empty-hack.py`
# 5. Wait for it to finish. Do not press ^C or similar.
# 6. "Unpause" the job by restoring the previous delay setting.
import objgraph
cjs = objgraph.by_type('wpull.cookie.BetterMozillaCookieJar')
if len(cjs) != 1:
print('Not exactly one cookie jar')
else:
# Ideally, we could just use .clear(), but that replaces the internal cookie dictionary and appears to break something in wpull.
#cjs[0].clear()
# So instead, explicitly delete the entry for each domain but keep the same object.
for domain in list(cjs[0]._cookies): # Copy needed to allow modification during iteration
del cjs[0]._cookies[domain]
exit()
#EOF
|
Add a hacky script for clearing a job's cookie jar
|
Add a hacky script for clearing a job's cookie jar
Cf. https://github.com/ArchiveTeam/wpull/issues/448
Based on https://gist.github.com/JustAnotherArchivist/8f5ac42a9c03302f222af3a58a863468
|
Python
|
mit
|
ArchiveTeam/ArchiveBot,ArchiveTeam/ArchiveBot,ArchiveTeam/ArchiveBot,ArchiveTeam/ArchiveBot,ArchiveTeam/ArchiveBot
|
Add a hacky script for clearing a job's cookie jar
Cf. https://github.com/ArchiveTeam/wpull/issues/448
Based on https://gist.github.com/JustAnotherArchivist/8f5ac42a9c03302f222af3a58a863468
|
############################################################
# USE WITH CAUTION!
# May result in undesired behaviour, crashes, and wormholes.
############################################################
# Requirements:
# - Netcat with Unix socket support (e.g. the netcat-openbsd package on Debian/Ubuntu; not netcat-traditional; you can install both in parallel)
# - objgraph Python package. `pip3 install --user objgraph` (if you use venvs: in the venv that the AB process uses!)
# Usage:
# 1. "Pause" the affected job by increasing the delay to a large value (e.g. 3 minutes).
# 2. Wait until it's idle, i.e. not retrieving data anymore. (That's a bit tricky to verify. You could check the end of the log file or the open ArchiveBot/pipelines/tmp-* files of that process.)
# 3. Figure out the PID of the process: `pgrep -f $jobid`
# 4. Run the script: `nc -U /tmp/manhole-$pid <cookiejar-empty-hack.py`
# 5. Wait for it to finish. Do not press ^C or similar.
# 6. "Unpause" the job by restoring the previous delay setting.
import objgraph
cjs = objgraph.by_type('wpull.cookie.BetterMozillaCookieJar')
if len(cjs) != 1:
print('Not exactly one cookie jar')
else:
# Ideally, we could just use .clear(), but that replaces the internal cookie dictionary and appears to break something in wpull.
#cjs[0].clear()
# So instead, explicitly delete the entry for each domain but keep the same object.
for domain in list(cjs[0]._cookies): # Copy needed to allow modification during iteration
del cjs[0]._cookies[domain]
exit()
#EOF
|
<commit_before><commit_msg>Add a hacky script for clearing a job's cookie jar
Cf. https://github.com/ArchiveTeam/wpull/issues/448
Based on https://gist.github.com/JustAnotherArchivist/8f5ac42a9c03302f222af3a58a863468<commit_after>
|
############################################################
# USE WITH CAUTION!
# May result in undesired behaviour, crashes, and wormholes.
############################################################
# Requirements:
# - Netcat with Unix socket support (e.g. the netcat-openbsd package on Debian/Ubuntu; not netcat-traditional; you can install both in parallel)
# - objgraph Python package. `pip3 install --user objgraph` (if you use venvs: in the venv that the AB process uses!)
# Usage:
# 1. "Pause" the affected job by increasing the delay to a large value (e.g. 3 minutes).
# 2. Wait until it's idle, i.e. not retrieving data anymore. (That's a bit tricky to verify. You could check the end of the log file or the open ArchiveBot/pipelines/tmp-* files of that process.)
# 3. Figure out the PID of the process: `pgrep -f $jobid`
# 4. Run the script: `nc -U /tmp/manhole-$pid <cookiejar-empty-hack.py`
# 5. Wait for it to finish. Do not press ^C or similar.
# 6. "Unpause" the job by restoring the previous delay setting.
import objgraph
cjs = objgraph.by_type('wpull.cookie.BetterMozillaCookieJar')
if len(cjs) != 1:
print('Not exactly one cookie jar')
else:
# Ideally, we could just use .clear(), but that replaces the internal cookie dictionary and appears to break something in wpull.
#cjs[0].clear()
# So instead, explicitly delete the entry for each domain but keep the same object.
for domain in list(cjs[0]._cookies): # Copy needed to allow modification during iteration
del cjs[0]._cookies[domain]
exit()
#EOF
|
Add a hacky script for clearing a job's cookie jar
Cf. https://github.com/ArchiveTeam/wpull/issues/448
Based on https://gist.github.com/JustAnotherArchivist/8f5ac42a9c03302f222af3a58a863468
############################################################
# USE WITH CAUTION!
# May result in undesired behaviour, crashes, and wormholes.
############################################################
# Requirements:
# - Netcat with Unix socket support (e.g. the netcat-openbsd package on Debian/Ubuntu; not netcat-traditional; you can install both in parallel)
# - objgraph Python package. `pip3 install --user objgraph` (if you use venvs: in the venv that the AB process uses!)
# Usage:
# 1. "Pause" the affected job by increasing the delay to a large value (e.g. 3 minutes).
# 2. Wait until it's idle, i.e. not retrieving data anymore. (That's a bit tricky to verify. You could check the end of the log file or the open ArchiveBot/pipelines/tmp-* files of that process.)
# 3. Figure out the PID of the process: `pgrep -f $jobid`
# 4. Run the script: `nc -U /tmp/manhole-$pid <cookiejar-empty-hack.py`
# 5. Wait for it to finish. Do not press ^C or similar.
# 6. "Unpause" the job by restoring the previous delay setting.
import objgraph
cjs = objgraph.by_type('wpull.cookie.BetterMozillaCookieJar')
if len(cjs) != 1:
print('Not exactly one cookie jar')
else:
# Ideally, we could just use .clear(), but that replaces the internal cookie dictionary and appears to break something in wpull.
#cjs[0].clear()
# So instead, explicitly delete the entry for each domain but keep the same object.
for domain in list(cjs[0]._cookies): # Copy needed to allow modification during iteration
del cjs[0]._cookies[domain]
exit()
#EOF
|
<commit_before><commit_msg>Add a hacky script for clearing a job's cookie jar
Cf. https://github.com/ArchiveTeam/wpull/issues/448
Based on https://gist.github.com/JustAnotherArchivist/8f5ac42a9c03302f222af3a58a863468<commit_after>############################################################
# USE WITH CAUTION!
# May result in undesired behaviour, crashes, and wormholes.
############################################################
# Requirements:
# - Netcat with Unix socket support (e.g. the netcat-openbsd package on Debian/Ubuntu; not netcat-traditional; you can install both in parallel)
# - objgraph Python package. `pip3 install --user objgraph` (if you use venvs: in the venv that the AB process uses!)
# Usage:
# 1. "Pause" the affected job by increasing the delay to a large value (e.g. 3 minutes).
# 2. Wait until it's idle, i.e. not retrieving data anymore. (That's a bit tricky to verify. You could check the end of the log file or the open ArchiveBot/pipelines/tmp-* files of that process.)
# 3. Figure out the PID of the process: `pgrep -f $jobid`
# 4. Run the script: `nc -U /tmp/manhole-$pid <cookiejar-empty-hack.py`
# 5. Wait for it to finish. Do not press ^C or similar.
# 6. "Unpause" the job by restoring the previous delay setting.
import objgraph
cjs = objgraph.by_type('wpull.cookie.BetterMozillaCookieJar')
if len(cjs) != 1:
print('Not exactly one cookie jar')
else:
# Ideally, we could just use .clear(), but that replaces the internal cookie dictionary and appears to break something in wpull.
#cjs[0].clear()
# So instead, explicitly delete the entry for each domain but keep the same object.
for domain in list(cjs[0]._cookies): # Copy needed to allow modification during iteration
del cjs[0]._cookies[domain]
exit()
#EOF
|
|
81a3cee4c1086d429fbc494935b82a0ea4539bb0
|
source/PYTHON/EXTENSIONS/BALL.py
|
source/PYTHON/EXTENSIONS/BALL.py
|
from BALLCore import *
try:
from VIEW import *
except:
print "Warning: Could not find VIEW.\nVerify that BALL is built with VIEW support and check your PYTHONPATH. Continuing..."
|
Add a script wrapping the splitted modules
|
Add a script wrapping the splitted modules
This script just imports both modules such that old scripts are still
running without issues.
|
Python
|
lgpl-2.1
|
tkemmer/ball,tkemmer/ball,tkemmer/ball,tkemmer/ball,tkemmer/ball,tkemmer/ball
|
Add a script wrapping the splitted modules
This script just imports both modules such that old scripts are still
running without issues.
|
from BALLCore import *
try:
from VIEW import *
except:
print "Warning: Could not find VIEW.\nVerify that BALL is built with VIEW support and check your PYTHONPATH. Continuing..."
|
<commit_before><commit_msg>Add a script wrapping the splitted modules
This script just imports both modules such that old scripts are still
running without issues.<commit_after>
|
from BALLCore import *
try:
from VIEW import *
except:
print "Warning: Could not find VIEW.\nVerify that BALL is built with VIEW support and check your PYTHONPATH. Continuing..."
|
Add a script wrapping the splitted modules
This script just imports both modules such that old scripts are still
running without issues.
from BALLCore import *
try:
from VIEW import *
except:
print "Warning: Could not find VIEW.\nVerify that BALL is built with VIEW support and check your PYTHONPATH. Continuing..."
|
<commit_before><commit_msg>Add a script wrapping the splitted modules
This script just imports both modules such that old scripts are still
running without issues.<commit_after>from BALLCore import *
try:
from VIEW import *
except:
print "Warning: Could not find VIEW.\nVerify that BALL is built with VIEW support and check your PYTHONPATH. Continuing..."
|
|
1b8edb32664e63d1dc2ad9af295395f62d08f4bd
|
crackingcointsolutions/chapter2/exercisesix.py
|
crackingcointsolutions/chapter2/exercisesix.py
|
'''
Created on 22 Aug 2017
Palindrome: implement a function to check if a linked list is a palindrome
Assumptions: each node of the list contains a single letter.
1) Invert-copy list, gives also back list size
2) compare until middle: size/2
@author: igoroya
'''
from chapter2 import utils # TODO: try relative imports
def invert_list(my_list):
"returns inverted list and size of it"
inv_list = utils.SinglyLinkedList()
node = my_list.head_node
entries = 0
while node is not None:
inv_list.add_in_front(node.cargo)
entries += 1
node = node.next_node
return inv_list, entries
def are_same_until_middle(list1, list2, size):
node1 = list1.head_node
node2 = list2.head_node
for _ in range(int(1 + size/2)):
if node1.cargo != node2.cargo:
return False
node1 = node1.next_node
node2 = node2.next_node
return True
def is_palindrome(orig_list):
inv_list, size = invert_list(orig_list)
return are_same_until_middle(orig_list, inv_list, size)
if __name__ == '__main__':
list1 = utils.SinglyLinkedList()
list1.append('a')
list1.append('b')
list1.append('a')
print("List {} is palindrome? {}".format(list1, is_palindrome(list1)))
print("----")
list1 = utils.SinglyLinkedList()
list1.append('a')
list1.append('b')
list1.append('b')
print("List {} is palindrome? {}".format(list1, is_palindrome(list1)))
print("----")
list1 = utils.SinglyLinkedList()
list1.append('a')
list1.append('b')
list1.append('b')
list1.append('a')
print("List {} is palindrome? {}".format(list1, is_palindrome(list1)))
print("----")
list1 = utils.SinglyLinkedList()
list1.append('a')
list1.append('a')
list1.append('b')
list1.append('a')
list1.append('a')
print("List {} is palindrome? {}".format(list1, is_palindrome(list1)))
print("----")
list1 = utils.SinglyLinkedList()
list1.append('a')
list1.append('b')
list1.append('c')
list1.append('d')
list1.append('e')
print("List {} is palindrome? {}".format(list1, is_palindrome(list1)))
print("----")
list1 = utils.SinglyLinkedList()
list1.append('a')
list1.append('b')
list1.append('c')
list1.append('b')
list1.append('a')
print("List {} is palindrome? {}".format(list1, is_palindrome(list1)))
print("----")
|
Add exercise six of chapter two
|
Add exercise six of chapter two
|
Python
|
mit
|
igoroya/igor-oya-solutions-cracking-coding-interview
|
Add exercise six of chapter two
|
'''
Created on 22 Aug 2017
Palindrome: implement a function to check if a linked list is a palindrome
Assumptions: each node of the list contains a single letter.
1) Invert-copy list, gives also back list size
2) compare until middle: size/2
@author: igoroya
'''
from chapter2 import utils # TODO: try relative imports
def invert_list(my_list):
"returns inverted list and size of it"
inv_list = utils.SinglyLinkedList()
node = my_list.head_node
entries = 0
while node is not None:
inv_list.add_in_front(node.cargo)
entries += 1
node = node.next_node
return inv_list, entries
def are_same_until_middle(list1, list2, size):
node1 = list1.head_node
node2 = list2.head_node
for _ in range(int(1 + size/2)):
if node1.cargo != node2.cargo:
return False
node1 = node1.next_node
node2 = node2.next_node
return True
def is_palindrome(orig_list):
inv_list, size = invert_list(orig_list)
return are_same_until_middle(orig_list, inv_list, size)
if __name__ == '__main__':
list1 = utils.SinglyLinkedList()
list1.append('a')
list1.append('b')
list1.append('a')
print("List {} is palindrome? {}".format(list1, is_palindrome(list1)))
print("----")
list1 = utils.SinglyLinkedList()
list1.append('a')
list1.append('b')
list1.append('b')
print("List {} is palindrome? {}".format(list1, is_palindrome(list1)))
print("----")
list1 = utils.SinglyLinkedList()
list1.append('a')
list1.append('b')
list1.append('b')
list1.append('a')
print("List {} is palindrome? {}".format(list1, is_palindrome(list1)))
print("----")
list1 = utils.SinglyLinkedList()
list1.append('a')
list1.append('a')
list1.append('b')
list1.append('a')
list1.append('a')
print("List {} is palindrome? {}".format(list1, is_palindrome(list1)))
print("----")
list1 = utils.SinglyLinkedList()
list1.append('a')
list1.append('b')
list1.append('c')
list1.append('d')
list1.append('e')
print("List {} is palindrome? {}".format(list1, is_palindrome(list1)))
print("----")
list1 = utils.SinglyLinkedList()
list1.append('a')
list1.append('b')
list1.append('c')
list1.append('b')
list1.append('a')
print("List {} is palindrome? {}".format(list1, is_palindrome(list1)))
print("----")
|
<commit_before><commit_msg>Add exercise six of chapter two<commit_after>
|
'''
Created on 22 Aug 2017
Palindrome: implement a function to check if a linked list is a palindrome
Assumptions: each node of the list contains a single letter.
1) Invert-copy list, gives also back list size
2) compare until middle: size/2
@author: igoroya
'''
from chapter2 import utils # TODO: try relative imports
def invert_list(my_list):
"returns inverted list and size of it"
inv_list = utils.SinglyLinkedList()
node = my_list.head_node
entries = 0
while node is not None:
inv_list.add_in_front(node.cargo)
entries += 1
node = node.next_node
return inv_list, entries
def are_same_until_middle(list1, list2, size):
node1 = list1.head_node
node2 = list2.head_node
for _ in range(int(1 + size/2)):
if node1.cargo != node2.cargo:
return False
node1 = node1.next_node
node2 = node2.next_node
return True
def is_palindrome(orig_list):
inv_list, size = invert_list(orig_list)
return are_same_until_middle(orig_list, inv_list, size)
if __name__ == '__main__':
list1 = utils.SinglyLinkedList()
list1.append('a')
list1.append('b')
list1.append('a')
print("List {} is palindrome? {}".format(list1, is_palindrome(list1)))
print("----")
list1 = utils.SinglyLinkedList()
list1.append('a')
list1.append('b')
list1.append('b')
print("List {} is palindrome? {}".format(list1, is_palindrome(list1)))
print("----")
list1 = utils.SinglyLinkedList()
list1.append('a')
list1.append('b')
list1.append('b')
list1.append('a')
print("List {} is palindrome? {}".format(list1, is_palindrome(list1)))
print("----")
list1 = utils.SinglyLinkedList()
list1.append('a')
list1.append('a')
list1.append('b')
list1.append('a')
list1.append('a')
print("List {} is palindrome? {}".format(list1, is_palindrome(list1)))
print("----")
list1 = utils.SinglyLinkedList()
list1.append('a')
list1.append('b')
list1.append('c')
list1.append('d')
list1.append('e')
print("List {} is palindrome? {}".format(list1, is_palindrome(list1)))
print("----")
list1 = utils.SinglyLinkedList()
list1.append('a')
list1.append('b')
list1.append('c')
list1.append('b')
list1.append('a')
print("List {} is palindrome? {}".format(list1, is_palindrome(list1)))
print("----")
|
Add exercise six of chapter two
'''
Created on 22 Aug 2017
Palindrome: implement a function to check if a linked list is a palindrome
Assumptions: each node of the list contains a single letter.
1) Invert-copy list, gives also back list size
2) compare until middle: size/2
@author: igoroya
'''
from chapter2 import utils # TODO: try relative imports
def invert_list(my_list):
"returns inverted list and size of it"
inv_list = utils.SinglyLinkedList()
node = my_list.head_node
entries = 0
while node is not None:
inv_list.add_in_front(node.cargo)
entries += 1
node = node.next_node
return inv_list, entries
def are_same_until_middle(list1, list2, size):
node1 = list1.head_node
node2 = list2.head_node
for _ in range(int(1 + size/2)):
if node1.cargo != node2.cargo:
return False
node1 = node1.next_node
node2 = node2.next_node
return True
def is_palindrome(orig_list):
inv_list, size = invert_list(orig_list)
return are_same_until_middle(orig_list, inv_list, size)
if __name__ == '__main__':
list1 = utils.SinglyLinkedList()
list1.append('a')
list1.append('b')
list1.append('a')
print("List {} is palindrome? {}".format(list1, is_palindrome(list1)))
print("----")
list1 = utils.SinglyLinkedList()
list1.append('a')
list1.append('b')
list1.append('b')
print("List {} is palindrome? {}".format(list1, is_palindrome(list1)))
print("----")
list1 = utils.SinglyLinkedList()
list1.append('a')
list1.append('b')
list1.append('b')
list1.append('a')
print("List {} is palindrome? {}".format(list1, is_palindrome(list1)))
print("----")
list1 = utils.SinglyLinkedList()
list1.append('a')
list1.append('a')
list1.append('b')
list1.append('a')
list1.append('a')
print("List {} is palindrome? {}".format(list1, is_palindrome(list1)))
print("----")
list1 = utils.SinglyLinkedList()
list1.append('a')
list1.append('b')
list1.append('c')
list1.append('d')
list1.append('e')
print("List {} is palindrome? {}".format(list1, is_palindrome(list1)))
print("----")
list1 = utils.SinglyLinkedList()
list1.append('a')
list1.append('b')
list1.append('c')
list1.append('b')
list1.append('a')
print("List {} is palindrome? {}".format(list1, is_palindrome(list1)))
print("----")
|
<commit_before><commit_msg>Add exercise six of chapter two<commit_after>'''
Created on 22 Aug 2017
Palindrome: implement a function to check if a linked list is a palindrome
Assumptions: each node of the list contains a single letter.
1) Invert-copy list, gives also back list size
2) compare until middle: size/2
@author: igoroya
'''
from chapter2 import utils # TODO: try relative imports
def invert_list(my_list):
"returns inverted list and size of it"
inv_list = utils.SinglyLinkedList()
node = my_list.head_node
entries = 0
while node is not None:
inv_list.add_in_front(node.cargo)
entries += 1
node = node.next_node
return inv_list, entries
def are_same_until_middle(list1, list2, size):
node1 = list1.head_node
node2 = list2.head_node
for _ in range(int(1 + size/2)):
if node1.cargo != node2.cargo:
return False
node1 = node1.next_node
node2 = node2.next_node
return True
def is_palindrome(orig_list):
inv_list, size = invert_list(orig_list)
return are_same_until_middle(orig_list, inv_list, size)
if __name__ == '__main__':
list1 = utils.SinglyLinkedList()
list1.append('a')
list1.append('b')
list1.append('a')
print("List {} is palindrome? {}".format(list1, is_palindrome(list1)))
print("----")
list1 = utils.SinglyLinkedList()
list1.append('a')
list1.append('b')
list1.append('b')
print("List {} is palindrome? {}".format(list1, is_palindrome(list1)))
print("----")
list1 = utils.SinglyLinkedList()
list1.append('a')
list1.append('b')
list1.append('b')
list1.append('a')
print("List {} is palindrome? {}".format(list1, is_palindrome(list1)))
print("----")
list1 = utils.SinglyLinkedList()
list1.append('a')
list1.append('a')
list1.append('b')
list1.append('a')
list1.append('a')
print("List {} is palindrome? {}".format(list1, is_palindrome(list1)))
print("----")
list1 = utils.SinglyLinkedList()
list1.append('a')
list1.append('b')
list1.append('c')
list1.append('d')
list1.append('e')
print("List {} is palindrome? {}".format(list1, is_palindrome(list1)))
print("----")
list1 = utils.SinglyLinkedList()
list1.append('a')
list1.append('b')
list1.append('c')
list1.append('b')
list1.append('a')
print("List {} is palindrome? {}".format(list1, is_palindrome(list1)))
print("----")
|
|
7faad2fff896946c2b9b8deeb202dc27b5e3f6d7
|
tests/test_object.py
|
tests/test_object.py
|
#!/usr/bin/env python
import unittest
class TestObject(unittest.TestCase):
def test_object(self):
import mlbgame
data = {
'string': 'string',
'int': '10',
'float': '10.1'
}
obj = mlbgame.object.Object(data)
self.assertIsInstance(obj.string, str)
self.assertIsInstance(obj.int, int)
self.assertIsInstance(obj.float, float)
self.assertEqual(obj.string, 'string')
self.assertEqual(obj.int, 10)
self.assertEqual(obj.float, 10.1)
|
Add test for object module
|
Add test for object module
|
Python
|
mit
|
panzarino/mlbgame,zachpanz88/mlbgame
|
Add test for object module
|
#!/usr/bin/env python
import unittest
class TestObject(unittest.TestCase):
def test_object(self):
import mlbgame
data = {
'string': 'string',
'int': '10',
'float': '10.1'
}
obj = mlbgame.object.Object(data)
self.assertIsInstance(obj.string, str)
self.assertIsInstance(obj.int, int)
self.assertIsInstance(obj.float, float)
self.assertEqual(obj.string, 'string')
self.assertEqual(obj.int, 10)
self.assertEqual(obj.float, 10.1)
|
<commit_before><commit_msg>Add test for object module<commit_after>
|
#!/usr/bin/env python
import unittest
class TestObject(unittest.TestCase):
def test_object(self):
import mlbgame
data = {
'string': 'string',
'int': '10',
'float': '10.1'
}
obj = mlbgame.object.Object(data)
self.assertIsInstance(obj.string, str)
self.assertIsInstance(obj.int, int)
self.assertIsInstance(obj.float, float)
self.assertEqual(obj.string, 'string')
self.assertEqual(obj.int, 10)
self.assertEqual(obj.float, 10.1)
|
Add test for object module
#!/usr/bin/env python
import unittest
class TestObject(unittest.TestCase):
def test_object(self):
import mlbgame
data = {
'string': 'string',
'int': '10',
'float': '10.1'
}
obj = mlbgame.object.Object(data)
self.assertIsInstance(obj.string, str)
self.assertIsInstance(obj.int, int)
self.assertIsInstance(obj.float, float)
self.assertEqual(obj.string, 'string')
self.assertEqual(obj.int, 10)
self.assertEqual(obj.float, 10.1)
|
<commit_before><commit_msg>Add test for object module<commit_after>#!/usr/bin/env python
import unittest
class TestObject(unittest.TestCase):
def test_object(self):
import mlbgame
data = {
'string': 'string',
'int': '10',
'float': '10.1'
}
obj = mlbgame.object.Object(data)
self.assertIsInstance(obj.string, str)
self.assertIsInstance(obj.int, int)
self.assertIsInstance(obj.float, float)
self.assertEqual(obj.string, 'string')
self.assertEqual(obj.int, 10)
self.assertEqual(obj.float, 10.1)
|
|
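The test in the record above exercises mlbgame.object.Object without showing it: each string value in the input dict is expected to come back as an attribute coerced to int or float, or left as str. As a rough sketch only (not the actual mlbgame implementation, which may differ), a class satisfying those assertions could look like this:
class Object(object):
    """Hypothetical stand-in: coerce each dict value to int, then float, else keep the string."""
    def __init__(self, data):
        for key, value in data.items():
            setattr(self, key, self._coerce(value))

    @staticmethod
    def _coerce(value):
        for cast in (int, float):
            try:
                return cast(value)
            except ValueError:
                continue
        return value

obj = Object({'string': 'string', 'int': '10', 'float': '10.1'})
print(type(obj.int), type(obj.float), type(obj.string))  # <class 'int'> <class 'float'> <class 'str'>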
90ba69c5fec5a337a016a111ab196277e7d35843
|
tools/add_to_yaml.py
|
tools/add_to_yaml.py
|
import os
import yaml
import yt
import sys
data_dir = '/mnt/data/renaissance'
rsl_page_root = os.environ.get(
'RSL_PAGE_ROOT', '/home/britton/rensimlab.github.io')
simyaml = os.path.join(rsl_page_root, '_data', 'simulations.yaml')
simulation_data = yaml.load(open(simyaml, 'r'))
if len(sys.argv) < 2:
sys.exit()
sim = sys.argv[1]
fn = os.path.join(data_dir, sim, "rs_%s.h5" % sim.lower())
es = yt.load(fn)
def_entry = {"halo_catalogs": False, "num_halos": 'N/A',
"on_rsl": False, "size": 'N/A'}
sim_entries = [{"z": float("%.1f" % z), "snapshot": os.path.dirname(myfn)}
for z, myfn in zip(es.data["redshift"],
es.data["filename"].astype(str))]
for entry in sim_entries:
entry.update(def_entry)
simulation_data[sim] = sim_entries
yaml.dump(simulation_data, open(simyaml, 'w'))
|
Add script to add a simulation to the yaml file.
|
Add script to add a simulation to the yaml file.
|
Python
|
mit
|
rensimlab/rensimlab.github.io,rensimlab/rensimlab.github.io,rensimlab/rensimlab.github.io
|
Add script to add a simulation to the yaml file.
|
import os
import yaml
import yt
import sys
data_dir = '/mnt/data/renaissance'
rsl_page_root = os.environ.get(
'RSL_PAGE_ROOT', '/home/britton/rensimlab.github.io')
simyaml = os.path.join(rsl_page_root, '_data', 'simulations.yaml')
simulation_data = yaml.load(open(simyaml, 'r'))
if len(sys.argv) < 2:
sys.exit()
sim = sys.argv[1]
fn = os.path.join(data_dir, sim, "rs_%s.h5" % sim.lower())
es = yt.load(fn)
def_entry = {"halo_catalogs": False, "num_halos": 'N/A',
"on_rsl": False, "size": 'N/A'}
sim_entries = [{"z": float("%.1f" % z), "snapshot": os.path.dirname(myfn)}
for z, myfn in zip(es.data["redshift"],
es.data["filename"].astype(str))]
for entry in sim_entries:
entry.update(def_entry)
simulation_data[sim] = sim_entries
yaml.dump(simulation_data, open(simyaml, 'w'))
|
<commit_before><commit_msg>Add script to add a simulation to the yaml file.<commit_after>
|
import os
import yaml
import yt
import sys
data_dir = '/mnt/data/renaissance'
rsl_page_root = os.environ.get(
'RSL_PAGE_ROOT', '/home/britton/rensimlab.github.io')
simyaml = os.path.join(rsl_page_root, '_data', 'simulations.yaml')
simulation_data = yaml.load(open(simyaml, 'r'))
if len(sys.argv) < 2:
sys.exit()
sim = sys.argv[1]
fn = os.path.join(data_dir, sim, "rs_%s.h5" % sim.lower())
es = yt.load(fn)
def_entry = {"halo_catalogs": False, "num_halos": 'N/A',
"on_rsl": False, "size": 'N/A'}
sim_entries = [{"z": float("%.1f" % z), "snapshot": os.path.dirname(myfn)}
for z, myfn in zip(es.data["redshift"],
es.data["filename"].astype(str))]
for entry in sim_entries:
entry.update(def_entry)
simulation_data[sim] = sim_entries
yaml.dump(simulation_data, open(simyaml, 'w'))
|
Add script to add a simulation to the yaml file.
import os
import yaml
import yt
import sys
data_dir = '/mnt/data/renaissance'
rsl_page_root = os.environ.get(
'RSL_PAGE_ROOT', '/home/britton/rensimlab.github.io')
simyaml = os.path.join(rsl_page_root, '_data', 'simulations.yaml')
simulation_data = yaml.load(open(simyaml, 'r'))
if len(sys.argv) < 2:
sys.exit()
sim = sys.argv[1]
fn = os.path.join(data_dir, sim, "rs_%s.h5" % sim.lower())
es = yt.load(fn)
def_entry = {"halo_catalogs": False, "num_halos": 'N/A',
"on_rsl": False, "size": 'N/A'}
sim_entries = [{"z": float("%.1f" % z), "snapshot": os.path.dirname(myfn)}
for z, myfn in zip(es.data["redshift"],
es.data["filename"].astype(str))]
for entry in sim_entries:
entry.update(def_entry)
simulation_data[sim] = sim_entries
yaml.dump(simulation_data, open(simyaml, 'w'))
|
<commit_before><commit_msg>Add script to add a simulation to the yaml file.<commit_after>import os
import yaml
import yt
import sys
data_dir = '/mnt/data/renaissance'
rsl_page_root = os.environ.get(
'RSL_PAGE_ROOT', '/home/britton/rensimlab.github.io')
simyaml = os.path.join(rsl_page_root, '_data', 'simulations.yaml')
simulation_data = yaml.load(open(simyaml, 'r'))
if len(sys.argv) < 2:
sys.exit()
sim = sys.argv[1]
fn = os.path.join(data_dir, sim, "rs_%s.h5" % sim.lower())
es = yt.load(fn)
def_entry = {"halo_catalogs": False, "num_halos": 'N/A',
"on_rsl": False, "size": 'N/A'}
sim_entries = [{"z": float("%.1f" % z), "snapshot": os.path.dirname(myfn)}
for z, myfn in zip(es.data["redshift"],
es.data["filename"].astype(str))]
for entry in sim_entries:
entry.update(def_entry)
simulation_data[sim] = sim_entries
yaml.dump(simulation_data, open(simyaml, 'w'))
|
|
312be9e1d6a611ebcbe9e7110ffd77dc84a1633c
|
run_randoop.py
|
run_randoop.py
|
#!/usr/bin/python
import os,sys,common
import fetch
import time
def main():
if not os.path.exists(common.CORPUS_DIR) or not os.path.exists(common.LIBS_DIR):
print "Please run python fetch.py first to fetch the corpus and/or necessary tools."
sys.exit(1)
return
project = ""
if len(sys.argv) == 2:
project = sys.argv[1]
else:
print 'must supply single test name'
exit()
print "Running analysis on corpus."
print time.strftime('%X %x')
tools = ['randoop']
print "Cleaning {}".format(project)
common.clean_project(project)
print "Analyzing {}".format(project)
common.run_dljc(project, tools)
print time.strftime('%X %x')
if __name__ == "__main__":
main()
else:
print 'huh?'
|
Add script to run Randoop for evaluation over corpus
|
Add script to run Randoop for evaluation over corpus
|
Python
|
mit
|
aas-integration/integration-test2,aas-integration/integration-test2,markro49/integration-test2,markro49/integration-test2
|
Add script to run Randoop for evaluation over corpus
|
#!/usr/bin/python
import os,sys,common
import fetch
import time
def main():
if not os.path.exists(common.CORPUS_DIR) or not os.path.exists(common.LIBS_DIR):
print "Please run python fetch.py first to fetch the corpus and/or necessary tools."
sys.exit(1)
return
project = ""
if len(sys.argv) == 2:
project = sys.argv[1]
else:
print 'must supply single test name'
exit()
print "Running analysis on corpus."
print time.strftime('%X %x')
tools = ['randoop']
print "Cleaning {}".format(project)
common.clean_project(project)
print "Analyzing {}".format(project)
common.run_dljc(project, tools)
print time.strftime('%X %x')
if __name__ == "__main__":
main()
else:
print 'huh?'
|
<commit_before><commit_msg>Add script to run Randoop for evaluation over corpus<commit_after>
|
#!/usr/bin/python
import os,sys,common
import fetch
import time
def main():
if not os.path.exists(common.CORPUS_DIR) or not os.path.exists(common.LIBS_DIR):
print "Please run python fetch.py first to fetch the corpus and/or necessary tools."
sys.exit(1)
return
project = ""
if len(sys.argv) == 2:
project = sys.argv[1]
else:
print 'must supply single test name'
exit()
print "Running analysis on corpus."
print time.strftime('%X %x')
tools = ['randoop']
print "Cleaning {}".format(project)
common.clean_project(project)
print "Analyzing {}".format(project)
common.run_dljc(project, tools)
print time.strftime('%X %x')
if __name__ == "__main__":
main()
else:
print 'huh?'
|
Add script to run Randoop for evaluation over corpus
#!/usr/bin/python
import os,sys,common
import fetch
import time
def main():
if not os.path.exists(common.CORPUS_DIR) or not os.path.exists(common.LIBS_DIR):
print "Please run python fetch.py first to fetch the corpus and/or necessary tools."
sys.exit(1)
return
project = ""
if len(sys.argv) == 2:
project = sys.argv[1]
else:
print 'must supply single test name'
exit()
print "Running analysis on corpus."
print time.strftime('%X %x')
tools = ['randoop']
print "Cleaning {}".format(project)
common.clean_project(project)
print "Analyzing {}".format(project)
common.run_dljc(project, tools)
print time.strftime('%X %x')
if __name__ == "__main__":
main()
else:
print 'huh?'
|
<commit_before><commit_msg>Add script to run Randoop for evaluation over corpus<commit_after>#!/usr/bin/python
import os,sys,common
import fetch
import time
def main():
if not os.path.exists(common.CORPUS_DIR) or not os.path.exists(common.LIBS_DIR):
print "Please run python fetch.py first to fetch the corpus and/or necessary tools."
sys.exit(1)
return
project = ""
if len(sys.argv) == 2:
project = sys.argv[1]
else:
print 'must supply single test name'
exit()
print "Running analysis on corpus."
print time.strftime('%X %x')
tools = ['randoop']
print "Cleaning {}".format(project)
common.clean_project(project)
print "Analyzing {}".format(project)
common.run_dljc(project, tools)
print time.strftime('%X %x')
if __name__ == "__main__":
main()
else:
print 'huh?'
|
|
8a35d9a8f502a303ac164c138c2820254e600345
|
girder/cluster_filesystem/server/dateutils.py
|
girder/cluster_filesystem/server/dateutils.py
|
import datetime as dt
def dateParser(timestring):
"""
Parse a datetime string from ls -l and return a standard isotime
ls -l returns dates in two different formats:
May 2 09:06 (for dates within 6 months from now)
May 2 2018 (for dates further away)
Best would be to return ls -l --full-time,
but unfortunately we have no control over the remote API
"""
recent_time_format = "%b %d %H:%M"
older_time_format = "%b %d %Y"
try:
date = dt.datetime.strptime(timestring, recent_time_format)
now = dt.datetime.now()
this_year = dt.datetime(year=now.year,
month=date.month, day=date.day,
hour=date.hour, minute=date.minute)
last_year = dt.datetime(year=now.year-1,
month=date.month, day=date.day,
hour=date.hour, minute=date.minute)
delta_this = abs((now-this_year).total_seconds())
delta_last = abs((now-last_year).total_seconds())
print(delta_this, delta_last)
if (delta_this > delta_last):
date = last_year
else:
date = this_year
print(date.microsecond)
except ValueError:
try:
date = dt.datetime.strptime(timestring, older_time_format)
except ValueError:
date = dt.datetime.now()
date = dt.datetime(year=date.year,
month=date.month, day=date.day,
hour=date.hour, minute=date.minute,
second=date.second, microsecond=0)
return date.isoformat()
|
Add function to convert a date from bash to iso format
|
Add function to convert a date from bash to iso format
|
Python
|
apache-2.0
|
Kitware/cumulus,Kitware/cumulus
|
Add function to convert a date from bash to iso format
|
import datetime as dt
def dateParser(timestring):
"""
Parse a datetime string from ls -l and return a standard isotime
ls -l returns dates in two different formats:
May 2 09:06 (for dates within 6 months from now)
May 2 2018 (for dates further away)
Best would be to return ls -l --full-time,
but unfortunately we have no control over the remote API
"""
recent_time_format = "%b %d %H:%M"
older_time_format = "%b %d %Y"
try:
date = dt.datetime.strptime(timestring, recent_time_format)
now = dt.datetime.now()
this_year = dt.datetime(year=now.year,
month=date.month, day=date.day,
hour=date.hour, minute=date.minute)
last_year = dt.datetime(year=now.year-1,
month=date.month, day=date.day,
hour=date.hour, minute=date.minute)
delta_this = abs((now-this_year).total_seconds())
delta_last = abs((now-last_year).total_seconds())
print(delta_this, delta_last)
if (delta_this > delta_last):
date = last_year
else:
date = this_year
print(date.microsecond)
except ValueError:
try:
date = dt.datetime.strptime(timestring, older_time_format)
except ValueError:
date = dt.datetime.now()
date = dt.datetime(year=date.year,
month=date.month, day=date.day,
hour=date.hour, minute=date.minute,
second=date.second, microsecond=0)
return date.isoformat()
|
<commit_before><commit_msg>Add function to convert a date from bash to iso format<commit_after>
|
import datetime as dt
def dateParser(timestring):
"""
Parse a datetime string from ls -l and return a standard isotime
ls -l returns dates in two different formats:
May 2 09:06 (for dates within 6 months from now)
May 2 2018 (for dates further away)
Best would be to return ls -l --full-time,
but unfortunately we have no control over the remote API
"""
recent_time_format = "%b %d %H:%M"
older_time_format = "%b %d %Y"
try:
date = dt.datetime.strptime(timestring, recent_time_format)
now = dt.datetime.now()
this_year = dt.datetime(year=now.year,
month=date.month, day=date.day,
hour=date.hour, minute=date.minute)
last_year = dt.datetime(year=now.year-1,
month=date.month, day=date.day,
hour=date.hour, minute=date.minute)
delta_this = abs((now-this_year).total_seconds())
delta_last = abs((now-last_year).total_seconds())
print(delta_this, delta_last)
if (delta_this > delta_last):
date = last_year
else:
date = this_year
print(date.microsecond)
except ValueError:
try:
date = dt.datetime.strptime(timestring, older_time_format)
except ValueError:
date = dt.datetime.now()
date = dt.datetime(year=date.year,
month=date.month, day=date.day,
hour=date.hour, minute=date.minute,
second=date.second, microsecond=0)
return date.isoformat()
|
Add function to convert a date from bash to iso format
import datetime as dt
def dateParser(timestring):
"""
Parse a datetime string from ls -l and return a standard isotime
ls -l returns dates in two different formats:
May 2 09:06 (for dates within 6 months from now)
May 2 2018 (for dates further away)
Best would be to return ls -l --full-time,
but unfortunately we have no control over the remote API
"""
recent_time_format = "%b %d %H:%M"
older_time_format = "%b %d %Y"
try:
date = dt.datetime.strptime(timestring, recent_time_format)
now = dt.datetime.now()
this_year = dt.datetime(year=now.year,
month=date.month, day=date.day,
hour=date.hour, minute=date.minute)
last_year = dt.datetime(year=now.year-1,
month=date.month, day=date.day,
hour=date.hour, minute=date.minute)
delta_this = abs((now-this_year).total_seconds())
delta_last = abs((now-last_year).total_seconds())
print(delta_this, delta_last)
if (delta_this > delta_last):
date = last_year
else:
date = this_year
print(date.microsecond)
except ValueError:
try:
date = dt.datetime.strptime(timestring, older_time_format)
except ValueError:
date = dt.datetime.now()
date = dt.datetime(year=date.year,
month=date.month, day=date.day,
hour=date.hour, minute=date.minute,
second=date.second, microsecond=0)
return date.isoformat()
|
<commit_before><commit_msg>Add function to convert a date from bash to iso format<commit_after>import datetime as dt
def dateParser(timestring):
"""
Parse a datetime string from ls -l and return a standard isotime
ls -l returns dates in two different formats:
May 2 09:06 (for dates within 6 months from now)
May 2 2018 (for dates further away)
Best would be to return ls -l --full-time,
but unfortunately we have no control over the remote API
"""
recent_time_format = "%b %d %H:%M"
older_time_format = "%b %d %Y"
try:
date = dt.datetime.strptime(timestring, recent_time_format)
now = dt.datetime.now()
this_year = dt.datetime(year=now.year,
month=date.month, day=date.day,
hour=date.hour, minute=date.minute)
last_year = dt.datetime(year=now.year-1,
month=date.month, day=date.day,
hour=date.hour, minute=date.minute)
delta_this = abs((now-this_year).total_seconds())
delta_last = abs((now-last_year).total_seconds())
print(delta_this, delta_last)
if (delta_this > delta_last):
date = last_year
else:
date = this_year
print(date.microsecond)
except ValueError:
try:
date = dt.datetime.strptime(timestring, older_time_format)
except ValueError:
date = dt.datetime.now()
date = dt.datetime(year=date.year,
month=date.month, day=date.day,
hour=date.hour, minute=date.minute,
second=date.second, microsecond=0)
return date.isoformat()
|
|
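The dateParser function in the record above ships without a usage example. A short sketch of how it behaves on the two `ls -l` formats it targets, assuming dateutils.py is importable from the current path; the inferred year for the first call depends on the date the snippet is run, and the function itself also prints a couple of debug values on the recent-format path.
from dateutils import dateParser  # assumed import path

print(dateParser("May 02 09:06"))  # recent format: year inferred from today, e.g. "2024-05-02T09:06:00"
print(dateParser("May 02 2018"))   # older format: "2018-05-02T00:00:00"
print(dateParser("not a date"))    # unparseable: current time, microseconds dropped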
b975a67714bd77121bd5289c2de272c280e6c805
|
instrument-classification/extract_features.py
|
instrument-classification/extract_features.py
|
"""
Extracts chromagram features from audio data.
Input: a 2D tensor with set of audio samples
Output: A tensor with a chromagram for each sample
"""
import argparse
import numpy as np
import sys
# TODO: package the chromagram script
sys.path.append('../tools/music-processing-experiments/')
from analysis import split_to_blocks
from files import load_wav
from time_intervals import block_labels
from spectrogram import create_window
from reassignment import chromagram
from generate_audio_samples import SingleToneDataset
def extract_chromagrams(data_dir, block_size, hop_size, bin_range, bin_division):
print('loading dataset from:', data_dir)
dataset = SingleToneDataset(data_dir)
print('dataset shape:', dataset.samples.shape)
window = create_window(block_size)
def compute_chromagram(i):
x = dataset.samples[i]
print('chromagram for audio sample', i)
x_blocks, x_times = split_to_blocks(x, block_size, hop_size, dataset.sample_rate)
return chromagram(x_blocks, window, dataset.sample_rate, to_log=True, bin_range=bin_range, bin_division=bin_division)
chromagrams = np.dstack(compute_chromagram(i)
for i in range(len(dataset.samples)))
chromagrams = np.rollaxis(chromagrams, 2)
print('chromagrams shape:', chromagrams.shape)
chromagram_file = data_dir + '/chromagrams.npz'
print('saving chromagrams to:', chromagram_file)
np.savez_compressed(chromagram_file, chromagrams)
return chromagrams
def parse_args():
parser = argparse.ArgumentParser(description='Extract chromagram features.')
parser.add_argument('data_dir', metavar='DATA_DIR', type=str, help='data directory (both audio/features)')
parser.add_argument('-b', '--block-size', type=int, default=4096, help='block size')
parser.add_argument('-p', '--hop-size', type=int, default=2048, help='hop size')
parser.add_argument('-r', '--bin-range', type=int, nargs=2, default=[-48, 67], help='chromagram bin range')
parser.add_argument('-d', '--bin-division', type=int, default=1, help='bins per semitone in chromagram')
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
extract_chromagrams(args.data_dir, args.block_size, args.hop_size, args.bin_range, args.bin_division)
|
Add a script to extract chromagram features from the audio samples.
|
Add a script to extract chromagram features from the audio samples.
|
Python
|
mit
|
bzamecnik/ml,bzamecnik/ml,bzamecnik/ml-playground,bzamecnik/ml,bzamecnik/ml-playground
|
Add a script to extract chromagram features from the audio samples.
|
"""
Extracts chromagram features from audio data.
Input: a 2D tensor with set of audio samples
Output: A tensor with a chromagram for each sample
"""
import argparse
import numpy as np
import sys
# TODO: package the chromagram script
sys.path.append('../tools/music-processing-experiments/')
from analysis import split_to_blocks
from files import load_wav
from time_intervals import block_labels
from spectrogram import create_window
from reassignment import chromagram
from generate_audio_samples import SingleToneDataset
def extract_chromagrams(data_dir, block_size, hop_size, bin_range, bin_division):
print('loading dataset from:', data_dir)
dataset = SingleToneDataset(data_dir)
print('dataset shape:', dataset.samples.shape)
window = create_window(block_size)
def compute_chromagram(i):
x = dataset.samples[i]
print('chromagram for audio sample', i)
x_blocks, x_times = split_to_blocks(x, block_size, hop_size, dataset.sample_rate)
return chromagram(x_blocks, window, dataset.sample_rate, to_log=True, bin_range=bin_range, bin_division=bin_division)
chromagrams = np.dstack(compute_chromagram(i)
for i in range(len(dataset.samples)))
chromagrams = np.rollaxis(chromagrams, 2)
print('chromagrams shape:', chromagrams.shape)
chromagram_file = data_dir + '/chromagrams.npz'
print('saving chromagrams to:', chromagram_file)
np.savez_compressed(chromagram_file, chromagrams)
return chromagrams
def parse_args():
parser = argparse.ArgumentParser(description='Extract chromagram features.')
parser.add_argument('data_dir', metavar='DATA_DIR', type=str, help='data directory (both audio/features)')
parser.add_argument('-b', '--block-size', type=int, default=4096, help='block size')
parser.add_argument('-p', '--hop-size', type=int, default=2048, help='hop size')
parser.add_argument('-r', '--bin-range', type=int, nargs=2, default=[-48, 67], help='chromagram bin range')
parser.add_argument('-d', '--bin-division', type=int, default=1, help='bins per semitone in chromagram')
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
extract_chromagrams(args.data_dir, args.block_size, args.hop_size, args.bin_range, args.bin_division)
|
<commit_before><commit_msg>Add a script to extract chromagram features from the audio samples.<commit_after>
|
"""
Extracts chromagram features from audio data.
Input: a 2D tensor with set of audio samples
Output: A tensor with a chromagram for each sample
"""
import argparse
import numpy as np
import sys
# TODO: package the chromagram script
sys.path.append('../tools/music-processing-experiments/')
from analysis import split_to_blocks
from files import load_wav
from time_intervals import block_labels
from spectrogram import create_window
from reassignment import chromagram
from generate_audio_samples import SingleToneDataset
def extract_chromagrams(data_dir, block_size, hop_size, bin_range, bin_division):
print('loading dataset from:', data_dir)
dataset = SingleToneDataset(data_dir)
print('dataset shape:', dataset.samples.shape)
window = create_window(block_size)
def compute_chromagram(i):
x = dataset.samples[i]
print('chromagram for audio sample', i)
x_blocks, x_times = split_to_blocks(x, block_size, hop_size, dataset.sample_rate)
return chromagram(x_blocks, window, dataset.sample_rate, to_log=True, bin_range=bin_range, bin_division=bin_division)
chromagrams = np.dstack(compute_chromagram(i)
for i in range(len(dataset.samples)))
chromagrams = np.rollaxis(chromagrams, 2)
    print('chromagrams shape:', chromagrams.shape)
chromagram_file = data_dir + '/chromagrams.npz'
print('saving chromagrams to:', chromagram_file)
np.savez_compressed(chromagram_file, chromagrams)
return chromagrams
def parse_args():
parser = argparse.ArgumentParser(description='Extract chromagram features.')
parser.add_argument('data_dir', metavar='DATA_DIR', type=str, help='data directory (both audio/features)')
parser.add_argument('-b', '--block-size', type=int, default=4096, help='block size')
parser.add_argument('-p', '--hop-size', type=int, default=2048, help='hop size')
parser.add_argument('-r', '--bin-range', type=int, nargs=2, default=[-48, 67], help='chromagram bin range')
parser.add_argument('-d', '--bin-division', type=int, default=1, help='bins per semitone in chromagram')
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
extract_chromagrams(args.data_dir, args.block_size, args.hop_size, args.bin_range, args.bin_division)
|
Add a script to extract chromagram features from the audio samples."""
Extracts chromagram features from audio data.
Input: a 2D tensor with set of audio samples
Output: A tensor with a chromagram for each sample
"""
import argparse
import numpy as np
import sys
# TODO: package the chromagram script
sys.path.append('../tools/music-processing-experiments/')
from analysis import split_to_blocks
from files import load_wav
from time_intervals import block_labels
from spectrogram import create_window
from reassignment import chromagram
from generate_audio_samples import SingleToneDataset
def extract_chromagrams(data_dir, block_size, hop_size, bin_range, bin_division):
print('loading dataset from:', data_dir)
dataset = SingleToneDataset(data_dir)
print('dataset shape:', dataset.samples.shape)
window = create_window(block_size)
def compute_chromagram(i):
x = dataset.samples[i]
print('chromagram for audio sample', i)
x_blocks, x_times = split_to_blocks(x, block_size, hop_size, dataset.sample_rate)
return chromagram(x_blocks, window, dataset.sample_rate, to_log=True, bin_range=bin_range, bin_division=bin_division)
chromagrams = np.dstack(compute_chromagram(i)
for i in range(len(dataset.samples)))
chromagrams = np.rollaxis(chromagrams, 2)
    print('chromagrams shape:', chromagrams.shape)
chromagram_file = data_dir + '/chromagrams.npz'
print('saving chromagrams to:', chromagram_file)
np.savez_compressed(chromagram_file, chromagrams)
return chromagrams
def parse_args():
parser = argparse.ArgumentParser(description='Extract chromagram features.')
parser.add_argument('data_dir', metavar='DATA_DIR', type=str, help='data directory (both audio/features)')
parser.add_argument('-b', '--block-size', type=int, default=4096, help='block size')
parser.add_argument('-p', '--hop-size', type=int, default=2048, help='hop size')
parser.add_argument('-r', '--bin-range', type=int, nargs=2, default=[-48, 67], help='chromagram bin range')
parser.add_argument('-d', '--bin-division', type=int, default=1, help='bins per semitone in chromagram')
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
extract_chromagrams(args.data_dir, args.block_size, args.hop_size, args.bin_range, args.bin_division)
|
<commit_before><commit_msg>Add a script to extract chromagram features from the audio samples.<commit_after>"""
Extracts chromagram features from audio data.
Input: a 2D tensor with set of audio samples
Output: A tensor with a chromagram for each sample
"""
import argparse
import numpy as np
import sys
# TODO: package the chromagram script
sys.path.append('../tools/music-processing-experiments/')
from analysis import split_to_blocks
from files import load_wav
from time_intervals import block_labels
from spectrogram import create_window
from reassignment import chromagram
from generate_audio_samples import SingleToneDataset
def extract_chromagrams(data_dir, block_size, hop_size, bin_range, bin_division):
print('loading dataset from:', data_dir)
dataset = SingleToneDataset(data_dir)
print('dataset shape:', dataset.samples.shape)
window = create_window(block_size)
def compute_chromagram(i):
x = dataset.samples[i]
print('chromagram for audio sample', i)
x_blocks, x_times = split_to_blocks(x, block_size, hop_size, dataset.sample_rate)
return chromagram(x_blocks, window, dataset.sample_rate, to_log=True, bin_range=bin_range, bin_division=bin_division)
chromagrams = np.dstack(compute_chromagram(i)
for i in range(len(dataset.samples)))
chromagrams = np.rollaxis(chromagrams, 2)
    print('chromagrams shape:', chromagrams.shape)
chromagram_file = data_dir + '/chromagrams.npz'
print('saving chromagrams to:', chromagram_file)
np.savez_compressed(chromagram_file, chromagrams)
return chromagrams
def parse_args():
parser = argparse.ArgumentParser(description='Extract chromagram features.')
parser.add_argument('data_dir', metavar='DATA_DIR', type=str, help='data directory (both audio/features)')
parser.add_argument('-b', '--block-size', type=int, default=4096, help='block size')
parser.add_argument('-p', '--hop-size', type=int, default=2048, help='hop size')
parser.add_argument('-r', '--bin-range', type=int, nargs=2, default=[-48, 67], help='chromagram bin range')
parser.add_argument('-d', '--bin-division', type=int, default=1, help='bins per semitone in chromagram')
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
extract_chromagrams(args.data_dir, args.block_size, args.hop_size, args.bin_range, args.bin_division)
|
|
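A minimal sketch of reading the saved features back (the directory name is only a placeholder; because np.savez_compressed above is called with a positional argument, the array is stored under the default key 'arr_0'):

import numpy as np

features = np.load('data_dir/chromagrams.npz')  # placeholder path
chromagrams = features['arr_0']                 # default key for a positional savez argument
# after the rollaxis in the script the layout is (n_samples, n_blocks, n_bins)
print(chromagrams.shape)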
8fc4d6db21ba3220794dbfb892c5556e28709ee5
|
py/reconstruct-original-digits-from-english.py
|
py/reconstruct-original-digits-from-english.py
|
class Solution(object):
def originalDigits(self, s):
"""
:type s: str
:rtype: str
"""
digits = ['ero', 'one', 'to', 'three', 'for', 'five', 'si', 'seven', 'eiht', 'nine']
alphabets = list(set('zeroonetwothreefourfivesixseveneightnine') - set('guwxz'))
mapping = {x: i for i, x in enumerate(alphabets)}
matrix = [[0] * 10 for _ in xrange(10)]
for i, d in enumerate(digits):
for c in d:
matrix[mapping[c]][i] += 1
vector = [0] * 10
for c in s:
v = mapping.get(c)
if v is not None:
vector[v] += 1
for i in xrange(10):
for j in xrange(i, 10):
if j > 0 and i != j:
matrix[i], matrix[j] = matrix[j], matrix[i]
vector[i], vector[j] = vector[j], vector[i]
if matrix[i][i] > 0:
break
for z in xrange(10):
if z != i and matrix[z][i] != 0:
mz, mi = matrix[z][i], matrix[i][i]
matrix[z] = [v * mi for v in matrix[z]]
matrix[i] = [v * mz for v in matrix[i]]
vz, vi = vector[z], vector[i]
vector[z] *= mi
vector[i] *= mz
matrix[z] = [matrix[z][k] - matrix[i][k] for k in xrange(10)]
vector[z] -= vector[i]
return ''.join(str(a) * b for a, b in zip(xrange(10), [vector[i] / matrix[i][i] for i in xrange(10)]))
|
Add py solution for 423. Reconstruct Original Digits from English
|
Add py solution for 423. Reconstruct Original Digits from English
423. Reconstruct Original Digits from English: https://leetcode.com/problems/reconstruct-original-digits-from-english/
Approach 1:
Use matrix operation to find the answer
P.S. the number in the matrix is so terrible :o
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 423. Reconstruct Original Digits from English
423. Reconstruct Original Digits from English: https://leetcode.com/problems/reconstruct-original-digits-from-english/
Approach 1:
Use matrix operation to find the answer
P.S. the number in the matrix is so terrible :o
|
class Solution(object):
def originalDigits(self, s):
"""
:type s: str
:rtype: str
"""
digits = ['ero', 'one', 'to', 'three', 'for', 'five', 'si', 'seven', 'eiht', 'nine']
alphabets = list(set('zeroonetwothreefourfivesixseveneightnine') - set('guwxz'))
mapping = {x: i for i, x in enumerate(alphabets)}
matrix = [[0] * 10 for _ in xrange(10)]
for i, d in enumerate(digits):
for c in d:
matrix[mapping[c]][i] += 1
vector = [0] * 10
for c in s:
v = mapping.get(c)
if v is not None:
vector[v] += 1
for i in xrange(10):
for j in xrange(i, 10):
if j > 0 and i != j:
matrix[i], matrix[j] = matrix[j], matrix[i]
vector[i], vector[j] = vector[j], vector[i]
if matrix[i][i] > 0:
break
for z in xrange(10):
if z != i and matrix[z][i] != 0:
mz, mi = matrix[z][i], matrix[i][i]
matrix[z] = [v * mi for v in matrix[z]]
matrix[i] = [v * mz for v in matrix[i]]
vz, vi = vector[z], vector[i]
vector[z] *= mi
vector[i] *= mz
matrix[z] = [matrix[z][k] - matrix[i][k] for k in xrange(10)]
vector[z] -= vector[i]
return ''.join(str(a) * b for a, b in zip(xrange(10), [vector[i] / matrix[i][i] for i in xrange(10)]))
|
<commit_before><commit_msg>Add py solution for 423. Reconstruct Original Digits from English
423. Reconstruct Original Digits from English: https://leetcode.com/problems/reconstruct-original-digits-from-english/
Approach 1:
Use matrix operation to find the answer
P.S. the number in the matrix is so terrible :o<commit_after>
|
class Solution(object):
def originalDigits(self, s):
"""
:type s: str
:rtype: str
"""
digits = ['ero', 'one', 'to', 'three', 'for', 'five', 'si', 'seven', 'eiht', 'nine']
alphabets = list(set('zeroonetwothreefourfivesixseveneightnine') - set('guwxz'))
mapping = {x: i for i, x in enumerate(alphabets)}
matrix = [[0] * 10 for _ in xrange(10)]
for i, d in enumerate(digits):
for c in d:
matrix[mapping[c]][i] += 1
vector = [0] * 10
for c in s:
v = mapping.get(c)
if v is not None:
vector[v] += 1
for i in xrange(10):
for j in xrange(i, 10):
if j > 0 and i != j:
matrix[i], matrix[j] = matrix[j], matrix[i]
vector[i], vector[j] = vector[j], vector[i]
if matrix[i][i] > 0:
break
for z in xrange(10):
if z != i and matrix[z][i] != 0:
mz, mi = matrix[z][i], matrix[i][i]
matrix[z] = [v * mi for v in matrix[z]]
matrix[i] = [v * mz for v in matrix[i]]
vz, vi = vector[z], vector[i]
vector[z] *= mi
vector[i] *= mz
matrix[z] = [matrix[z][k] - matrix[i][k] for k in xrange(10)]
vector[z] -= vector[i]
return ''.join(str(a) * b for a, b in zip(xrange(10), [vector[i] / matrix[i][i] for i in xrange(10)]))
|
Add py solution for 423. Reconstruct Original Digits from English
423. Reconstruct Original Digits from English: https://leetcode.com/problems/reconstruct-original-digits-from-english/
Approach 1:
Use matrix operation to find the answer
P.S. the number in the matrix is so terrible :oclass Solution(object):
def originalDigits(self, s):
"""
:type s: str
:rtype: str
"""
digits = ['ero', 'one', 'to', 'three', 'for', 'five', 'si', 'seven', 'eiht', 'nine']
alphabets = list(set('zeroonetwothreefourfivesixseveneightnine') - set('guwxz'))
mapping = {x: i for i, x in enumerate(alphabets)}
matrix = [[0] * 10 for _ in xrange(10)]
for i, d in enumerate(digits):
for c in d:
matrix[mapping[c]][i] += 1
vector = [0] * 10
for c in s:
v = mapping.get(c)
if v is not None:
vector[v] += 1
for i in xrange(10):
for j in xrange(i, 10):
if j > 0 and i != j:
matrix[i], matrix[j] = matrix[j], matrix[i]
vector[i], vector[j] = vector[j], vector[i]
if matrix[i][i] > 0:
break
for z in xrange(10):
if z != i and matrix[z][i] != 0:
mz, mi = matrix[z][i], matrix[i][i]
matrix[z] = [v * mi for v in matrix[z]]
matrix[i] = [v * mz for v in matrix[i]]
vz, vi = vector[z], vector[i]
vector[z] *= mi
vector[i] *= mz
matrix[z] = [matrix[z][k] - matrix[i][k] for k in xrange(10)]
vector[z] -= vector[i]
return ''.join(str(a) * b for a, b in zip(xrange(10), [vector[i] / matrix[i][i] for i in xrange(10)]))
|
<commit_before><commit_msg>Add py solution for 423. Reconstruct Original Digits from English
423. Reconstruct Original Digits from English: https://leetcode.com/problems/reconstruct-original-digits-from-english/
Approach 1:
Use matrix operation to find the answer
P.S. the number in the matrix is so terrible :o<commit_after>class Solution(object):
def originalDigits(self, s):
"""
:type s: str
:rtype: str
"""
digits = ['ero', 'one', 'to', 'three', 'for', 'five', 'si', 'seven', 'eiht', 'nine']
alphabets = list(set('zeroonetwothreefourfivesixseveneightnine') - set('guwxz'))
mapping = {x: i for i, x in enumerate(alphabets)}
matrix = [[0] * 10 for _ in xrange(10)]
for i, d in enumerate(digits):
for c in d:
matrix[mapping[c]][i] += 1
vector = [0] * 10
for c in s:
v = mapping.get(c)
if v is not None:
vector[v] += 1
for i in xrange(10):
for j in xrange(i, 10):
if j > 0 and i != j:
matrix[i], matrix[j] = matrix[j], matrix[i]
vector[i], vector[j] = vector[j], vector[i]
if matrix[i][i] > 0:
break
for z in xrange(10):
if z != i and matrix[z][i] != 0:
mz, mi = matrix[z][i], matrix[i][i]
matrix[z] = [v * mi for v in matrix[z]]
matrix[i] = [v * mz for v in matrix[i]]
vz, vi = vector[z], vector[i]
vector[z] *= mi
vector[i] *= mz
matrix[z] = [matrix[z][k] - matrix[i][k] for k in xrange(10)]
vector[z] -= vector[i]
return ''.join(str(a) * b for a, b in zip(xrange(10), [vector[i] / matrix[i][i] for i in xrange(10)]))
|
|
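As a cross-check on the Gaussian-elimination approach above, a common greedy sketch (not part of the commit) peels digit counts off using letters that appear in only one digit word:

from collections import Counter

def original_digits_greedy(s):
    c = Counter(s)
    counts = [0] * 10
    counts[0] = c['z']                                      # 'z' only in zero
    counts[2] = c['w']                                      # 'w' only in two
    counts[4] = c['u']                                      # 'u' only in four
    counts[6] = c['x']                                      # 'x' only in six
    counts[8] = c['g']                                      # 'g' only in eight
    counts[3] = c['h'] - counts[8]                          # 'h' in three, eight
    counts[5] = c['f'] - counts[4]                          # 'f' in five, four
    counts[7] = c['s'] - counts[6]                          # 's' in seven, six
    counts[1] = c['o'] - counts[0] - counts[2] - counts[4]  # 'o' in one, zero, two, four
    counts[9] = c['i'] - counts[5] - counts[6] - counts[8]  # 'i' in nine, five, six, eight
    return ''.join(str(d) * n for d, n in enumerate(counts))

# original_digits_greedy("owoztneoer") == "012"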
e3e545418dfab00b5d64530f4b1a7ed36e6b6edb
|
lib/ansible/runner/lookup_plugins/redis_kv.py
|
lib/ansible/runner/lookup_plugins/redis_kv.py
|
# (c) 2012, Jan-Piet Mens <jpmens(at)gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible import utils, errors
import os
HAVE_REDIS=False
try:
import redis # https://github.com/andymccurdy/redis-py/
HAVE_REDIS=True
except ImportError:
pass
import re
# ==============================================================
# REDISGET: Obtain value from a GET on a Redis key. Terms
# expected: 0 = URL, 1 = Key
# URL may be empty, in which case redis://localhost:6379 assumed
# --------------------------------------------------------------
class LookupModule(object):
def __init__(self, basedir=None, **kwargs):
self.basedir = basedir
if HAVE_REDIS == False:
raise errors.AnsibleError("Can't LOOKUP(redis_kv): module redis is not installed")
def run(self, terms, **kwargs):
(url,key) = terms.split(',')
if url == "":
url = 'redis://localhost:6379'
# urlsplit on Python 2.6.1 is broken. Hmm. Probably also the reason
# Redis' from_url() doesn't work here.
p = '(?P<scheme>[^:]+)://?(?P<host>[^:/ ]+).?(?P<port>[0-9]*).*'
try:
m = re.search(p, url)
host = m.group('host')
port = int(m.group('port'))
except AttributeError:
raise errors.AnsibleError("Bad URI in redis lookup")
try:
conn = redis.Redis(host=host, port=port)
res = conn.get(key)
if res is None:
res = ""
return res
except:
return "" # connection failed or key not found
|
Add Redis GET $LOOKUP plugin renamed
|
Add Redis GET $LOOKUP plugin
renamed
|
Python
|
mit
|
thaim/ansible,thaim/ansible
|
Add Redis GET $LOOKUP plugin
renamed
|
# (c) 2012, Jan-Piet Mens <jpmens(at)gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible import utils, errors
import os
HAVE_REDIS=False
try:
import redis # https://github.com/andymccurdy/redis-py/
HAVE_REDIS=True
except ImportError:
pass
import re
# ==============================================================
# REDISGET: Obtain value from a GET on a Redis key. Terms
# expected: 0 = URL, 1 = Key
# URL may be empty, in which case redis://localhost:6379 assumed
# --------------------------------------------------------------
class LookupModule(object):
def __init__(self, basedir=None, **kwargs):
self.basedir = basedir
if HAVE_REDIS == False:
raise errors.AnsibleError("Can't LOOKUP(redis_kv): module redis is not installed")
def run(self, terms, **kwargs):
(url,key) = terms.split(',')
if url == "":
url = 'redis://localhost:6379'
# urlsplit on Python 2.6.1 is broken. Hmm. Probably also the reason
# Redis' from_url() doesn't work here.
p = '(?P<scheme>[^:]+)://?(?P<host>[^:/ ]+).?(?P<port>[0-9]*).*'
try:
m = re.search(p, url)
host = m.group('host')
port = int(m.group('port'))
except AttributeError:
raise errors.AnsibleError("Bad URI in redis lookup")
try:
conn = redis.Redis(host=host, port=port)
res = conn.get(key)
if res is None:
res = ""
return res
except:
return "" # connection failed or key not found
|
<commit_before><commit_msg>Add Redis GET $LOOKUP plugin
renamed<commit_after>
|
# (c) 2012, Jan-Piet Mens <jpmens(at)gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible import utils, errors
import os
HAVE_REDIS=False
try:
import redis # https://github.com/andymccurdy/redis-py/
HAVE_REDIS=True
except ImportError:
pass
import re
# ==============================================================
# REDISGET: Obtain value from a GET on a Redis key. Terms
# expected: 0 = URL, 1 = Key
# URL may be empty, in which case redis://localhost:6379 assumed
# --------------------------------------------------------------
class LookupModule(object):
def __init__(self, basedir=None, **kwargs):
self.basedir = basedir
if HAVE_REDIS == False:
raise errors.AnsibleError("Can't LOOKUP(redis_kv): module redis is not installed")
def run(self, terms, **kwargs):
(url,key) = terms.split(',')
if url == "":
url = 'redis://localhost:6379'
# urlsplit on Python 2.6.1 is broken. Hmm. Probably also the reason
# Redis' from_url() doesn't work here.
p = '(?P<scheme>[^:]+)://?(?P<host>[^:/ ]+).?(?P<port>[0-9]*).*'
try:
m = re.search(p, url)
host = m.group('host')
port = int(m.group('port'))
except AttributeError:
raise errors.AnsibleError("Bad URI in redis lookup")
try:
conn = redis.Redis(host=host, port=port)
res = conn.get(key)
if res is None:
res = ""
return res
except:
return "" # connection failed or key not found
|
Add Redis GET $LOOKUP plugin
renamed# (c) 2012, Jan-Piet Mens <jpmens(at)gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible import utils, errors
import os
HAVE_REDIS=False
try:
import redis # https://github.com/andymccurdy/redis-py/
HAVE_REDIS=True
except ImportError:
pass
import re
# ==============================================================
# REDISGET: Obtain value from a GET on a Redis key. Terms
# expected: 0 = URL, 1 = Key
# URL may be empty, in which case redis://localhost:6379 assumed
# --------------------------------------------------------------
class LookupModule(object):
def __init__(self, basedir=None, **kwargs):
self.basedir = basedir
if HAVE_REDIS == False:
raise errors.AnsibleError("Can't LOOKUP(redis_kv): module redis is not installed")
def run(self, terms, **kwargs):
(url,key) = terms.split(',')
if url == "":
url = 'redis://localhost:6379'
# urlsplit on Python 2.6.1 is broken. Hmm. Probably also the reason
# Redis' from_url() doesn't work here.
p = '(?P<scheme>[^:]+)://?(?P<host>[^:/ ]+).?(?P<port>[0-9]*).*'
try:
m = re.search(p, url)
host = m.group('host')
port = int(m.group('port'))
except AttributeError:
raise errors.AnsibleError("Bad URI in redis lookup")
try:
conn = redis.Redis(host=host, port=port)
res = conn.get(key)
if res is None:
res = ""
return res
except:
return "" # connection failed or key not found
|
<commit_before><commit_msg>Add Redis GET $LOOKUP plugin
renamed<commit_after># (c) 2012, Jan-Piet Mens <jpmens(at)gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible import utils, errors
import os
HAVE_REDIS=False
try:
import redis # https://github.com/andymccurdy/redis-py/
HAVE_REDIS=True
except ImportError:
pass
import re
# ==============================================================
# REDISGET: Obtain value from a GET on a Redis key. Terms
# expected: 0 = URL, 1 = Key
# URL may be empty, in which case redis://localhost:6379 assumed
# --------------------------------------------------------------
class LookupModule(object):
def __init__(self, basedir=None, **kwargs):
self.basedir = basedir
if HAVE_REDIS == False:
raise errors.AnsibleError("Can't LOOKUP(redis_kv): module redis is not installed")
def run(self, terms, **kwargs):
(url,key) = terms.split(',')
if url == "":
url = 'redis://localhost:6379'
# urlsplit on Python 2.6.1 is broken. Hmm. Probably also the reason
# Redis' from_url() doesn't work here.
p = '(?P<scheme>[^:]+)://?(?P<host>[^:/ ]+).?(?P<port>[0-9]*).*'
try:
m = re.search(p, url)
host = m.group('host')
port = int(m.group('port'))
except AttributeError:
raise errors.AnsibleError("Bad URI in redis lookup")
try:
conn = redis.Redis(host=host, port=port)
res = conn.get(key)
if res is None:
res = ""
return res
except:
return "" # connection failed or key not found
|
|
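A direct-call sketch of how the plugin resolves its terms outside a playbook (it assumes a local Redis on the default port holding the key 'release'; inside a play the same lookup would be reached through the $LOOKUP(redis_kv, ...) syntax the subject refers to):

lookup = LookupModule(basedir='.')
# explicit URL plus key
value = lookup.run('redis://localhost:6379,release')
# an empty URL part falls back to redis://localhost:6379
value = lookup.run(',release')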
aaff2c9f78a409ac00b0f8325e9bb29bdee50abc
|
python/print_sideways/ps.py
|
python/print_sideways/ps.py
|
#! /usr/bin/env python
# vim: set ai sw=4:
import sys
import os
import math
import argparse
def l10(num):
return int(math.log10(num))
def ps(num):
for d in xrange(l10(num), -1, -1):
print "".join([chr(ord('0')+((x/(10**d))%10)) for x in xrange(1,num+1)])
def ps_base(num, base):
for d in xrange(int(math.log(num,base)), -1, -1):
def dig(num, base):
if base <= 10 or num < 10:
return chr(ord('0')+num)
else:
return chr(ord('a')+num-10)
print "".join([dig((x/(base**d))%base, base) for x in xrange(1,num+1)])
def do_main(argv=None):
if argv == None:
argv = sys.argv[1:]
parser = argparse.ArgumentParser(description="Print numbers sideways.")
# parser.add_argument('numbers', metavar='N', type=int, nargs='+')
parser.add_argument('numbers', metavar='N', nargs='+')
parser.add_argument('-b', '--base', type=int, action='store', default=0)
args = parser.parse_args(argv)
# print args
if args.base == 0:
for val in args.numbers:
ps(int(val))
else:
for val in args.numbers:
ps_base(int(val, args.base), args.base)
return 0
if __name__ == '__main__':
sys.exit(do_main())
if len(sys.argv) < 2:
print "Please specify number(s) to print sideways."
sys.exit(1)
for num in sys.argv[1:]:
ps(int(num, 0))
|
Add python program to print numbers sideways.
|
Add python program to print numbers sideways.
|
Python
|
bsd-2-clause
|
tedzo/python_play
|
Add python program to print numbers sideways.
|
#! /usr/bin/env python
# vim: set ai sw=4:
import sys
import os
import math
import argparse
def l10(num):
return int(math.log10(num))
def ps(num):
for d in xrange(l10(num), -1, -1):
print "".join([chr(ord('0')+((x/(10**d))%10)) for x in xrange(1,num+1)])
def ps_base(num, base):
for d in xrange(int(math.log(num,base)), -1, -1):
def dig(num, base):
if base <= 10 or num < 10:
return chr(ord('0')+num)
else:
return chr(ord('a')+num-10)
print "".join([dig((x/(base**d))%base, base) for x in xrange(1,num+1)])
def do_main(argv=None):
if argv == None:
argv = sys.argv[1:]
parser = argparse.ArgumentParser(description="Print numbers sideways.")
# parser.add_argument('numbers', metavar='N', type=int, nargs='+')
parser.add_argument('numbers', metavar='N', nargs='+')
parser.add_argument('-b', '--base', type=int, action='store', default=0)
args = parser.parse_args(argv)
# print args
if args.base == 0:
for val in args.numbers:
ps(int(val))
else:
for val in args.numbers:
ps_base(int(val, args.base), args.base)
return 0
if __name__ == '__main__':
sys.exit(do_main())
if len(sys.argv) < 2:
print "Please specify number(s) to print sideways."
sys.exit(1)
for num in sys.argv[1:]:
ps(int(num, 0))
|
<commit_before><commit_msg>Add python program to print numbers sideways.<commit_after>
|
#! /usr/bin/env python
# vim: set ai sw=4:
import sys
import os
import math
import argparse
def l10(num):
return int(math.log10(num))
def ps(num):
for d in xrange(l10(num), -1, -1):
print "".join([chr(ord('0')+((x/(10**d))%10)) for x in xrange(1,num+1)])
def ps_base(num, base):
for d in xrange(int(math.log(num,base)), -1, -1):
def dig(num, base):
if base <= 10 or num < 10:
return chr(ord('0')+num)
else:
return chr(ord('a')+num-10)
print "".join([dig((x/(base**d))%base, base) for x in xrange(1,num+1)])
def do_main(argv=None):
if argv == None:
argv = sys.argv[1:]
parser = argparse.ArgumentParser(description="Print numbers sideways.")
# parser.add_argument('numbers', metavar='N', type=int, nargs='+')
parser.add_argument('numbers', metavar='N', nargs='+')
parser.add_argument('-b', '--base', type=int, action='store', default=0)
args = parser.parse_args(argv)
# print args
if args.base == 0:
for val in args.numbers:
ps(int(val))
else:
for val in args.numbers:
ps_base(int(val, args.base), args.base)
return 0
if __name__ == '__main__':
sys.exit(do_main())
if len(sys.argv) < 2:
print "Please specify number(s) to print sideways."
sys.exit(1)
for num in sys.argv[1:]:
ps(int(num, 0))
|
Add python program to print numbers sideways.#! /usr/bin/env python
# vim: set ai sw=4:
import sys
import os
import math
import argparse
def l10(num):
return int(math.log10(num))
def ps(num):
for d in xrange(l10(num), -1, -1):
print "".join([chr(ord('0')+((x/(10**d))%10)) for x in xrange(1,num+1)])
def ps_base(num, base):
for d in xrange(int(math.log(num,base)), -1, -1):
def dig(num, base):
if base <= 10 or num < 10:
return chr(ord('0')+num)
else:
return chr(ord('a')+num-10)
print "".join([dig((x/(base**d))%base, base) for x in xrange(1,num+1)])
def do_main(argv=None):
if argv == None:
argv = sys.argv[1:]
parser = argparse.ArgumentParser(description="Print numbers sideways.")
# parser.add_argument('numbers', metavar='N', type=int, nargs='+')
parser.add_argument('numbers', metavar='N', nargs='+')
parser.add_argument('-b', '--base', type=int, action='store', default=0)
args = parser.parse_args(argv)
# print args
if args.base == 0:
for val in args.numbers:
ps(int(val))
else:
for val in args.numbers:
ps_base(int(val, args.base), args.base)
return 0
if __name__ == '__main__':
sys.exit(do_main())
if len(sys.argv) < 2:
print "Please specify number(s) to print sideways."
sys.exit(1)
for num in sys.argv[1:]:
ps(int(num, 0))
|
<commit_before><commit_msg>Add python program to print numbers sideways.<commit_after>#! /usr/bin/env python
# vim: set ai sw=4:
import sys
import os
import math
import argparse
def l10(num):
return int(math.log10(num))
def ps(num):
for d in xrange(l10(num), -1, -1):
print "".join([chr(ord('0')+((x/(10**d))%10)) for x in xrange(1,num+1)])
def ps_base(num, base):
for d in xrange(int(math.log(num,base)), -1, -1):
def dig(num, base):
if base <= 10 or num < 10:
return chr(ord('0')+num)
else:
return chr(ord('a')+num-10)
print "".join([dig((x/(base**d))%base, base) for x in xrange(1,num+1)])
def do_main(argv=None):
if argv == None:
argv = sys.argv[1:]
parser = argparse.ArgumentParser(description="Print numbers sideways.")
# parser.add_argument('numbers', metavar='N', type=int, nargs='+')
parser.add_argument('numbers', metavar='N', nargs='+')
parser.add_argument('-b', '--base', type=int, action='store', default=0)
args = parser.parse_args(argv)
# print args
if args.base == 0:
for val in args.numbers:
ps(int(val))
else:
for val in args.numbers:
ps_base(int(val, args.base), args.base)
return 0
if __name__ == '__main__':
sys.exit(do_main())
if len(sys.argv) < 2:
print "Please specify number(s) to print sideways."
sys.exit(1)
for num in sys.argv[1:]:
ps(int(num, 0))
|
|
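A small worked example (assuming the ps() function above) of the column-wise output for 16: each column is one of the numbers 1..16, with the most significant digits forming the first row.

# >>> ps(16)
# 0000000001111111
# 1234567890123456
#
# With -b, the same idea applies per base, e.g. ./ps.py -b 16 0x20 prints two rows of hex digits.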
6100d873443581f6b4f34cd0e88150878f3825b2
|
peerinst/migrations/0002_auto_20150430_2108.py
|
peerinst/migrations/0002_auto_20150430_2108.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('peerinst', '0001_squashed_0012_auto_20150423_1146'),
]
operations = [
migrations.AlterField(
model_name='answer',
name='show_to_others',
field=models.BooleanField(default=True, verbose_name='Show to others?'),
),
]
|
Add migration for show_to_others default value.
|
Add migration for show_to_others default value.
|
Python
|
agpl-3.0
|
open-craft/dalite-ng,open-craft/dalite-ng,open-craft/dalite-ng
|
Add migration for show_to_others default value.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('peerinst', '0001_squashed_0012_auto_20150423_1146'),
]
operations = [
migrations.AlterField(
model_name='answer',
name='show_to_others',
field=models.BooleanField(default=True, verbose_name='Show to others?'),
),
]
|
<commit_before><commit_msg>Add migration for show_to_others default value.<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('peerinst', '0001_squashed_0012_auto_20150423_1146'),
]
operations = [
migrations.AlterField(
model_name='answer',
name='show_to_others',
field=models.BooleanField(default=True, verbose_name='Show to others?'),
),
]
|
Add migration for show_to_others default value.# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('peerinst', '0001_squashed_0012_auto_20150423_1146'),
]
operations = [
migrations.AlterField(
model_name='answer',
name='show_to_others',
field=models.BooleanField(default=True, verbose_name='Show to others?'),
),
]
|
<commit_before><commit_msg>Add migration for show_to_others default value.<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('peerinst', '0001_squashed_0012_auto_20150423_1146'),
]
operations = [
migrations.AlterField(
model_name='answer',
name='show_to_others',
field=models.BooleanField(default=True, verbose_name='Show to others?'),
),
]
|
|
fc17d313a296bf8c93fa857bf3cf2ffc775ce102
|
lpthw/ex26.py
|
lpthw/ex26.py
|
def break_words(stuff):
"""This function will break up words for us."""
words = stuff.split(' ')
return words
def sort_words(words):
"""Sorts the words."""
return sorted(words)
def print_first_word(words):
"""Prints the first word after popping it off."""
word = words.pop(0)
print word
def print_last_word(words):
"""Prints the last word after popping it off."""
word = words.pop(-1)
print word
def sort_sentence(sentence):
"""Takes in a full sentence and returns the sorted words."""
words = break_words(sentence)
return sort_words(words)
def print_first_and_last(sentence):
"""Prints the first and last words of the sentence."""
words = break_words(sentence)
print_first_word(words)
print_last_word(words)
def print_first_and_last_sorted(sentence):
"""Sorts the words then prints the first and last one."""
words = sort_sentence(sentence)
print_first_word(words)
print_last_word(words)
print "Let's practice everything."
print 'You\'d need to know \'bout escapes with \\ that do \n newlines and \t tabs.'
poem = """
\tThe lovely world
with logic so firmly planted
cannot discern \n the needs of love
nor comprehend passion from intuition
and requires an explanation
\n\t\twhere there is none.
"""
print "--------------"
print poem
print "--------------"
five = 10 - 2 + 3 - 5
print "This should be five: %s" % five
def secret_formula(start):
jelly_beans = start * 500
jars = jelly_beans / 1000
crates = jars / 100
return jelly_beans, jars, crates
start_point = 10000
beans, jars, crates = secret_formula(start_point)
print "With a starting point of: %d" % start_point
print "We'd have %d beans, %d jars, and %d crates." % (beans, jars, crates)
start_point = start_point / 10
print "We can also do that this way:"
print "We'd have %d beans, %d jars, and %d crates." % secret_formula(start_point)
sentence = "All good things come to those who wait."
words = break_words(sentence)
sorted_words = sort_words(words)
print_first_word(words)
print_last_word(words)
print_first_word(sorted_words)
print_last_word(sorted_words)
sorted_words = sort_sentence(sentence)
print sorted_words
print_first_and_last(sentence)
print_first_and_last_sorted(sentence)
|
Add work for Exercise 26: The Midterm.
|
Add work for Exercise 26: The Midterm.
|
Python
|
mit
|
jaredmanning/learning,jaredmanning/learning
|
Add work for Exercise 26: The Midterm.
|
def break_words(stuff):
"""This function will break up words for us."""
words = stuff.split(' ')
return words
def sort_words(words):
"""Sorts the words."""
return sorted(words)
def print_first_word(words):
"""Prints the first word after popping it off."""
word = words.pop(0)
print word
def print_last_word(words):
"""Prints the last word after popping it off."""
word = words.pop(-1)
print word
def sort_sentence(sentence):
"""Takes in a full sentence and returns the sorted words."""
words = break_words(sentence)
return sort_words(words)
def print_first_and_last(sentence):
"""Prints the first and last words of the sentence."""
words = break_words(sentence)
print_first_word(words)
print_last_word(words)
def print_first_and_last_sorted(sentence):
"""Sorts the words then prints the first and last one."""
words = sort_sentence(sentence)
print_first_word(words)
print_last_word(words)
print "Let's practice everything."
print 'You\'d need to know \'bout escapes with \\ that do \n newlines and \t tabs.'
poem = """
\tThe lovely world
with logic so firmly planted
cannot discern \n the needs of love
nor comprehend passion from intuition
and requires an explanation
\n\t\twhere there is none.
"""
print "--------------"
print poem
print "--------------"
five = 10 - 2 + 3 - 5
print "This should be five: %s" % five
def secret_formula(start):
jelly_beans = start * 500
jars = jelly_beans / 1000
crates = jars / 100
return jelly_beans, jars, crates
start_point = 10000
beans, jars, crates = secret_formula(start_point)
print "With a starting point of: %d" % start_point
print "We'd have %d beans, %d jars, and %d crates." % (beans, jars, crates)
start_point = start_point / 10
print "We can also do that this way:"
print "We'd have %d beans, %d jars, and %d crates." % secret_formula(start_point)
sentence = "All good things come to those who wait."
words = break_words(sentence)
sorted_words = sort_words(words)
print_first_word(words)
print_last_word(words)
print_first_word(sorted_words)
print_last_word(sorted_words)
sorted_words = sort_sentence(sentence)
print sorted_words
print_first_and_last(sentence)
print_first_and_last_sorted(sentence)
|
<commit_before><commit_msg>Add work for Exercise 26: The Midterm.<commit_after>
|
def break_words(stuff):
"""This function will break up words for us."""
words = stuff.split(' ')
return words
def sort_words(words):
"""Sorts the words."""
return sorted(words)
def print_first_word(words):
"""Prints the first word after popping it off."""
word = words.pop(0)
print word
def print_last_word(words):
"""Prints the last word after popping it off."""
word = words.pop(-1)
print word
def sort_sentence(sentence):
"""Takes in a full sentence and returns the sorted words."""
words = break_words(sentence)
return sort_words(words)
def print_first_and_last(sentence):
"""Prints the first and last words of the sentence."""
words = break_words(sentence)
print_first_word(words)
print_last_word(words)
def print_first_and_last_sorted(sentence):
"""Sorts the words then prints the first and last one."""
words = sort_sentence(sentence)
print_first_word(words)
print_last_word(words)
print "Let's practice everything."
print 'You\'d need to know \'bout escapes with \\ that do \n newlines and \t tabs.'
poem = """
\tThe lovely world
with logic so firmly planted
cannot discern \n the needs of love
nor comprehend passion from intuition
and requires an explanation
\n\t\twhere there is none.
"""
print "--------------"
print poem
print "--------------"
five = 10 - 2 + 3 - 5
print "This should be five: %s" % five
def secret_formula(start):
jelly_beans = start * 500
jars = jelly_beans / 1000
crates = jars / 100
return jelly_beans, jars, crates
start_point = 10000
beans, jars, crates = secret_formula(start_point)
print "With a starting point of: %d" % start_point
print "We'd have %d beans, %d jars, and %d crates." % (beans, jars, crates)
start_point = start_point / 10
print "We can also do that this way:"
print "We'd have %d beans, %d jars, and %d crates." % secret_formula(start_point)
sentence = "All good things come to those who wait."
words = break_words(sentence)
sorted_words = sort_words(words)
print_first_word(words)
print_last_word(words)
print_first_word(sorted_words)
print_last_word(sorted_words)
sorted_words = sort_sentence(sentence)
print sorted_words
print_first_and_last(sentence)
print_first_and_last_sorted(sentence)
|
Add work for Exercise 26: The Midterm.def break_words(stuff):
"""This function will break up words for us."""
words = stuff.split(' ')
return words
def sort_words(words):
"""Sorts the words."""
return sorted(words)
def print_first_word(words):
"""Prints the first word after popping it off."""
word = words.pop(0)
print word
def print_last_word(words):
"""Prints the last word after popping it off."""
word = words.pop(-1)
print word
def sort_sentence(sentence):
"""Takes in a full sentence and returns the sorted words."""
words = break_words(sentence)
return sort_words(words)
def print_first_and_last(sentence):
"""Prints the first and last words of the sentence."""
words = break_words(sentence)
print_first_word(words)
print_last_word(words)
def print_first_and_last_sorted(sentence):
"""Sorts the words then prints the first and last one."""
words = sort_sentence(sentence)
print_first_word(words)
print_last_word(words)
print "Let's practice everything."
print 'You\'d need to know \'bout escapes with \\ that do \n newlines and \t tabs.'
poem = """
\tThe lovely world
with logic so firmly planted
cannot discern \n the needs of love
nor comprehend passion from intuition
and requires an explanation
\n\t\twhere there is none.
"""
print "--------------"
print poem
print "--------------"
five = 10 - 2 + 3 - 5
print "This should be five: %s" % five
def secret_formula(start):
jelly_beans = start * 500
jars = jelly_beans / 1000
crates = jars / 100
return jelly_beans, jars, crates
start_point = 10000
beans, jars, crates = secret_formula(start_point)
print "With a starting point of: %d" % start_point
print "We'd have %d beans, %d jars, and %d crates." % (beans, jars, crates)
start_point = start_point / 10
print "We can also do that this way:"
print "We'd have %d beans, %d jars, and %d crates." % secret_formula(start_point)
sentence = "All good things come to those who wait."
words = break_words(sentence)
sorted_words = sort_words(words)
print_first_word(words)
print_last_word(words)
print_first_word(sorted_words)
print_last_word(sorted_words)
sorted_words = sort_sentence(sentence)
print sorted_words
print_first_and_last(sentence)
print_first_and_last_sorted(sentence)
|
<commit_before><commit_msg>Add work for Exercise 26: The Midterm.<commit_after>def break_words(stuff):
"""This function will break up words for us."""
words = stuff.split(' ')
return words
def sort_words(words):
"""Sorts the words."""
return sorted(words)
def print_first_word(words):
"""Prints the first word after popping it off."""
word = words.pop(0)
print word
def print_last_word(words):
"""Prints the last word after popping it off."""
word = words.pop(-1)
print word
def sort_sentence(sentence):
"""Takes in a full sentence and returns the sorted words."""
words = break_words(sentence)
return sort_words(words)
def print_first_and_last(sentence):
"""Prints the first and last words of the sentence."""
words = break_words(sentence)
print_first_word(words)
print_last_word(words)
def print_first_and_last_sorted(sentence):
"""Sorts the words then prints the first and last one."""
words = sort_sentence(sentence)
print_first_word(words)
print_last_word(words)
print "Let's practice everything."
print 'You\'d need to know \'bout escapes with \\ that do \n newlines and \t tabs.'
poem = """
\tThe lovely world
with logic so firmly planted
cannot discern \n the needs of love
nor comprehend passion from intuition
and requires an explanation
\n\t\twhere there is none.
"""
print "--------------"
print poem
print "--------------"
five = 10 - 2 + 3 - 5
print "This should be five: %s" % five
def secret_formula(start):
jelly_beans = start * 500
jars = jelly_beans / 1000
crates = jars / 100
return jelly_beans, jars, crates
start_point = 10000
beans, jars, crates = secret_formula(start_point)
print "With a starting point of: %d" % start_point
print "We'd have %d beans, %d jars, and %d crates." % (beans, jars, crates)
start_point = start_point / 10
print "We can also do that this way:"
print "We'd have %d beans, %d jars, and %d crates." % secret_formula(start_point)
sentence = "All good things come to those who wait."
words = break_words(sentence)
sorted_words = sort_words(words)
print_first_word(words)
print_last_word(words)
print_first_word(sorted_words)
print_last_word(sorted_words)
sorted_words = sort_sentence(sentence)
print sorted_words
print_first_and_last(sentence)
print_first_and_last_sorted(sentence)
|
|
7b828fedc64fccff23a85f6778add58ed93c1fff
|
kodi-stable/parts/plugins/x_kodiautotools.py
|
kodi-stable/parts/plugins/x_kodiautotools.py
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
import snapcraft
from snapcraft.plugins import autotools
class KodiAutotoolsPlugin(autotools.AutotoolsPlugin):
def build(self):
# setup build directory
super(autotools.AutotoolsPlugin, self).build()
        # run bootstrap before the autotools build
self.run(['./bootstrap'])
        # the plugin hooks are not idempotent, when they should be.
        # so we need to ensure that calling the autotools plugin's build won't
        # retrigger BasePlugin.build(), which erases the directory.
# However the issue with this hack is that other parts from this
# project will be impacted if they are instantiated after this
# method is ran, which is unlikely, but still possible.
# https://bugs.launchpad.net/snapcraft/+bug/1595964.
snapcraft.BasePlugin.build = lambda self: None
super().build()
|
Add missing custom plugins python code.
|
Add missing custom plugins python code.
|
Python
|
mit
|
jamestait/snappy-playpen,elopio/snappy-playpen,elopio/snappy-playpen,elopio/snappy-playpen,ubuntu/snappy-playpen,elopio/snappy-playpen,jamestait/snappy-playpen,ubuntu/snappy-playpen,jamestait/snappy-playpen,Zap123/snappy-playpen,jamestait/snappy-playpen,Zap123/snappy-playpen,ubuntu/snappy-playpen,ubuntu/snappy-playpen
|
Add missing custom plugins python code.
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
import snapcraft
from snapcraft.plugins import autotools
class KodiAutotoolsPlugin(autotools.AutotoolsPlugin):
def build(self):
# setup build directory
super(autotools.AutotoolsPlugin, self).build()
        # run bootstrap before the autotools build
self.run(['./bootstrap'])
        # the plugin hooks are not idempotent, when they should be.
        # so we need to ensure that calling the autotools plugin's build won't
        # retrigger BasePlugin.build(), which erases the directory.
# However the issue with this hack is that other parts from this
# project will be impacted if they are instantiated after this
# method is ran, which is unlikely, but still possible.
# https://bugs.launchpad.net/snapcraft/+bug/1595964.
snapcraft.BasePlugin.build = lambda self: None
super().build()
|
<commit_before><commit_msg>Add missing custom plugins python code.<commit_after>
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
import snapcraft
from snapcraft.plugins import autotools
class KodiAutotoolsPlugin(autotools.AutotoolsPlugin):
def build(self):
# setup build directory
super(autotools.AutotoolsPlugin, self).build()
        # run bootstrap before the autotools build
self.run(['./bootstrap'])
        # the plugin hooks are not idempotent, when they should be.
        # so we need to ensure that calling the autotools plugin's build won't
        # retrigger BasePlugin.build(), which erases the directory.
# However the issue with this hack is that other parts from this
# project will be impacted if they are instantiated after this
# method is ran, which is unlikely, but still possible.
# https://bugs.launchpad.net/snapcraft/+bug/1595964.
snapcraft.BasePlugin.build = lambda self: None
super().build()
|
Add missing custom plugins python code.# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
import snapcraft
from snapcraft.plugins import autotools
class KodiAutotoolsPlugin(autotools.AutotoolsPlugin):
def build(self):
# setup build directory
super(autotools.AutotoolsPlugin, self).build()
        # run bootstrap before the autotools build
self.run(['./bootstrap'])
        # the plugin hooks are not idempotent, when they should be.
        # so we need to ensure that calling the autotools plugin's build won't
        # retrigger BasePlugin.build(), which erases the directory.
# However the issue with this hack is that other parts from this
# project will be impacted if they are instantiated after this
# method is ran, which is unlikely, but still possible.
# https://bugs.launchpad.net/snapcraft/+bug/1595964.
snapcraft.BasePlugin.build = lambda self: None
super().build()
|
<commit_before><commit_msg>Add missing custom plugins python code.<commit_after># -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
import snapcraft
from snapcraft.plugins import autotools
class KodiAutotoolsPlugin(autotools.AutotoolsPlugin):
def build(self):
# setup build directory
super(autotools.AutotoolsPlugin, self).build()
        # run bootstrap before the autotools build
self.run(['./bootstrap'])
        # the plugin hooks are not idempotent, when they should be.
        # so we need to ensure that calling the autotools plugin's build won't
        # retrigger BasePlugin.build(), which erases the directory.
# However the issue with this hack is that other parts from this
# project will be impacted if they are instantiated after this
# method is ran, which is unlikely, but still possible.
# https://bugs.launchpad.net/snapcraft/+bug/1595964.
snapcraft.BasePlugin.build = lambda self: None
super().build()
|
|
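A hedged variation on the same workaround (not what the commit does, and assuming the same imports as the plugin above): restoring BasePlugin.build afterwards would address the comment's concern about other parts built later in the same run.

class KodiAutotoolsPlugin(autotools.AutotoolsPlugin):
    def build(self):
        super(autotools.AutotoolsPlugin, self).build()  # set up the build directory
        self.run(['./bootstrap'])
        original_build = snapcraft.BasePlugin.build     # save, patch, then restore
        snapcraft.BasePlugin.build = lambda self: None
        try:
            super().build()
        finally:
            snapcraft.BasePlugin.build = original_build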
2f9d6dd3fa112844fd605348605673dc0c2a8b5f
|
tests/twisted/avahitest.py
|
tests/twisted/avahitest.py
|
"""
Infrastructure for testing avahi
"""
import servicetest
from servicetest import Event
import dbus
import dbus.glib
import avahi
def get_host_name():
bus = dbus.SystemBus()
server = dbus.Interface(bus.get_object(avahi.DBUS_NAME,
avahi.DBUS_PATH_SERVER), avahi.DBUS_INTERFACE_SERVER)
return server.GetHostName()
class AvahiListener:
def __init__(self, event_queue):
self.event_queue = event_queue
self.bus = dbus.SystemBus()
self.server = dbus.Interface(self.bus.get_object(avahi.DBUS_NAME,
avahi.DBUS_PATH_SERVER), avahi.DBUS_INTERFACE_SERVER)
self.browsers = []
def _service_added_cb(self, interface, protocol, name, stype, domain,
flags):
e = Event ('service-added',
interface=interface, protocol=protocol, name=name, stype=stype,
domain=domain, flags=flags)
self.event_queue.append(e)
def _service_removed_cb(self, interface, protocol, name, stype, domain,
flags):
e = Event ('service-removed',
interface=interface, protocol=protocol, name=name, stype=stype,
domain=domain, flags=flags)
self.event_queue.append(e)
def listen_for_service(self, sname):
browser_path = self.server.ServiceBrowserNew(avahi.IF_UNSPEC,
avahi.PROTO_UNSPEC, sname, "local", dbus.UInt32(0));
browser_obj = self.bus.get_object(avahi.DBUS_NAME, browser_path)
browser = dbus.Interface(browser_obj,
avahi.DBUS_INTERFACE_SERVICE_BROWSER)
browser.connect_to_signal('ItemNew', self._service_added_cb)
browser.connect_to_signal('ItemRemoved', self._service_removed_cb)
self.browsers.append(browser)
|
Add some avahi testing utilities
|
Add some avahi testing utilities
|
Python
|
lgpl-2.1
|
freedesktop-unofficial-mirror/telepathy__telepathy-salut,freedesktop-unofficial-mirror/telepathy__telepathy-salut,freedesktop-unofficial-mirror/telepathy__telepathy-salut,freedesktop-unofficial-mirror/telepathy__telepathy-salut
|
Add some avahi testing utilities
|
"""
Infrastructure for testing avahi
"""
import servicetest
from servicetest import Event
import dbus
import dbus.glib
import avahi
def get_host_name():
bus = dbus.SystemBus()
server = dbus.Interface(bus.get_object(avahi.DBUS_NAME,
avahi.DBUS_PATH_SERVER), avahi.DBUS_INTERFACE_SERVER)
return server.GetHostName()
class AvahiListener:
def __init__(self, event_queue):
self.event_queue = event_queue
self.bus = dbus.SystemBus()
self.server = dbus.Interface(self.bus.get_object(avahi.DBUS_NAME,
avahi.DBUS_PATH_SERVER), avahi.DBUS_INTERFACE_SERVER)
self.browsers = []
def _service_added_cb(self, interface, protocol, name, stype, domain,
flags):
e = Event ('service-added',
interface=interface, protocol=protocol, name=name, stype=stype,
domain=domain, flags=flags)
self.event_queue.append(e)
def _service_removed_cb(self, interface, protocol, name, stype, domain,
flags):
e = Event ('service-removed',
interface=interface, protocol=protocol, name=name, stype=stype,
domain=domain, flags=flags)
self.event_queue.append(e)
def listen_for_service(self, sname):
browser_path = self.server.ServiceBrowserNew(avahi.IF_UNSPEC,
avahi.PROTO_UNSPEC, sname, "local", dbus.UInt32(0));
browser_obj = self.bus.get_object(avahi.DBUS_NAME, browser_path)
browser = dbus.Interface(browser_obj,
avahi.DBUS_INTERFACE_SERVICE_BROWSER)
browser.connect_to_signal('ItemNew', self._service_added_cb)
browser.connect_to_signal('ItemRemoved', self._service_removed_cb)
self.browsers.append(browser)
|
<commit_before><commit_msg>Add some avahi testing utilities<commit_after>
|
"""
Infrastructure for testing avahi
"""
import servicetest
from servicetest import Event
import dbus
import dbus.glib
import avahi
def get_host_name():
bus = dbus.SystemBus()
server = dbus.Interface(bus.get_object(avahi.DBUS_NAME,
avahi.DBUS_PATH_SERVER), avahi.DBUS_INTERFACE_SERVER)
return server.GetHostName()
class AvahiListener:
def __init__(self, event_queue):
self.event_queue = event_queue
self.bus = dbus.SystemBus()
self.server = dbus.Interface(self.bus.get_object(avahi.DBUS_NAME,
avahi.DBUS_PATH_SERVER), avahi.DBUS_INTERFACE_SERVER)
self.browsers = []
def _service_added_cb(self, interface, protocol, name, stype, domain,
flags):
e = Event ('service-added',
interface=interface, protocol=protocol, name=name, stype=stype,
domain=domain, flags=flags)
self.event_queue.append(e)
def _service_removed_cb(self, interface, protocol, name, stype, domain,
flags):
e = Event ('service-removed',
interface=interface, protocol=protocol, name=name, stype=stype,
domain=domain, flags=flags)
self.event_queue.append(e)
def listen_for_service(self, sname):
browser_path = self.server.ServiceBrowserNew(avahi.IF_UNSPEC,
avahi.PROTO_UNSPEC, sname, "local", dbus.UInt32(0));
browser_obj = self.bus.get_object(avahi.DBUS_NAME, browser_path)
browser = dbus.Interface(browser_obj,
avahi.DBUS_INTERFACE_SERVICE_BROWSER)
browser.connect_to_signal('ItemNew', self._service_added_cb)
browser.connect_to_signal('ItemRemoved', self._service_removed_cb)
self.browsers.append(browser)
|
Add some avahi testing utilities"""
Infrastructure for testing avahi
"""
import servicetest
from servicetest import Event
import dbus
import dbus.glib
import avahi
def get_host_name():
bus = dbus.SystemBus()
server = dbus.Interface(bus.get_object(avahi.DBUS_NAME,
avahi.DBUS_PATH_SERVER), avahi.DBUS_INTERFACE_SERVER)
return server.GetHostName()
class AvahiListener:
def __init__(self, event_queue):
self.event_queue = event_queue
self.bus = dbus.SystemBus()
self.server = dbus.Interface(self.bus.get_object(avahi.DBUS_NAME,
avahi.DBUS_PATH_SERVER), avahi.DBUS_INTERFACE_SERVER)
self.browsers = []
def _service_added_cb(self, interface, protocol, name, stype, domain,
flags):
e = Event ('service-added',
interface=interface, protocol=protocol, name=name, stype=stype,
domain=domain, flags=flags)
self.event_queue.append(e)
def _service_removed_cb(self, interface, protocol, name, stype, domain,
flags):
e = Event ('service-removed',
interface=interface, protocol=protocol, name=name, stype=stype,
domain=domain, flags=flags)
self.event_queue.append(e)
def listen_for_service(self, sname):
browser_path = self.server.ServiceBrowserNew(avahi.IF_UNSPEC,
avahi.PROTO_UNSPEC, sname, "local", dbus.UInt32(0));
browser_obj = self.bus.get_object(avahi.DBUS_NAME, browser_path)
browser = dbus.Interface(browser_obj,
avahi.DBUS_INTERFACE_SERVICE_BROWSER)
browser.connect_to_signal('ItemNew', self._service_added_cb)
browser.connect_to_signal('ItemRemoved', self._service_removed_cb)
self.browsers.append(browser)
|
<commit_before><commit_msg>Add some avahi testing utilities<commit_after>"""
Infrastructure for testing avahi
"""
import servicetest
from servicetest import Event
import dbus
import dbus.glib
import avahi
def get_host_name():
bus = dbus.SystemBus()
server = dbus.Interface(bus.get_object(avahi.DBUS_NAME,
avahi.DBUS_PATH_SERVER), avahi.DBUS_INTERFACE_SERVER)
return server.GetHostName()
class AvahiListener:
def __init__(self, event_queue):
self.event_queue = event_queue
self.bus = dbus.SystemBus()
self.server = dbus.Interface(self.bus.get_object(avahi.DBUS_NAME,
avahi.DBUS_PATH_SERVER), avahi.DBUS_INTERFACE_SERVER)
self.browsers = []
def _service_added_cb(self, interface, protocol, name, stype, domain,
flags):
e = Event ('service-added',
interface=interface, protocol=protocol, name=name, stype=stype,
domain=domain, flags=flags)
self.event_queue.append(e)
def _service_removed_cb(self, interface, protocol, name, stype, domain,
flags):
e = Event ('service-removed',
interface=interface, protocol=protocol, name=name, stype=stype,
domain=domain, flags=flags)
self.event_queue.append(e)
def listen_for_service(self, sname):
browser_path = self.server.ServiceBrowserNew(avahi.IF_UNSPEC,
avahi.PROTO_UNSPEC, sname, "local", dbus.UInt32(0));
browser_obj = self.bus.get_object(avahi.DBUS_NAME, browser_path)
browser = dbus.Interface(browser_obj,
avahi.DBUS_INTERFACE_SERVICE_BROWSER)
browser.connect_to_signal('ItemNew', self._service_added_cb)
browser.connect_to_signal('ItemRemoved', self._service_removed_cb)
self.browsers.append(browser)
|
|
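Editorial aside: the AvahiListener in the record above only wires up D-Bus signal callbacks; nothing is delivered until a GLib main loop runs. Below is a minimal usage sketch outside the telepathy servicetest harness. Assumptions are flagged in the comments: a plain list stands in for the event queue, "_presence._tcp" is just an example service type, the gobject module and a running system Avahi daemon are assumed available, and servicetest's Event is assumed to expose its keyword arguments as attributes.

import gobject
import dbus.glib   # importing this sets up GLib main-loop integration for dbus

events = []                       # plain list instead of servicetest's queue
listener = AvahiListener(events)  # class from the record, assumed importable
listener.listen_for_service("_presence._tcp")   # example service type

def poll():
    # Drain whatever the browser callbacks have queued since the last tick.
    while events:
        e = events.pop(0)
        print('%s: %s' % (e.type, e.name))   # Event attrs assumed from kwargs
    return True   # keep this timeout installed

gobject.timeout_add(1000, poll)
gobject.MainLoop().run()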
ab2573d6a25a91f1a61e3cd5c5eb69682653ab0c
|
src/postgres/migrations/0004_user_generated_id.py
|
src/postgres/migrations/0004_user_generated_id.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
CREATE_USER_SCHEMA = """
CREATE FUNCTION users_user_id_generator(OUT result bigint) AS $$
DECLARE
-- 2015-08-19T00:00:00Z. This is arbitrarily chosen; anything is fine
-- as long as it is a not-too-distant past.
our_epoch bigint := 1449083752000;
seq_id bigint;
now_millis bigint;
BEGIN
SELECT nextval('users_user_id_seq') % (1 << 23)
INTO seq_id;
SELECT FLOOR(EXTRACT(EPOCH FROM clock_timestamp()) * 1000)
INTO now_millis;
result := (now_millis - our_epoch) << 23;
result := result | seq_id;
END;
$$ LANGUAGE PLPGSQL;
"""
DROP_USER_SCHEMA = 'DROP FUNCTION users_user_id_generator();'
SET_ID_FIELD_BIGINT_DEFAULT = """
ALTER TABLE "users_user" ALTER COLUMN "id"
SET DATA TYPE bigint;
ALTER TABLE "users_user" ALTER COLUMN "id"
SET DEFAULT users_user_id_generator();
"""
DROP_ID_FIELD_BIGINT_DEFAULT = """
ALTER TABLE "users_user" ALTER COLUMN "id"
SET DEFAULT nextval('users_user_id_seq');
ALTER TABLE "users_user" ALTER COLUMN "id"
SET DATA TYPE integer USING "id" % (1 << 23);
"""
class Migration(migrations.Migration):
dependencies = [
('postgres', '0003_rename_proposal_generated_id'),
('users', '0005_auto_20151225_1353'),
]
operations = [
migrations.RunSQL(
CREATE_USER_SCHEMA,
DROP_USER_SCHEMA,
),
migrations.RunSQL(
SET_ID_FIELD_BIGINT_DEFAULT,
DROP_ID_FIELD_BIGINT_DEFAULT,
),
]
|
Use Postgres long ID on user model
|
Use Postgres long ID on user model
|
Python
|
mit
|
uranusjr/pycontw2016,pycontw/pycontw2016,pycontw/pycontw2016,pycontw/pycontw2016,uranusjr/pycontw2016,pycontw/pycontw2016,uranusjr/pycontw2016,uranusjr/pycontw2016
|
Use Postgres long ID on user model
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
CREATE_USER_SCHEMA = """
CREATE FUNCTION users_user_id_generator(OUT result bigint) AS $$
DECLARE
-- 2015-08-19T00:00:00Z. This is arbitrarily chosen; anything is fine
-- as long as it is a not-too-distant past.
our_epoch bigint := 1449083752000;
seq_id bigint;
now_millis bigint;
BEGIN
SELECT nextval('users_user_id_seq') % (1 << 23)
INTO seq_id;
SELECT FLOOR(EXTRACT(EPOCH FROM clock_timestamp()) * 1000)
INTO now_millis;
result := (now_millis - our_epoch) << 23;
result := result | seq_id;
END;
$$ LANGUAGE PLPGSQL;
"""
DROP_USER_SCHEMA = 'DROP FUNCTION users_user_id_generator();'
SET_ID_FIELD_BIGINT_DEFAULT = """
ALTER TABLE "users_user" ALTER COLUMN "id"
SET DATA TYPE bigint;
ALTER TABLE "users_user" ALTER COLUMN "id"
SET DEFAULT users_user_id_generator();
"""
DROP_ID_FIELD_BIGINT_DEFAULT = """
ALTER TABLE "users_user" ALTER COLUMN "id"
SET DEFAULT nextval('users_user_id_seq');
ALTER TABLE "users_user" ALTER COLUMN "id"
SET DATA TYPE integer USING "id" % (1 << 23);
"""
class Migration(migrations.Migration):
dependencies = [
('postgres', '0003_rename_proposal_generated_id'),
('users', '0005_auto_20151225_1353'),
]
operations = [
migrations.RunSQL(
CREATE_USER_SCHEMA,
DROP_USER_SCHEMA,
),
migrations.RunSQL(
SET_ID_FIELD_BIGINT_DEFAULT,
DROP_ID_FIELD_BIGINT_DEFAULT,
),
]
|
<commit_before><commit_msg>Use Postgres long ID on user model<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
CREATE_USER_SCHEMA = """
CREATE FUNCTION users_user_id_generator(OUT result bigint) AS $$
DECLARE
-- 2015-08-19T00:00:00Z. This is arbitrarily chosen; anything is fine
-- as long as it is a not-too-distant past.
our_epoch bigint := 1449083752000;
seq_id bigint;
now_millis bigint;
BEGIN
SELECT nextval('users_user_id_seq') % (1 << 23)
INTO seq_id;
SELECT FLOOR(EXTRACT(EPOCH FROM clock_timestamp()) * 1000)
INTO now_millis;
result := (now_millis - our_epoch) << 23;
result := result | seq_id;
END;
$$ LANGUAGE PLPGSQL;
"""
DROP_USER_SCHEMA = 'DROP FUNCTION users_user_id_generator();'
SET_ID_FIELD_BIGINT_DEFAULT = """
ALTER TABLE "users_user" ALTER COLUMN "id"
SET DATA TYPE bigint;
ALTER TABLE "users_user" ALTER COLUMN "id"
SET DEFAULT users_user_id_generator();
"""
DROP_ID_FIELD_BIGINT_DEFAULT = """
ALTER TABLE "users_user" ALTER COLUMN "id"
SET DEFAULT nextval('users_user_id_seq');
ALTER TABLE "users_user" ALTER COLUMN "id"
SET DATA TYPE integer USING "id" % (1 << 23);
"""
class Migration(migrations.Migration):
dependencies = [
('postgres', '0003_rename_proposal_generated_id'),
('users', '0005_auto_20151225_1353'),
]
operations = [
migrations.RunSQL(
CREATE_USER_SCHEMA,
DROP_USER_SCHEMA,
),
migrations.RunSQL(
SET_ID_FIELD_BIGINT_DEFAULT,
DROP_ID_FIELD_BIGINT_DEFAULT,
),
]
|
Use Postgres long ID on user model#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
CREATE_USER_SCHEMA = """
CREATE FUNCTION users_user_id_generator(OUT result bigint) AS $$
DECLARE
-- 2015-08-19T00:00:00Z. This is arbitrarily chosen; anything is fine
-- as long as it is a not-too-distant past.
our_epoch bigint := 1449083752000;
seq_id bigint;
now_millis bigint;
BEGIN
SELECT nextval('users_user_id_seq') % (1 << 23)
INTO seq_id;
SELECT FLOOR(EXTRACT(EPOCH FROM clock_timestamp()) * 1000)
INTO now_millis;
result := (now_millis - our_epoch) << 23;
result := result | seq_id;
END;
$$ LANGUAGE PLPGSQL;
"""
DROP_USER_SCHEMA = 'DROP FUNCTION users_user_id_generator();'
SET_ID_FIELD_BIGINT_DEFAULT = """
ALTER TABLE "users_user" ALTER COLUMN "id"
SET DATA TYPE bigint;
ALTER TABLE "users_user" ALTER COLUMN "id"
SET DEFAULT users_user_id_generator();
"""
DROP_ID_FIELD_BIGINT_DEFAULT = """
ALTER TABLE "users_user" ALTER COLUMN "id"
SET DEFAULT nextval('users_user_id_seq');
ALTER TABLE "users_user" ALTER COLUMN "id"
SET DATA TYPE integer USING "id" % (1 << 23);
"""
class Migration(migrations.Migration):
dependencies = [
('postgres', '0003_rename_proposal_generated_id'),
('users', '0005_auto_20151225_1353'),
]
operations = [
migrations.RunSQL(
CREATE_USER_SCHEMA,
DROP_USER_SCHEMA,
),
migrations.RunSQL(
SET_ID_FIELD_BIGINT_DEFAULT,
DROP_ID_FIELD_BIGINT_DEFAULT,
),
]
|
<commit_before><commit_msg>Use Postgres long ID on user model<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
CREATE_USER_SCHEMA = """
CREATE FUNCTION users_user_id_generator(OUT result bigint) AS $$
DECLARE
-- 2015-08-19T00:00:00Z. This is arbitrarily chosen; anything is fine
-- as long as it is a not-too-distant past.
our_epoch bigint := 1449083752000;
seq_id bigint;
now_millis bigint;
BEGIN
SELECT nextval('users_user_id_seq') % (1 << 23)
INTO seq_id;
SELECT FLOOR(EXTRACT(EPOCH FROM clock_timestamp()) * 1000)
INTO now_millis;
result := (now_millis - our_epoch) << 23;
result := result | seq_id;
END;
$$ LANGUAGE PLPGSQL;
"""
DROP_USER_SCHEMA = 'DROP FUNCTION users_user_id_generator();'
SET_ID_FIELD_BIGINT_DEFAULT = """
ALTER TABLE "users_user" ALTER COLUMN "id"
SET DATA TYPE bigint;
ALTER TABLE "users_user" ALTER COLUMN "id"
SET DEFAULT users_user_id_generator();
"""
DROP_ID_FIELD_BIGINT_DEFAULT = """
ALTER TABLE "users_user" ALTER COLUMN "id"
SET DEFAULT nextval('users_user_id_seq');
ALTER TABLE "users_user" ALTER COLUMN "id"
SET DATA TYPE integer USING "id" % (1 << 23);
"""
class Migration(migrations.Migration):
dependencies = [
('postgres', '0003_rename_proposal_generated_id'),
('users', '0005_auto_20151225_1353'),
]
operations = [
migrations.RunSQL(
CREATE_USER_SCHEMA,
DROP_USER_SCHEMA,
),
migrations.RunSQL(
SET_ID_FIELD_BIGINT_DEFAULT,
DROP_ID_FIELD_BIGINT_DEFAULT,
),
]
|
|
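Editorial aside: users_user_id_generator() above packs a millisecond timestamp (relative to a project-specific epoch) into the high bits of a bigint and a 23-bit slice of the table sequence into the low bits. A small Python sketch of the same packing and unpacking, handy for sanity-checking the layout; the epoch constant is copied from the migration, everything else is illustrative.

import time

OUR_EPOCH_MS = 1449083752000    # same constant as in the SQL function
SEQ_BITS = 23                   # low 23 bits carry the sequence slice

def make_id(seq_value, now_ms=None):
    # Pack (timestamp offset, sequence) the way the SQL function does.
    if now_ms is None:
        now_ms = int(time.time() * 1000)
    seq_id = seq_value % (1 << SEQ_BITS)
    return ((now_ms - OUR_EPOCH_MS) << SEQ_BITS) | seq_id

def split_id(value):
    # Recover (millisecond offset, sequence slice) from a generated id.
    return value >> SEQ_BITS, value & ((1 << SEQ_BITS) - 1)

uid = make_id(42)
print(uid)
print(split_id(uid))   # (milliseconds since the custom epoch, 42)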
5bd167e0e7497ced568cf0e34c51df4a1f0a391d
|
docs/tutorial/src/python-types/tutorial002.py
|
docs/tutorial/src/python-types/tutorial002.py
|
def get_full_name(first_name: str, last_name: str):
full_name = first_name.title() + " " + last_name.title()
return full_name
print(get_full_name("john", "doe"))
|
Add second tutorial src for python-types
|
:memo: Add second tutorial src for python-types
|
Python
|
mit
|
tiangolo/fastapi,tiangolo/fastapi,tiangolo/fastapi
|
:memo: Add second tutorial src for python-types
|
def get_full_name(first_name: str, last_name: str):
full_name = first_name.title() + " " + last_name.title()
return full_name
print(get_full_name("john", "doe"))
|
<commit_before><commit_msg>:memo: Add second tutorial src for python-types<commit_after>
|
def get_full_name(first_name: str, last_name: str):
full_name = first_name.title() + " " + last_name.title()
return full_name
print(get_full_name("john", "doe"))
|
:memo: Add second tutorial src for python-typesdef get_full_name(first_name: str, last_name: str):
full_name = first_name.title() + " " + last_name.title()
return full_name
print(get_full_name("john", "doe"))
|
<commit_before><commit_msg>:memo: Add second tutorial src for python-types<commit_after>def get_full_name(first_name: str, last_name: str):
full_name = first_name.title() + " " + last_name.title()
return full_name
print(get_full_name("john", "doe"))
|
|
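Editorial aside: the annotations in tutorial002.py change nothing at runtime; their value is editor completion and static analysis. A short illustration of that point (the function body is repeated from the record so the snippet runs on its own):

def get_full_name(first_name: str, last_name: str):
    return first_name.title() + " " + last_name.title()

# Annotations are only stored on the function object, never enforced:
print(get_full_name.__annotations__)   # {'first_name': <class 'str'>, 'last_name': <class 'str'>}
print(get_full_name("john", "doe"))    # John Doe
# A static checker (for example, running mypy over the file) would flag a call
# like get_full_name("john", 3); at runtime it only fails later inside .title().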
59a2712f353f47e3dc237479cc6cc46666b7d0f1
|
energy_demand/scripts/generate_dummy_data.py
|
energy_demand/scripts/generate_dummy_data.py
|
"""Generate dummy data to use in smif as scenario data for testing
"""
from pprint import pprint
from energy_demand.read_write.data_loader import dummy_data_generation
import yaml
def main():
"""Generate and write out data
"""
data = dummy_data_generation({
'sim_param': {
'base_yr': 2010,
'end_yr': 2050
}
})
pprint(data)
# regions
# gva : year x region
gva = []
for year, region_value in data['GVA'].items():
for region, value in region_value.items():
gva.append({
'interval': 1,
'year': year,
'region': region,
'value': value
})
with open('gva.yaml', 'w') as file_handle:
yaml.dump(gva, file_handle)
# population : year x region
population = []
for year, region_value in data['population'].items():
for region, value in region_value.items():
population.append({
'interval': 1,
'year': year,
'region': region,
'value': value
})
with open('population.yaml', 'w') as file_handle:
yaml.dump(population, file_handle)
# residential_floor_area (rs_floorarea) : year x region
residential_floor_area = []
for year, region_value in data['rs_floorarea'].items():
for region, value in region_value.items():
residential_floor_area.append({
'interval': 1,
'year': year,
'region': region,
'value': value
})
with open('residential_floor_area.yaml', 'w') as file_handle:
yaml.dump(residential_floor_area, file_handle)
if __name__ == '__main__':
main()
|
Write dummy data out to yaml
|
Write dummy data out to yaml
|
Python
|
mit
|
nismod/energy_demand,nismod/energy_demand
|
Write dummy data out to yaml
|
"""Generate dummy data to use in smif as scenario data for testing
"""
from pprint import pprint
from energy_demand.read_write.data_loader import dummy_data_generation
import yaml
def main():
"""Generate and write out data
"""
data = dummy_data_generation({
'sim_param': {
'base_yr': 2010,
'end_yr': 2050
}
})
pprint(data)
# regions
# gva : year x region
gva = []
for year, region_value in data['GVA'].items():
for region, value in region_value.items():
gva.append({
'interval': 1,
'year': year,
'region': region,
'value': value
})
with open('gva.yaml', 'w') as file_handle:
yaml.dump(gva, file_handle)
# population : year x region
population = []
for year, region_value in data['population'].items():
for region, value in region_value.items():
population.append({
'interval': 1,
'year': year,
'region': region,
'value': value
})
with open('population.yaml', 'w') as file_handle:
yaml.dump(population, file_handle)
# residential_floor_area (rs_floorarea) : year x region
residential_floor_area = []
for year, region_value in data['rs_floorarea'].items():
for region, value in region_value.items():
residential_floor_area.append({
'interval': 1,
'year': year,
'region': region,
'value': value
})
with open('residential_floor_area.yaml', 'w') as file_handle:
yaml.dump(residential_floor_area, file_handle)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Write dummy data out to yaml<commit_after>
|
"""Generate dummy data to use in smif as scenario data for testing
"""
from pprint import pprint
from energy_demand.read_write.data_loader import dummy_data_generation
import yaml
def main():
"""Generate and write out data
"""
data = dummy_data_generation({
'sim_param': {
'base_yr': 2010,
'end_yr': 2050
}
})
pprint(data)
# regions
# gva : year x region
gva = []
for year, region_value in data['GVA'].items():
for region, value in region_value.items():
gva.append({
'interval': 1,
'year': year,
'region': region,
'value': value
})
with open('gva.yaml', 'w') as file_handle:
yaml.dump(gva, file_handle)
# population : year x region
population = []
for year, region_value in data['population'].items():
for region, value in region_value.items():
population.append({
'interval': 1,
'year': year,
'region': region,
'value': value
})
with open('population.yaml', 'w') as file_handle:
yaml.dump(population, file_handle)
# residential_floor_area (rs_floorarea) : year x region
residential_floor_area = []
for year, region_value in data['rs_floorarea'].items():
for region, value in region_value.items():
residential_floor_area.append({
'interval': 1,
'year': year,
'region': region,
'value': value
})
with open('residential_floor_area.yaml', 'w') as file_handle:
yaml.dump(residential_floor_area, file_handle)
if __name__ == '__main__':
main()
|
Write dummy data out to yaml"""Generate dummy data to use in smif as scenario data for testing
"""
from pprint import pprint
from energy_demand.read_write.data_loader import dummy_data_generation
import yaml
def main():
"""Generate and write out data
"""
data = dummy_data_generation({
'sim_param': {
'base_yr': 2010,
'end_yr': 2050
}
})
pprint(data)
# regions
# gva : year x region
gva = []
for year, region_value in data['GVA'].items():
for region, value in region_value.items():
gva.append({
'interval': 1,
'year': year,
'region': region,
'value': value
})
with open('gva.yaml', 'w') as file_handle:
yaml.dump(gva, file_handle)
# population : year x region
population = []
for year, region_value in data['population'].items():
for region, value in region_value.items():
population.append({
'interval': 1,
'year': year,
'region': region,
'value': value
})
with open('population.yaml', 'w') as file_handle:
yaml.dump(population, file_handle)
# residential_floor_area (rs_floorarea) : year x region
residential_floor_area = []
for year, region_value in data['rs_floorarea'].items():
for region, value in region_value.items():
residential_floor_area.append({
'interval': 1,
'year': year,
'region': region,
'value': value
})
with open('residential_floor_area.yaml', 'w') as file_handle:
yaml.dump(residential_floor_area, file_handle)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Write dummy data out to yaml<commit_after>"""Generate dummy data to use in smif as scenario data for testing
"""
from pprint import pprint
from energy_demand.read_write.data_loader import dummy_data_generation
import yaml
def main():
"""Generate and write out data
"""
data = dummy_data_generation({
'sim_param': {
'base_yr': 2010,
'end_yr': 2050
}
})
pprint(data)
# regions
# gva : year x region
gva = []
for year, region_value in data['GVA'].items():
for region, value in region_value.items():
gva.append({
'interval': 1,
'year': year,
'region': region,
'value': value
})
with open('gva.yaml', 'w') as file_handle:
yaml.dump(gva, file_handle)
# population : year x region
population = []
for year, region_value in data['population'].items():
for region, value in region_value.items():
population.append({
'interval': 1,
'year': year,
'region': region,
'value': value
})
with open('population.yaml', 'w') as file_handle:
yaml.dump(population, file_handle)
# residential_floor_area (rs_floorarea) : year x region
residential_floor_area = []
for year, region_value in data['rs_floorarea'].items():
for region, value in region_value.items():
residential_floor_area.append({
'interval': 1,
'year': year,
'region': region,
'value': value
})
with open('residential_floor_area.yaml', 'w') as file_handle:
yaml.dump(residential_floor_area, file_handle)
if __name__ == '__main__':
main()
|
|
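Editorial aside: each block in the script above flattens a {year: {region: value}} mapping into a flat list of records before dumping it to YAML. The reshaping is easy to check in isolation; a tiny sketch with made-up numbers (the real values come from dummy_data_generation):

import yaml

population = {2010: {'A': 100, 'B': 250},
              2011: {'A': 105, 'B': 240}}

records = []
for year, region_value in population.items():
    for region, value in region_value.items():
        records.append({'interval': 1, 'year': year,
                        'region': region, 'value': value})

# Each record becomes one mapping inside a top-level YAML list.
print(yaml.dump(records, default_flow_style=False))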
daa3504942e088fb6cd23eaccff78e613460f517
|
mezzanine/accounts/admin.py
|
mezzanine/accounts/admin.py
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from mezzanine.accounts.models import get_profile_model
Profile = get_profile_model()
class ProfileInline(admin.StackedInline):
model = Profile
can_delete = False
template = "admin/profile_inline.html"
extra = 0
class UserProfileAdmin(UserAdmin):
inlines = (ProfileInline,) if Profile else ()
admin.site.unregister(User)
admin.site.register(User, UserProfileAdmin)
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from mezzanine.accounts.models import get_profile_model
from mezzanine.core.admin import AdminProfileInline
Profile = get_profile_model()
class ProfileInline(admin.StackedInline):
model = Profile
can_delete = False
template = "admin/profile_inline.html"
extra = 0
class UserProfileAdmin(UserAdmin):
inlines = (ProfileInline, AdminProfileInline) if Profile else (AdminProfileInline,)
admin.site.unregister(User)
admin.site.register(User, UserProfileAdmin)
|
Include AdminProfileInline so that it is not lost if the user enables Mezzanine accounts.
|
Include AdminProfileInline so that it is not lost if the user enables Mezzanine accounts.
--HG--
branch : admin_site_permissions
|
Python
|
bsd-2-clause
|
spookylukey/mezzanine,christianwgd/mezzanine,molokov/mezzanine,frankier/mezzanine,SoLoHiC/mezzanine,ZeroXn/mezzanine,readevalprint/mezzanine,eino-makitalo/mezzanine,stbarnabas/mezzanine,theclanks/mezzanine,scarcry/snm-mezzanine,Cajoline/mezzanine,molokov/mezzanine,sjdines/mezzanine,joshcartme/mezzanine,frankchin/mezzanine,saintbird/mezzanine,Cicero-Zhao/mezzanine,wbtuomela/mezzanine,stephenmcd/mezzanine,tuxinhang1989/mezzanine,promil23/mezzanine,fusionbox/mezzanine,stephenmcd/mezzanine,dekomote/mezzanine-modeltranslation-backport,promil23/mezzanine,wbtuomela/mezzanine,wrwrwr/mezzanine,adrian-the-git/mezzanine,emile2016/mezzanine,christianwgd/mezzanine,christianwgd/mezzanine,geodesign/mezzanine,douglaskastle/mezzanine,wrwrwr/mezzanine,industrydive/mezzanine,webounty/mezzanine,jerivas/mezzanine,scarcry/snm-mezzanine,dsanders11/mezzanine,PegasusWang/mezzanine,ZeroXn/mezzanine,Kniyl/mezzanine,promil23/mezzanine,dsanders11/mezzanine,dekomote/mezzanine-modeltranslation-backport,gradel/mezzanine,viaregio/mezzanine,jjz/mezzanine,dustinrb/mezzanine,webounty/mezzanine,dustinrb/mezzanine,orlenko/sfpirg,nikolas/mezzanine,molokov/mezzanine,spookylukey/mezzanine,saintbird/mezzanine,viaregio/mezzanine,agepoly/mezzanine,biomassives/mezzanine,sjuxax/mezzanine,emile2016/mezzanine,Skytorn86/mezzanine,industrydive/mezzanine,douglaskastle/mezzanine,ZeroXn/mezzanine,frankchin/mezzanine,damnfine/mezzanine,frankchin/mezzanine,theclanks/mezzanine,webounty/mezzanine,douglaskastle/mezzanine,ryneeverett/mezzanine,vladir/mezzanine,wbtuomela/mezzanine,tuxinhang1989/mezzanine,joshcartme/mezzanine,joshcartme/mezzanine,ryneeverett/mezzanine,theclanks/mezzanine,PegasusWang/mezzanine,jjz/mezzanine,wyzex/mezzanine,eino-makitalo/mezzanine,wyzex/mezzanine,orlenko/plei,jerivas/mezzanine,Kniyl/mezzanine,Kniyl/mezzanine,adrian-the-git/mezzanine,gradel/mezzanine,orlenko/sfpirg,dovydas/mezzanine,Cajoline/mezzanine,stbarnabas/mezzanine,sjuxax/mezzanine,Cicero-Zhao/mezzanine,mush42/mezzanine,geodesign/mezzanine,emile2016/mezzanine,Skytorn86/mezzanine,viaregio/mezzanine,orlenko/plei,SoLoHiC/mezzanine,mush42/mezzanine,fusionbox/mezzanine,dovydas/mezzanine,sjdines/mezzanine,adrian-the-git/mezzanine,orlenko/plei,readevalprint/mezzanine,batpad/mezzanine,eino-makitalo/mezzanine,batpad/mezzanine,vladir/mezzanine,SoLoHiC/mezzanine,geodesign/mezzanine,orlenko/sfpirg,AlexHill/mezzanine,AlexHill/mezzanine,damnfine/mezzanine,damnfine/mezzanine,nikolas/mezzanine,nikolas/mezzanine,agepoly/mezzanine,cccs-web/mezzanine,saintbird/mezzanine,mush42/mezzanine,jerivas/mezzanine,industrydive/mezzanine,biomassives/mezzanine,spookylukey/mezzanine,gradel/mezzanine,dekomote/mezzanine-modeltranslation-backport,Cajoline/mezzanine,cccs-web/mezzanine,sjuxax/mezzanine,sjdines/mezzanine,Skytorn86/mezzanine,stephenmcd/mezzanine,dsanders11/mezzanine,dovydas/mezzanine,frankier/mezzanine,wyzex/mezzanine,biomassives/mezzanine,PegasusWang/mezzanine,scarcry/snm-mezzanine,dustinrb/mezzanine,tuxinhang1989/mezzanine,agepoly/mezzanine,readevalprint/mezzanine,frankier/mezzanine,jjz/mezzanine,vladir/mezzanine,ryneeverett/mezzanine
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from mezzanine.accounts.models import get_profile_model
Profile = get_profile_model()
class ProfileInline(admin.StackedInline):
model = Profile
can_delete = False
template = "admin/profile_inline.html"
extra = 0
class UserProfileAdmin(UserAdmin):
inlines = (ProfileInline,) if Profile else ()
admin.site.unregister(User)
admin.site.register(User, UserProfileAdmin)
Include AdminProfileInline so that it is not lost if the user enables Mezzanine accounts.
--HG--
branch : admin_site_permissions
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from mezzanine.accounts.models import get_profile_model
from mezzanine.core.admin import AdminProfileInline
Profile = get_profile_model()
class ProfileInline(admin.StackedInline):
model = Profile
can_delete = False
template = "admin/profile_inline.html"
extra = 0
class UserProfileAdmin(UserAdmin):
inlines = (ProfileInline, AdminProfileInline) if Profile else (AdminProfileInline,)
admin.site.unregister(User)
admin.site.register(User, UserProfileAdmin)
|
<commit_before>
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from mezzanine.accounts.models import get_profile_model
Profile = get_profile_model()
class ProfileInline(admin.StackedInline):
model = Profile
can_delete = False
template = "admin/profile_inline.html"
extra = 0
class UserProfileAdmin(UserAdmin):
inlines = (ProfileInline,) if Profile else ()
admin.site.unregister(User)
admin.site.register(User, UserProfileAdmin)
<commit_msg>Include AdminProfileInline so that it is not lost if the user enables Mezzanine accounts.
--HG--
branch : admin_site_permissions<commit_after>
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from mezzanine.accounts.models import get_profile_model
from mezzanine.core.admin import AdminProfileInline
Profile = get_profile_model()
class ProfileInline(admin.StackedInline):
model = Profile
can_delete = False
template = "admin/profile_inline.html"
extra = 0
class UserProfileAdmin(UserAdmin):
inlines = (ProfileInline, AdminProfileInline) if Profile else (AdminProfileInline,)
admin.site.unregister(User)
admin.site.register(User, UserProfileAdmin)
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from mezzanine.accounts.models import get_profile_model
Profile = get_profile_model()
class ProfileInline(admin.StackedInline):
model = Profile
can_delete = False
template = "admin/profile_inline.html"
extra = 0
class UserProfileAdmin(UserAdmin):
inlines = (ProfileInline,) if Profile else ()
admin.site.unregister(User)
admin.site.register(User, UserProfileAdmin)
Include AdminProfileInline so that it is not lost if the user enables Mezzanine accounts.
--HG--
branch : admin_site_permissions
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from mezzanine.accounts.models import get_profile_model
from mezzanine.core.admin import AdminProfileInline
Profile = get_profile_model()
class ProfileInline(admin.StackedInline):
model = Profile
can_delete = False
template = "admin/profile_inline.html"
extra = 0
class UserProfileAdmin(UserAdmin):
inlines = (ProfileInline, AdminProfileInline) if Profile else (AdminProfileInline,)
admin.site.unregister(User)
admin.site.register(User, UserProfileAdmin)
|
<commit_before>
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from mezzanine.accounts.models import get_profile_model
Profile = get_profile_model()
class ProfileInline(admin.StackedInline):
model = Profile
can_delete = False
template = "admin/profile_inline.html"
extra = 0
class UserProfileAdmin(UserAdmin):
inlines = (ProfileInline,) if Profile else ()
admin.site.unregister(User)
admin.site.register(User, UserProfileAdmin)
<commit_msg>Include AdminProfileInline so that it is not lost if the user enables Mezzanine accounts.
--HG--
branch : admin_site_permissions<commit_after>
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from mezzanine.accounts.models import get_profile_model
from mezzanine.core.admin import AdminProfileInline
Profile = get_profile_model()
class ProfileInline(admin.StackedInline):
model = Profile
can_delete = False
template = "admin/profile_inline.html"
extra = 0
class UserProfileAdmin(UserAdmin):
inlines = (ProfileInline, AdminProfileInline) if Profile else (AdminProfileInline,)
admin.site.unregister(User)
admin.site.register(User, UserProfileAdmin)
|
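Editorial aside: the change above keeps AdminProfileInline in the admin whether or not an accounts profile model is configured, by building the inlines tuple conditionally. One detail worth noting is the one-element tuple in the else branch; a stripped-down sketch with invented names (not Mezzanine code) shows the pattern:

from django.contrib import admin

class AlphaInline(admin.StackedInline):
    model = None   # placeholder; a registered inline needs a real model

class BetaInline(admin.StackedInline):
    model = None   # placeholder

have_alpha = False   # stands in for "a profile model is configured"

class ExampleAdmin(admin.ModelAdmin):
    # The trailing comma matters: (BetaInline,) is a tuple, (BetaInline) is not.
    inlines = (AlphaInline, BetaInline) if have_alpha else (BetaInline,)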
f61870d3e3c5684101034fa800f6bece03f08c66
|
disasm.py
|
disasm.py
|
import MOS6502
import instructions
def disasm(memory):
index = 0
lines = []
while index < len(memory):
currInst = instructions.instructions[memory[index]]
line = currInst.mnem + " "
line += currInst.operType + " "
if currInst.size > 1:
for i in range(1, currInst.size):
line += hex(memory[i]) + " "
lines.append(line)
index += currInst.size
return lines
code = [0x78, 0xD8, 0xA9, 0x10, 0x8D, 0x00, 0x20, 0xA2]
print disasm(code)
|
Add quick dumb disassembler for upcoming debugger
|
Add quick dumb disassembler for upcoming debugger
|
Python
|
bsd-2-clause
|
pusscat/refNes
|
Add quick dumb disassembler for upcoming debugger
|
import MOS6502
import instructions
def disasm(memory):
index = 0
lines = []
while index < len(memory):
currInst = instructions.instructions[memory[index]]
line = currInst.mnem + " "
line += currInst.operType + " "
if currInst.size > 1:
for i in range(1, currInst.size):
line += hex(memory[i]) + " "
lines.append(line)
index += currInst.size
return lines
code = [0x78, 0xD8, 0xA9, 0x10, 0x8D, 0x00, 0x20, 0xA2]
print disasm(code)
|
<commit_before><commit_msg>Add quick dumb disassembler for upcoming debugger<commit_after>
|
import MOS6502
import instructions
def disasm(memory):
index = 0
lines = []
while index < len(memory):
currInst = instructions.instructions[memory[index]]
line = currInst.mnem + " "
line += currInst.operType + " "
if currInst.size > 1:
for i in range(1, currInst.size):
line += hex(memory[i]) + " "
lines.append(line)
index += currInst.size
return lines
code = [0x78, 0xD8, 0xA9, 0x10, 0x8D, 0x00, 0x20, 0xA2]
print disasm(code)
|
Add quick dumb disassembler for upcoming debuggerimport MOS6502
import instructions
def disasm(memory):
index = 0
lines = []
while index < len(memory):
currInst = instructions.instructions[memory[index]]
line = currInst.mnem + " "
line += currInst.operType + " "
if currInst.size > 1:
for i in range(1, currInst.size):
line += hex(memory[i]) + " "
lines.append(line)
index += currInst.size
return lines
code = [0x78, 0xD8, 0xA9, 0x10, 0x8D, 0x00, 0x20, 0xA2]
print disasm(code)
|
<commit_before><commit_msg>Add quick dumb disassembler for upcoming debugger<commit_after>import MOS6502
import instructions
def disasm(memory):
index = 0
lines = []
while index < len(memory):
currInst = instructions.instructions[memory[index]]
line = currInst.mnem + " "
line += currInst.operType + " "
if currInst.size > 1:
for i in range(1, currInst.size):
line += hex(memory[i]) + " "
lines.append(line)
index += currInst.size
return lines
code = [0x78, 0xD8, 0xA9, 0x10, 0x8D, 0x00, 0x20, 0xA2]
print disasm(code)
|
|
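Editorial aside: in the record above the operand loop reads memory[i] with i counted from 1, so every multi-byte instruction prints bytes from the start of the buffer rather than the bytes that follow its opcode. Below is a sketch of the same loop indexed relative to the current instruction; the .mnem/.operType/.size interface is taken from the record, but the instruction table is faked so the snippet runs standalone (real opcode metadata lives in the project's instructions module).

class FakeInst(object):
    def __init__(self, mnem, oper_type, size):
        self.mnem, self.operType, self.size = mnem, oper_type, size

# Just enough of a table for the bytes used below (placeholder data).
instructions = {0x78: FakeInst('SEI', 'implied', 1),
                0xA9: FakeInst('LDA', 'immediate', 2),
                0x8D: FakeInst('STA', 'absolute', 3)}

def disasm(memory):
    index, lines = 0, []
    while index < len(memory):
        inst = instructions[memory[index]]
        parts = [inst.mnem, inst.operType]
        for i in range(1, inst.size):
            parts.append(hex(memory[index + i]))   # operands follow the opcode
        lines.append(' '.join(parts))
        index += inst.size
    return lines

print(disasm([0x78, 0xA9, 0x10, 0x8D, 0x00, 0x20]))
# ['SEI implied', 'LDA immediate 0x10', 'STA absolute 0x0 0x20']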
4c02bfc323dcdce642118978da04297dbea77189
|
nightreads/emails/migrations/0002_auto_20160521_1046.py
|
nightreads/emails/migrations/0002_auto_20160521_1046.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-21 10:46
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('emails', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='email',
name='post',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='posts.Post'),
),
]
|
Set `blank` attrib to True on Email.post field
|
Set `blank` attrib to True on Email.post field
|
Python
|
mit
|
avinassh/nightreads,avinassh/nightreads
|
Set `blank` attrib to True on Email.post field
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-21 10:46
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('emails', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='email',
name='post',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='posts.Post'),
),
]
|
<commit_before><commit_msg>Set `blank` attrib to True on Email.post field<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-21 10:46
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('emails', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='email',
name='post',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='posts.Post'),
),
]
|
Set `blank` attrib to True on Email.post field# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-21 10:46
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('emails', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='email',
name='post',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='posts.Post'),
),
]
|
<commit_before><commit_msg>Set `blank` attrib to True on Email.post field<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-21 10:46
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('emails', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='email',
name='post',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='posts.Post'),
),
]
|
|
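Editorial aside: the AlterField above only adds blank=True, which affects validation (forms and the admin may leave the field empty); it is null=True that allows NULL in the database column. A sketch of the field definition this migration converges on; the model and related names come from the record, while the surrounding class body is illustrative and would live in the emails app's models.py:

from django.db import models

class Email(models.Model):
    # blank=True: forms/admin may leave it empty; null=True: column may be NULL.
    post = models.ForeignKey('posts.Post', blank=True, null=True,
                             on_delete=models.CASCADE)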
34810a3d6f95fe14a623090f2c8dfe01299be179
|
backend/scripts/fixdrafts.py
|
backend/scripts/fixdrafts.py
|
#!/usr/bin/env python
import rethinkdb as r
import optparse
if __name__ == "__main__":
parser = optparse.OptionParser()
parser.add_option("-P", "--port", dest="port", type="int",
help="rethinkdb port", default=30815)
(options, args) = parser.parse_args()
conn = r.connect('localhost', options.port, db='materialscommons')
drafts = list(r.table('drafts').run(conn, time_format='raw'))
for draft in drafts:
if 'process' not in draft:
continue
input_conditions = draft['process']['input_conditions']
if 'Specimen Prep' in input_conditions:
sp = input_conditions['Specimen Prep']
for prop in sp['default_properties']:
if prop['attribute'] == 'preparation':
prop['value_choice'] = ["Electropolish", "FIB Liftout",
"Other"]
prop['unit_choice'] = []
prop['value'] = prop['unit']
prop['unit'] = ""
r.table('drafts').get(draft['id']).update(draft).run(conn)
|
Add script to fix Emmanuelle's messed up drafts.
|
Add script to fix Emmanuelle's messed up drafts.
|
Python
|
mit
|
materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org
|
Add script to fix Emmanuelle's messed up drafts.
|
#!/usr/bin/env python
import rethinkdb as r
import optparse
if __name__ == "__main__":
parser = optparse.OptionParser()
parser.add_option("-P", "--port", dest="port", type="int",
help="rethinkdb port", default=30815)
(options, args) = parser.parse_args()
conn = r.connect('localhost', options.port, db='materialscommons')
drafts = list(r.table('drafts').run(conn, time_format='raw'))
for draft in drafts:
if 'process' not in draft:
continue
input_conditions = draft['process']['input_conditions']
if 'Specimen Prep' in input_conditions:
sp = input_conditions['Specimen Prep']
for prop in sp['default_properties']:
if prop['attribute'] == 'preparation':
prop['value_choice'] = ["Electropolish", "FIB Liftout",
"Other"]
prop['unit_choice'] = []
prop['value'] = prop['unit']
prop['unit'] = ""
r.table('drafts').get(draft['id']).update(draft).run(conn)
|
<commit_before><commit_msg>Add script to fix Emmanuelle's messed up drafts.<commit_after>
|
#!/usr/bin/env python
import rethinkdb as r
import optparse
if __name__ == "__main__":
parser = optparse.OptionParser()
parser.add_option("-P", "--port", dest="port", type="int",
help="rethinkdb port", default=30815)
(options, args) = parser.parse_args()
conn = r.connect('localhost', options.port, db='materialscommons')
drafts = list(r.table('drafts').run(conn, time_format='raw'))
for draft in drafts:
if 'process' not in draft:
continue
input_conditions = draft['process']['input_conditions']
if 'Specimen Prep' in input_conditions:
sp = input_conditions['Specimen Prep']
for prop in sp['default_properties']:
if prop['attribute'] == 'preparation':
prop['value_choice'] = ["Electropolish", "FIB Liftout",
"Other"]
prop['unit_choice'] = []
prop['value'] = prop['unit']
prop['unit'] = ""
r.table('drafts').get(draft['id']).update(draft).run(conn)
|
Add script to fix Emmanuelle's messed up drafts.#!/usr/bin/env python
import rethinkdb as r
import optparse
if __name__ == "__main__":
parser = optparse.OptionParser()
parser.add_option("-P", "--port", dest="port", type="int",
help="rethinkdb port", default=30815)
(options, args) = parser.parse_args()
conn = r.connect('localhost', options.port, db='materialscommons')
drafts = list(r.table('drafts').run(conn, time_format='raw'))
for draft in drafts:
if 'process' not in draft:
continue
input_conditions = draft['process']['input_conditions']
if 'Specimen Prep' in input_conditions:
sp = input_conditions['Specimen Prep']
for prop in sp['default_properties']:
if prop['attribute'] == 'preparation':
prop['value_choice'] = ["Electropolish", "FIB Liftout",
"Other"]
prop['unit_choice'] = []
prop['value'] = prop['unit']
prop['unit'] = ""
r.table('drafts').get(draft['id']).update(draft).run(conn)
|
<commit_before><commit_msg>Add script to fix Emmanuelle's messed up drafts.<commit_after>#!/usr/bin/env python
import rethinkdb as r
import optparse
if __name__ == "__main__":
parser = optparse.OptionParser()
parser.add_option("-P", "--port", dest="port", type="int",
help="rethinkdb port", default=30815)
(options, args) = parser.parse_args()
conn = r.connect('localhost', options.port, db='materialscommons')
drafts = list(r.table('drafts').run(conn, time_format='raw'))
for draft in drafts:
if 'process' not in draft:
continue
input_conditions = draft['process']['input_conditions']
if 'Specimen Prep' in input_conditions:
sp = input_conditions['Specimen Prep']
for prop in sp['default_properties']:
if prop['attribute'] == 'preparation':
prop['value_choice'] = ["Electropolish", "FIB Liftout",
"Other"]
prop['unit_choice'] = []
prop['value'] = prop['unit']
prop['unit'] = ""
r.table('drafts').get(draft['id']).update(draft).run(conn)
|
|
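Editorial aside: for each affected draft the script rewrites one property of the 'Specimen Prep' condition: the old unit string becomes the value, unit is cleared, and the choice lists are replaced. A standalone sketch of that transformation on a plain dict (no RethinkDB needed); the "before" state is a guess at what the broken drafts looked like, only the mutation mirrors the script:

prop = {'attribute': 'preparation',
        'value': '',
        'unit': 'Electropolish',            # guessed before-state
        'value_choice': [],
        'unit_choice': ['Electropolish', 'FIB Liftout', 'Other']}

if prop['attribute'] == 'preparation':
    prop['value_choice'] = ['Electropolish', 'FIB Liftout', 'Other']
    prop['unit_choice'] = []
    prop['value'] = prop['unit']   # the stray unit string becomes the value
    prop['unit'] = ''

print(prop)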
81c67593807ed22049b4ec7d4a1503a35b874444
|
csunplugged/tests/resources/other/test_resource_cache_redirect.py
|
csunplugged/tests/resources/other/test_resource_cache_redirect.py
|
from tests.BaseTestWithDB import BaseTestWithDB
from django.test import tag
from django.http import QueryDict
from django.urls import reverse
from resources.views.views import resource_pdf_cache
from tests.resources.ResourcesTestDataGenerator import ResourcesTestDataGenerator
from utils.get_resource_generator import get_resource_generator
@tag("resource")
class CacheRedirectTest(BaseTestWithDB):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.test_data = ResourcesTestDataGenerator()
def test_resources_cache_valid_resource(self):
resource = self.test_data.create_resource(
"grid",
"Grid",
"resources/grid.html",
"GridResourceGenerator",
)
query = QueryDict("paper_size=a4")
generator = get_resource_generator(resource.generator_module, query)
response = resource_pdf_cache(resource.name, generator)
self.assertEqual(response.status_code, 302)
self.assertEqual(
response.url,
"/staticfiles/resources/Resource%20Grid%20(a4).pdf"
)
|
Add test for resource cache function
|
Add test for resource cache function
|
Python
|
mit
|
uccser/cs-unplugged,uccser/cs-unplugged,uccser/cs-unplugged,uccser/cs-unplugged
|
Add test for resource cache function
|
from tests.BaseTestWithDB import BaseTestWithDB
from django.test import tag
from django.http import QueryDict
from django.urls import reverse
from resources.views.views import resource_pdf_cache
from tests.resources.ResourcesTestDataGenerator import ResourcesTestDataGenerator
from utils.get_resource_generator import get_resource_generator
@tag("resource")
class CacheRedirectTest(BaseTestWithDB):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.test_data = ResourcesTestDataGenerator()
def test_resources_cache_valid_resource(self):
resource = self.test_data.create_resource(
"grid",
"Grid",
"resources/grid.html",
"GridResourceGenerator",
)
query = QueryDict("paper_size=a4")
generator = get_resource_generator(resource.generator_module, query)
response = resource_pdf_cache(resource.name, generator)
self.assertEqual(response.status_code, 302)
self.assertEqual(
response.url,
"/staticfiles/resources/Resource%20Grid%20(a4).pdf"
)
|
<commit_before><commit_msg>Add test for resource cache function<commit_after>
|
from tests.BaseTestWithDB import BaseTestWithDB
from django.test import tag
from django.http import QueryDict
from django.urls import reverse
from resources.views.views import resource_pdf_cache
from tests.resources.ResourcesTestDataGenerator import ResourcesTestDataGenerator
from utils.get_resource_generator import get_resource_generator
@tag("resource")
class CacheRedirectTest(BaseTestWithDB):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.test_data = ResourcesTestDataGenerator()
def test_resources_cache_valid_resource(self):
resource = self.test_data.create_resource(
"grid",
"Grid",
"resources/grid.html",
"GridResourceGenerator",
)
query = QueryDict("paper_size=a4")
generator = get_resource_generator(resource.generator_module, query)
response = resource_pdf_cache(resource.name, generator)
self.assertEqual(response.status_code, 302)
self.assertEqual(
response.url,
"/staticfiles/resources/Resource%20Grid%20(a4).pdf"
)
|
Add test for resource cache functionfrom tests.BaseTestWithDB import BaseTestWithDB
from django.test import tag
from django.http import QueryDict
from django.urls import reverse
from resources.views.views import resource_pdf_cache
from tests.resources.ResourcesTestDataGenerator import ResourcesTestDataGenerator
from utils.get_resource_generator import get_resource_generator
@tag("resource")
class CacheRedirectTest(BaseTestWithDB):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.test_data = ResourcesTestDataGenerator()
def test_resources_cache_valid_resource(self):
resource = self.test_data.create_resource(
"grid",
"Grid",
"resources/grid.html",
"GridResourceGenerator",
)
query = QueryDict("paper_size=a4")
generator = get_resource_generator(resource.generator_module, query)
response = resource_pdf_cache(resource.name, generator)
self.assertEqual(response.status_code, 302)
self.assertEqual(
response.url,
"/staticfiles/resources/Resource%20Grid%20(a4).pdf"
)
|
<commit_before><commit_msg>Add test for resource cache function<commit_after>from tests.BaseTestWithDB import BaseTestWithDB
from django.test import tag
from django.http import QueryDict
from django.urls import reverse
from resources.views.views import resource_pdf_cache
from tests.resources.ResourcesTestDataGenerator import ResourcesTestDataGenerator
from utils.get_resource_generator import get_resource_generator
@tag("resource")
class CacheRedirectTest(BaseTestWithDB):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.test_data = ResourcesTestDataGenerator()
def test_resources_cache_valid_resource(self):
resource = self.test_data.create_resource(
"grid",
"Grid",
"resources/grid.html",
"GridResourceGenerator",
)
query = QueryDict("paper_size=a4")
generator = get_resource_generator(resource.generator_module, query)
response = resource_pdf_cache(resource.name, generator)
self.assertEqual(response.status_code, 302)
self.assertEqual(
response.url,
"/staticfiles/resources/Resource%20Grid%20(a4).pdf"
)
|
|
97bbbc7ac288b1d8220b4e335b9aa19555fee3f3
|
buffer/tests/test_profile.py
|
buffer/tests/test_profile.py
|
import json
from nose.tools import eq_, raises
from mock import MagicMock, patch
from buffer.models.profile import Profile, PATHS
mocked_response = {
'name': 'me',
'service': 'twiter',
'id': 1
}
def test_profile_schedules_getter():
'''
Test schedules gettering from buffer api
'''
mocked_api = MagicMock()
mocked_api.get.return_value = '123'
profile = Profile(mocked_api, mocked_response)
eq_(profile.schedules, '123')
mocked_api.get.assert_called_once_with(url = PATHS['GET_SCHEDULES'] % 1, parser=json.loads)
|
Test profile schedulers gettering from buffer api
|
Test profile schedulers gettering from buffer api
|
Python
|
mit
|
vtemian/buffpy,bufferapp/buffer-python
|
Test profile schedulers gettering from buffer api
|
import json
from nose.tools import eq_, raises
from mock import MagicMock, patch
from buffer.models.profile import Profile, PATHS
mocked_response = {
'name': 'me',
'service': 'twiter',
'id': 1
}
def test_profile_schedules_getter():
'''
Test schedules gettering from buffer api
'''
mocked_api = MagicMock()
mocked_api.get.return_value = '123'
profile = Profile(mocked_api, mocked_response)
eq_(profile.schedules, '123')
mocked_api.get.assert_called_once_with(url = PATHS['GET_SCHEDULES'] % 1, parser=json.loads)
|
<commit_before><commit_msg>Test profile schedulers gettering from buffer api<commit_after>
|
import json
from nose.tools import eq_, raises
from mock import MagicMock, patch
from buffer.models.profile import Profile, PATHS
mocked_response = {
'name': 'me',
'service': 'twiter',
'id': 1
}
def test_profile_schedules_getter():
'''
Test schedules gettering from buffer api
'''
mocked_api = MagicMock()
mocked_api.get.return_value = '123'
profile = Profile(mocked_api, mocked_response)
eq_(profile.schedules, '123')
mocked_api.get.assert_called_once_with(url = PATHS['GET_SCHEDULES'] % 1, parser=json.loads)
|
Test profile schedulers gettering from buffer apiimport json
from nose.tools import eq_, raises
from mock import MagicMock, patch
from buffer.models.profile import Profile, PATHS
mocked_response = {
'name': 'me',
'service': 'twiter',
'id': 1
}
def test_profile_schedules_getter():
'''
Test schedules gettering from buffer api
'''
mocked_api = MagicMock()
mocked_api.get.return_value = '123'
profile = Profile(mocked_api, mocked_response)
eq_(profile.schedules, '123')
mocked_api.get.assert_called_once_with(url = PATHS['GET_SCHEDULES'] % 1, parser=json.loads)
|
<commit_before><commit_msg>Test profile schedulers gettering from buffer api<commit_after>import json
from nose.tools import eq_, raises
from mock import MagicMock, patch
from buffer.models.profile import Profile, PATHS
mocked_response = {
'name': 'me',
'service': 'twiter',
'id': 1
}
def test_profile_schedules_getter():
'''
Test schedules gettering from buffer api
'''
mocked_api = MagicMock()
mocked_api.get.return_value = '123'
profile = Profile(mocked_api, mocked_response)
eq_(profile.schedules, '123')
mocked_api.get.assert_called_once_with(url = PATHS['GET_SCHEDULES'] % 1, parser=json.loads)
|
|
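Editorial aside: the test pins down the contract of Profile.schedules: one GET against PATHS['GET_SCHEDULES'] formatted with the profile id, parsed with json.loads. Below is a sketch of an implementation shape that would satisfy that assertion; this is not the real buffer-python source, and the path string is a placeholder invented for illustration:

import json

PATHS = {'GET_SCHEDULES': 'profiles/%s/schedules.json'}   # placeholder value

class ProfileSketch(dict):
    def __init__(self, api, raw):
        super(ProfileSketch, self).__init__(raw)
        self.api = api

    @property
    def schedules(self):
        return self.api.get(url=PATHS['GET_SCHEDULES'] % self['id'],
                            parser=json.loads)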
4e27164028847fb7de8d4aef991f1f6e9c64c8be
|
l10n_br_sale/models/sale-l10n_br_sale_service.py
|
l10n_br_sale/models/sale-l10n_br_sale_service.py
|
# Copyright (C) 2014 Renato Lima - Akretion
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import _, api, models
from openerp.exceptions import Warning as UserError
class SaleOrder(models.Model):
_inherit = "sale.order"
@api.model
def _prepare_invoice(self, order, lines):
result = super(SaleOrder, self)._prepare_invoice(order, lines)
result["fiscal_type"] = self.env.context.get("fiscal_type")
return result
@api.model
def _make_invoice(self, order, lines):
context = dict(self.env.context)
obj_invoice_line = self.env["account.invoice.line"]
lines_service = []
lines_product = []
inv_id_product = 0
inv_id_service = 0
def call_make_invoice(self, lines):
self = self.with_context(context)
return super(SaleOrder, self)._make_invoice(order, lines)
if not order.fiscal_category_id.property_journal:
raise UserError(
_("Error !"),
_(
"There is no journal defined for this company in Fiscal "
"Category: %s Company: %s"
)
% (order.fiscal_category_id.name, order.company_id.name),
)
for inv_line in obj_invoice_line.browse(lines):
if inv_line.product_id.fiscal_type == "service":
lines_service.append(inv_line.id)
elif inv_line.product_id.fiscal_type == "product":
lines_product.append(inv_line.id)
if lines_product:
context["fiscal_type"] = "product"
inv_id_product = call_make_invoice(self, lines_product)
if lines_service:
context["fiscal_type"] = "service"
inv_id_service = call_make_invoice(self, lines_service)
if inv_id_product and inv_id_service:
self._cr.execute(
"insert into sale_order_invoice_rel "
"(order_id,invoice_id) values (%s,%s)",
(order.id, inv_id_service),
)
inv_id = inv_id_product or inv_id_service
return inv_id
|
Move models from l10n_br_sale_service to l10n_br_sale.
|
[12.0][MIG][WIP] Move models from l10n_br_sale_service to l10n_br_sale.
|
Python
|
agpl-3.0
|
akretion/l10n-brazil,akretion/l10n-brazil,OCA/l10n-brazil,OCA/l10n-brazil,OCA/l10n-brazil,akretion/l10n-brazil
|
[12.0][MIG][WIP] Move models from l10n_br_sale_service to l10n_br_sale.
|
# Copyright (C) 2014 Renato Lima - Akretion
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import _, api, models
from openerp.exceptions import Warning as UserError
class SaleOrder(models.Model):
_inherit = "sale.order"
@api.model
def _prepare_invoice(self, order, lines):
result = super(SaleOrder, self)._prepare_invoice(order, lines)
result["fiscal_type"] = self.env.context.get("fiscal_type")
return result
@api.model
def _make_invoice(self, order, lines):
context = dict(self.env.context)
obj_invoice_line = self.env["account.invoice.line"]
lines_service = []
lines_product = []
inv_id_product = 0
inv_id_service = 0
def call_make_invoice(self, lines):
self = self.with_context(context)
return super(SaleOrder, self)._make_invoice(order, lines)
if not order.fiscal_category_id.property_journal:
raise UserError(
_("Error !"),
_(
"There is no journal defined for this company in Fiscal "
"Category: %s Company: %s"
)
% (order.fiscal_category_id.name, order.company_id.name),
)
for inv_line in obj_invoice_line.browse(lines):
if inv_line.product_id.fiscal_type == "service":
lines_service.append(inv_line.id)
elif inv_line.product_id.fiscal_type == "product":
lines_product.append(inv_line.id)
if lines_product:
context["fiscal_type"] = "product"
inv_id_product = call_make_invoice(self, lines_product)
if lines_service:
context["fiscal_type"] = "service"
inv_id_service = call_make_invoice(self, lines_service)
if inv_id_product and inv_id_service:
self._cr.execute(
"insert into sale_order_invoice_rel "
"(order_id,invoice_id) values (%s,%s)",
(order.id, inv_id_service),
)
inv_id = inv_id_product or inv_id_service
return inv_id
|
<commit_before><commit_msg>[12.0][MIG][WIP] Move models from l10n_br_sale_service to l10n_br_sale.<commit_after>
|
# Copyright (C) 2014 Renato Lima - Akretion
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import _, api, models
from openerp.exceptions import Warning as UserError
class SaleOrder(models.Model):
_inherit = "sale.order"
@api.model
def _prepare_invoice(self, order, lines):
result = super(SaleOrder, self)._prepare_invoice(order, lines)
result["fiscal_type"] = self.env.context.get("fiscal_type")
return result
@api.model
def _make_invoice(self, order, lines):
context = dict(self.env.context)
obj_invoice_line = self.env["account.invoice.line"]
lines_service = []
lines_product = []
inv_id_product = 0
inv_id_service = 0
def call_make_invoice(self, lines):
self = self.with_context(context)
return super(SaleOrder, self)._make_invoice(order, lines)
if not order.fiscal_category_id.property_journal:
raise UserError(
_("Error !"),
_(
"There is no journal defined for this company in Fiscal "
"Category: %s Company: %s"
)
% (order.fiscal_category_id.name, order.company_id.name),
)
for inv_line in obj_invoice_line.browse(lines):
if inv_line.product_id.fiscal_type == "service":
lines_service.append(inv_line.id)
elif inv_line.product_id.fiscal_type == "product":
lines_product.append(inv_line.id)
if lines_product:
context["fiscal_type"] = "product"
inv_id_product = call_make_invoice(self, lines_product)
if lines_service:
context["fiscal_type"] = "service"
inv_id_service = call_make_invoice(self, lines_service)
if inv_id_product and inv_id_service:
self._cr.execute(
"insert into sale_order_invoice_rel "
"(order_id,invoice_id) values (%s,%s)",
(order.id, inv_id_service),
)
inv_id = inv_id_product or inv_id_service
return inv_id
|
[12.0][MIG][WIP] Move models from l10n_br_sale_service to l10n_br_sale.# Copyright (C) 2014 Renato Lima - Akretion
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import _, api, models
from openerp.exceptions import Warning as UserError
class SaleOrder(models.Model):
_inherit = "sale.order"
@api.model
def _prepare_invoice(self, order, lines):
result = super(SaleOrder, self)._prepare_invoice(order, lines)
result["fiscal_type"] = self.env.context.get("fiscal_type")
return result
@api.model
def _make_invoice(self, order, lines):
context = dict(self.env.context)
obj_invoice_line = self.env["account.invoice.line"]
lines_service = []
lines_product = []
inv_id_product = 0
inv_id_service = 0
def call_make_invoice(self, lines):
self = self.with_context(context)
return super(SaleOrder, self)._make_invoice(order, lines)
if not order.fiscal_category_id.property_journal:
raise UserError(
_("Error !"),
_(
"There is no journal defined for this company in Fiscal "
"Category: %s Company: %s"
)
% (order.fiscal_category_id.name, order.company_id.name),
)
for inv_line in obj_invoice_line.browse(lines):
if inv_line.product_id.fiscal_type == "service":
lines_service.append(inv_line.id)
elif inv_line.product_id.fiscal_type == "product":
lines_product.append(inv_line.id)
if lines_product:
context["fiscal_type"] = "product"
inv_id_product = call_make_invoice(self, lines_product)
if lines_service:
context["fiscal_type"] = "service"
inv_id_service = call_make_invoice(self, lines_service)
if inv_id_product and inv_id_service:
self._cr.execute(
"insert into sale_order_invoice_rel "
"(order_id,invoice_id) values (%s,%s)",
(order.id, inv_id_service),
)
inv_id = inv_id_product or inv_id_service
return inv_id
|
<commit_before><commit_msg>[12.0][MIG][WIP] Move models from l10n_br_sale_service to l10n_br_sale.<commit_after># Copyright (C) 2014 Renato Lima - Akretion
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import _, api, models
from openerp.exceptions import Warning as UserError
class SaleOrder(models.Model):
_inherit = "sale.order"
@api.model
def _prepare_invoice(self, order, lines):
result = super(SaleOrder, self)._prepare_invoice(order, lines)
result["fiscal_type"] = self.env.context.get("fiscal_type")
return result
@api.model
def _make_invoice(self, order, lines):
context = dict(self.env.context)
obj_invoice_line = self.env["account.invoice.line"]
lines_service = []
lines_product = []
inv_id_product = 0
inv_id_service = 0
def call_make_invoice(self, lines):
self = self.with_context(context)
return super(SaleOrder, self)._make_invoice(order, lines)
if not order.fiscal_category_id.property_journal:
raise UserError(
_("Error !"),
_(
"There is no journal defined for this company in Fiscal "
"Category: %s Company: %s"
)
% (order.fiscal_category_id.name, order.company_id.name),
)
for inv_line in obj_invoice_line.browse(lines):
if inv_line.product_id.fiscal_type == "service":
lines_service.append(inv_line.id)
elif inv_line.product_id.fiscal_type == "product":
lines_product.append(inv_line.id)
if lines_product:
context["fiscal_type"] = "product"
inv_id_product = call_make_invoice(self, lines_product)
if lines_service:
context["fiscal_type"] = "service"
inv_id_service = call_make_invoice(self, lines_service)
if inv_id_product and inv_id_service:
self._cr.execute(
"insert into sale_order_invoice_rel "
"(order_id,invoice_id) values (%s,%s)",
(order.id, inv_id_service),
)
inv_id = inv_id_product or inv_id_service
return inv_id
|
|
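Editorial aside: the heart of _make_invoice above is partitioning invoice lines by the product's fiscal_type and raising at most one invoice per group. The partitioning itself is plain Python and easy to check outside Odoo; a small sketch with faked line objects (the real ones are ORM records reached via product_id):

from collections import namedtuple

Line = namedtuple('Line', 'id fiscal_type')
lines = [Line(1, 'product'), Line(2, 'service'), Line(3, 'product')]

lines_service = [l.id for l in lines if l.fiscal_type == 'service']
lines_product = [l.id for l in lines if l.fiscal_type == 'product']

# One invoice would be raised per non-empty group, mirroring the method above.
print('product lines: %s, service lines: %s' % (lines_product, lines_service))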
8d2fe4a34e34267eb2fe1b2d84fcc77c89eaebc2
|
migrations/versions/0a2ba8dce059_oauth2_start.py
|
migrations/versions/0a2ba8dce059_oauth2_start.py
|
"""OAuth2 start
Revision ID: 0a2ba8dce059
Revises: 9f50538504b1
Create Date: 2017-02-11 13:48:45.564677
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy import Text
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '0a2ba8dce059'
down_revision = '9f50538504b1'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column('oauth_token', postgresql.JSONB(astext_type=Text()), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('user', 'oauth_token')
# ### end Alembic commands ###
|
Add this missing OAuth2 migration.
|
Add this missing OAuth2 migration.
Signed-off-by: Laura <07c342be6e560e7f43842e2e21b774e61d85f047@veriny.tf>
|
Python
|
mit
|
MJB47/Jokusoramame,MJB47/Jokusoramame,MJB47/Jokusoramame
|
Add this missing OAuth2 migration.
Signed-off-by: Laura <07c342be6e560e7f43842e2e21b774e61d85f047@veriny.tf>
|
"""OAuth2 start
Revision ID: 0a2ba8dce059
Revises: 9f50538504b1
Create Date: 2017-02-11 13:48:45.564677
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy import Text
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '0a2ba8dce059'
down_revision = '9f50538504b1'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column('oauth_token', postgresql.JSONB(astext_type=Text()), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('user', 'oauth_token')
# ### end Alembic commands ###
|
<commit_before><commit_msg>Add this missing OAuth2 migration.
Signed-off-by: Laura <07c342be6e560e7f43842e2e21b774e61d85f047@veriny.tf><commit_after>
|
"""OAuth2 start
Revision ID: 0a2ba8dce059
Revises: 9f50538504b1
Create Date: 2017-02-11 13:48:45.564677
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy import Text
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '0a2ba8dce059'
down_revision = '9f50538504b1'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column('oauth_token', postgresql.JSONB(astext_type=Text()), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('user', 'oauth_token')
# ### end Alembic commands ###
|
Add this missing OAuth2 migration.
Signed-off-by: Laura <07c342be6e560e7f43842e2e21b774e61d85f047@veriny.tf>"""OAuth2 start
Revision ID: 0a2ba8dce059
Revises: 9f50538504b1
Create Date: 2017-02-11 13:48:45.564677
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy import Text
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '0a2ba8dce059'
down_revision = '9f50538504b1'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column('oauth_token', postgresql.JSONB(astext_type=Text()), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('user', 'oauth_token')
# ### end Alembic commands ###
|
<commit_before><commit_msg>Add this missing OAuth2 migration.
Signed-off-by: Laura <07c342be6e560e7f43842e2e21b774e61d85f047@veriny.tf><commit_after>"""OAuth2 start
Revision ID: 0a2ba8dce059
Revises: 9f50538504b1
Create Date: 2017-02-11 13:48:45.564677
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy import Text
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '0a2ba8dce059'
down_revision = '9f50538504b1'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column('oauth_token', postgresql.JSONB(astext_type=Text()), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('user', 'oauth_token')
# ### end Alembic commands ###
|
|
5dc1d9d005bd88378f03ab6783858be09c348e4c
|
services/etsy.py
|
services/etsy.py
|
from werkzeug.urls import url_decode
import foauth.providers
class Etsy(foauth.providers.OAuth1):
# General info about the provider
provider_url = 'http://www.etsy.com/'
docs_url = 'http://www.etsy.com/developers/documentation'
category = 'Crafts'
# URLs to interact with the API
request_token_url = 'http://openapi.etsy.com/v2/oauth/request_token'
authorize_url = None # Provided when the request token is granted
access_token_url = 'http://openapi.etsy.com/v2/oauth/access_token'
api_domain = 'openapi.etsy.com'
available_permissions = [
(None, 'read your profile information and public listings'),
('email_r', 'read your email address'),
('listings_r', 'read your inactive and expired (i.e., non-public) listings'),
('listings_w', 'create and edit your listings'),
('listings_d', 'delete your listings'),
('transactions_r', 'read your purchase and sales data'),
('transactions_w', 'update your sales data'),
('billing_r', 'read your Etsy bill charges and payments'),
('profile_r', 'read your private profile information'),
('profile_w', 'update your private profile information'),
('address_r', 'read your shipping address'),
('address_w', 'update and delete your shipping address'),
('favorites_rw', 'add to and remove from your favorite listings and users'),
('shops_rw', 'update your shop description, messages and sections'),
('cart_rw', 'add and remove listings from your shopping cart'),
('recommend_rw', 'view, accept and reject your recommended listings'),
('feedback_r', 'view all details of your feedback (including history)'),
('treasury_w', 'create and delete treasuries and treasury comments'),
]
def get_request_token_url(self):
# Override standard request token URL in order to add permissions
url = super(Etsy, self).get_request_token_url()
perms = (p for (p, desc) in self.available_permissions if p)
return '%s?scope=%s' % (url, ' '.join(perms))
def parse_token(self, content):
# Override standard token request to also get the authorization URL
data = url_decode(content)
if 'login_url' in data:
self.authorize_url = data['login_url']
return super(Etsy, self).parse_token(content)
|
Add the code for Etsy
|
Add the code for Etsy
They've decided not to allow foauth.org to actually access their API, but there's no reason to hide the code in case someone can learn from it.
|
Python
|
bsd-3-clause
|
foauth/foauth.org,foauth/foauth.org,foauth/foauth.org
|
Add the code for Etsy
They've decided not to allow foauth.org to actually access their API, but there's no reason to hide the code in case someone can learn from it.
|
from werkzeug.urls import url_decode
import foauth.providers
class Etsy(foauth.providers.OAuth1):
# General info about the provider
provider_url = 'http://www.etsy.com/'
docs_url = 'http://www.etsy.com/developers/documentation'
category = 'Crafts'
# URLs to interact with the API
request_token_url = 'http://openapi.etsy.com/v2/oauth/request_token'
authorize_url = None # Provided when the request token is granted
access_token_url = 'http://openapi.etsy.com/v2/oauth/access_token'
api_domain = 'openapi.etsy.com'
available_permissions = [
(None, 'read your profile information and public listings'),
('email_r', 'read your email address'),
('listings_r', 'read your inactive and expired (i.e., non-public) listings'),
('listings_w', 'create and edit your listings'),
('listings_d', 'delete your listings'),
('transactions_r', 'read your purchase and sales data'),
('transactions_w', 'update your sales data'),
('billing_r', 'read your Etsy bill charges and payments'),
('profile_r', 'read your private profile information'),
('profile_w', 'update your private profile information'),
('address_r', 'read your shipping address'),
('address_w', 'update and delete your shipping address'),
('favorites_rw', 'add to and remove from your favorite listings and users'),
('shops_rw', 'update your shop description, messages and sections'),
('cart_rw', 'add and remove listings from your shopping cart'),
('recommend_rw', 'view, accept and reject your recommended listings'),
('feedback_r', 'view all details of your feedback (including history)'),
('treasury_w', 'create and delete treasuries and treasury comments'),
]
def get_request_token_url(self):
# Override standard request token URL in order to add permissions
url = super(Etsy, self).get_request_token_url()
perms = (p for (p, desc) in self.available_permissions if p)
return '%s?scope=%s' % (url, ' '.join(perms))
def parse_token(self, content):
# Override standard token request to also get the authorization URL
data = url_decode(content)
if 'login_url' in data:
self.authorize_url = data['login_url']
return super(Etsy, self).parse_token(content)
|
<commit_before><commit_msg>Add the code for Etsy
They've decided not to allow foauth.org to actually access their API, but there's no reason to hide the code in case someone can learn from it.<commit_after>
|
from werkzeug.urls import url_decode
import foauth.providers
class Etsy(foauth.providers.OAuth1):
# General info about the provider
provider_url = 'http://www.etsy.com/'
docs_url = 'http://www.etsy.com/developers/documentation'
category = 'Crafts'
# URLs to interact with the API
request_token_url = 'http://openapi.etsy.com/v2/oauth/request_token'
authorize_url = None # Provided when the request token is granted
access_token_url = 'http://openapi.etsy.com/v2/oauth/access_token'
api_domain = 'openapi.etsy.com'
available_permissions = [
(None, 'read your profile information and public listings'),
('email_r', 'read your email address'),
('listings_r', 'read your inactive and expired (i.e., non-public) listings'),
('listings_w', 'create and edit your listings'),
('listings_d', 'delete your listings'),
('transactions_r', 'read your purchase and sales data'),
('transactions_w', 'update your sales data'),
('billing_r', 'read your Etsy bill charges and payments'),
('profile_r', 'read your private profile information'),
('profile_w', 'update your private profile information'),
('address_r', 'read your shipping address'),
('address_w', 'update and delete your shipping address'),
('favorites_rw', 'add to and remove from your favorite listings and users'),
('shops_rw', 'update your shop description, messages and sections'),
('cart_rw', 'add and remove listings from your shopping cart'),
('recommend_rw', 'view, accept and reject your recommended listings'),
('feedback_r', 'view all details of your feedback (including history)'),
('treasury_w', 'create and delete treasuries and treasury comments'),
]
def get_request_token_url(self):
# Override standard request token URL in order to add permissions
url = super(Etsy, self).get_request_token_url()
perms = (p for (p, desc) in self.available_permissions if p)
return '%s?scope=%s' % (url, ' '.join(perms))
def parse_token(self, content):
# Override standard token request to also get the authorization URL
data = url_decode(content)
if 'login_url' in data:
self.authorize_url = data['login_url']
return super(Etsy, self).parse_token(content)
|
Add the code for Etsy
They've decided not to allow foauth.org to actually access their API, but there's no reason to hide the code in case someone can learn from it.from werkzeug.urls import url_decode
import foauth.providers
class Etsy(foauth.providers.OAuth1):
# General info about the provider
provider_url = 'http://www.etsy.com/'
docs_url = 'http://www.etsy.com/developers/documentation'
category = 'Crafts'
# URLs to interact with the API
request_token_url = 'http://openapi.etsy.com/v2/oauth/request_token'
authorize_url = None # Provided when the request token is granted
access_token_url = 'http://openapi.etsy.com/v2/oauth/access_token'
api_domain = 'openapi.etsy.com'
available_permissions = [
(None, 'read your profile information and public listings'),
('email_r', 'read your email address'),
('listings_r', 'read your inactive and expired (i.e., non-public) listings'),
('listings_w', 'create and edit your listings'),
('listings_d', 'delete your listings'),
('transactions_r', 'read your purchase and sales data'),
('transactions_w', 'update your sales data'),
('billing_r', 'read your Etsy bill charges and payments'),
('profile_r', 'read your private profile information'),
('profile_w', 'update your private profile information'),
('address_r', 'read your shipping address'),
('address_w', 'update and delete your shipping address'),
('favorites_rw', 'add to and remove from your favorite listings and users'),
('shops_rw', 'update your shop description, messages and sections'),
('cart_rw', 'add and remove listings from your shopping cart'),
('recommend_rw', 'view, accept and reject your recommended listings'),
('feedback_r', 'view all details of your feedback (including history)'),
('treasury_w', 'create and delete treasuries and treasury comments'),
]
def get_request_token_url(self):
# Override standard request token URL in order to add permissions
url = super(Etsy, self).get_request_token_url()
perms = (p for (p, desc) in self.available_permissions if p)
return '%s?scope=%s' % (url, ' '.join(perms))
def parse_token(self, content):
# Override standard token request to also get the authorization URL
data = url_decode(content)
if 'login_url' in data:
self.authorize_url = data['login_url']
return super(Etsy, self).parse_token(content)
|
<commit_before><commit_msg>Add the code for Etsy
They've decided not to allow foauth.org to actually access their API, but there's no reason to hide the code in case someone can learn from it.<commit_after>from werkzeug.urls import url_decode
import foauth.providers
class Etsy(foauth.providers.OAuth1):
# General info about the provider
provider_url = 'http://www.etsy.com/'
docs_url = 'http://www.etsy.com/developers/documentation'
category = 'Crafts'
# URLs to interact with the API
request_token_url = 'http://openapi.etsy.com/v2/oauth/request_token'
authorize_url = None # Provided when the request token is granted
access_token_url = 'http://openapi.etsy.com/v2/oauth/access_token'
api_domain = 'openapi.etsy.com'
available_permissions = [
(None, 'read your profile information and public listings'),
('email_r', 'read your email address'),
('listings_r', 'read your inactive and expired (i.e., non-public) listings'),
('listings_w', 'create and edit your listings'),
('listings_d', 'delete your listings'),
('transactions_r', 'read your purchase and sales data'),
('transactions_w', 'update your sales data'),
('billing_r', 'read your Etsy bill charges and payments'),
('profile_r', 'read your private profile information'),
('profile_w', 'update your private profile information'),
('address_r', 'read your shipping address'),
('address_w', 'update and delete your shipping address'),
('favorites_rw', 'add to and remove from your favorite listings and users'),
('shops_rw', 'update your shop description, messages and sections'),
('cart_rw', 'add and remove listings from your shopping cart'),
('recommend_rw', 'view, accept and reject your recommended listings'),
('feedback_r', 'view all details of your feedback (including history)'),
('treasury_w', 'create and delete treasuries and treasury comments'),
]
def get_request_token_url(self):
# Override standard request token URL in order to add permissions
url = super(Etsy, self).get_request_token_url()
perms = (p for (p, desc) in self.available_permissions if p)
return '%s?scope=%s' % (url, ' '.join(perms))
def parse_token(self, content):
# Override standard token request to also get the authorization URL
data = url_decode(content)
if 'login_url' in data:
self.authorize_url = data['login_url']
return super(Etsy, self).parse_token(content)
|
|
b7a1f8b9cbbf795f663987254a681281cda46abf
|
tests/contrib/hooks/test_gcp_dataproc_hook.py
|
tests/contrib/hooks/test_gcp_dataproc_hook.py
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from airflow.contrib.hooks.gcp_dataproc_hook import DataProcHook
try:
from unittest import mock
except ImportError:
try:
import mock
except ImportError:
mock = None
JOB = 'test-job'
PROJECT_ID = 'test-project-id'
REGION = 'global'
TASK_ID = 'test-task-id'
BASE_STRING = 'airflow.contrib.hooks.gcp_api_base_hook.{}'
DATAPROC_STRING = 'airflow.contrib.hooks.gcp_dataproc_hook.{}'
def mock_init(self, gcp_conn_id, delegate_to=None):
pass
class DataProcHookTest(unittest.TestCase):
def setUp(self):
with mock.patch(BASE_STRING.format('GoogleCloudBaseHook.__init__'),
new=mock_init):
self.dataproc_hook = DataProcHook()
@mock.patch(DATAPROC_STRING.format('_DataProcJob'))
def test_submit(self, job_mock):
with mock.patch(DATAPROC_STRING.format('DataProcHook.get_conn', return_value=None)):
self.dataproc_hook.submit(PROJECT_ID, JOB)
job_mock.assert_called_once_with(mock.ANY, PROJECT_ID, JOB, REGION)
|
Add unit tests for DataProcHook
|
[AIRFLOW-1727] Add unit tests for DataProcHook
Closes #2697 from cjqian/1727
|
Python
|
apache-2.0
|
Tagar/incubator-airflow,Fokko/incubator-airflow,adamhaney/airflow,edgarRd/incubator-airflow,bolkedebruin/airflow,nathanielvarona/airflow,gilt/incubator-airflow,danielvdende/incubator-airflow,yk5/incubator-airflow,zack3241/incubator-airflow,wndhydrnt/airflow,mistercrunch/airflow,danielvdende/incubator-airflow,subodhchhabra/airflow,airbnb/airflow,cfei18/incubator-airflow,lxneng/incubator-airflow,andyxhadji/incubator-airflow,KL-WLCR/incubator-airflow,adamhaney/airflow,Fokko/incubator-airflow,OpringaoDoTurno/airflow,CloverHealth/airflow,OpringaoDoTurno/airflow,jfantom/incubator-airflow,Acehaidrey/incubator-airflow,spektom/incubator-airflow,nathanielvarona/airflow,yk5/incubator-airflow,cfei18/incubator-airflow,Acehaidrey/incubator-airflow,yati-sagade/incubator-airflow,zack3241/incubator-airflow,sekikn/incubator-airflow,DinoCow/airflow,yati-sagade/incubator-airflow,lyft/incubator-airflow,cfei18/incubator-airflow,mtagle/airflow,CloverHealth/airflow,jfantom/incubator-airflow,wooga/airflow,mtagle/airflow,CloverHealth/airflow,mistercrunch/airflow,jgao54/airflow,edgarRd/incubator-airflow,KL-WLCR/incubator-airflow,wileeam/airflow,Tagar/incubator-airflow,gilt/incubator-airflow,apache/incubator-airflow,r39132/airflow,jgao54/airflow,jfantom/incubator-airflow,mtagle/airflow,lyft/incubator-airflow,sergiohgz/incubator-airflow,dhuang/incubator-airflow,cfei18/incubator-airflow,dhuang/incubator-airflow,owlabs/incubator-airflow,wileeam/airflow,danielvdende/incubator-airflow,wolfier/incubator-airflow,lyft/incubator-airflow,artwr/airflow,RealImpactAnalytics/airflow,nathanielvarona/airflow,sekikn/incubator-airflow,Tagar/incubator-airflow,danielvdende/incubator-airflow,cjqian/incubator-airflow,hgrif/incubator-airflow,nathanielvarona/airflow,apache/incubator-airflow,skudriashev/incubator-airflow,malmiron/incubator-airflow,sid88in/incubator-airflow,akosel/incubator-airflow,bolkedebruin/airflow,bolkedebruin/airflow,OpringaoDoTurno/airflow,KL-WLCR/incubator-airflow,wileeam/airflow,criccomini/airflow,bolkedebruin/airflow,MortalViews/incubator-airflow,sekikn/incubator-airflow,apache/airflow,wolfier/incubator-airflow,owlabs/incubator-airflow,subodhchhabra/airflow,akosel/incubator-airflow,mrkm4ntr/incubator-airflow,MortalViews/incubator-airflow,hgrif/incubator-airflow,RealImpactAnalytics/airflow,mrkm4ntr/incubator-airflow,Acehaidrey/incubator-airflow
|
[AIRFLOW-1727] Add unit tests for DataProcHook
Closes #2697 from cjqian/1727
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from airflow.contrib.hooks.gcp_dataproc_hook import DataProcHook
try:
from unittest import mock
except ImportError:
try:
import mock
except ImportError:
mock = None
JOB = 'test-job'
PROJECT_ID = 'test-project-id'
REGION = 'global'
TASK_ID = 'test-task-id'
BASE_STRING = 'airflow.contrib.hooks.gcp_api_base_hook.{}'
DATAPROC_STRING = 'airflow.contrib.hooks.gcp_dataproc_hook.{}'
def mock_init(self, gcp_conn_id, delegate_to=None):
pass
class DataProcHookTest(unittest.TestCase):
def setUp(self):
with mock.patch(BASE_STRING.format('GoogleCloudBaseHook.__init__'),
new=mock_init):
self.dataproc_hook = DataProcHook()
@mock.patch(DATAPROC_STRING.format('_DataProcJob'))
def test_submit(self, job_mock):
with mock.patch(DATAPROC_STRING.format('DataProcHook.get_conn', return_value=None)):
self.dataproc_hook.submit(PROJECT_ID, JOB)
job_mock.assert_called_once_with(mock.ANY, PROJECT_ID, JOB, REGION)
|
<commit_before><commit_msg>[AIRFLOW-1727] Add unit tests for DataProcHook
Closes #2697 from cjqian/1727<commit_after>
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from airflow.contrib.hooks.gcp_dataproc_hook import DataProcHook
try:
from unittest import mock
except ImportError:
try:
import mock
except ImportError:
mock = None
JOB = 'test-job'
PROJECT_ID = 'test-project-id'
REGION = 'global'
TASK_ID = 'test-task-id'
BASE_STRING = 'airflow.contrib.hooks.gcp_api_base_hook.{}'
DATAPROC_STRING = 'airflow.contrib.hooks.gcp_dataproc_hook.{}'
def mock_init(self, gcp_conn_id, delegate_to=None):
pass
class DataProcHookTest(unittest.TestCase):
def setUp(self):
with mock.patch(BASE_STRING.format('GoogleCloudBaseHook.__init__'),
new=mock_init):
self.dataproc_hook = DataProcHook()
@mock.patch(DATAPROC_STRING.format('_DataProcJob'))
def test_submit(self, job_mock):
with mock.patch(DATAPROC_STRING.format('DataProcHook.get_conn', return_value=None)):
self.dataproc_hook.submit(PROJECT_ID, JOB)
job_mock.assert_called_once_with(mock.ANY, PROJECT_ID, JOB, REGION)
|
[AIRFLOW-1727] Add unit tests for DataProcHook
Closes #2697 from cjqian/1727# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from airflow.contrib.hooks.gcp_dataproc_hook import DataProcHook
try:
from unittest import mock
except ImportError:
try:
import mock
except ImportError:
mock = None
JOB = 'test-job'
PROJECT_ID = 'test-project-id'
REGION = 'global'
TASK_ID = 'test-task-id'
BASE_STRING = 'airflow.contrib.hooks.gcp_api_base_hook.{}'
DATAPROC_STRING = 'airflow.contrib.hooks.gcp_dataproc_hook.{}'
def mock_init(self, gcp_conn_id, delegate_to=None):
pass
class DataProcHookTest(unittest.TestCase):
def setUp(self):
with mock.patch(BASE_STRING.format('GoogleCloudBaseHook.__init__'),
new=mock_init):
self.dataproc_hook = DataProcHook()
@mock.patch(DATAPROC_STRING.format('_DataProcJob'))
def test_submit(self, job_mock):
with mock.patch(DATAPROC_STRING.format('DataProcHook.get_conn', return_value=None)):
self.dataproc_hook.submit(PROJECT_ID, JOB)
job_mock.assert_called_once_with(mock.ANY, PROJECT_ID, JOB, REGION)
|
<commit_before><commit_msg>[AIRFLOW-1727] Add unit tests for DataProcHook
Closes #2697 from cjqian/1727<commit_after># -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from airflow.contrib.hooks.gcp_dataproc_hook import DataProcHook
try:
from unittest import mock
except ImportError:
try:
import mock
except ImportError:
mock = None
JOB = 'test-job'
PROJECT_ID = 'test-project-id'
REGION = 'global'
TASK_ID = 'test-task-id'
BASE_STRING = 'airflow.contrib.hooks.gcp_api_base_hook.{}'
DATAPROC_STRING = 'airflow.contrib.hooks.gcp_dataproc_hook.{}'
def mock_init(self, gcp_conn_id, delegate_to=None):
pass
class DataProcHookTest(unittest.TestCase):
def setUp(self):
with mock.patch(BASE_STRING.format('GoogleCloudBaseHook.__init__'),
new=mock_init):
self.dataproc_hook = DataProcHook()
@mock.patch(DATAPROC_STRING.format('_DataProcJob'))
def test_submit(self, job_mock):
with mock.patch(DATAPROC_STRING.format('DataProcHook.get_conn', return_value=None)):
self.dataproc_hook.submit(PROJECT_ID, JOB)
job_mock.assert_called_once_with(mock.ANY, PROJECT_ID, JOB, REGION)
|
|
579879a0c9f54d8f3ab6b57feaeeeaeee1365ab7
|
campaign/migrations/0014_auto_20160717_2119.py
|
campaign/migrations/0014_auto_20160717_2119.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-17 21:19
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('campaign', '0013_merge'),
]
operations = [
migrations.AlterField(
model_name='campaign',
name='created',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AlterField(
model_name='transaction',
name='amount',
field=models.DecimalField(decimal_places=2, default=0, max_digits=6),
),
]
|
Integrate salary picker with PayFast
|
Integrate salary picker with PayFast
|
Python
|
mit
|
toast38coza/FlashGiving,toast38coza/FlashGiving,toast38coza/FlashGiving
|
Integrate salary picker with PayFast
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-17 21:19
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('campaign', '0013_merge'),
]
operations = [
migrations.AlterField(
model_name='campaign',
name='created',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AlterField(
model_name='transaction',
name='amount',
field=models.DecimalField(decimal_places=2, default=0, max_digits=6),
),
]
|
<commit_before><commit_msg>Integrate salary picker with PayFast<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-17 21:19
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('campaign', '0013_merge'),
]
operations = [
migrations.AlterField(
model_name='campaign',
name='created',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AlterField(
model_name='transaction',
name='amount',
field=models.DecimalField(decimal_places=2, default=0, max_digits=6),
),
]
|
Integrate salary picker with PayFast# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-17 21:19
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('campaign', '0013_merge'),
]
operations = [
migrations.AlterField(
model_name='campaign',
name='created',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AlterField(
model_name='transaction',
name='amount',
field=models.DecimalField(decimal_places=2, default=0, max_digits=6),
),
]
|
<commit_before><commit_msg>Integrate salary picker with PayFast<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-17 21:19
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('campaign', '0013_merge'),
]
operations = [
migrations.AlterField(
model_name='campaign',
name='created',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AlterField(
model_name='transaction',
name='amount',
field=models.DecimalField(decimal_places=2, default=0, max_digits=6),
),
]
|
|
48257a02d71ab4bf5f0b2415ea7ce298d9425516
|
heat_integrationtests/functional/test_templates.py
|
heat_integrationtests/functional/test_templates.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat_integrationtests.functional import functional_base
class TemplateAPITest(functional_base.FunctionalTestsBase):
"""This will test the following template calls:
1. Get the template content for the specific stack
2. List template versions
3. List resource types
4. Show resource details for OS::Heat::TestResource
"""
template = {
'heat_template_version': '2014-10-16',
'description': 'Test Template APIs',
'resources': {
'test1': {
'type': 'OS::Heat::TestResource',
'properties': {
'update_replace': False,
'wait_secs': 0,
'value': 'Test1',
'fail': False,
}
}
}
}
def setUp(self):
super(TemplateAPITest, self).setUp()
def test_get_stack_template(self):
stack_identifier = self.stack_create(
template=self.template
)
template_from_client = self.client.stacks.template(stack_identifier)
self.assertDictEqual(self.template, template_from_client)
def test_template_version(self):
template_versions = self.client.template_versions.list()
supported_template_versions = ["2013-05-23", "2014-10-16",
"2015-04-30", "2015-10-15",
"2012-12-12", "2010-09-09",
"2016-04-08"]
for template in template_versions:
self.assertIn(template.version.split(".")[1],
supported_template_versions)
def test_resource_types(self):
resource_types = self.client.resource_types.list()
self.assertTrue(any(resource.resource_type == "OS::Heat::TestResource"
for resource in resource_types))
def test_show_resource_template(self):
resource_details = self.client.resource_types.get(
resource_type="OS::Heat::TestResource"
)
self.assertEqual("OS::Heat::TestResource",
resource_details['resource_type'])
|
Add tests for heat template apis
|
Add tests for heat template apis
This patch covers 4 template api calls -
1. Get stack template
2. List template versions
3. List resource types
4. Show resource schema
Change-Id: I85b16b8893264d1313e2e184011dd0ddfdaa4349
|
Python
|
apache-2.0
|
jasondunsmore/heat,steveb/heat,steveb/heat,noironetworks/heat,openstack/heat,jasondunsmore/heat,cwolferh/heat-scratch,dims/heat,openstack/heat,noironetworks/heat,dims/heat,cwolferh/heat-scratch
|
Add tests for heat template apis
This patch covers 4 template api calls -
1. Get stack template
2. List template versions
3. List resource types
4. Show resource schema
Change-Id: I85b16b8893264d1313e2e184011dd0ddfdaa4349
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat_integrationtests.functional import functional_base
class TemplateAPITest(functional_base.FunctionalTestsBase):
"""This will test the following template calls:
1. Get the template content for the specific stack
2. List template versions
3. List resource types
4. Show resource details for OS::Heat::TestResource
"""
template = {
'heat_template_version': '2014-10-16',
'description': 'Test Template APIs',
'resources': {
'test1': {
'type': 'OS::Heat::TestResource',
'properties': {
'update_replace': False,
'wait_secs': 0,
'value': 'Test1',
'fail': False,
}
}
}
}
def setUp(self):
super(TemplateAPITest, self).setUp()
def test_get_stack_template(self):
stack_identifier = self.stack_create(
template=self.template
)
template_from_client = self.client.stacks.template(stack_identifier)
self.assertDictEqual(self.template, template_from_client)
def test_template_version(self):
template_versions = self.client.template_versions.list()
supported_template_versions = ["2013-05-23", "2014-10-16",
"2015-04-30", "2015-10-15",
"2012-12-12", "2010-09-09",
"2016-04-08"]
for template in template_versions:
self.assertIn(template.version.split(".")[1],
supported_template_versions)
def test_resource_types(self):
resource_types = self.client.resource_types.list()
self.assertTrue(any(resource.resource_type == "OS::Heat::TestResource"
for resource in resource_types))
def test_show_resource_template(self):
resource_details = self.client.resource_types.get(
resource_type="OS::Heat::TestResource"
)
self.assertEqual("OS::Heat::TestResource",
resource_details['resource_type'])
|
<commit_before><commit_msg>Add tests for heat template apis
This patch covers 4 template api calls -
1. Get stack template
2. List template versions
3. List resource types
4. Show resource schema
Change-Id: I85b16b8893264d1313e2e184011dd0ddfdaa4349<commit_after>
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat_integrationtests.functional import functional_base
class TemplateAPITest(functional_base.FunctionalTestsBase):
"""This will test the following template calls:
1. Get the template content for the specific stack
2. List template versions
3. List resource types
4. Show resource details for OS::Heat::TestResource
"""
template = {
'heat_template_version': '2014-10-16',
'description': 'Test Template APIs',
'resources': {
'test1': {
'type': 'OS::Heat::TestResource',
'properties': {
'update_replace': False,
'wait_secs': 0,
'value': 'Test1',
'fail': False,
}
}
}
}
def setUp(self):
super(TemplateAPITest, self).setUp()
def test_get_stack_template(self):
stack_identifier = self.stack_create(
template=self.template
)
template_from_client = self.client.stacks.template(stack_identifier)
self.assertDictEqual(self.template, template_from_client)
def test_template_version(self):
template_versions = self.client.template_versions.list()
supported_template_versions = ["2013-05-23", "2014-10-16",
"2015-04-30", "2015-10-15",
"2012-12-12", "2010-09-09",
"2016-04-08"]
for template in template_versions:
self.assertIn(template.version.split(".")[1],
supported_template_versions)
def test_resource_types(self):
resource_types = self.client.resource_types.list()
self.assertTrue(any(resource.resource_type == "OS::Heat::TestResource"
for resource in resource_types))
def test_show_resource_template(self):
resource_details = self.client.resource_types.get(
resource_type="OS::Heat::TestResource"
)
self.assertEqual("OS::Heat::TestResource",
resource_details['resource_type'])
|
Add tests for heat template apis
This patch covers 4 template api calls -
1. Get stack template
2. List template versions
3. List resource types
4. Show resource schema
Change-Id: I85b16b8893264d1313e2e184011dd0ddfdaa4349# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat_integrationtests.functional import functional_base
class TemplateAPITest(functional_base.FunctionalTestsBase):
"""This will test the following template calls:
1. Get the template content for the specific stack
2. List template versions
3. List resource types
4. Show resource details for OS::Heat::TestResource
"""
template = {
'heat_template_version': '2014-10-16',
'description': 'Test Template APIs',
'resources': {
'test1': {
'type': 'OS::Heat::TestResource',
'properties': {
'update_replace': False,
'wait_secs': 0,
'value': 'Test1',
'fail': False,
}
}
}
}
def setUp(self):
super(TemplateAPITest, self).setUp()
def test_get_stack_template(self):
stack_identifier = self.stack_create(
template=self.template
)
template_from_client = self.client.stacks.template(stack_identifier)
self.assertDictEqual(self.template, template_from_client)
def test_template_version(self):
template_versions = self.client.template_versions.list()
supported_template_versions = ["2013-05-23", "2014-10-16",
"2015-04-30", "2015-10-15",
"2012-12-12", "2010-09-09",
"2016-04-08"]
for template in template_versions:
self.assertIn(template.version.split(".")[1],
supported_template_versions)
def test_resource_types(self):
resource_types = self.client.resource_types.list()
self.assertTrue(any(resource.resource_type == "OS::Heat::TestResource"
for resource in resource_types))
def test_show_resource_template(self):
resource_details = self.client.resource_types.get(
resource_type="OS::Heat::TestResource"
)
self.assertEqual("OS::Heat::TestResource",
resource_details['resource_type'])
|
<commit_before><commit_msg>Add tests for heat template apis
This patch covers 4 template api calls -
1. Get stack template
2. List template versions
3. List resource types
4. Show resource schema
Change-Id: I85b16b8893264d1313e2e184011dd0ddfdaa4349<commit_after># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat_integrationtests.functional import functional_base
class TemplateAPITest(functional_base.FunctionalTestsBase):
"""This will test the following template calls:
1. Get the template content for the specific stack
2. List template versions
3. List resource types
4. Show resource details for OS::Heat::TestResource
"""
template = {
'heat_template_version': '2014-10-16',
'description': 'Test Template APIs',
'resources': {
'test1': {
'type': 'OS::Heat::TestResource',
'properties': {
'update_replace': False,
'wait_secs': 0,
'value': 'Test1',
'fail': False,
}
}
}
}
def setUp(self):
super(TemplateAPITest, self).setUp()
def test_get_stack_template(self):
stack_identifier = self.stack_create(
template=self.template
)
template_from_client = self.client.stacks.template(stack_identifier)
self.assertDictEqual(self.template, template_from_client)
def test_template_version(self):
template_versions = self.client.template_versions.list()
supported_template_versions = ["2013-05-23", "2014-10-16",
"2015-04-30", "2015-10-15",
"2012-12-12", "2010-09-09",
"2016-04-08"]
for template in template_versions:
self.assertIn(template.version.split(".")[1],
supported_template_versions)
def test_resource_types(self):
resource_types = self.client.resource_types.list()
self.assertTrue(any(resource.resource_type == "OS::Heat::TestResource"
for resource in resource_types))
def test_show_resource_template(self):
resource_details = self.client.resource_types.get(
resource_type="OS::Heat::TestResource"
)
self.assertEqual("OS::Heat::TestResource",
resource_details['resource_type'])
|
|
a11ed2f8db4d755764a94f9e71bd28e4e3cf582b
|
py/subarray-sum-equals-k.py
|
py/subarray-sum-equals-k.py
|
from collections import Counter
class Solution(object):
def subarraySum(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
subsum = [0]
for i, n in enumerate(nums):
subsum.append(subsum[-1] + n)
c = Counter()
ans = 0
for i, n in enumerate(subsum):
if n - k in c:
ans += c[n - k]
c[n] += 1
return ans
|
Add py solution for 560. Subarray Sum Equals K
|
Add py solution for 560. Subarray Sum Equals K
560. Subarray Sum Equals K: https://leetcode.com/problems/subarray-sum-equals-k/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 560. Subarray Sum Equals K
560. Subarray Sum Equals K: https://leetcode.com/problems/subarray-sum-equals-k/
|
from collections import Counter
class Solution(object):
def subarraySum(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
subsum = [0]
for i, n in enumerate(nums):
subsum.append(subsum[-1] + n)
c = Counter()
ans = 0
for i, n in enumerate(subsum):
if n - k in c:
ans += c[n - k]
c[n] += 1
return ans
|
<commit_before><commit_msg>Add py solution for 560. Subarray Sum Equals K
560. Subarray Sum Equals K: https://leetcode.com/problems/subarray-sum-equals-k/<commit_after>
|
from collections import Counter
class Solution(object):
def subarraySum(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
subsum = [0]
for i, n in enumerate(nums):
subsum.append(subsum[-1] + n)
c = Counter()
ans = 0
for i, n in enumerate(subsum):
if n - k in c:
ans += c[n - k]
c[n] += 1
return ans
|
Add py solution for 560. Subarray Sum Equals K
560. Subarray Sum Equals K: https://leetcode.com/problems/subarray-sum-equals-k/from collections import Counter
class Solution(object):
def subarraySum(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
subsum = [0]
for i, n in enumerate(nums):
subsum.append(subsum[-1] + n)
c = Counter()
ans = 0
for i, n in enumerate(subsum):
if n - k in c:
ans += c[n - k]
c[n] += 1
return ans
|
<commit_before><commit_msg>Add py solution for 560. Subarray Sum Equals K
560. Subarray Sum Equals K: https://leetcode.com/problems/subarray-sum-equals-k/<commit_after>from collections import Counter
class Solution(object):
def subarraySum(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
subsum = [0]
for i, n in enumerate(nums):
subsum.append(subsum[-1] + n)
c = Counter()
ans = 0
for i, n in enumerate(subsum):
if n - k in c:
ans += c[n - k]
c[n] += 1
return ans
|
|
c788e509500025252154b6dbde65d1b6bf9ee3f7
|
nbgrader/tests/apps/test_nbgrader_formgrade.py
|
nbgrader/tests/apps/test_nbgrader_formgrade.py
|
from .. import run_nbgrader
from .base import BaseTestApp
class TestNbGraderFormgrade(BaseTestApp):
def test_help(self):
"""Does the help display without error?"""
run_nbgrader(["formgrade", "--help-all"])
|
Add back in command line test for nbgrader formgrade
|
Add back in command line test for nbgrader formgrade
|
Python
|
bsd-3-clause
|
jupyter/nbgrader,jupyter/nbgrader,jupyter/nbgrader,jupyter/nbgrader,jhamrick/nbgrader,jhamrick/nbgrader,jhamrick/nbgrader,jupyter/nbgrader,jhamrick/nbgrader
|
Add back in command line test for nbgrader formgrade
|
from .. import run_nbgrader
from .base import BaseTestApp
class TestNbGraderFormgrade(BaseTestApp):
def test_help(self):
"""Does the help display without error?"""
run_nbgrader(["formgrade", "--help-all"])
|
<commit_before><commit_msg>Add back in command line test for nbgrader formgrade<commit_after>
|
from .. import run_nbgrader
from .base import BaseTestApp
class TestNbGraderFormgrade(BaseTestApp):
def test_help(self):
"""Does the help display without error?"""
run_nbgrader(["formgrade", "--help-all"])
|
Add back in command line test for nbgrader formgradefrom .. import run_nbgrader
from .base import BaseTestApp
class TestNbGraderFormgrade(BaseTestApp):
def test_help(self):
"""Does the help display without error?"""
run_nbgrader(["formgrade", "--help-all"])
|
<commit_before><commit_msg>Add back in command line test for nbgrader formgrade<commit_after>from .. import run_nbgrader
from .base import BaseTestApp
class TestNbGraderFormgrade(BaseTestApp):
def test_help(self):
"""Does the help display without error?"""
run_nbgrader(["formgrade", "--help-all"])
|
|
d2341cb0cb876e95e1f49f6e1e7b094c67899bfb
|
vm/test_put_rand_jobs.py
|
vm/test_put_rand_jobs.py
|
import httplib
import json
import random
from dispatch import Job
N_JOBS = 10
url = 'jcluster12.appspot.com'
# url = 'localhost:8080'
jobs = []
for i in range(N_JOBS):
job = Job()
job.jobId = i
job.paraEA = random.uniform(0.5, 1.0)
job.paraSigma = random.uniform(0.001, 0.01)
job.running = False
job.finished = False
jobs.append(job)
l = { 'jobs': [ job.getJSON() for job in jobs ]}
# HTTP PUT Job's
connection = httplib.HTTPConnection(url)
headers = {"User-Agent": "python-httplib"}
connection.request('PUT', '/put/', json.dumps(l, indent=2), headers)
result = connection.getresponse()
if result.status == 200:
print 'PUT jobs OK - HTTP 200'
else:
print result.status
connection.close()
|
Add test script to put 10 random jobs with proper param range
|
Add test script to put 10 random jobs with proper param range
|
Python
|
apache-2.0
|
henrique/DistEvo
|
Add test script to put 10 random jobs with proper param range
|
import httplib
import json
import random
from dispatch import Job
N_JOBS = 10
url = 'jcluster12.appspot.com'
# url = 'localhost:8080'
jobs = []
for i in range(N_JOBS):
job = Job()
job.jobId = i
job.paraEA = random.uniform(0.5, 1.0)
job.paraSigma = random.uniform(0.001, 0.01)
job.running = False
job.finished = False
jobs.append(job)
l = { 'jobs': [ job.getJSON() for job in jobs ]}
# HTTP PUT Job's
connection = httplib.HTTPConnection(url)
headers = {"User-Agent": "python-httplib"}
connection.request('PUT', '/put/', json.dumps(l, indent=2), headers)
result = connection.getresponse()
if result.status == 200:
print 'PUT jobs OK - HTTP 200'
else:
print result.status
connection.close()
|
<commit_before><commit_msg>Add test script to put 10 random jobs with proper param range<commit_after>
|
import httplib
import json
import random
from dispatch import Job
N_JOBS = 10
url = 'jcluster12.appspot.com'
# url = 'localhost:8080'
jobs = []
for i in range(N_JOBS):
job = Job()
job.jobId = i
job.paraEA = random.uniform(0.5, 1.0)
job.paraSigma = random.uniform(0.001, 0.01)
job.running = False
job.finished = False
jobs.append(job)
l = { 'jobs': [ job.getJSON() for job in jobs ]}
# HTTP PUT Job's
connection = httplib.HTTPConnection(url)
headers = {"User-Agent": "python-httplib"}
connection.request('PUT', '/put/', json.dumps(l, indent=2), headers)
result = connection.getresponse()
if result.status == 200:
print 'PUT jobs OK - HTTP 200'
else:
print result.status
connection.close()
|
Add test script to put 10 random jobs with proper param rangeimport httplib
import json
import random
from dispatch import Job
N_JOBS = 10
url = 'jcluster12.appspot.com'
# url = 'localhost:8080'
jobs = []
for i in range(N_JOBS):
job = Job()
job.jobId = i
job.paraEA = random.uniform(0.5, 1.0)
job.paraSigma = random.uniform(0.001, 0.01)
job.running = False
job.finished = False
jobs.append(job)
l = { 'jobs': [ job.getJSON() for job in jobs ]}
# HTTP PUT Job's
connection = httplib.HTTPConnection(url)
headers = {"User-Agent": "python-httplib"}
connection.request('PUT', '/put/', json.dumps(l, indent=2), headers)
result = connection.getresponse()
if result.status == 200:
print 'PUT jobs OK - HTTP 200'
else:
print result.status
connection.close()
|
<commit_before><commit_msg>Add test script to put 10 random jobs with proper param range<commit_after>import httplib
import json
import random
from dispatch import Job
N_JOBS = 10
url = 'jcluster12.appspot.com'
# url = 'localhost:8080'
jobs = []
for i in range(N_JOBS):
job = Job()
job.jobId = i
job.paraEA = random.uniform(0.5, 1.0)
job.paraSigma = random.uniform(0.001, 0.01)
job.running = False
job.finished = False
jobs.append(job)
l = { 'jobs': [ job.getJSON() for job in jobs ]}
# HTTP PUT Job's
connection = httplib.HTTPConnection(url)
headers = {"User-Agent": "python-httplib"}
connection.request('PUT', '/put/', json.dumps(l, indent=2), headers)
result = connection.getresponse()
if result.status == 200:
print 'PUT jobs OK - HTTP 200'
else:
print result.status
connection.close()
|
|
bbabd09aa76fddd9a1ace88d17a07a242c8c37f1
|
examples/test_mfa_login.py
|
examples/test_mfa_login.py
|
from seleniumbase import BaseCase
class TestMFALogin(BaseCase):
def test_mfa_login(self):
self.open("https://seleniumbase.io/realworld/login")
self.type("#username", "demo_user")
self.type("#password", "secret_pass")
totp_code = self.get_totp_code("GAXG2MTEOR3DMMDG")
self.type("#totpcode", totp_code)
self.click("#log-in")
self.highlight("img#image1")
self.assert_text("Welcome!", "h1")
self.save_screenshot_to_logs()
|
Add an example test for Multi-Factor Auth Login
|
Add an example test for Multi-Factor Auth Login
|
Python
|
mit
|
mdmintz/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase
|
Add an example test for Multi-Factor Auth Login
|
from seleniumbase import BaseCase
class TestMFALogin(BaseCase):
def test_mfa_login(self):
self.open("https://seleniumbase.io/realworld/login")
self.type("#username", "demo_user")
self.type("#password", "secret_pass")
totp_code = self.get_totp_code("GAXG2MTEOR3DMMDG")
self.type("#totpcode", totp_code)
self.click("#log-in")
self.highlight("img#image1")
self.assert_text("Welcome!", "h1")
self.save_screenshot_to_logs()
|
<commit_before><commit_msg>Add an example test for Multi-Factor Auth Login<commit_after>
|
from seleniumbase import BaseCase
class TestMFALogin(BaseCase):
def test_mfa_login(self):
self.open("https://seleniumbase.io/realworld/login")
self.type("#username", "demo_user")
self.type("#password", "secret_pass")
totp_code = self.get_totp_code("GAXG2MTEOR3DMMDG")
self.type("#totpcode", totp_code)
self.click("#log-in")
self.highlight("img#image1")
self.assert_text("Welcome!", "h1")
self.save_screenshot_to_logs()
|
Add an example test for Multi-Factor Auth Loginfrom seleniumbase import BaseCase
class TestMFALogin(BaseCase):
def test_mfa_login(self):
self.open("https://seleniumbase.io/realworld/login")
self.type("#username", "demo_user")
self.type("#password", "secret_pass")
totp_code = self.get_totp_code("GAXG2MTEOR3DMMDG")
self.type("#totpcode", totp_code)
self.click("#log-in")
self.highlight("img#image1")
self.assert_text("Welcome!", "h1")
self.save_screenshot_to_logs()
|
<commit_before><commit_msg>Add an example test for Multi-Factor Auth Login<commit_after>from seleniumbase import BaseCase
class TestMFALogin(BaseCase):
def test_mfa_login(self):
self.open("https://seleniumbase.io/realworld/login")
self.type("#username", "demo_user")
self.type("#password", "secret_pass")
totp_code = self.get_totp_code("GAXG2MTEOR3DMMDG")
self.type("#totpcode", totp_code)
self.click("#log-in")
self.highlight("img#image1")
self.assert_text("Welcome!", "h1")
self.save_screenshot_to_logs()
|
|
2f611cccedfcb75dd23167f27d556f4f9b3e9a70
|
quantecon/optimize/tests/test_root_finding.py
|
quantecon/optimize/tests/test_root_finding.py
|
import numpy as np
from numpy.testing import assert_almost_equal, assert_allclose
from numba import njit
from quantecon.optimize import newton, newton_secant
@njit
def func(x):
"""
Function for testing on.
"""
return (x**3 - 1)
@njit
def func_prime(x):
"""
Derivative for func.
"""
return (3*x**2)
@njit
def func_two(x):
"""
Harder function for testing on.
"""
return np.sin(4 * (x - 1/4)) + x + x**20 - 1
@njit
def func_two_prime(x):
"""
Derivative for func_two.
"""
return 4*np.cos(4*(x - 1/4)) + 20*x**19 + 1
def test_newton_basic():
"""
Uses the function f defined above to test the scalar maximization
routine.
"""
true_fval = 1.0
fval = newton(func, 5, func_prime)
assert_almost_equal(true_fval, fval.root, decimal=4)
def test_newton_basic_two():
"""
Uses the function f defined above to test the scalar maximization
routine.
"""
true_fval = 1.0
fval = newton(func, 5, func_prime)
assert_allclose(true_fval, fval.root, rtol=1e-5, atol=0)
def test_newton_hard():
"""
Harder test for convergence.
"""
true_fval = 0.408
fval = newton(func_two, 0.4, func_two_prime)
assert_allclose(true_fval, fval.root, rtol=1e-5, atol=0.01)
def test_secant_basic():
"""
Basic test for secant option.
"""
true_fval = 1.0
fval = newton_secant(func, 5)
assert_allclose(true_fval, fval.root, rtol=1e-5, atol=0.001)
def test_secant_hard():
"""
Harder test for convergence for secant function.
"""
true_fval = 0.408
fval = newton_secant(func_two, 0.4)
assert_allclose(true_fval, fval.root, rtol=1e-5, atol=0.01)
# executing testcases.
if __name__ == '__main__':
import sys
import nose
argv = sys.argv[:]
argv.append('--verbose')
argv.append('--nocapture')
nose.main(argv=argv, defaultTest=__file__)
|
Add test cases for newton methods
|
Add test cases for newton methods
|
Python
|
bsd-3-clause
|
oyamad/QuantEcon.py,QuantEcon/QuantEcon.py,QuantEcon/QuantEcon.py,oyamad/QuantEcon.py
|
Add test cases for newton methods
|
import numpy as np
from numpy.testing import assert_almost_equal, assert_allclose
from numba import njit
from quantecon.optimize import newton, newton_secant
@njit
def func(x):
"""
Function for testing on.
"""
return (x**3 - 1)
@njit
def func_prime(x):
"""
Derivative for func.
"""
return (3*x**2)
@njit
def func_two(x):
"""
Harder function for testing on.
"""
return np.sin(4 * (x - 1/4)) + x + x**20 - 1
@njit
def func_two_prime(x):
"""
Derivative for func_two.
"""
return 4*np.cos(4*(x - 1/4)) + 20*x**19 + 1
def test_newton_basic():
"""
Uses the function f defined above to test the scalar maximization
routine.
"""
true_fval = 1.0
fval = newton(func, 5, func_prime)
assert_almost_equal(true_fval, fval.root, decimal=4)
def test_newton_basic_two():
"""
Uses the function f defined above to test the scalar maximization
routine.
"""
true_fval = 1.0
fval = newton(func, 5, func_prime)
assert_allclose(true_fval, fval.root, rtol=1e-5, atol=0)
def test_newton_hard():
"""
Harder test for convergence.
"""
true_fval = 0.408
fval = newton(func_two, 0.4, func_two_prime)
assert_allclose(true_fval, fval.root, rtol=1e-5, atol=0.01)
def test_secant_basic():
"""
Basic test for secant option.
"""
true_fval = 1.0
fval = newton_secant(func, 5)
assert_allclose(true_fval, fval.root, rtol=1e-5, atol=0.001)
def test_secant_hard():
"""
Harder test for convergence for secant function.
"""
true_fval = 0.408
fval = newton_secant(func_two, 0.4)
assert_allclose(true_fval, fval.root, rtol=1e-5, atol=0.01)
# executing testcases.
if __name__ == '__main__':
import sys
import nose
argv = sys.argv[:]
argv.append('--verbose')
argv.append('--nocapture')
nose.main(argv=argv, defaultTest=__file__)
|
<commit_before><commit_msg>Add test cases for newton methods<commit_after>
|
import numpy as np
from numpy.testing import assert_almost_equal, assert_allclose
from numba import njit
from quantecon.optimize import newton, newton_secant
@njit
def func(x):
"""
Function for testing on.
"""
return (x**3 - 1)
@njit
def func_prime(x):
"""
Derivative for func.
"""
return (3*x**2)
@njit
def func_two(x):
"""
Harder function for testing on.
"""
return np.sin(4 * (x - 1/4)) + x + x**20 - 1
@njit
def func_two_prime(x):
"""
Derivative for func_two.
"""
return 4*np.cos(4*(x - 1/4)) + 20*x**19 + 1
def test_newton_basic():
"""
Uses the function f defined above to test the scalar maximization
routine.
"""
true_fval = 1.0
fval = newton(func, 5, func_prime)
assert_almost_equal(true_fval, fval.root, decimal=4)
def test_newton_basic_two():
"""
Uses the function f defined above to test the scalar maximization
routine.
"""
true_fval = 1.0
fval = newton(func, 5, func_prime)
assert_allclose(true_fval, fval.root, rtol=1e-5, atol=0)
def test_newton_hard():
"""
Harder test for convergence.
"""
true_fval = 0.408
fval = newton(func_two, 0.4, func_two_prime)
assert_allclose(true_fval, fval.root, rtol=1e-5, atol=0.01)
def test_secant_basic():
"""
Basic test for secant option.
"""
true_fval = 1.0
fval = newton_secant(func, 5)
assert_allclose(true_fval, fval.root, rtol=1e-5, atol=0.001)
def test_secant_hard():
"""
Harder test for convergence for secant function.
"""
true_fval = 0.408
fval = newton_secant(func_two, 0.4)
assert_allclose(true_fval, fval.root, rtol=1e-5, atol=0.01)
# executing testcases.
if __name__ == '__main__':
import sys
import nose
argv = sys.argv[:]
argv.append('--verbose')
argv.append('--nocapture')
nose.main(argv=argv, defaultTest=__file__)
|
Add test cases for newton methodsimport numpy as np
from numpy.testing import assert_almost_equal, assert_allclose
from numba import njit
from quantecon.optimize import newton, newton_secant
@njit
def func(x):
"""
Function for testing on.
"""
return (x**3 - 1)
@njit
def func_prime(x):
"""
Derivative for func.
"""
return (3*x**2)
@njit
def func_two(x):
"""
Harder function for testing on.
"""
return np.sin(4 * (x - 1/4)) + x + x**20 - 1
@njit
def func_two_prime(x):
"""
Derivative for func_two.
"""
return 4*np.cos(4*(x - 1/4)) + 20*x**19 + 1
def test_newton_basic():
"""
Uses the function f defined above to test the scalar maximization
routine.
"""
true_fval = 1.0
fval = newton(func, 5, func_prime)
assert_almost_equal(true_fval, fval.root, decimal=4)
def test_newton_basic_two():
"""
Uses the function f defined above to test the scalar maximization
routine.
"""
true_fval = 1.0
fval = newton(func, 5, func_prime)
assert_allclose(true_fval, fval.root, rtol=1e-5, atol=0)
def test_newton_hard():
"""
Harder test for convergence.
"""
true_fval = 0.408
fval = newton(func_two, 0.4, func_two_prime)
assert_allclose(true_fval, fval.root, rtol=1e-5, atol=0.01)
def test_secant_basic():
"""
Basic test for secant option.
"""
true_fval = 1.0
fval = newton_secant(func, 5)
assert_allclose(true_fval, fval.root, rtol=1e-5, atol=0.001)
def test_secant_hard():
"""
Harder test for convergence for secant function.
"""
true_fval = 0.408
fval = newton_secant(func_two, 0.4)
assert_allclose(true_fval, fval.root, rtol=1e-5, atol=0.01)
# executing testcases.
if __name__ == '__main__':
import sys
import nose
argv = sys.argv[:]
argv.append('--verbose')
argv.append('--nocapture')
nose.main(argv=argv, defaultTest=__file__)
|
<commit_before><commit_msg>Add test cases for newton methods<commit_after>import numpy as np
from numpy.testing import assert_almost_equal, assert_allclose
from numba import njit
from quantecon.optimize import newton, newton_secant
@njit
def func(x):
"""
Function for testing on.
"""
return (x**3 - 1)
@njit
def func_prime(x):
"""
Derivative for func.
"""
return (3*x**2)
@njit
def func_two(x):
"""
Harder function for testing on.
"""
return np.sin(4 * (x - 1/4)) + x + x**20 - 1
@njit
def func_two_prime(x):
"""
Derivative for func_two.
"""
return 4*np.cos(4*(x - 1/4)) + 20*x**19 + 1
def test_newton_basic():
"""
Uses the function f defined above to test the scalar maximization
routine.
"""
true_fval = 1.0
fval = newton(func, 5, func_prime)
assert_almost_equal(true_fval, fval.root, decimal=4)
def test_newton_basic_two():
"""
Uses the function f defined above to test the scalar maximization
routine.
"""
true_fval = 1.0
fval = newton(func, 5, func_prime)
assert_allclose(true_fval, fval.root, rtol=1e-5, atol=0)
def test_newton_hard():
"""
Harder test for convergence.
"""
true_fval = 0.408
fval = newton(func_two, 0.4, func_two_prime)
assert_allclose(true_fval, fval.root, rtol=1e-5, atol=0.01)
def test_secant_basic():
"""
Basic test for secant option.
"""
true_fval = 1.0
fval = newton_secant(func, 5)
assert_allclose(true_fval, fval.root, rtol=1e-5, atol=0.001)
def test_secant_hard():
"""
Harder test for convergence for secant function.
"""
true_fval = 0.408
fval = newton_secant(func_two, 0.4)
assert_allclose(true_fval, fval.root, rtol=1e-5, atol=0.01)
# executing testcases.
if __name__ == '__main__':
import sys
import nose
argv = sys.argv[:]
argv.append('--verbose')
argv.append('--nocapture')
nose.main(argv=argv, defaultTest=__file__)
|
|
95f0383393d88328d49ae2af7a042be812d98372
|
sql/branch.py
|
sql/branch.py
|
from gratipay import wireup
env = wireup.env()
db = wireup.db(env)
participants = []
with open('./sql/emails.txt') as f:
emails = [line.rstrip() for line in f]
participants = db.all("""
SELECT p.*::participants
FROM participants p
WHERE email_address IN %s
""", (tuple(emails), ))
for p in participants:
p.queue_email('initial')
|
Add script to queue emails from file
|
Add script to queue emails from file
|
Python
|
mit
|
gratipay/gratipay.com,gratipay/gratipay.com,mccolgst/www.gittip.com,studio666/gratipay.com,studio666/gratipay.com,eXcomm/gratipay.com,eXcomm/gratipay.com,eXcomm/gratipay.com,studio666/gratipay.com,eXcomm/gratipay.com,mccolgst/www.gittip.com,mccolgst/www.gittip.com,studio666/gratipay.com,gratipay/gratipay.com,mccolgst/www.gittip.com,gratipay/gratipay.com
|
Add script to queue emails from file
|
from gratipay import wireup
env = wireup.env()
db = wireup.db(env)
participants = []
with open('./sql/emails.txt') as f:
emails = [line.rstrip() for line in f]
participants = db.all("""
SELECT p.*::participants
FROM participants p
WHERE email_address IN %s
""", (tuple(emails), ))
for p in participants:
p.queue_email('initial')
|
<commit_before><commit_msg>Add script to queue emails from file<commit_after>
|
from gratipay import wireup
env = wireup.env()
db = wireup.db(env)
participants = []
with open('./sql/emails.txt') as f:
emails = [line.rstrip() for line in f]
participants = db.all("""
SELECT p.*::participants
FROM participants p
WHERE email_address IN %s
""", (tuple(emails), ))
for p in participants:
p.queue_email('initial')
|
Add script to queue emails from filefrom gratipay import wireup
env = wireup.env()
db = wireup.db(env)
participants = []
with open('./sql/emails.txt') as f:
emails = [line.rstrip() for line in f]
participants = db.all("""
SELECT p.*::participants
FROM participants p
WHERE email_address IN %s
""", (tuple(emails), ))
for p in participants:
p.queue_email('initial')
|
<commit_before><commit_msg>Add script to queue emails from file<commit_after>from gratipay import wireup
env = wireup.env()
db = wireup.db(env)
participants = []
with open('./sql/emails.txt') as f:
emails = [line.rstrip() for line in f]
participants = db.all("""
SELECT p.*::participants
FROM participants p
WHERE email_address IN %s
""", (tuple(emails), ))
for p in participants:
p.queue_email('initial')
|
|
de73dfd4445da3ff552a82cf53d239f2653f7ade
|
tests/pytests/functional/modules/test_sdb.py
|
tests/pytests/functional/modules/test_sdb.py
|
import tempfile
import pytest
@pytest.fixture(scope="module")
def minion_config_overrides():
with tempfile.TemporaryDirectory() as tempdir:
yield {
"mydude": {
"driver": "sqlite3",
"database": tempdir + "/test_sdb.sq3",
"table": __name__,
"create_table": True,
}
}
@pytest.mark.xfail
@pytest.mark.parametrize(
"expected_value",
(
"foo",
b"bang",
["cool", b"guy", "dude", b"\x00\x31\x99\x42"],
{
"this": b"has some",
b"complicated": "things",
"all": [{"going": "on"}, {"but": "that", 42: "should be fine"}],
},
),
)
def test_setting_sdb_values_with_text_and_bytes_should_retain_data_types(
expected_value, modules
):
modules.sdb.set("sdb://mydude/fnord", expected_value)
actual_value = modules.sdb.get("sdb://mydude/fnord", strict=True)
assert actual_value == expected_value
|
Add sqlite sdb smoke tests
|
Add sqlite sdb smoke tests
Right now sqlite sdb.get will fail for most purposes because the text
values are not decoded. This exhibits that behavior (except of course
for the lone byte-string parameter).
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
Add sqlite sdb smoke tests
Right now sqlite sdb.get will fail for most purposes because the text
values are not decoded. This exhibits that behavior (except of course
for the lone byte-string parameter).
|
import tempfile
import pytest
@pytest.fixture(scope="module")
def minion_config_overrides():
with tempfile.TemporaryDirectory() as tempdir:
yield {
"mydude": {
"driver": "sqlite3",
"database": tempdir + "/test_sdb.sq3",
"table": __name__,
"create_table": True,
}
}
@pytest.mark.xfail
@pytest.mark.parametrize(
"expected_value",
(
"foo",
b"bang",
["cool", b"guy", "dude", b"\x00\x31\x99\x42"],
{
"this": b"has some",
b"complicated": "things",
"all": [{"going": "on"}, {"but": "that", 42: "should be fine"}],
},
),
)
def test_setting_sdb_values_with_text_and_bytes_should_retain_data_types(
expected_value, modules
):
modules.sdb.set("sdb://mydude/fnord", expected_value)
actual_value = modules.sdb.get("sdb://mydude/fnord", strict=True)
assert actual_value == expected_value
|
<commit_before><commit_msg>Add sqlite sdb smoke tests
Right now sqlite sdb.get will fail for most purposes because the text
values are not decoded. This exhibits that behavior (except of course
for the lone byte-string parameter).<commit_after>
|
import tempfile
import pytest
@pytest.fixture(scope="module")
def minion_config_overrides():
with tempfile.TemporaryDirectory() as tempdir:
yield {
"mydude": {
"driver": "sqlite3",
"database": tempdir + "/test_sdb.sq3",
"table": __name__,
"create_table": True,
}
}
@pytest.mark.xfail
@pytest.mark.parametrize(
"expected_value",
(
"foo",
b"bang",
["cool", b"guy", "dude", b"\x00\x31\x99\x42"],
{
"this": b"has some",
b"complicated": "things",
"all": [{"going": "on"}, {"but": "that", 42: "should be fine"}],
},
),
)
def test_setting_sdb_values_with_text_and_bytes_should_retain_data_types(
expected_value, modules
):
modules.sdb.set("sdb://mydude/fnord", expected_value)
actual_value = modules.sdb.get("sdb://mydude/fnord", strict=True)
assert actual_value == expected_value
|
Add sqlite sdb smoke tests
Right now sqlite sdb.get will fail for most purposes because the text
values are not decoded. This exhibits that behavior (except of course
for the lone byte-string parameter).import tempfile
import pytest
@pytest.fixture(scope="module")
def minion_config_overrides():
with tempfile.TemporaryDirectory() as tempdir:
yield {
"mydude": {
"driver": "sqlite3",
"database": tempdir + "/test_sdb.sq3",
"table": __name__,
"create_table": True,
}
}
@pytest.mark.xfail
@pytest.mark.parametrize(
"expected_value",
(
"foo",
b"bang",
["cool", b"guy", "dude", b"\x00\x31\x99\x42"],
{
"this": b"has some",
b"complicated": "things",
"all": [{"going": "on"}, {"but": "that", 42: "should be fine"}],
},
),
)
def test_setting_sdb_values_with_text_and_bytes_should_retain_data_types(
expected_value, modules
):
modules.sdb.set("sdb://mydude/fnord", expected_value)
actual_value = modules.sdb.get("sdb://mydude/fnord", strict=True)
assert actual_value == expected_value
|
<commit_before><commit_msg>Add sqlite sdb smoke tests
Right now sqlite sdb.get will fail for most purposes because the text
values are not decoded. This exhibits that behavior (except of course
for the lone byte-string parameter).<commit_after>import tempfile
import pytest
@pytest.fixture(scope="module")
def minion_config_overrides():
with tempfile.TemporaryDirectory() as tempdir:
yield {
"mydude": {
"driver": "sqlite3",
"database": tempdir + "/test_sdb.sq3",
"table": __name__,
"create_table": True,
}
}
@pytest.mark.xfail
@pytest.mark.parametrize(
"expected_value",
(
"foo",
b"bang",
["cool", b"guy", "dude", b"\x00\x31\x99\x42"],
{
"this": b"has some",
b"complicated": "things",
"all": [{"going": "on"}, {"but": "that", 42: "should be fine"}],
},
),
)
def test_setting_sdb_values_with_text_and_bytes_should_retain_data_types(
expected_value, modules
):
modules.sdb.set("sdb://mydude/fnord", expected_value)
actual_value = modules.sdb.get("sdb://mydude/fnord", strict=True)
assert actual_value == expected_value
|
|
6089d06942e7516d64c68c2e1cbab04d3500bcc7
|
robocrm/migrations/0054_auto_20160620_2356.py
|
robocrm/migrations/0054_auto_20160620_2356.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import robocrm.fields
import robocrm.models
import django.core.validators
import django.core.files.storage
class Migration(migrations.Migration):
dependencies = [
('robocrm', '0053_robouser_resume'),
]
operations = [
migrations.AlterField(
model_name='robouser',
name='magnetic',
field=robocrm.fields.CharNullField(help_text='9 Character Magnetic Card ID (found on Student ID). Only you can see this ID.', null=True, blank=True, max_length=9, unique=True, validators=[django.core.validators.RegexValidator(code='invalid_magnetic', message='Magnetic must be 9 numeric characters(0-9)', regex='^[0-9]{9}$')]),
preserve_default=True,
),
migrations.AlterField(
model_name='robouser',
name='resume',
field=models.FileField(upload_to=robocrm.models.RoboUser.resume_upload_to, help_text='Upload your resume to be included in the Roboclub resume book (pdf format only)', null=True, blank=True, storage=django.core.files.storage.FileSystemStorage(location='/home/aaron/roboclub/roboticsclub.org/private', base_url='/private/')),
preserve_default=True,
),
migrations.AlterField(
model_name='robouser',
name='rfid',
field=robocrm.fields.CharNullField(help_text='8 Hex-digit RFID. Some card readers return decimal number in opposite endianness.', null=True, blank=True, max_length=10, unique=True, validators=[django.core.validators.RegexValidator(code='invalid_rfid', message='RFID must be 8 hexadecimal characters(0-9, A-F)', regex='^[A-F0-9]{8}$')]),
preserve_default=True,
),
]
|
Add migration for magnetic, resume, and rfid
|
Add migration for magnetic, resume, and rfid
|
Python
|
mit
|
CMU-Robotics-Club/roboticsclub.org,CMU-Robotics-Club/roboticsclub.org,CMU-Robotics-Club/roboticsclub.org
|
Add migration for magnetic, resume, and rfid
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import robocrm.fields
import robocrm.models
import django.core.validators
import django.core.files.storage
class Migration(migrations.Migration):
dependencies = [
('robocrm', '0053_robouser_resume'),
]
operations = [
migrations.AlterField(
model_name='robouser',
name='magnetic',
field=robocrm.fields.CharNullField(help_text='9 Character Magnetic Card ID (found on Student ID). Only you can see this ID.', null=True, blank=True, max_length=9, unique=True, validators=[django.core.validators.RegexValidator(code='invalid_magnetic', message='Magnetic must be 9 numeric characters(0-9)', regex='^[0-9]{9}$')]),
preserve_default=True,
),
migrations.AlterField(
model_name='robouser',
name='resume',
field=models.FileField(upload_to=robocrm.models.RoboUser.resume_upload_to, help_text='Upload your resume to be included in the Roboclub resume book (pdf format only)', null=True, blank=True, storage=django.core.files.storage.FileSystemStorage(location='/home/aaron/roboclub/roboticsclub.org/private', base_url='/private/')),
preserve_default=True,
),
migrations.AlterField(
model_name='robouser',
name='rfid',
field=robocrm.fields.CharNullField(help_text='8 Hex-digit RFID. Some card readers return decimal number in opposite endianness.', null=True, blank=True, max_length=10, unique=True, validators=[django.core.validators.RegexValidator(code='invalid_rfid', message='RFID must be 8 hexadecimal characters(0-9, A-F)', regex='^[A-F0-9]{8}$')]),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add migration for magnetic, resume, and rfid<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import robocrm.fields
import robocrm.models
import django.core.validators
import django.core.files.storage
class Migration(migrations.Migration):
dependencies = [
('robocrm', '0053_robouser_resume'),
]
operations = [
migrations.AlterField(
model_name='robouser',
name='magnetic',
field=robocrm.fields.CharNullField(help_text='9 Character Magnetic Card ID (found on Student ID). Only you can see this ID.', null=True, blank=True, max_length=9, unique=True, validators=[django.core.validators.RegexValidator(code='invalid_magnetic', message='Magnetic must be 9 numeric characters(0-9)', regex='^[0-9]{9}$')]),
preserve_default=True,
),
migrations.AlterField(
model_name='robouser',
name='resume',
field=models.FileField(upload_to=robocrm.models.RoboUser.resume_upload_to, help_text='Upload your resume to be included in the Roboclub resume book (pdf format only)', null=True, blank=True, storage=django.core.files.storage.FileSystemStorage(location='/home/aaron/roboclub/roboticsclub.org/private', base_url='/private/')),
preserve_default=True,
),
migrations.AlterField(
model_name='robouser',
name='rfid',
field=robocrm.fields.CharNullField(help_text='8 Hex-digit RFID. Some card readers return decimal number in opposite endianness.', null=True, blank=True, max_length=10, unique=True, validators=[django.core.validators.RegexValidator(code='invalid_rfid', message='RFID must be 8 hexadecimal characters(0-9, A-F)', regex='^[A-F0-9]{8}$')]),
preserve_default=True,
),
]
|
Add migration for magnetic, resume, and rfid# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import robocrm.fields
import robocrm.models
import django.core.validators
import django.core.files.storage
class Migration(migrations.Migration):
dependencies = [
('robocrm', '0053_robouser_resume'),
]
operations = [
migrations.AlterField(
model_name='robouser',
name='magnetic',
field=robocrm.fields.CharNullField(help_text='9 Character Magnetic Card ID (found on Student ID). Only you can see this ID.', null=True, blank=True, max_length=9, unique=True, validators=[django.core.validators.RegexValidator(code='invalid_magnetic', message='Magnetic must be 9 numeric characters(0-9)', regex='^[0-9]{9}$')]),
preserve_default=True,
),
migrations.AlterField(
model_name='robouser',
name='resume',
field=models.FileField(upload_to=robocrm.models.RoboUser.resume_upload_to, help_text='Upload your resume to be included in the Roboclub resume book (pdf format only)', null=True, blank=True, storage=django.core.files.storage.FileSystemStorage(location='/home/aaron/roboclub/roboticsclub.org/private', base_url='/private/')),
preserve_default=True,
),
migrations.AlterField(
model_name='robouser',
name='rfid',
field=robocrm.fields.CharNullField(help_text='8 Hex-digit RFID. Some card readers return decimal number in opposite endianness.', null=True, blank=True, max_length=10, unique=True, validators=[django.core.validators.RegexValidator(code='invalid_rfid', message='RFID must be 8 hexadecimal characters(0-9, A-F)', regex='^[A-F0-9]{8}$')]),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add migration for magnetic, resume, and rfid<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import robocrm.fields
import robocrm.models
import django.core.validators
import django.core.files.storage
class Migration(migrations.Migration):
dependencies = [
('robocrm', '0053_robouser_resume'),
]
operations = [
migrations.AlterField(
model_name='robouser',
name='magnetic',
field=robocrm.fields.CharNullField(help_text='9 Character Magnetic Card ID (found on Student ID). Only you can see this ID.', null=True, blank=True, max_length=9, unique=True, validators=[django.core.validators.RegexValidator(code='invalid_magnetic', message='Magnetic must be 9 numeric characters(0-9)', regex='^[0-9]{9}$')]),
preserve_default=True,
),
migrations.AlterField(
model_name='robouser',
name='resume',
field=models.FileField(upload_to=robocrm.models.RoboUser.resume_upload_to, help_text='Upload your resume to be included in the Roboclub resume book (pdf format only)', null=True, blank=True, storage=django.core.files.storage.FileSystemStorage(location='/home/aaron/roboclub/roboticsclub.org/private', base_url='/private/')),
preserve_default=True,
),
migrations.AlterField(
model_name='robouser',
name='rfid',
field=robocrm.fields.CharNullField(help_text='8 Hex-digit RFID. Some card readers return decimal number in opposite endianness.', null=True, blank=True, max_length=10, unique=True, validators=[django.core.validators.RegexValidator(code='invalid_rfid', message='RFID must be 8 hexadecimal characters(0-9, A-F)', regex='^[A-F0-9]{8}$')]),
preserve_default=True,
),
]
|